focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Interceptor "after" hook: clears the RequestData bound to the current thread
// (set during the "before" phase) and emits the after-point HTTP request log,
// then returns the context unchanged.
@Override public ExecuteContext after(ExecuteContext context) { ThreadLocalUtils.removeRequestData(); LogUtils.printHttpRequestAfterPoint(context); return context; }
// Test: after() must remove the thread-local RequestData that was set beforehand.
@Test public void testAfter() { ThreadLocalUtils.setRequestData(new RequestData(Collections.emptyMap(), "", "")); interceptor.after(context); Assert.assertNull(ThreadLocalUtils.getRequestData()); }
// Applies the configured outlier treatment to a value outside the normalization
// range: AS_IS interpolates with the two nearest linear norms (first pair for
// values below the first orig, last pair otherwise), AS_MISSING_VALUES
// substitutes mapMissingTo, AS_EXTREME_VALUES clamps to the boundary norm.
// Unknown methods fail fast with a KiePMMLException.
Number evaluateOutlierValue(final Number input) { switch (outlierTreatmentMethod) { case AS_IS: KiePMMLLinearNorm[] limitLinearNorms; if (input.doubleValue() < firstLinearNorm.getOrig()) { limitLinearNorms = linearNorms.subList(0, 2).toArray(new KiePMMLLinearNorm[0]); } else { limitLinearNorms = linearNorms.subList(linearNorms.size() -2, linearNorms.size()).toArray(new KiePMMLLinearNorm[0]); } return evaluate(input, limitLinearNorms); case AS_MISSING_VALUES: return mapMissingTo; case AS_EXTREME_VALUES: return input.doubleValue() < firstLinearNorm.getOrig() ? firstLinearNorm.getNorm() : lastLinearNorm.getNorm(); default: throw new KiePMMLException("Unknown outlierTreatmentMethod " + outlierTreatmentMethod); } }
// Test: with AS_MISSING_VALUES treatment, any outlier input maps to the missing value.
@Test void evaluateOutlierValueAsMissingValues() { Number missingValue = 45; KiePMMLNormContinuous kiePMMLNormContinuous = getKiePMMLNormContinuous(null, OUTLIER_TREATMENT_METHOD.AS_MISSING_VALUES, missingValue); Number input = 23; Number retrieved = kiePMMLNormContinuous.evaluateOutlierValue(input); assertThat(retrieved).isEqualTo(missingValue); input = 41; retrieved = kiePMMLNormContinuous.evaluateOutlierValue(input); assertThat(retrieved).isEqualTo(missingValue); }
// Egress fragment dispatcher: decodes the SBE message header, forwards messages
// from foreign schemas to the optional listener extension (or fails fast), then
// dispatches by template id — session messages, session events, new-leader
// events, and admin responses — invoking the listener only when the decoded
// cluster session id matches this adapter's session. Unknown templates are
// silently ignored.
@SuppressWarnings("MethodLength") public void onFragment(final DirectBuffer buffer, final int offset, final int length, final Header header) { messageHeaderDecoder.wrap(buffer, offset); final int templateId = messageHeaderDecoder.templateId(); final int schemaId = messageHeaderDecoder.schemaId(); if (schemaId != MessageHeaderDecoder.SCHEMA_ID) { if (listenerExtension != null) { listenerExtension.onExtensionMessage( messageHeaderDecoder.blockLength(), templateId, schemaId, messageHeaderDecoder.version(), buffer, offset + MessageHeaderDecoder.ENCODED_LENGTH, length - MessageHeaderDecoder.ENCODED_LENGTH); return; } throw new ClusterException("expected schemaId=" + MessageHeaderDecoder.SCHEMA_ID + ", actual=" + schemaId); } switch (templateId) { case SessionMessageHeaderDecoder.TEMPLATE_ID: { sessionMessageHeaderDecoder.wrap( buffer, offset + MessageHeaderDecoder.ENCODED_LENGTH, messageHeaderDecoder.blockLength(), messageHeaderDecoder.version()); final long sessionId = sessionMessageHeaderDecoder.clusterSessionId(); if (sessionId == clusterSessionId) { listener.onMessage( sessionId, sessionMessageHeaderDecoder.timestamp(), buffer, offset + SESSION_HEADER_LENGTH, length - SESSION_HEADER_LENGTH, header); } break; } case SessionEventDecoder.TEMPLATE_ID: { sessionEventDecoder.wrap( buffer, offset + MessageHeaderDecoder.ENCODED_LENGTH, messageHeaderDecoder.blockLength(), messageHeaderDecoder.version()); final long sessionId = sessionEventDecoder.clusterSessionId(); if (sessionId == clusterSessionId) { listener.onSessionEvent( sessionEventDecoder.correlationId(), sessionId, sessionEventDecoder.leadershipTermId(), sessionEventDecoder.leaderMemberId(), sessionEventDecoder.code(), sessionEventDecoder.detail()); } break; } case NewLeaderEventDecoder.TEMPLATE_ID: { newLeaderEventDecoder.wrap( buffer, offset + MessageHeaderDecoder.ENCODED_LENGTH, messageHeaderDecoder.blockLength(), messageHeaderDecoder.version()); final long sessionId = newLeaderEventDecoder.clusterSessionId(); if (sessionId == clusterSessionId) { listener.onNewLeader( sessionId, newLeaderEventDecoder.leadershipTermId(), newLeaderEventDecoder.leaderMemberId(), newLeaderEventDecoder.ingressEndpoints()); } break; } case AdminResponseDecoder.TEMPLATE_ID: { adminResponseDecoder.wrap( buffer, offset + MessageHeaderDecoder.ENCODED_LENGTH, messageHeaderDecoder.blockLength(), messageHeaderDecoder.version()); final long sessionId = adminResponseDecoder.clusterSessionId(); if (sessionId == clusterSessionId) { final long correlationId = adminResponseDecoder.correlationId(); final AdminRequestType requestType = adminResponseDecoder.requestType(); final AdminResponseCode responseCode = adminResponseDecoder.responseCode(); final String message = adminResponseDecoder.message(); final int payloadOffset = adminResponseDecoder.offset() + AdminResponseDecoder.BLOCK_LENGTH + AdminResponseDecoder.messageHeaderLength() + message.length() + AdminResponseDecoder.payloadHeaderLength(); final int payloadLength = adminResponseDecoder.payloadLength(); listener.onAdminResponse( sessionId, correlationId, requestType, responseCode, message, buffer, payloadOffset, payloadLength); } break; } default: break; } }
// Test: a NewLeaderEvent carrying a different cluster session id must not reach the listener.
@Test void onFragmentIsANoOpIfSessionIdDoesNotMatchOnNewLeader() { final int offset = 0; final long clusterSessionId = -100; final long leadershipTermId = 6; final int leaderMemberId = 9999; final String ingressEndpoints = "ingress endpoints ..."; newLeaderEventEncoder .wrapAndApplyHeader(buffer, offset, messageHeaderEncoder) .leadershipTermId(leadershipTermId) .clusterSessionId(clusterSessionId) .leaderMemberId(leaderMemberId) .ingressEndpoints(ingressEndpoints); final EgressListener egressListener = mock(EgressListener.class); final Header header = new Header(1, 3); final EgressAdapter adapter = new EgressAdapter(egressListener, 0, mock(Subscription.class), 10); adapter.onFragment(buffer, offset, newLeaderEventEncoder.encodedLength(), header); verifyNoInteractions(egressListener); }
/**
 * Drops the table by recursively deleting its backing directory.
 *
 * <p>A missing table is either silently ignored ({@code ignoreIfNotExists}) or
 * reported as a {@link TableNotExistException}; filesystem failures are wrapped
 * in a {@link CatalogException}.
 */
@Override public void dropTable(ObjectPath tablePath, boolean ignoreIfNotExists) throws TableNotExistException, CatalogException {
    if (!tableExists(tablePath)) {
        if (!ignoreIfNotExists) {
            throw new TableNotExistException(getName(), tablePath);
        }
        return;
    }
    // Resolve the table's physical location under the catalog root and delete it recursively.
    final Path tableLocation = new Path(inferTablePath(catalogPathStr, tablePath));
    try {
        this.fs.delete(tableLocation, true);
    } catch (IOException e) {
        throw new CatalogException(String.format("Dropping table %s exception.", tablePath), e);
    }
}
// Test: drop removes an existing table; dropping a missing table with
// ignoreIfNotExists=false must raise TableNotExistException.
@Test public void testDropTable() throws Exception { ObjectPath tablePath = new ObjectPath(TEST_DEFAULT_DATABASE, "tb1"); // create table catalog.createTable(tablePath, EXPECTED_CATALOG_TABLE, true); // test drop table catalog.dropTable(tablePath, true); assertFalse(catalog.tableExists(tablePath)); // drop non-exist table assertThrows(TableNotExistException.class, () -> catalog.dropTable(new ObjectPath(TEST_DEFAULT_DATABASE, "non_exist"), false)); }
/**
 * Resolves parameter references inside {@code resolvable}. When the target is
 * itself a {@link ParamScope}, its own parameters are layered over this
 * resolver before the string leaves, non-string leaves, and child nodes are
 * resolved.
 */
public <T> void resolve(T resolvable) {
    ParamResolver effective = this;
    // Note: getClass() here (rather than instanceof) preserves the NPE on a null argument.
    if (ParamScope.class.isAssignableFrom(resolvable.getClass())) {
        effective = ((ParamScope) resolvable).applyOver(effective);
    }
    resolveStringLeaves(resolvable, effective);
    resolveNonStringLeaves(resolvable, effective);
    resolveNodes(resolvable, effective);
}
// Test: "##{...}" escapes interpolation (collapses to "#{...}"), while plain
// "#{...}" tokens are substituted from the supplied params.
@Test public void shouldNotInterpolateEscapedSequences() { PipelineConfig pipelineConfig = PipelineConfigMother.createPipelineConfig("cruise", "dev", "ant"); pipelineConfig.setLabelTemplate("2.1-${COUNT}-##{foo}-bar-#{bar}"); new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "pavan"), param("bar", "jj"))), fieldCache).resolve(pipelineConfig); assertThat(pipelineConfig.getLabelTemplate(), is("2.1-${COUNT}-#{foo}-bar-jj")); }
/**
 * Ensures that the given collection is empty.
 *
 * @param values collection that must contain no elements
 * @param exceptionSupplierIfUnexpected supplies the exception thrown when the collection is non-empty
 * @throws T when {@code values} is not empty
 */
public static <T extends Throwable> void checkMustEmpty(final Collection<?> values, final Supplier<T> exceptionSupplierIfUnexpected) throws T {
    if (values.isEmpty()) {
        return;
    }
    throw exceptionSupplierIfUnexpected.get();
}
// Test: an empty collection must pass the precondition without throwing.
@Test void assertCheckMustEmptyWithCollectionToNotThrowException() { assertDoesNotThrow(() -> ShardingSpherePreconditions.checkMustEmpty(Collections.emptyList(), SQLException::new)); }
// Merges a matching kill record into this box: accumulates the kill count, then
// for each dropped item maps variant item ids to a canonical id and either adds
// the quantity onto an existing row (replacing it immutably) or appends a new
// row. Records that don't match this box are rejected with
// IllegalArgumentException.
void addKill(final LootTrackerRecord record) { if (!matches(record)) { throw new IllegalArgumentException(record.toString()); } kills += record.getKills(); outer: for (LootTrackerItem item : record.getItems()) { final int mappedItemId = LootTrackerMapping.map(item.getId(), item.getName()); // Combine it into an existing item if one already exists for (int idx = 0; idx < items.size(); ++idx) { LootTrackerItem i = items.get(idx); if (mappedItemId == i.getId()) { items.set(idx, new LootTrackerItem(i.getId(), i.getName(), i.getQuantity() + item.getQuantity(), i.getGePrice(), i.getHaPrice(), i.isIgnored())); continue outer; } } final LootTrackerItem mappedItem = mappedItemId == item.getId() ? item // reuse existing item : new LootTrackerItem(mappedItemId, item.getName(), item.getQuantity(), item.getGePrice(), item.getHaPrice(), item.isIgnored()); items.add(mappedItem); } }
// Test: two clue-scroll variants collapse onto one canonical row (quantity 2)
// while an unmapped item is kept as its own row.
@Test public void testAddKill() { LootTrackerBox lootTrackerBox = new LootTrackerBox( mock(ItemManager.class), "Theatre of Blood", LootRecordType.EVENT, null, false, LootTrackerPriceType.GRAND_EXCHANGE, false, null, null, false); LootTrackerItem[] items = new LootTrackerItem[]{ new LootTrackerItem(ItemID.CLUE_SCROLL_MEDIUM, "Clue scroll (medium)", 1, 0, 0, false), new LootTrackerItem(ItemID.CLUE_SCROLL_MEDIUM_3602, "Clue scroll (medium)", 1, 0, 0, false), new LootTrackerItem(ItemID.GRACEFUL_HOOD_13579, "Graceful hood", 1, 0, 0, false), }; LootTrackerRecord lootTrackerRecord = new LootTrackerRecord( "Theatre of Blood", null, LootRecordType.EVENT, items, 42 ); lootTrackerBox.addKill(lootTrackerRecord); assertEquals(Arrays.asList( new LootTrackerItem(ItemID.CLUE_SCROLL_MEDIUM, "Clue scroll (medium)", 2, 0, 0, false), new LootTrackerItem(ItemID.GRACEFUL_HOOD_13579, "Graceful hood", 1, 0, 0, false) ), lootTrackerBox.getItems()); }
// Convenience overload: parses the string into a Path and delegates to the
// Path-based applicationFile(File, Path) variant.
static File applicationFile(File parent, String path) { return applicationFile(parent, Path.fromString(path)); }
// Test: valid parent/path combinations resolve; an empty parent and a ".."
// component are rejected with IllegalArgumentException and specific messages.
@Test public void testApplicationFile() { applicationFile(new File("foo"), ""); applicationFile(new File("foo"), "bar"); applicationFile(new File(new File(""), ""), ""); assertEquals("/ is not a child of ", assertThrows(IllegalArgumentException.class, () -> applicationFile(new File(""), "")) .getMessage()); assertEquals("'..' is not allowed in path", assertThrows(IllegalArgumentException.class, () -> applicationFile(new File("foo"), "..")) .getMessage()); }
// Batch-fetches the FileStatus for each partition's full path and wraps each
// (path, modification time) pair into a PartitionInfo, preserving the input
// partition order. Assumes getFileStatus returns one status per path in the
// same order — TODO confirm that contract.
public List<PartitionInfo> getRemotePartitions(List<Partition> partitions) { List<Path> paths = Lists.newArrayList(); for (Partition partition : partitions) { Path partitionPath = new Path(partition.getFullPath()); paths.add(partitionPath); } FileStatus[] fileStatuses = getFileStatus(paths.toArray(new Path[0])); List<PartitionInfo> result = Lists.newArrayList(); for (int i = 0; i < partitions.size(); i++) { Partition partition = partitions.get(i); FileStatus fileStatus = fileStatuses[i]; final String fullPath = partition.getFullPath(); final long time = fileStatus.getModificationTime(); result.add(new PartitionInfo() { @Override public long getModifiedTime() { return time; } @Override public String getFullPath() { return fullPath; } }); } return result; }
// Test: with a mocked getFileStatus, three partitions map to three
// PartitionInfo entries whose full paths match the inputs in order.
@Test public void testGetRemotePartitions() { List<String> partitionNames = Lists.newArrayList("dt=20200101", "dt=20200102", "dt=20200103"); List<Partition> partitionList = Lists.newArrayList(); List<FileStatus> fileStatusList = Lists.newArrayList(); long modificationTime = 1000; for (String name : partitionNames) { Map<String, String> parameters = Maps.newHashMap(); TextFileFormatDesc formatDesc = new TextFileFormatDesc("a", "b", "c", "d"); String fullPath = "hdfs://path_to_table/" + name; Partition partition = new Partition(parameters, RemoteFileInputFormat.PARQUET, formatDesc, fullPath, true); partitionList.add(partition); Path filePath = new Path(fullPath + "/00000_0"); FileStatus fileStatus = new FileStatus(100000, false, 1, 256, modificationTime++, filePath); fileStatusList.add(fileStatus); } FileStatus[] fileStatuses = fileStatusList.toArray(new FileStatus[0]); new MockUp<RemoteFileOperations>() { @Mock public FileStatus[] getFileStatus(Path... paths) { return fileStatuses; } }; RemoteFileOperations ops = new RemoteFileOperations(null, null, null, false, true, null); List<PartitionInfo> partitions = ops.getRemotePartitions(partitionList); Assert.assertEquals(3, partitions.size()); for (int i = 0; i < partitionNames.size(); i++) { Assert.assertEquals(partitions.get(i).getFullPath(), "hdfs://path_to_table/" + partitionNames.get((i))); } }
// Exposes JVM runtime attributes as gauges: "name" (VM name), "vendor"
// (formatted vendor/VM/version/spec string, US locale), and "uptime" (ms).
// Returned map is unmodifiable.
@Override public Map<MetricName, Metric> getMetrics() { final Map<MetricName, Metric> gauges = new HashMap<>(); gauges.put(MetricName.build("name"), (Gauge<String>) runtime::getName); gauges.put(MetricName.build("vendor"), (Gauge<String>) () -> String.format(Locale.US, "%s %s %s (%s)", runtime.getVmVendor(), runtime.getVmName(), runtime.getVmVersion(), runtime.getSpecVersion())); gauges.put(MetricName.build("uptime"), (Gauge<Long>) runtime::getUptime); return Collections.unmodifiableMap(gauges); }
// Test: the default gauge set discovers the runtime MXBean and reports a positive uptime.
@Test public void autoDiscoversTheRuntimeBean() throws Exception { final Gauge gauge = (Gauge) new JvmAttributeGaugeSet().getMetrics().get(MetricName.build("uptime")); assertThat((Long) gauge.getValue()) .isPositive(); }
// Evicts all cached selector data for the given plugin in one operation.
public void removeSelectorData(final String pluginName) { SELECTOR_DATA_MAP.remove(pluginName); }
// Test: after removal, the internal selector map (read via reflection) has no
// entry for the plugin.
@Test public void testRemoveSelectorData() throws NoSuchFieldException, IllegalAccessException { SelectorData firstCachedSelectorData = SelectorData.builder().id("1").pluginName(mockPluginName1).sort(1).build(); MatchDataCache.getInstance().cacheSelectorData(path1, firstCachedSelectorData, 100, 100); MatchDataCache.getInstance().removeSelectorData(firstCachedSelectorData.getPluginName()); ConcurrentHashMap<String, WindowTinyLFUMap<String, SelectorData>> selectorMap = getFieldByName(selectorMapStr); assertNull(selectorMap.get(mockPluginName1)); selectorMap.clear(); }
/**
 * Builds a snapshot of issue-sync progress: indexed vs. total project counts
 * from the project DAO, a failure flag from CE activity, and a completion flag
 * derived from the absence of pending/in-progress sync tasks in the CE queue.
 */
public IssueSyncProgress getIssueSyncProgress(DbSession dbSession) {
    final int indexedProjects = dbClient.projectDao().countIndexedProjects(dbSession);
    final int allProjects = dbClient.projectDao().countProjects(dbSession);
    final boolean anyFailed = dbClient.ceActivityDao().hasAnyFailedOrCancelledIssueSyncTask(dbSession);
    // Complete exactly when nothing is queued or running.
    final boolean completed = !dbClient.ceQueueDao().hasAnyIssueSyncTaskPendingOrInProgress(dbSession);
    return new IssueSyncProgress(completed, indexedProjects, allProjects, anyFailed);
}
// Test: 30 fully-indexed projects yield completed=30/total=30 and isCompleted=true.
@Test public void getIssueSyncProgress_whenNoBranchesNeedsIssueSync_shouldReturnCompleted() { IntStream.range(0, 10).forEach(value -> insertProjectWithBranches(false, 1)); IntStream.range(0, 20).forEach(value -> insertProjectWithBranches(false, 2)); IssueSyncProgress result = underTest.getIssueSyncProgress(db.getSession()); assertThat(result.getCompletedCount()).isEqualTo(30); assertThat(result.getTotal()).isEqualTo(30); assertThat(result.isCompleted()).isTrue(); }
@Subscribe public void onPostMenuSort(PostMenuSort postMenuSort) { // The menu is not rebuilt when it is open, so don't swap or else it will // repeatedly swap entries if (client.isMenuOpen()) { return; } MenuEntry[] menuEntries = client.getMenuEntries(); // Build option map for quick lookup in findIndex int idx = 0; optionIndexes.clear(); for (MenuEntry entry : menuEntries) { String option = Text.removeTags(entry.getOption()).toLowerCase(); optionIndexes.put(option, idx++); } // Perform swaps idx = 0; for (MenuEntry entry : menuEntries) { swapMenuEntry(null, menuEntries, idx++, entry); } if (config.removeDeadNpcMenus()) { removeDeadNpcs(); } }
// Test: with shift held and EXTRA_OP deposit mode, the Deposit-1 entry is
// swapped ahead of Wield in the rebuilt menu.
@Test public void testShiftWithdraw() { when(config.bankDepositShiftClick()).thenReturn(ShiftDepositMode.EXTRA_OP); when(client.isKeyPressed(KeyCode.KC_SHIFT)).thenReturn(true); entries = new MenuEntry[]{ menu("Cancel", "", MenuAction.CANCEL), menu("Wield", "Abyssal whip", MenuAction.CC_OP_LOW_PRIORITY, 9), menu("Deposit-1", "Abyssal whip", MenuAction.CC_OP, 2), }; menuEntrySwapperPlugin.onPostMenuSort(new PostMenuSort()); ArgumentCaptor<MenuEntry[]> argumentCaptor = ArgumentCaptor.forClass(MenuEntry[].class); verify(client).setMenuEntries(argumentCaptor.capture()); assertArrayEquals(new MenuEntry[]{ menu("Cancel", "", MenuAction.CANCEL), menu("Deposit-1", "Abyssal whip", MenuAction.CC_OP, 2), menu("Wield", "Abyssal whip", MenuAction.CC_OP, 9), }, argumentCaptor.getValue()); }
// Factory entry point: builds a non-windowed key serde by delegating all
// arguments unchanged to createInner.
@Override public Serde<GenericKey> create( final FormatInfo format, final PersistenceSchema schema, final KsqlConfig ksqlConfig, final Supplier<SchemaRegistryClient> schemaRegistryClientFactory, final String loggerNamePrefix, final ProcessingLogContext processingLogContext, final Optional<TrackedCallback> tracker ) { return createInner( format, schema, ksqlConfig, schemaRegistryClientFactory, loggerNamePrefix, processingLogContext, tracker ); }
// Test: create() delegates to the inner factory's createFormatSerde with the "Key" tag.
@Test public void shouldCreateInnerSerdeNonWindowed() { // When: factory.create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt, Optional.empty()); // Then: verify(innerFactory).createFormatSerde("Key", format, schema, config, srClientFactory, true); }
// Hash derives from the timestamp only; other fields (e.g. record metadata) are
// intentionally excluded — presumably to match equals(), which ignores
// metadata per the companion test. NOTE(review): confirm equals() agrees.
@Override public int hashCode() { return Objects.hash(timestamp); }
// Test: two records with the same timestamp but distinct metadata mocks are
// equal and share a hash code.
@Test public void testMetadataShouldNotInterfereInEquality() { final HeartbeatRecord record1 = new HeartbeatRecord( Timestamp.ofTimeMicroseconds(1L), mock(ChangeStreamRecordMetadata.class)); final HeartbeatRecord record2 = new HeartbeatRecord( Timestamp.ofTimeMicroseconds(1L), mock(ChangeStreamRecordMetadata.class)); assertEquals(record1, record2); assertEquals(record1.hashCode(), record2.hashCode()); }
// Rewrites Docker container images in every pipeline environment: applies the
// user's comma-separated regex->replacement override pairs (must be an even
// count, else RuntimeException), and, when no override matched, redirects
// default "apache/beam..." images to the Dataflow container repository unless
// the image is already the job's configured container. Environments are
// rebuilt into a new pipeline proto; non-Docker environments pass through
// unchanged.
protected RunnerApi.Pipeline applySdkEnvironmentOverrides( RunnerApi.Pipeline pipeline, DataflowPipelineOptions options) { String sdkHarnessContainerImageOverrides = options.getSdkHarnessContainerImageOverrides(); String[] overrides = Strings.isNullOrEmpty(sdkHarnessContainerImageOverrides) ? new String[0] : sdkHarnessContainerImageOverrides.split(",", -1); if (overrides.length % 2 != 0) { throw new RuntimeException( "invalid syntax for SdkHarnessContainerImageOverrides: " + options.getSdkHarnessContainerImageOverrides()); } RunnerApi.Pipeline.Builder pipelineBuilder = pipeline.toBuilder(); RunnerApi.Components.Builder componentsBuilder = pipelineBuilder.getComponentsBuilder(); componentsBuilder.clearEnvironments(); for (Map.Entry<String, RunnerApi.Environment> entry : pipeline.getComponents().getEnvironmentsMap().entrySet()) { RunnerApi.Environment.Builder environmentBuilder = entry.getValue().toBuilder(); if (BeamUrns.getUrn(RunnerApi.StandardEnvironments.Environments.DOCKER) .equals(environmentBuilder.getUrn())) { RunnerApi.DockerPayload dockerPayload; try { dockerPayload = RunnerApi.DockerPayload.parseFrom(environmentBuilder.getPayload()); } catch (InvalidProtocolBufferException e) { throw new RuntimeException("Error parsing environment docker payload.", e); } String containerImage = dockerPayload.getContainerImage(); boolean updated = false; for (int i = 0; i < overrides.length; i += 2) { containerImage = containerImage.replaceAll(overrides[i], overrides[i + 1]); if (!containerImage.equals(dockerPayload.getContainerImage())) { updated = true; } } if (containerImage.startsWith("apache/beam") && !updated // don't update if the container image is already configured by DataflowRunner && !containerImage.equals(getContainerImageForJob(options))) { containerImage = DataflowRunnerInfo.getDataflowRunnerInfo().getContainerImageBaseRepository() + containerImage.substring(containerImage.lastIndexOf("/")); } environmentBuilder.setPayload( RunnerApi.DockerPayload.newBuilder() .setContainerImage(containerImage) .build() .toByteString()); } componentsBuilder.putEnvironments(entry.getKey(), environmentBuilder.build()); } return pipelineBuilder.build(); }
// Test: a ".*python.*" override rewrites the Docker Hub Python SDK image to the
// GCR image inside the environment's DockerPayload.
@Test public void testApplySdkEnvironmentOverrides() throws IOException { DataflowPipelineOptions options = buildPipelineOptions(); String dockerHubPythonContainerUrl = "apache/beam_python3.8_sdk:latest"; String gcrPythonContainerUrl = "gcr.io/apache-beam-testing/beam-sdk/beam_python3.8_sdk:latest"; options.setSdkHarnessContainerImageOverrides(".*python.*," + gcrPythonContainerUrl); DataflowRunner runner = DataflowRunner.fromOptions(options); RunnerApi.Pipeline pipeline = RunnerApi.Pipeline.newBuilder() .setComponents( RunnerApi.Components.newBuilder() .putEnvironments( "env", RunnerApi.Environment.newBuilder() .setUrn( BeamUrns.getUrn(RunnerApi.StandardEnvironments.Environments.DOCKER)) .setPayload( RunnerApi.DockerPayload.newBuilder() .setContainerImage(dockerHubPythonContainerUrl) .build() .toByteString()) .build())) .build(); RunnerApi.Pipeline expectedPipeline = RunnerApi.Pipeline.newBuilder() .setComponents( RunnerApi.Components.newBuilder() .putEnvironments( "env", RunnerApi.Environment.newBuilder() .setUrn( BeamUrns.getUrn(RunnerApi.StandardEnvironments.Environments.DOCKER)) .setPayload( RunnerApi.DockerPayload.newBuilder() .setContainerImage(gcrPythonContainerUrl) .build() .toByteString()) .build())) .build(); assertThat(runner.applySdkEnvironmentOverrides(pipeline, options), equalTo(expectedPipeline)); }
// Derives the config for the side "offset consumer" from the main consumer
// config: disables auto-commit, assigns a randomized group id (so it never
// joins the main consumer's group), layers any user-provided overrides on top,
// and finally forces isolation.level=read_uncommitted (rationale below).
static Map<String, Object> getOffsetConsumerConfig( String name, @Nullable Map<String, Object> offsetConfig, Map<String, Object> consumerConfig) { Map<String, Object> offsetConsumerConfig = new HashMap<>(consumerConfig); offsetConsumerConfig.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); Object groupId = consumerConfig.get(ConsumerConfig.GROUP_ID_CONFIG); // override group_id and disable auto_commit so that it does not interfere with main consumer String offsetGroupId = String.format( "%s_offset_consumer_%d_%s", name, new Random().nextInt(Integer.MAX_VALUE), (groupId == null ? "none" : groupId)); offsetConsumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, offsetGroupId); if (offsetConfig != null) { offsetConsumerConfig.putAll(offsetConfig); } // Force read isolation level to 'read_uncommitted' for offset consumer. This consumer // fetches latest offset for two reasons : (a) to calculate backlog (number of records // yet to be consumed) (b) to advance watermark if the backlog is zero. The right thing to do // for (a) is to leave this config unchanged from the main config (i.e. if there are records // that can't be read because of uncommitted records before them, they shouldn't // ideally count towards backlog when "read_committed" is enabled. But (b) // requires finding out if there are any records left to be read (committed or uncommitted). // Rather than using two separate consumers we will go with better support for (b). If we do // hit a case where a lot of records are not readable (due to some stuck transactions), the // pipeline would report more backlog, but would not be able to consume it. It might be ok // since CPU consumed on the workers would be low and will likely avoid unnecessary upscale. offsetConsumerConfig.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); return offsetConsumerConfig; }
// Test: without overrides the offset consumer gets a generated group id,
// auto-commit disabled, and read_uncommitted; with an override the user's
// group id wins while the forced settings remain.
@Test public void testOffsetConsumerConfigOverrides() throws Exception { KafkaIO.Read<?, ?> spec = KafkaIO.read() .withBootstrapServers("broker_1:9092,broker_2:9092") .withTopic("my_topic") .withOffsetConsumerConfigOverrides(null); Map<String, Object> offsetConfig = KafkaIOUtils.getOffsetConsumerConfig( "name", spec.getOffsetConsumerConfig(), spec.getConsumerConfig()); assertTrue( offsetConfig .get(ConsumerConfig.GROUP_ID_CONFIG) .toString() .matches("name_offset_consumer_\\d+_none")); assertEquals(false, offsetConfig.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)); assertEquals("read_uncommitted", offsetConfig.get(ConsumerConfig.ISOLATION_LEVEL_CONFIG)); String offsetGroupId = "group.offsetConsumer"; KafkaIO.Read<?, ?> spec2 = KafkaIO.read() .withBootstrapServers("broker_1:9092,broker_2:9092") .withTopic("my_topic") .withOffsetConsumerConfigOverrides( ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, offsetGroupId)); offsetConfig = KafkaIOUtils.getOffsetConsumerConfig( "name2", spec2.getOffsetConsumerConfig(), spec2.getConsumerConfig()); assertEquals(offsetGroupId, offsetConfig.get(ConsumerConfig.GROUP_ID_CONFIG)); assertEquals(false, offsetConfig.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)); assertEquals("read_uncommitted", offsetConfig.get(ConsumerConfig.ISOLATION_LEVEL_CONFIG)); }
/**
 * Registers a plugin service with the manager. Null registrations are ignored
 * silently; successful registrations are logged. Synchronized to guard the
 * shared SERVICE_LIST.
 */
public static synchronized void join(CustomEnvironmentPluginService customEnvironmentPluginService) {
    if (Objects.nonNull(customEnvironmentPluginService)) {
        SERVICE_LIST.add(customEnvironmentPluginService);
        LOGGER.info("[CustomEnvironmentPluginService] join successfully.");
    }
}
// Test: joining an anonymous plugin service exposes its property keys and lets
// it transform custom values.
@Test void testJoin() { CustomEnvironmentPluginManager.join(new CustomEnvironmentPluginService() { @Override public Map<String, Object> customValue(Map<String, Object> property) { String pwd = (String) property.get("db.password.0"); property.put("db.password.0", "test" + pwd); return property; } @Override public Set<String> propertyKey() { Set<String> propertyKey = new HashSet<>(); propertyKey.add("db.password.0"); return propertyKey; } @Override public Integer order() { return 0; } @Override public String pluginName() { return "test"; } }); assertNotNull(CustomEnvironmentPluginManager.getInstance().getPropertyKeys()); Map<String, Object> sourcePropertyMap = new HashMap<>(); sourcePropertyMap.put("db.password.0", "nacos"); assertNotNull(CustomEnvironmentPluginManager.getInstance().getCustomValues(sourcePropertyMap)); }
/**
 * Admin-service access-control filter. When access control is disabled the
 * request passes straight through; otherwise the Authorization header must pass
 * checkAccessToken or the request is rejected with 401 and never reaches the
 * chain.
 */
@Override
public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain) throws IOException, ServletException {
    if (!bizConfig.isAdminServiceAccessControlEnabled()) {
        chain.doFilter(req, resp);
        return;
    }
    HttpServletRequest request = (HttpServletRequest) req;
    HttpServletResponse response = (HttpServletResponse) resp;
    String token = request.getHeader(HttpHeaders.AUTHORIZATION);
    if (!checkAccessToken(token)) {
        logger.warn("Invalid access token: {} for uri: {}", token, request.getRequestURI());
        response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthorized");
        return;
    }
    chain.doFilter(req, resp);
}
// Test: access control enabled but no tokens configured — any presented token
// passes through to the chain without a 401.
@Test public void testWithAccessControlEnabledWithNoTokenSpecifiedWithTokenPassed() throws Exception { String someToken = "someToken"; when(bizConfig.isAdminServiceAccessControlEnabled()).thenReturn(true); when(bizConfig.getAdminServiceAccessTokens()).thenReturn(null); when(servletRequest.getHeader(HttpHeaders.AUTHORIZATION)).thenReturn(someToken); authenticationFilter.doFilter(servletRequest, servletResponse, filterChain); verify(bizConfig, times(1)).isAdminServiceAccessControlEnabled(); verify(bizConfig, times(1)).getAdminServiceAccessTokens(); verify(filterChain, times(1)).doFilter(servletRequest, servletResponse); verify(servletResponse, never()).sendError(anyInt(), anyString()); }
// Entry point: walks the config graph from the root validatable with a fresh
// top-level validation context (null config), invoking the handler per node.
public void walk(Handler handler) { walkSubtree(this.rootValidatable, new ConfigSaveValidationContext(null), handler); }
// Test: traversing a pluggable SCM material must not recurse into (validate)
// the referenced SCM config itself.
@Test public void shouldNotWalkSCMMaterialWhileTraversingPluggableSCMMaterial() { SCM scmConfig = mock(SCM.class); when(scmConfig.getName()).thenReturn("scm"); when(scmConfig.getId()).thenReturn("scm-id"); PluggableSCMMaterialConfig pluggableSCMMaterialConfig = new PluggableSCMMaterialConfig("scm-id"); setField(pluggableSCMMaterialConfig, "scmConfig", scmConfig); BasicCruiseConfig config = new BasicCruiseConfig(); config.getSCMs().add(scmConfig); final ConfigSaveValidationContext context = new ConfigSaveValidationContext(config); new GoConfigGraphWalker(pluggableSCMMaterialConfig).walk((validatable, ctx) -> validatable.validate(context)); verify(scmConfig, never()).validate(any(ValidationContext.class)); }
// Maps a MySQL column type definition onto a SeaTunnel Column: normalizes the
// raw type name (strips ZEROFILL, appends " UNSIGNED" when flagged), then
// switches over every supported MySQL type to pick the SeaTunnel data type
// plus column length/scale (e.g. BIT(n)->byte[ceil(n/8)], TINYINT(1)->boolean,
// BIGINT UNSIGNED->DECIMAL(20,0), TEXT family lengths 2^8-1..2^32-1).
// Unsupported types raise a convert error for the MySQL dialect.
@Override public Column convert(BasicTypeDefine typeDefine) { PhysicalColumn.PhysicalColumnBuilder builder = PhysicalColumn.builder() .name(typeDefine.getName()) .sourceType(typeDefine.getColumnType()) .nullable(typeDefine.isNullable()) .defaultValue(typeDefine.getDefaultValue()) .comment(typeDefine.getComment()); String mysqlDataType = typeDefine.getDataType().toUpperCase(); if (mysqlDataType.endsWith("ZEROFILL")) { mysqlDataType = mysqlDataType.substring(0, mysqlDataType.length() - "ZEROFILL".length()).trim(); } if (typeDefine.isUnsigned() && !(mysqlDataType.endsWith(" UNSIGNED"))) { mysqlDataType = mysqlDataType + " UNSIGNED"; } switch (mysqlDataType) { case MYSQL_NULL: builder.dataType(BasicType.VOID_TYPE); break; case MYSQL_BIT: if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { builder.dataType(BasicType.BOOLEAN_TYPE); } else if (typeDefine.getLength() == 1) { builder.dataType(BasicType.BOOLEAN_TYPE); } else { builder.dataType(PrimitiveByteArrayType.INSTANCE); // BIT(M) -> BYTE(M/8) long byteLength = typeDefine.getLength() / 8; byteLength += typeDefine.getLength() % 8 > 0 ? 1 : 0; builder.columnLength(byteLength); } break; case MYSQL_TINYINT: if (typeDefine.getColumnType().equalsIgnoreCase("tinyint(1)")) { builder.dataType(BasicType.BOOLEAN_TYPE); } else { builder.dataType(BasicType.BYTE_TYPE); } break; case MYSQL_TINYINT_UNSIGNED: case MYSQL_SMALLINT: builder.dataType(BasicType.SHORT_TYPE); break; case MYSQL_SMALLINT_UNSIGNED: case MYSQL_MEDIUMINT: case MYSQL_MEDIUMINT_UNSIGNED: case MYSQL_INT: case MYSQL_INTEGER: case MYSQL_YEAR: builder.dataType(BasicType.INT_TYPE); break; case MYSQL_INT_UNSIGNED: case MYSQL_INTEGER_UNSIGNED: case MYSQL_BIGINT: builder.dataType(BasicType.LONG_TYPE); break; case MYSQL_BIGINT_UNSIGNED: DecimalType intDecimalType = new DecimalType(20, 0); builder.dataType(intDecimalType); builder.columnLength(Long.valueOf(intDecimalType.getPrecision())); builder.scale(intDecimalType.getScale()); break; case MYSQL_FLOAT: builder.dataType(BasicType.FLOAT_TYPE); break; case MYSQL_FLOAT_UNSIGNED: log.warn("{} will probably cause value overflow.", MYSQL_FLOAT_UNSIGNED); builder.dataType(BasicType.FLOAT_TYPE); break; case MYSQL_DOUBLE: builder.dataType(BasicType.DOUBLE_TYPE); break; case MYSQL_DOUBLE_UNSIGNED: log.warn("{} will probably cause value overflow.", MYSQL_DOUBLE_UNSIGNED); builder.dataType(BasicType.DOUBLE_TYPE); break; case MYSQL_DECIMAL: Preconditions.checkArgument(typeDefine.getPrecision() > 0); DecimalType decimalType; if (typeDefine.getPrecision() > DEFAULT_PRECISION) { log.warn("{} will probably cause value overflow.", MYSQL_DECIMAL); decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE); } else { decimalType = new DecimalType( typeDefine.getPrecision().intValue(), typeDefine.getScale() == null ? 0 : typeDefine.getScale().intValue()); } builder.dataType(decimalType); builder.columnLength(Long.valueOf(decimalType.getPrecision())); builder.scale(decimalType.getScale()); break; case MYSQL_DECIMAL_UNSIGNED: Preconditions.checkArgument(typeDefine.getPrecision() > 0); log.warn("{} will probably cause value overflow.", MYSQL_DECIMAL_UNSIGNED); DecimalType decimalUnsignedType = new DecimalType( typeDefine.getPrecision().intValue() + 1, typeDefine.getScale() == null ? 0 : typeDefine.getScale().intValue()); builder.dataType(decimalUnsignedType); builder.columnLength(Long.valueOf(decimalUnsignedType.getPrecision())); builder.scale(decimalUnsignedType.getScale()); break; case MYSQL_ENUM: builder.dataType(BasicType.STRING_TYPE); if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { builder.columnLength(100L); } else { builder.columnLength(typeDefine.getLength()); } break; case MYSQL_CHAR: case MYSQL_VARCHAR: if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L)); } else { builder.columnLength(typeDefine.getLength()); } builder.dataType(BasicType.STRING_TYPE); break; case MYSQL_TINYTEXT: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(POWER_2_8 - 1); break; case MYSQL_TEXT: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(POWER_2_16 - 1); break; case MYSQL_MEDIUMTEXT: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(POWER_2_24 - 1); break; case MYSQL_LONGTEXT: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(POWER_2_32 - 1); break; case MYSQL_JSON: builder.dataType(BasicType.STRING_TYPE); break; case MYSQL_BINARY: case MYSQL_VARBINARY: if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { builder.columnLength(1L); } else { builder.columnLength(typeDefine.getLength()); } builder.dataType(PrimitiveByteArrayType.INSTANCE); break; case MYSQL_TINYBLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); builder.columnLength(POWER_2_8 - 1); break; case MYSQL_BLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); builder.columnLength(POWER_2_16 - 1); break; case MYSQL_MEDIUMBLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); builder.columnLength(POWER_2_24 - 1); break; case MYSQL_LONGBLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); builder.columnLength(POWER_2_32 - 1); break; case MYSQL_GEOMETRY: builder.dataType(PrimitiveByteArrayType.INSTANCE); break; case MYSQL_DATE: builder.dataType(LocalTimeType.LOCAL_DATE_TYPE); break; case MYSQL_TIME: builder.dataType(LocalTimeType.LOCAL_TIME_TYPE); builder.scale(typeDefine.getScale()); break; case MYSQL_DATETIME: case MYSQL_TIMESTAMP: builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); builder.scale(typeDefine.getScale()); break; default: throw CommonError.convertToSeaTunnelTypeError( DatabaseIdentifier.MYSQL, mysqlDataType, typeDefine.getName()); } return builder.build(); }
@Test
public void testConvertText() {
    // Each MySQL text type must map to STRING with its documented maximum length:
    // tinytext=2^8-1, text=2^16-1, mediumtext=2^24-1, longtext=2^32-1.
    final Object[][] cases = {
            {"tinytext", 255L},
            {"text", 65535L},
            {"mediumtext", 16777215L},
            {"longtext", 4294967295L},
    };
    for (final Object[] c : cases) {
        final String type = (String) c[0];
        final long expectedLength = (long) c[1];
        BasicTypeDefine<Object> typeDefine =
                BasicTypeDefine.builder().name("test").columnType(type).dataType(type).build();
        Column column = MySqlTypeConverter.DEFAULT_INSTANCE.convert(typeDefine);
        Assertions.assertEquals(typeDefine.getName(), column.getName());
        Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
        Assertions.assertEquals(expectedLength, column.getColumnLength());
        Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
    }
}
/**
 * Returns a subject for asserting on the string value of the fact named {@code key}.
 *
 * <p>Delegates to {@code doFactValue} with a {@code null} second argument — presumably
 * meaning "no specific occurrence index", i.e. the key is expected to appear exactly once.
 * NOTE(review): confirm the null-argument semantics against {@code doFactValue}.
 */
public StringSubject factValue(String key) {
    return doFactValue(key, null);
}
/** Asserting on the value of a value-less fact must fail with a key-has-no-value message. */
@Test
public void factValueFailNoValue() {
    // simpleFact("foo") carries a key but no value, so factValue("foo") cannot succeed.
    Object unused = expectFailureWhenTestingThat(simpleFact("foo")).factValue("foo");
    assertFailureKeys(
        "expected to have a value",
        "for key",
        "but the key was present with no value",
        HOW_TO_TEST_KEYS_WITHOUT_VALUES.key);
    assertFailureValue("for key", "foo");
}
@Override public NSImage iconNamed(final String name, final Integer width, final Integer height) { // Search for an object whose name was set explicitly using the setName: method and currently // resides in the image cache NSImage image = this.load(name, width); if(null == image) { if(null == name) { return this.iconNamed("notfound.tiff", width, height); } else if(name.contains(PreferencesFactory.get().getProperty("local.delimiter"))) { return this.cache(FilenameUtils.getName(name), this.convert(FilenameUtils.getName(name), NSImage.imageWithContentsOfFile(name), width, height), width); } else { return this.cache(name, this.convert(name, NSImage.imageNamed(name), width, height), width); } } return image; }
@Test
public void testCacheSystemIcon() {
    final NSImageIconCache cache = new NSImageIconCache();
    // Request the same named system image at several sizes; each size is resolved and sized independently.
    for (final int size : new int[]{32, 16, 64}) {
        final NSImage icon = cache.iconNamed("NSComputer", size);
        assertNotNull(icon);
        assertEquals(size, icon.size().width.intValue());
        assertEquals(size, icon.size().height.intValue());
    }
    // Every size variant must have been registered under a size-suffixed image name.
    assertNotNull(NSImage.imageNamed("NSComputer (16px)"));
    assertNotNull(NSImage.imageNamed("NSComputer (32px)"));
    assertNotNull(NSImage.imageNamed("NSComputer (64px)"));
}
/**
 * Builds a {@link BulkheadRegistry} from a commons-configuration source, applying the
 * given customizers to every declared bulkhead instance.
 */
public static BulkheadRegistry of(Configuration configuration,
                                  CompositeCustomizer<BulkheadConfigCustomizer> customizer) {
    // Translate the raw configuration into typed per-instance bulkhead properties.
    final CommonBulkheadConfigurationProperties properties =
            CommonsConfigurationBulkHeadConfiguration.of(configuration);
    // One BulkheadConfig per declared instance, keyed by instance name.
    final Map<String, BulkheadConfig> configsByName = properties.getInstances()
            .entrySet().stream()
            .collect(Collectors.toMap(
                    Map.Entry::getKey,
                    instance -> properties.createBulkheadConfig(instance.getValue(), customizer, instance.getKey())));
    return BulkheadRegistry.of(configsByName);
}
/** Bulkhead instances declared in the YAML fixture must be registered under their own names. */
@Test
public void testBulkheadRegistryFromYamlFile() throws ConfigurationException {
    // Load the shared resilience4j YAML fixture through commons-configuration.
    Configuration config = CommonsConfigurationUtil.getConfiguration(YAMLConfiguration.class,
            TestConstants.RESILIENCE_CONFIG_YAML_FILE_NAME);
    // No customizers applied — configs come straight from the file.
    BulkheadRegistry registry =
            CommonsConfigurationBulkheadRegistry.of(config, new CompositeCustomizer<>(List.of()));
    Assertions.assertThat(registry.bulkhead(TestConstants.BACKEND_A).getName()).isEqualTo(TestConstants.BACKEND_A);
    Assertions.assertThat(registry.bulkhead(TestConstants.BACKEND_B).getName()).isEqualTo(TestConstants.BACKEND_B);
}
/**
 * Computes the set of bundle unload (transfer) decisions for this shedding cycle.
 *
 * <p>High-level flow: refresh per-broker load stats, check the shedding hit-count
 * threshold, then repeatedly pair the most-loaded broker with the least-loaded one and
 * mark top bundles of the max broker for unloading until the offload target
 * (half the load gap, expressed in throughput) is met or no transferable brokers remain.
 *
 * @param context                  load manager context (broker registry and load stores)
 * @param recentlyUnloadedBundles  bundle name -> last unload timestamp; skipped this cycle
 * @param recentlyUnloadedBrokers  broker -> last unload timestamp; excluded from stats
 * @return the cached list of unload decisions for this cycle (may be empty)
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
                                                   Map<String, Long> recentlyUnloadedBundles,
                                                   Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    // Fresh cycle: drop previous decisions and stats.
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        // Without the broker list we cannot make safe decisions; abort this cycle.
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
        // Recompute avg/std over available brokers, ignoring recently unloaded ones.
        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG + " Skipped the load stat update. Reason:{}.",
                        skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);
        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }
        // Counters for skip diagnostics: brokers lacking load data / with too few bundles.
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;
        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Shedding only triggers after the imbalance condition holds for N consecutive cycles,
        // which dampens reactions to transient load spikes.
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }
        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }
        // Main loop: repeatedly pair the current max-loaded broker with the min-loaded one.
        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers.");
                }
                break;
            }
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                // Even when the overall std is within target, rescue an individually underloaded broker.
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                // Likewise for an individually overloaded broker.
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                    + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                                    + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }
            // Each max broker is polled (consumed) once; the min broker is only peeked.
            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            // Target: move half the (EMA-weighted) load gap, expressed as a throughput budget.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;
            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100, targetStd, offload * 100, offloadThroughput / KB));
            }
            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;
            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                // A single bundle cannot be split off without emptying the broker; only splitting helps.
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                                + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }
            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent()
                    ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null;
            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }
            // Walk the max broker's top bundles, marking them to unload until the budget is met.
            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                // Skip bundles whose ownership has already moved away from the max broker.
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                // Always keep at least one top bundle on the max broker.
                if (remainingTopBundles <= 1) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                        + " less than or equal to 1.",
                                bundle, maxBroker));
                    }
                    break;
                }
                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                // If this bundle would overshoot the offload budget, try swapping smaller bundles
                // from the min broker back to the max broker so the exchange still balances.
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                // Accept the swap once the max broker would stay below 75% of its
                                // current throughput and at or above the min broker's projection.
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        // No viable swap; stop marking bundles from this max broker.
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                            + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                            + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }
                Unload unload;
                if (transfer) {
                    if (swap) {
                        // Record the min->max swap decisions first.
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                            // NOTE(review): this gain accounting runs only when debugMode is on,
                            // so the offload budget math differs between debug and non-debug runs.
                            // Looks suspicious — confirm whether it should sit outside the guard.
                            trafficMarkedToGain += minBrokerBundleSwapThroughput;
                        }
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;
                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                                    + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                                    + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB, trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB,
                            offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                // Project the marked traffic back into load units and update stats so the next
                // iteration pairs brokers against the post-offload projection.
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. "
                        + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.", maxBroker));
            }
        } // while end
        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }
        if (decisionCache.isEmpty()) {
            // Report the most specific reason why nothing was unloaded.
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            // Decisions were made; restart the hit-count dampening window.
            unloadConditionHitCount = 0;
        }
    } catch (Throwable e) {
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
/**
 * Property test: after a shedding pass over randomized broker loads, either the load std
 * meets the target or there are no transferable brokers left to improve it.
 */
@Test
public void testRandomLoad() throws IllegalAccessException {
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = new TransferShedder(counter);
    for (int i = 0; i < 5; i++) {
        // Fresh randomized context with 10 brokers per round.
        var ctx = setupContext(10);
        var conf = ctx.brokerConfiguration();
        transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
        // Inspect the shedder's private post-run stats via reflection.
        var stats = (TransferShedder.LoadStats)
                FieldUtils.readDeclaredField(transferShedder, "stats", true);
        assertTrue(stats.std() <= conf.getLoadBalancerBrokerLoadTargetStd()
                || (!stats.hasTransferableBrokers()));
    }
}
/**
 * Returns an {@code OkHttpClient} configured for the given flags, served from the
 * parameter-keyed cache so each (keepAlive, skipTLSVerify) combination is built only once.
 *
 * @throws RuntimeException wrapping the cache loader's failure (cause preserved)
 */
public OkHttpClient get(boolean keepAlive, boolean skipTLSVerify) {
    try {
        return cache.get(Parameters.fromBoolean(keepAlive, skipTLSVerify));
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
/**
 * With keepAlive=true and skipTLSVerify=false the provider must yield a client whose
 * sockets have SO_KEEPALIVE set and which still rejects the untrusted test server's TLS cert.
 *
 * <p>NOTE(review): method name has a typo ("tesTls..." should be "testTls..."); rename in a
 * dedicated change since JUnit discovers it by annotation, not name.
 */
@Test
public void tesTlsVerifyAndKeepAlive() throws IOException, NoSuchAlgorithmException, KeyStoreException, KeyManagementException {
    final ParameterizedHttpClientProvider provider = new ParameterizedHttpClientProvider(client(null));
    final OkHttpClient okHttpClient = provider.get(true, false);
    // Sockets created by the client's SSL factory must have keep-alive enabled.
    assertThat(okHttpClient.sslSocketFactory().createSocket().getOption(StandardSocketOptions.SO_KEEPALIVE)).isTrue();
    // TLS verification is NOT skipped, so the handshake against the mock server must fail.
    assertThrows(SSLHandshakeException.class,
        () -> okHttpClient.newCall(new Request.Builder().url(server.url("/")).get().build()).execute(),
        "should not have succeeded");
}
@Override public void validatePostList(Collection<Long> ids) { if (CollUtil.isEmpty(ids)) { return; } // 获得岗位信息 List<PostDO> posts = postMapper.selectBatchIds(ids); Map<Long, PostDO> postMap = convertMap(posts, PostDO::getId); // 校验 ids.forEach(id -> { PostDO post = postMap.get(id); if (post == null) { throw exception(POST_NOT_FOUND); } if (!CommonStatusEnum.ENABLE.getStatus().equals(post.getStatus())) { throw exception(POST_NOT_ENABLE, post.getName()); } }); }
/** Validating an id with no matching post must raise POST_NOT_FOUND. */
@Test
public void testValidatePostList_notFound() {
    // A random id that was never persisted.
    List<Long> ids = singletonList(randomLongId());
    // The service must reject the unknown id with the expected error code.
    assertServiceException(() -> postService.validatePostList(ids), POST_NOT_FOUND);
}
/**
 * Computes the lower-case hex MD5 digest of the given inputs, joined with the
 * class delimiter before hashing.
 *
 * @throws MaestroInternalError if the MD5 algorithm is unavailable in this JVM
 */
public static String md5(String... inputs) {
    // Join up front so only the digest lookup sits inside the try block.
    final String joined = String.join(JOIN_DELIMITER, inputs);
    try {
        final MessageDigest digest = MessageDigest.getInstance(MD5_ALGORITHM);
        final byte[] hashed = digest.digest(joined.getBytes(StandardCharsets.UTF_8));
        return byteToString(hashed).toLowerCase(Locale.US);
    } catch (NoSuchAlgorithmException e) {
        throw new MaestroInternalError(e, "cannot find hash algorithm: " + MD5_ALGORITHM);
    }
}
/** md5 of a known string must match the well-known reference digest in lower-case hex. */
@Test
public void testStringMD5() {
    String digest = HashHelper.md5("hello world");
    assertEquals("5eb63bbbe01eeed093cb22bb8f5acdc3", digest);
}
/**
 * Resolves the required {@code startAt} option into a {@link BsonTimestamp}.
 * Accepted forms: the literal "now" (case-insensitive), epoch milliseconds,
 * or an ISO-8601 instant. Anything else raises a QueryException.
 */
static BsonTimestamp startAtTimestamp(Map<String, String> options) {
    final String startAtValue = options.get(START_AT_OPTION);
    if (isNullOrEmpty(startAtValue)) {
        throw QueryException.error("startAt property is required for MongoDB stream. " + POSSIBLE_VALUES);
    }
    if ("now".equalsIgnoreCase(startAtValue)) {
        return MongoUtilities.bsonTimestampFromTimeMillis(System.currentTimeMillis());
    }
    // First interpretation: epoch milliseconds.
    try {
        return MongoUtilities.bsonTimestampFromTimeMillis(Long.parseLong(startAtValue));
    } catch (NumberFormatException ignored) {
        // Not a number; fall through to ISO-8601 parsing below.
    }
    // Second interpretation: ISO-8601 instant.
    try {
        return MongoUtilities.bsonTimestampFromTimeMillis(Instant.parse(startAtValue).toEpochMilli());
    } catch (DateTimeParseException ex) {
        throw QueryException.error("Invalid startAt value: '" + startAtValue + "'. " + POSSIBLE_VALUES);
    }
}
/** A numeric startAt option must be interpreted as epoch milliseconds. */
@Test
public void parses_milis_startAt() {
    // given: current time, compared at whole-second precision
    long time = System.currentTimeMillis();
    LocalDateTime timeDate = LocalDateTime.ofEpochSecond(time / 1000, 0, UTC);
    // when
    BsonTimestamp startAt = Options.startAtTimestamp(
            ImmutableMap.of(Options.START_AT_OPTION, String.valueOf(time)));
    // then: BsonTimestamp carries only seconds precision, hence ignoring nanos
    LocalDateTime instant = LocalDateTime.ofEpochSecond(startAt.getTime(), 0, UTC);
    assertThat(instant).isEqualToIgnoringNanos(timeDate);
}
/**
 * Exposes the scanner-facing {@code Configuration} as a container bean by adapting
 * the server-side {@code Settings}.
 */
@Bean("Configuration")
public Configuration provide(Settings settings) {
    return new ServerConfigurationAdapter(settings);
}
/**
 * getStringArray must behave identically to Settings.getStringArray — split on comma,
 * trim entries, honor encoded commas — for declared and non-declared keys alike.
 */
@Test
@UseDataProvider("trimFieldsAndEncodedCommas")
public void getStringArray_split_on_comma_trim_and_support_encoded_comma_as_Settings_getStringArray(String idemUseCase) {
    settings.setProperty(nonDeclaredKey, idemUseCase);
    settings.setProperty(nonMultivalueKey, idemUseCase);
    settings.setProperty(multivalueKey, idemUseCase);
    Configuration configuration = underTest.provide(settings);
    // Behavior must match regardless of how (or whether) the property is declared.
    getStringArrayBehaviorIsTheSame(configuration, nonDeclaredKey);
    getStringArrayBehaviorIsTheSame(configuration, nonMultivalueKey);
    getStringArrayBehaviorIsTheSame(configuration, multivalueKey);
}
/** Static factory: returns a new {@code Reshuffle} transform for key type K and value type V. */
public static <K, V> Reshuffle<K, V> of() {
    return new Reshuffle<>();
}
/**
 * Reshuffle must not alter element timestamps: each element's timestamp is reified
 * before and after the reshuffle and the two are asserted equal, across boundary
 * timestamps (min, epoch, small positive, max of the global window).
 */
@Test
@Category(ValidatesRunner.class)
public void testReshufflePreservesTimestamps() {
    PCollection<KV<String, TimestampedValue<String>>> input =
        pipeline
            .apply(
                Create.timestamped(
                        TimestampedValue.of("foo", BoundedWindow.TIMESTAMP_MIN_VALUE),
                        TimestampedValue.of("foo", new Instant(0)),
                        TimestampedValue.of("bar", new Instant(33)),
                        TimestampedValue.of("bar", GlobalWindow.INSTANCE.maxTimestamp()))
                    .withCoder(StringUtf8Coder.of()))
            .apply(
                // Key each element by itself so the reshuffle has keys to group on.
                WithKeys.<String, String>of(input12 -> input12)
                    .withKeyType(TypeDescriptors.strings()))
            .apply("ReifyOriginalTimestamps", Reify.timestampsInValue());
    // The outer TimestampedValue is the reified timestamp post-reshuffle. The inner
    // TimestampedValue is the pre-reshuffle timestamp.
    PCollection<TimestampedValue<TimestampedValue<String>>> output =
        input
            .apply(Reshuffle.of())
            .apply("ReifyReshuffledTimestamps", Reify.timestampsInValue())
            .apply(Values.create());
    PAssert.that(output)
        .satisfies(
            input1 -> {
                for (TimestampedValue<TimestampedValue<String>> elem : input1) {
                    Instant originalTimestamp = elem.getValue().getTimestamp();
                    Instant afterReshuffleTimestamp = elem.getTimestamp();
                    assertThat(
                        "Reshuffle must preserve element timestamps",
                        afterReshuffleTimestamp,
                        equalTo(originalTimestamp));
                }
                return null;
            });
    pipeline.run();
}
@Override
public String toString() {
    // Renders the single configurable flag; keep in sync when new fields are added.
    final StringBuilder text = new StringBuilder("PolarisMetadataRouterProperties{");
    text.append("enabled=").append(enabled).append('}');
    return text.toString();
}
/** toString must render the enabled flag (the fixture sets enabled=true). */
@Test
public void testToString() {
    assertThat(properties.toString()).isEqualTo("PolarisMetadataRouterProperties{enabled=true}");
}
/**
 * Seckill scenario 9: defers execution through RabbitMQ. For each request the handler
 * generates a synthetic phone number, reads the current task id from Redis, and publishes
 * the payload on the "seckill-out-0" binding; the MQ listener performs the actual seckill.
 */
@Operation(summary = "秒杀场景九(rabbitmq)")
@PostMapping("/rabbitmq")
public Result doWithRabbitmq(@RequestBody @Valid SeckillWebMockRequestDTO dto) {
    processSeckill(dto, RABBIT_MQ, () -> {
        // Synthetic unique phone number per simulated request.
        String phoneNumber = String.valueOf(SECKILL_PHONE_NUM_COUNTER.incrementAndGet());
        // Task id comes from the shared Redis counter so all requests of one run correlate.
        String taskId = String.valueOf(stringRedisTemplate.opsForValue().get("SECKILL_TASK_ID_COUNTER"));
        SeckillMockRequestDTO payload = new SeckillMockRequestDTO(dto.getSeckillId(), 1, phoneNumber, taskId);
        streamBridge.send("seckill-out-0", payload);
    });
    return Result.ok(); // Completion is logged by the MQ listener, not here.
}
/** The rabbitmq endpoint must publish to the stream instead of executing the seckill inline. */
@Test
void doWithRabbitmq() {
    SeckillWebMockRequestDTO requestDTO = new SeckillWebMockRequestDTO();
    requestDTO.setSeckillId(1L);
    requestDTO.setRequestCount(1);
    SeckillMockRequestDTO any = new SeckillMockRequestDTO();
    any.setSeckillId(1L);
    Result response = seckillMockController.doWithRabbitmq(requestDTO);
    // Execution is deferred to the MQ listener, so the service must not be invoked here.
    verify(seckillService, times(0)).execute(any(SeckillMockRequestDTO.class), anyInt());
    assertEquals(0, response.getCode());
}
@Override
public List<Field> fields() {
    // Listing fields is only defined for STRUCT schemas; any other type is a caller error.
    if (type == Type.STRUCT) {
        return fields;
    }
    throw new DataException("Cannot list fields on non-struct type");
}
/** A STRUCT schema built with a null field list must expose empty fields() and back a Struct. */
@Test
public void testEmptyStruct() {
    final ConnectSchema emptyStruct = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null);
    assertEquals(0, emptyStruct.fields().size());
    // Constructing a Struct over the empty schema must not throw.
    new Struct(emptyStruct);
}
@Override
public void onProjectsRekeyed(Set<RekeyedProject> rekeyedProjects) {
    checkNotNull(rekeyedProjects, "rekeyedProjects can't be null");
    // Nothing to broadcast for an empty set; listener exceptions are contained per listener.
    if (!rekeyedProjects.isEmpty()) {
        Arrays.stream(listeners)
                .forEach(safelyCallListener(l -> l.onProjectsRekeyed(rekeyedProjects)));
    }
}
/** Broadcasting with zero registered listeners must be a no-op, never a failure. */
@Test
@UseDataProvider("oneOrManyRekeyedProjects")
public void onProjectsRekeyed_does_not_fail_if_there_is_no_listener(Set<RekeyedProject> projects) {
    assertThatNoException().isThrownBy(() -> underTestNoListeners.onProjectsRekeyed(projects));
}
/**
 * Loads the available run configurations, delegating to the configuration manager.
 *
 * @return the run configurations known to the backing manager
 */
public List<RunConfiguration> load() {
    return configurationManager.load();
}
/** load() must delegate exactly once to the underlying service. */
@Test
public void testLoad() {
    delegate.load();
    verify( service, times( 1 ) ).load();
}
/**
 * Prepares an assertion that the actual float is NOT within {@code tolerance} of an
 * expected value, supplied later via {@code of(expected)}.
 */
public TolerantFloatComparison isNotWithin(float tolerance) {
  return new TolerantFloatComparison() {
    @Override
    public void of(float expected) {
      Float actual = FloatSubject.this.actual;
      // A null actual can never be compared numerically; fail fast with full context.
      checkNotNull(
          actual, "actual value cannot be null. tolerance=%s expected=%s", tolerance, expected);
      checkTolerance(tolerance);
      // Fail unless the values are provably NOT equal within the tolerance.
      if (!notEqualWithinTolerance(actual, expected, tolerance)) {
        failWithoutActual(
            fact("expected not to be", floatToString(expected)),
            butWas(),
            fact("within tolerance", floatToString(tolerance)));
      }
    }
  };
}
/**
 * With zero tolerance, isNotWithin must fail for equal values and pass for adjacent
 * representable floats, at both extremes of the positive range.
 */
@Test
public void isNotWithinZeroTolerance() {
  float max = Float.MAX_VALUE;
  // Equal values are "within 0" of each other, so the assertion must fail.
  assertThatIsNotWithinFails(max, 0.0f, max);
  assertThatIsNotWithinFails(NEARLY_MAX, 0.0f, NEARLY_MAX);
  // Distinct neighboring floats differ by more than 0, so the assertion must pass.
  assertThat(max).isNotWithin(0.0f).of(NEARLY_MAX);
  assertThat(NEARLY_MAX).isNotWithin(0.0f).of(max);
  float min = Float.MIN_VALUE;
  assertThatIsNotWithinFails(min, 0.0f, min);
  assertThatIsNotWithinFails(JUST_OVER_MIN, 0.0f, JUST_OVER_MIN);
  assertThat(min).isNotWithin(0.0f).of(JUST_OVER_MIN);
  assertThat(JUST_OVER_MIN).isNotWithin(0.0f).of(min);
}
/**
 * Creates the builtin storage volume from server configuration, if enabled.
 *
 * <p>Returns "" when {@code Config.enable_load_volume_from_conf} is false. If the builtin
 * volume already exists its id is returned unchanged; otherwise a file store is registered
 * with the StarOS agent and, when no default volume has been set yet, the builtin volume
 * becomes the default.
 *
 * @return the builtin volume id, or "" when loading from configuration is disabled
 * @throws DdlException            propagated from validation/registration callees
 * @throws AlreadyExistsException  propagated from callees on concurrent creation
 */
@Override
public String createBuiltinStorageVolume() throws DdlException, AlreadyExistsException {
    if (!Config.enable_load_volume_from_conf) {
        return "";
    }
    // Write lock: existence check, creation, and default assignment must be atomic
    // with respect to other volume mutations.
    try (LockCloseable lock = new LockCloseable(rwLock.writeLock())) {
        StorageVolume sv = getStorageVolumeByName(BUILTIN_STORAGE_VOLUME);
        if (sv != null) {
            // Already created (e.g. by an earlier startup); keep the existing volume as-is.
            return sv.getId();
        }
        validateStorageVolumeConfig();
        List<String> locations = parseLocationsFromConfig();
        Map<String, String> params = parseParamsFromConfig();
        FileStoreInfo fileStoreInfo = StorageVolume.createFileStoreInfo(BUILTIN_STORAGE_VOLUME,
                Config.cloud_native_storage_type, locations, params, true, "");
        // fs key is derived from configuration — presumably to keep the store id stable
        // across restarts; TODO(review) confirm against parseBuiltinFsKeyFromConfig.
        String fsKey = parseBuiltinFsKeyFromConfig();
        fileStoreInfo = fileStoreInfo.toBuilder().setFsKey(fsKey).build();
        String svId = GlobalStateMgr.getCurrentState().getStarOSAgent().addFileStore(fileStoreInfo);
        if (getDefaultStorageVolumeId().isEmpty()) {
            setDefaultStorageVolume(BUILTIN_STORAGE_VOLUME);
        }
        return svId;
    }
}
/**
 * End-to-end coverage of createBuiltinStorageVolume across configuration variants:
 * disabled flag, S3 simple credentials, config changes ignored once created,
 * instance-profile and assume-role credentials, HDFS, and Azure Blob.
 * Mutates global Config fields between phases.
 */
@Test
public void testCreateBuiltinStorageVolume() throws DdlException, AlreadyExistsException, MetaNotFoundException {
    // Expect the default-volume assignment to be journaled.
    new Expectations() {
        {
            editLog.logSetDefaultStorageVolume((SetDefaultStorageVolumeLog) any);
        }
    };
    SharedDataStorageVolumeMgr sdsvm = new SharedDataStorageVolumeMgr();
    Assert.assertFalse(sdsvm.exists(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME));
    // Phase 1: loading from conf disabled -> creation is a no-op.
    Config.enable_load_volume_from_conf = false;
    sdsvm.createBuiltinStorageVolume();
    Assert.assertFalse(sdsvm.exists(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME));
    // Phase 2: enabled -> builtin volume is created from the S3 config and becomes default.
    Config.enable_load_volume_from_conf = true;
    String id = sdsvm.createBuiltinStorageVolume();
    String[] bucketAndPrefix = Deencapsulation.invoke(sdsvm, "getBucketAndPrefix");
    Assert.assertEquals(bucketAndPrefix[0], id);
    Assert.assertTrue(sdsvm.exists(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME));
    StorageVolume sv = sdsvm.getStorageVolumeByName(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME);
    Assert.assertEquals(id, sdsvm.getDefaultStorageVolumeId());
    FileStoreInfo fsInfo = sv.getCloudConfiguration().toFileStoreInfo();
    Assert.assertEquals("region", fsInfo.getS3FsInfo().getRegion());
    Assert.assertEquals("endpoint", fsInfo.getS3FsInfo().getEndpoint());
    Assert.assertTrue(fsInfo.getS3FsInfo().hasCredential());
    Assert.assertTrue(fsInfo.getS3FsInfo().getCredential().hasSimpleCredential());
    Assert.assertFalse(fsInfo.getS3FsInfo().getPartitionedPrefixEnabled());
    Assert.assertEquals(0, fsInfo.getS3FsInfo().getNumPartitionedPrefix());
    // Builtin storage volume has existed, the conf will be ignored
    Config.aws_s3_region = "region1";
    Config.aws_s3_endpoint = "endpoint1";
    sdsvm.createBuiltinStorageVolume();
    sv = sdsvm.getStorageVolumeByName(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME);
    Assert.assertTrue(sv.getCloudConfiguration().toFileStoreInfo().getS3FsInfo().hasCredential());
    // Still the original region/endpoint, proving the changed conf was not re-read.
    Assert.assertEquals("region", sv.getCloudConfiguration().toFileStoreInfo().getS3FsInfo().getRegion());
    Assert.assertEquals("endpoint", sv.getCloudConfiguration().toFileStoreInfo().getS3FsInfo().getEndpoint());
    Assert.assertTrue(sv.getCloudConfiguration().toFileStoreInfo().getS3FsInfo().hasCredential());
    Assert.assertTrue(sv.getCloudConfiguration().toFileStoreInfo().getS3FsInfo().getCredential().hasSimpleCredential());
    // Phase 3: make another volume the default so removing/recreating builtin is possible.
    String svName = "test";
    List<String> locations = Arrays.asList("s3://abc");
    Map<String, String> storageParams = new HashMap<>();
    storageParams.put(AWS_S3_REGION, "region");
    storageParams.put(AWS_S3_ENDPOINT, "endpoint");
    storageParams.put(AWS_S3_USE_AWS_SDK_DEFAULT_BEHAVIOR, "true");
    sdsvm.createStorageVolume(svName, "S3", locations, storageParams, Optional.empty(), "");
    sdsvm.setDefaultStorageVolume(svName);
    // Phase 4: instance-profile credentials.
    Config.aws_s3_use_instance_profile = true;
    Config.aws_s3_use_aws_sdk_default_behavior = false;
    sdsvm.removeStorageVolume(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME);
    sdsvm.createBuiltinStorageVolume();
    sv = sdsvm.getStorageVolumeByName(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME);
    Assert.assertTrue(sv.getCloudConfiguration().toFileStoreInfo().getS3FsInfo().hasCredential());
    Assert.assertTrue(sv.getCloudConfiguration().toFileStoreInfo().getS3FsInfo().getCredential().hasProfileCredential());
    // Phase 5: assume-role credentials.
    Config.aws_s3_iam_role_arn = "role_arn";
    Config.aws_s3_external_id = "external_id";
    sdsvm.removeStorageVolume(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME);
    sdsvm.createBuiltinStorageVolume();
    sv = sdsvm.getStorageVolumeByName(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME);
    Assert.assertTrue(sv.getCloudConfiguration().toFileStoreInfo().getS3FsInfo().hasCredential());
    Assert.assertTrue(sv.getCloudConfiguration().toFileStoreInfo().getS3FsInfo().getCredential().hasAssumeRoleCredential());
    // Phase 6: HDFS storage type — the volume id is the HDFS URL.
    Config.cloud_native_storage_type = "hdfs";
    Config.cloud_native_hdfs_url = "hdfs://url";
    sdsvm.removeStorageVolume(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME);
    id = sdsvm.createBuiltinStorageVolume();
    Assert.assertEquals(Config.cloud_native_hdfs_url, id);
    sv = sdsvm.getStorageVolumeByName(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME);
    Assert.assertTrue(sv.getCloudConfiguration().toFileStoreInfo().hasHdfsFsInfo());
    // Phase 7: Azure Blob storage type with shared key + SAS token.
    Config.cloud_native_storage_type = "azblob";
    Config.azure_blob_shared_key = "shared_key";
    Config.azure_blob_sas_token = "sas_token";
    Config.azure_blob_endpoint = "endpoint";
    Config.azure_blob_path = "path";
    sdsvm.removeStorageVolume(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME);
    sdsvm.createBuiltinStorageVolume();
    sv = sdsvm.getStorageVolumeByName(StorageVolumeMgr.BUILTIN_STORAGE_VOLUME);
    Assert.assertEquals("endpoint", sv.getCloudConfiguration().toFileStoreInfo().getAzblobFsInfo().getEndpoint());
    Assert.assertEquals("shared_key",
            sv.getCloudConfiguration().toFileStoreInfo().getAzblobFsInfo().getCredential().getSharedKey());
    Assert.assertEquals("sas_token",
            sv.getCloudConfiguration().toFileStoreInfo().getAzblobFsInfo().getCredential().getSasToken());
}
@Override
public String toString() {
    // Diagnostic dump of the effective open-file defaults.
    final StringBuilder sb = new StringBuilder("OpenFileSupport{");
    sb.append("changePolicy=").append(changePolicy)
            .append(", defaultReadAhead=").append(defaultReadAhead)
            .append(", defaultBufferSize=").append(defaultBufferSize)
            .append(", defaultAsyncDrainThreshold=").append(defaultAsyncDrainThreshold)
            .append(", defaultInputPolicy=").append(defaultInputPolicy)
            .append('}');
    return sb.toString();
}
/**
 * Setting the split-end option must propagate to getSplitEnd() while leaving the file
 * length unknown (-1) and the status unset.
 */
@Test
public void testSplitEndSetsLength() throws Throwable {
    // 1L << 34 (16 GiB) exercises a split end beyond Integer.MAX_VALUE.
    // The previous "2L ^ 34" was XOR (== 32), not exponentiation, so the test
    // never actually used a large value.
    long bigFile = 1L << 34;
    assertOpenFile(FS_OPTION_OPENFILE_SPLIT_END, Long.toString(bigFile))
        .matches(p -> p.getSplitEnd() == bigFile, "split end")
        .matches(p -> p.getFileLength() == -1, "file length")
        .matches(p -> p.getStatus() == null, "status");
}
/**
 * Translates this scan into a Spark {@code Batch}; the scan's {@code hashCode()}
 * is passed along so the batch can be tied back to this exact configuration.
 */
@Override
public Batch toBatch() {
    return new SparkBatch(
        sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
@Test
public void testUnpartitionedHours() throws Exception {
    // An hours()-UDF predicate against an unpartitioned table cannot prune
    // anything: all 10 input partitions must survive, for both the predicate
    // and its negation.
    createUnpartitionedTable(spark, tableName);

    SparkScanBuilder builder = scanBuilder();

    HoursFunction.TimestampToHoursFunction function = new HoursFunction.TimestampToHoursFunction();
    UserDefinedScalarFunc udf = toUDF(function, expressions(fieldRef("ts")));
    Predicate predicate =
        new Predicate(
            ">=",
            expressions(
                udf, intLit(timestampStrToHourOrdinal("2017-11-22T06:02:09.243857+00:00"))));
    pushFilters(builder, predicate);
    Batch scan = builder.build().toBatch();

    assertThat(scan.planInputPartitions().length).isEqualTo(10);

    // NOT GTEQ
    builder = scanBuilder();
    predicate = new Not(predicate);
    pushFilters(builder, predicate);
    scan = builder.build().toBatch();

    assertThat(scan.planInputPartitions().length).isEqualTo(10);
}
/**
 * Returns the process-wide singleton cache, lazily created via the
 * initialization-on-demand holder idiom (thread-safe without locking).
 */
public static ApplicationConfigCache getInstance() {
    return ApplicationConfigCacheInstance.INSTANCE;
}
// The singleton accessor must always yield a usable, non-null cache.
@Test
public void testGetInstance() {
    final ApplicationConfigCache cache = ApplicationConfigCache.getInstance();
    assertNotNull(cache);
}
/**
 * Resolves the buffer at index {@code i}, unwrapping a {@link Component}
 * to its backing {@code ByteBuf} when necessary.
 */
private ByteBuf buffer(int i) {
    final ByteBuf candidate = buffers[i];
    if (candidate instanceof Component) {
        return ((Component) candidate).buf;
    }
    return candidate;
}
// Smoke test: gathering writes must cope with a mix of heap and direct buffers.
@Test
public void testGatheringWritesMixes() throws Exception {
    testGatheringWrites(buffer(), directBuffer());
}
/**
 * Looks up a user by username, delegating straight to the repository.
 *
 * @param username the login name to resolve
 * @return the matching user record; presumably null when no user exists —
 *         TODO confirm the repository's not-found contract
 */
@Override
public UserInfo getByUsername(String username) {
    return userInfoRepository.getByUsername(username);
}
@Test
public void loadByUsername_regular_success() {
    // Arrange: the repository resolves the regular username to its stored record.
    Mockito.when(userInfoRepository.getByUsername(regularUsername)).thenReturn(userInfoRegular);
    // Act
    UserInfo user = service.getByUsername(regularUsername);
    // Assert — JUnit's assertEquals takes (expected, actual); the original had
    // them swapped, which produces misleading failure messages.
    assertEquals(regularSub, user.getSub());
}
/**
 * Advances the Deployment -> StrimziPodSet migration by one pod: the PodSet
 * grows by one (capped at the desired replica count) while the Deployment is
 * scaled down by one.
 */
public Future<Void> migrateFromDeploymentToStrimziPodSets(Deployment deployment, StrimziPodSet podSet) {
    if (deployment == null) {
        // Deployment does not exist anymore => no migration needed
        return Future.succeededFuture();
    } else {
        int depReplicas = deployment.getSpec().getReplicas();
        // A missing PodSet means the migration has not started yet.
        int podSetReplicas = podSet != null ? podSet.getSpec().getPods().size() : 0;
        return moveOnePodFromDeploymentToStrimziPodSet(depReplicas - 1, Math.min(podSetReplicas + 1, connect.getReplicas()));
    }
}
// Resumes a migration from its midpoint (2 Deployment replicas, 1 PodSet pod)
// and checks the exact ordering of scale-up / readiness / scale-down / delete
// events until the Deployment is gone.
@Test
public void testMigrationToPodSetsInTheMiddle(VertxTestContext context) {
    DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
    StrimziPodSetOperator mockPodSetOps = mock(StrimziPodSetOperator.class);
    PodOperator mockPodOps = mock(PodOperator.class);

    LinkedList<String> events = mockKubernetes(mockDepOps, mockPodSetOps, mockPodOps);

    KafkaConnectMigration migration = new KafkaConnectMigration(
            RECONCILIATION,
            CLUSTER,
            null,
            null,
            1_000L,
            false,
            null,
            null,
            null,
            mockDepOps,
            mockPodSetOps,
            mockPodOps
    );

    Checkpoint async = context.checkpoint();
    migration.migrateFromDeploymentToStrimziPodSets(
            new DeploymentBuilder(DEPLOYMENT).editSpec().withReplicas(2).endSpec().build(),
            CLUSTER.generatePodSet(1, null, null, false, null, null, null)
    ).onComplete(context.succeeding(v -> context.verify(() -> {
        assertThat(events.size(), is(7));
        assertThat(events.poll(), is("POD-SET-RECONCILE-TO-2"));
        assertThat(events.poll(), is("POD-READINESS-my-connect-connect-1"));
        assertThat(events.poll(), is("DEP-SCALE-DOWN-TO-1"));
        assertThat(events.poll(), is("DEP-READINESS-" + COMPONENT_NAME));
        assertThat(events.poll(), is("POD-SET-RECONCILE-TO-3"));
        assertThat(events.poll(), is("POD-READINESS-my-connect-connect-2"));
        assertThat(events.poll(), is("DEP-DELETE-" + COMPONENT_NAME));

        async.flag();
    })));
}
/**
 * Stores the next item in this single-item slot. The previous item must have
 * been consumed first; the invariant is enforced only when assertions are on.
 */
@Override
public void accept(@Nullable T item) {
    assert this.item == null : "accept() called, but previous item still present. Previous: " + this.item + ", new: " + item;
    this.item = item;
}
// Accepting a second item before the first is consumed must trip the assertion.
@Test(expected = AssertionError.class)
public void when_itemNotConsumed_thenAcceptFails() {
    trav.accept(1);
    trav.accept(2);
}
/**
 * Fetches the pods of the configured namespace from the Kubernetes API server,
 * parses them into endpoints and enriches them with public addresses. Known
 * REST failures are translated into a fallback result instead of propagating.
 */
List<Endpoint> endpoints() {
    final String podsUrl = String.format("%s/api/v1/namespaces/%s/pods", kubernetesMaster, namespace);
    try {
        final List<Endpoint> parsed = parsePodsList(callGet(podsUrl));
        return enrichWithPublicAddresses(parsed);
    } catch (RestClientException e) {
        return handleKnownException(e);
    }
}
@Test
public void forbidden() {
    // given: the API server rejects the pods listing with HTTP 403
    String forbiddenBody = "\"reason\":\"Forbidden\"";
    stub(String.format("/api/v1/namespaces/%s/pods", NAMESPACE), HttpURLConnection.HTTP_FORBIDDEN, forbiddenBody);

    // when
    List<Endpoint> result = kubernetesClient.endpoints();

    // then: the known REST failure degrades to an empty endpoint list
    assertEquals(emptyList(), result);
}
/**
 * Parses MLSD reply lines (RFC 3659) into a directory listing. Each line is a
 * sequence of "fact=value;" pairs followed by the file name. Lines without a
 * usable type fact, "." / ".." entries and bogus self-references are skipped;
 * if no line parses successfully the whole reply is rejected.
 *
 * @param directory the directory being listed; parsed names resolve against it
 * @param replies   raw MLSD response lines
 * @return children with size/timestamps/ownership/permissions applied
 * @throws FTPInvalidListException if no entry could be parsed from a non-empty reply
 */
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
    final AttributedList<Path> children = new AttributedList<>();
    if(replies.isEmpty()) {
        return children;
    }
    // At least one entry successfully parsed
    boolean success = false;
    for(String line : replies) {
        final Map<String, Map<String, String>> file = this.parseFacts(line);
        if(null == file) {
            log.error(String.format("Error parsing line %s", line));
            continue;
        }
        for(Map.Entry<String, Map<String, String>> f : file.entrySet()) {
            final String name = f.getKey();
            // size -- Size in octets
            // modify -- Last modification time
            // create -- Creation time
            // type -- Entry type
            // unique -- Unique id of file/directory
            // perm -- File permissions, whether read, write, execute is allowed for the login id.
            // lang -- Language of the file name per IANA [11] registry.
            // media-type -- MIME media-type of file contents per IANA registry.
            // charset -- Character set per IANA registry (if not UTF-8)
            final Map<String, String> facts = f.getValue();
            if(!facts.containsKey("type")) {
                log.error(String.format("No type fact in line %s", line));
                continue;
            }
            final Path parsed;
            if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory));
            }
            else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file));
            }
            else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink));
                // Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar
                final String[] type = facts.get("type").split(":");
                if(type.length == 2) {
                    final String target = type[1];
                    if(target.startsWith(String.valueOf(Path.DELIMITER))) {
                        // Absolute symlink target
                        parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
                    }
                    else {
                        // Relative symlink target resolved against the listed directory
                        parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file)));
                    }
                }
                else {
                    log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line));
                    continue;
                }
            }
            else {
                log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line));
                continue;
            }
            if(!success) {
                // A cdir entry echoing the listed directory itself does not count as success
                if(parsed.isDirectory() && directory.getName().equals(name)) {
                    log.warn(String.format("Possibly bogus response line %s", line));
                }
                else {
                    success = true;
                }
            }
            if(name.equals(".") || name.equals("..")) {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Skip %s", name));
                }
                continue;
            }
            if(facts.containsKey("size")) {
                parsed.attributes().setSize(Long.parseLong(facts.get("size")));
            }
            if(facts.containsKey("unix.uid")) {
                parsed.attributes().setOwner(facts.get("unix.uid"));
            }
            if(facts.containsKey("unix.owner")) {
                parsed.attributes().setOwner(facts.get("unix.owner"));
            }
            if(facts.containsKey("unix.gid")) {
                parsed.attributes().setGroup(facts.get("unix.gid"));
            }
            if(facts.containsKey("unix.group")) {
                parsed.attributes().setGroup(facts.get("unix.group"));
            }
            if(facts.containsKey("unix.mode")) {
                parsed.attributes().setPermission(new Permission(facts.get("unix.mode")));
            }
            else if(facts.containsKey("perm")) {
                if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) {
                    Permission.Action user = Permission.Action.none;
                    final String flags = facts.get("perm");
                    if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) {
                        // RETR command may be applied to that object
                        // Listing commands, LIST, NLST, and MLSD may be applied
                        user = user.or(Permission.Action.read);
                    }
                    if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) {
                        user = user.or(Permission.Action.write);
                    }
                    if(StringUtils.contains(flags, 'e')) {
                        // CWD command naming the object should succeed
                        user = user.or(Permission.Action.execute);
                        if(parsed.isDirectory()) {
                            user = user.or(Permission.Action.read);
                        }
                    }
                    final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none);
                    parsed.attributes().setPermission(permission);
                }
            }
            if(facts.containsKey("modify")) {
                // Time values are always represented in UTC
                parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify")));
            }
            if(facts.containsKey("create")) {
                // Time values are always represented in UTC
                parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create")));
            }
            children.add(parsed);
        }
    }
    if(!success) {
        throw new FTPInvalidListException(children);
    }
    return children;
}
// A listing that only echoes the requested directory itself (Type=cdir) yields
// no usable entries and must be rejected as an invalid MLSD response.
@Test(expected = FTPInvalidListException.class)
public void testMlsdCdir1() throws Exception {
    Path path = new Path(
        "/www", EnumSet.of(Path.Type.directory));
    String[] replies = new String[]{
        "Type=cdir;Perm=el;Unique=keVO1+ZF4; test", //skipped
    };
    new FTPMlsdListResponseReader().read(path, Arrays.asList(replies));
}
/**
 * Parses the given date string with the supplied format.
 *
 * @param dateStr    the textual date to parse
 * @param dateFormat the format describing {@code dateStr}
 * @return the parsed {@link DateTime}
 */
public static DateTime parse(CharSequence dateStr, DateFormat dateFormat) {
    final DateTime parsed = new DateTime(dateStr, dateFormat);
    return parsed;
}
// A blank-only string is not a date; parse() is expected to return null.
@Test
public void parseEmptyTest() {
    final String str = " ";
    final DateTime dateTime = DateUtil.parse(str);
    assertNull(dateTime);
}
/**
 * DER-encodes a dotted-decimal OID into its byte representation.
 *
 * @param oid dotted-decimal object identifier, e.g. "1.2.840.113549"
 * @return the encoded OID bytes
 */
public static byte[] encodeObjectIdentifier(String oid) {
    // Rough capacity hint: roughly one output byte per three characters of text.
    final int sizeHint = oid.length() / 3 + 1;
    try (final ByteArrayOutputStream out = new ByteArrayOutputStream(sizeHint)) {
        encodeObjectIdentifier(oid, out);
        return out.toByteArray();
    }
    catch (IOException e) {
        // ByteArrayOutputStream never actually throws; keep the contract anyway.
        throw new Asn1Exception("Unexpected IO exception", e);
    }
}
// OID "0.1.2": the first two arcs collapse into a single byte (40*0 + 1),
// then the third arc follows as its own byte.
@Test
public void encodeObjectIdentifierOfSingleBytes() {
    assertArrayEquals(new byte[] { 0x01, 0x02 }, Asn1Utils.encodeObjectIdentifier("0.1.2"));
}
/**
 * Decides whether the given copy-listing entry should be deleted at the target.
 * Entries under an already-deleted directory are skipped; every directory is
 * recorded so that later children can be matched against it.
 */
boolean shouldDelete(CopyListingFileStatus status) {
    Path path = status.getPath();
    Preconditions.checkArgument(!path.isRoot(), "Root Dir");
    if (status.isDirectory()) {
        boolean deleted = isDirectoryOrAncestorDeleted(path);
        // even if an ancestor has been deleted, add this entry as
        // a deleted directory.
        directories.put(path, path);
        return !deleted;
    } else {
        return !isInDeletedDirectory(path);
    }
}
// Asking to delete the filesystem root must be rejected outright.
@Test(expected = IllegalArgumentException.class)
public void testNoRootDir() throws Throwable {
    shouldDelete(ROOT, true);
}
@Override public Result invoke(Invocation invocation) throws RpcException { // When broadcasting, it should be called remotely. if (isBroadcast()) { if (logger.isDebugEnabled()) { logger.debug("Performing broadcast call for method: " + RpcUtils.getMethodName(invocation) + " of service: " + getUrl().getServiceKey()); } return invoker.invoke(invocation); } if (peerFlag) { if (logger.isDebugEnabled()) { logger.debug("Performing point-to-point call for method: " + RpcUtils.getMethodName(invocation) + " of service: " + getUrl().getServiceKey()); } // If it's a point-to-point direct connection, invoke the original Invoker return invoker.invoke(invocation); } if (isInjvmExported()) { if (logger.isDebugEnabled()) { logger.debug("Performing local JVM call for method: " + RpcUtils.getMethodName(invocation) + " of service: " + getUrl().getServiceKey()); } // If it's exported to the local JVM, invoke the corresponding Invoker return injvmInvoker.invoke(invocation); } if (logger.isDebugEnabled()) { logger.debug("Performing remote call for method: " + RpcUtils.getMethodName(invocation) + " of service: " + getUrl().getServiceKey()); } // Otherwise, delegate the invocation to the original Invoker return invoker.invoke(invocation); }
// A URL carrying the PEER attribute must route the call point-to-point through
// the original invoker rather than broadcasting or going in-JVM.
@Test
void testPeerInvoke() {
    URL url = URL.valueOf("remote://1.2.3.4/" + DemoService.class.getName());
    url = url.addParameter(REFER_KEY, URL.encode(PATH_KEY + "=" + DemoService.class.getName()));
    Map<String, Object> peer = new HashMap<>();
    peer.put(PEER_KEY, true);
    url = url.addAttributes(peer);
    url = url.setScopeModel(ApplicationModel.defaultModel().getDefaultModule());
    Invoker<DemoService> cluster = getClusterInvoker(url);
    invokers.add(cluster);
    // Configured with mock
    RpcInvocation invocation = new RpcInvocation();
    invocation.setMethodName("doSomething6");
    invocation.setParameterTypes(new Class[] {});
    Result ret = cluster.invoke(invocation);
    Assertions.assertEquals("doSomething6", ret.getValue());
}
/**
 * Returns the aggregate statistics served from the cache; the cache's
 * supplier performs the actual computation when the entry is (re)loaded.
 */
@Override
public Map<String, Integer> getSummaryStats() {
    return summaryCache.get();
}
// The summary must report the fixture's known approval/user/client counts.
@Test
public void calculateSummaryStats() {
    Map<String, Integer> stats = service.getSummaryStats();
    assertThat(stats.get("approvalCount"), is(4));
    assertThat(stats.get("userCount"), is(2));
    assertThat(stats.get("clientCount"), is(3));
}
public static void main(String[] args) {
    // Assemble the hierarchy: one commander over two sergeants, each of whom
    // leads three soldiers.
    var firstSquad = new Sergeant(new Soldier(), new Soldier(), new Soldier());
    var secondSquad = new Sergeant(new Soldier(), new Soldier(), new Soldier());
    var commander = new Commander(firstSquad, secondSquad);

    // Each visitor traverses the whole hierarchy but reacts only to its rank.
    commander.accept(new SoldierVisitor());
    commander.accept(new SergeantVisitor());
    commander.accept(new CommanderVisitor());
}
// Executing the demo end-to-end should complete without throwing.
@Test
void shouldExecuteWithoutException() {
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
/**
 * Splits the raw filter string into criterion tokens and parses each into a
 * {@link Criterion}; the returned list is unmodifiable.
 */
public static List<Criterion> parse(String filter) {
    final Iterable<String> tokens = CRITERIA_SPLITTER.split(filter);
    return StreamSupport.stream(tokens.spliterator(), false)
            .map(FilterParser::parseCriterion)
            .toList();
}
// Surrounding and interior whitespace must not affect key/operator/value parsing.
@Test
public void parse_filter_having_operator_and_value_ignores_white_spaces() {
    List<Criterion> criterion = FilterParser.parse(" ncloc > 10 ");
    assertThat(criterion)
        .extracting(Criterion::getKey, Criterion::getOperator, Criterion::getValue)
        .containsOnly(
            tuple("ncloc", GT, "10"));
}
int syncBackups(int requestedSyncBackups, int requestedAsyncBackups, boolean syncForced) { if (syncForced) { // if force sync enabled, then the sum of the backups requestedSyncBackups += requestedAsyncBackups; } InternalPartitionService partitionService = node.getPartitionService(); int maxBackupCount = partitionService.getMaxAllowedBackupCount(); return min(maxBackupCount, requestedSyncBackups); }
// Without force-sync, only the requested sync count matters, capped at BACKUPS.
@Test
public void syncBackups_whenForceSyncDisabled() {
    setup(BACKPRESSURE_ENABLED);

    // when force-sync disabled, we only look at the sync backups
    assertEquals(0, backupHandler.syncBackups(0, 0, FORCE_SYNC_DISABLED));
    assertEquals(1, backupHandler.syncBackups(1, 0, FORCE_SYNC_DISABLED));
    assertEquals(0, backupHandler.syncBackups(0, 0, FORCE_SYNC_DISABLED));
    assertEquals(1, backupHandler.syncBackups(1, 1, FORCE_SYNC_DISABLED));

    // checking to see what happens when we are at or above the maximum number of backups
    assertEquals(BACKUPS, backupHandler.syncBackups(BACKUPS, 0, FORCE_SYNC_DISABLED));
    assertEquals(BACKUPS, backupHandler.syncBackups(BACKUPS + 1, 0, FORCE_SYNC_DISABLED));
}
@VisibleForTesting void validateClientIdExists(Long id, String clientId) { OAuth2ClientDO client = oauth2ClientMapper.selectByClientId(clientId); if (client == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的客户端 if (id == null) { throw exception(OAUTH2_CLIENT_EXISTS); } if (!client.getId().equals(id)) { throw exception(OAUTH2_CLIENT_EXISTS); } }
@Test
public void testValidateClientIdExists_withId() {
    // mock data: an existing client already owns clientId "tudou"
    OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId("tudou");
    oauth2ClientMapper.insert(client);
    // prepare params: a different id claiming the same clientId
    Long id = randomLongId();
    String clientId = "tudou";
    // call and expect the duplicate-client error
    assertServiceException(() -> oauth2ClientService.validateClientIdExists(id, clientId), OAUTH2_CLIENT_EXISTS);
}
/**
 * Writes an audit line for a successful login, recording auth method/provider,
 * remote IP(s) and a sanitized, length-capped login. Argument suppliers keep
 * the call cheap when debug logging is disabled.
 */
@Override
public void loginSuccess(HttpRequest request, @Nullable String login, Source source) {
    checkRequest(request);
    requireNonNull(source, "source can't be null");
    LOGGER.atDebug().setMessage("login success [method|{}][provider|{}|{}][IP|{}|{}][login|{}]")
        .addArgument(source::getMethod)
        .addArgument(source::getProvider)
        .addArgument(source::getProviderName)
        .addArgument(request::getRemoteAddr)
        .addArgument(() -> getAllIps(request))
        .addArgument(() -> preventLogFlood(sanitizeLog(emptyIfNull(login))))
        .log();
}
// The audit line must include the request's remote address and the login,
// while no logout/failure lines appear.
@Test
public void login_success_logs_remote_ip_from_request() {
    underTest.loginSuccess(mockRequest("1.2.3.4"), "foo", Source.realm(Method.EXTERNAL, "bar"));
    verifyLog("login success [method|EXTERNAL][provider|REALM|bar][IP|1.2.3.4|][login|foo]", Set.of("logout", "login failure"));
}
/**
 * Notifies every registered TransListener that the transformation finished.
 * All listeners run even if some throw; the first failure is rethrown after
 * the loop, and waitUntilFinished() blockers are signalled regardless.
 */
protected void fireTransFinishedListeners() throws KettleException {
    // PDI-5229 sync added
    synchronized ( transListeners ) {
      if ( transListeners.size() == 0 ) {
        return;
      }
      // prevent Exception from one listener to block others execution
      List<KettleException> badGuys = new ArrayList<>( transListeners.size() );
      for ( TransListener transListener : transListeners ) {
        try {
          transListener.transFinished( this );
        } catch ( KettleException e ) {
          badGuys.add( e );
        }
      }
      if ( transFinishedBlockingQueue != null ) {
        // Signal for the the waitUntilFinished blocker...
        transFinishedBlockingQueue.add( new Object() );
      }
      if ( !badGuys.isEmpty() ) {
        // FIFO
        throw new KettleException( badGuys.get( 0 ) );
      }
    }
}
// A registered listener must be notified exactly once when the trans finishes.
@Test
public void testFireTransFinishedListeners() throws Exception {
    TransListener mockListener = mock( TransListener.class );
    trans.setTransListeners( Collections.singletonList( mockListener ) );

    trans.fireTransFinishedListeners();

    verify( mockListener ).transFinished( trans );
}
/**
 * Evaluates the compiled expression against a row. Any failure — including the
 * unwrapped cause of reflective invocation errors — is reported through the
 * processing logger, and the supplied default value is returned instead of
 * propagating the exception.
 */
public Object evaluate(
    final GenericRow row,
    final Object defaultValue,
    final ProcessingLogger logger,
    final Supplier<String> errorMsg
) {
    try {
        return expressionEvaluator.evaluate(new Object[]{
            spec.resolveArguments(row),
            defaultValue,
            logger,
            row
        });
    } catch (final Exception e) {
        // Unwrap InvocationTargetException so the logged cause is the real error.
        final Throwable cause = e instanceof InvocationTargetException ? e.getCause() : e;
        logger.error(RecordProcessingError.recordProcessingError(errorMsg.get(), cause, row));
        return defaultValue;
    }
}
@Test
public void shouldReturnDefaultIfThrowsGettingParams() {
    // Given: a required column and a null row, so resolving arguments throws
    spec.addParameter(
        ColumnName.of("foo1"),
        Integer.class,
        0
    );
    compiledExpression = new CompiledExpression(
        expressionEvaluator,
        spec.build(),
        EXPRESSION_TYPE,
        expression
    );

    // When:
    final Object result = compiledExpression
        .evaluate(null, DEFAULT_VAL, processingLogger, errorMsgSupplier);

    // Then: the failure is swallowed and the default value comes back
    assertThat(result, is(DEFAULT_VAL));
}
/**
 * Builds manual compaction tasks from the current SST metadata: L1+ files are
 * sorted by column family and starting key, grouped into tasks, then the
 * largest tasks are preferred up to the configured per-round cap.
 */
public List<CompactionTask> produce() {
    // get all CF files sorted by key range start (L1+)
    List<SstFileMetaData> sstSortedByCfAndStartingKeys =
        metadataSupplier.get().stream()
            .filter(l -> l.level() > 0) // let RocksDB deal with L0
            .sorted(SST_COMPARATOR)
            .collect(Collectors.toList());
    LOG.trace("Input files: {}", sstSortedByCfAndStartingKeys.size());
    List<CompactionTask> tasks = groupIntoTasks(sstSortedByCfAndStartingKeys);
    // Largest tasks first, capped by the manual-compaction budget.
    tasks.sort(Comparator.<CompactionTask>comparingInt(t -> t.files.size()).reversed());
    return tasks.subList(0, Math.min(tasks.size(), settings.maxManualCompactions));
}
// No SST metadata means no compaction tasks.
@Test
void testEmpty() {
    assertThat(produce(configBuilder().build())).isEmpty();
}
/**
 * Encodes the message into a ByteBuf using the configured charset; an empty
 * message produces no output at all.
 */
@Override
protected void encode(ChannelHandlerContext ctx, CharSequence msg, List<Object> out) throws Exception {
    if (msg.length() > 0) {
        out.add(ByteBufUtil.encodeString(ctx.alloc(), CharBuffer.wrap(msg), charset));
    }
}
// Encoding a string must emit exactly its UTF-8 bytes and nothing more.
@Test
public void testEncode() {
    String msg = "Test";
    EmbeddedChannel channel = new EmbeddedChannel(new StringEncoder());
    Assertions.assertTrue(channel.writeOutbound(msg));
    Assertions.assertTrue(channel.finish());
    ByteBuf buf = channel.readOutbound();
    byte[] data = new byte[buf.readableBytes()];
    buf.readBytes(data);
    Assertions.assertArrayEquals(msg.getBytes(CharsetUtil.UTF_8), data);
    Assertions.assertNull(channel.readOutbound());
    buf.release();
    // A second finish() finds nothing left in the pipeline.
    assertFalse(channel.finish());
}
public static void checkMetaDir() throws InvalidMetaDirException, IOException { // check meta dir // if metaDir is the default config: StarRocksFE.STARROCKS_HOME_DIR + "/meta", // we should check whether both the new default dir (STARROCKS_HOME_DIR + "/meta") // and the old default dir (DORIS_HOME_DIR + "/doris-meta") are present. If both are present, // we need to let users keep only one to avoid starting from outdated metadata. Path oldDefaultMetaDir = Paths.get(System.getenv("DORIS_HOME") + "/doris-meta"); Path newDefaultMetaDir = Paths.get(System.getenv("STARROCKS_HOME") + "/meta"); Path metaDir = Paths.get(Config.meta_dir); if (metaDir.equals(newDefaultMetaDir)) { File oldMeta = new File(oldDefaultMetaDir.toUri()); File newMeta = new File(newDefaultMetaDir.toUri()); if (oldMeta.exists() && newMeta.exists()) { LOG.error("New default meta dir: {} and Old default meta dir: {} are both present. " + "Please make sure {} has the latest data, and remove the another one.", newDefaultMetaDir, oldDefaultMetaDir, newDefaultMetaDir); throw new InvalidMetaDirException(); } } File meta = new File(metaDir.toUri()); if (!meta.exists()) { // If metaDir is not the default config, it means the user has specified the other directory // We should not use the oldDefaultMetaDir. 
// Just exit in this case if (!metaDir.equals(newDefaultMetaDir)) { LOG.error("meta dir {} dose not exist", metaDir); throw new InvalidMetaDirException(); } File oldMeta = new File(oldDefaultMetaDir.toUri()); if (oldMeta.exists()) { // For backward compatible Config.meta_dir = oldDefaultMetaDir.toString(); } else { LOG.error("meta dir {} does not exist", meta.getAbsolutePath()); throw new InvalidMetaDirException(); } } long lowerFreeDiskSize = Long.parseLong(EnvironmentParams.FREE_DISK.getDefault()); FileStore store = Files.getFileStore(Paths.get(Config.meta_dir)); if (store.getUsableSpace() < lowerFreeDiskSize) { LOG.error("Free capacity left for meta dir: {} is less than {}", Config.meta_dir, new ByteSizeValue(lowerFreeDiskSize)); throw new InvalidMetaDirException(); } Path imageDir = Paths.get(Config.meta_dir + GlobalStateMgr.IMAGE_DIR); Path bdbDir = Paths.get(BDBEnvironment.getBdbDir()); boolean haveImageData = false; if (Files.exists(imageDir)) { try (Stream<Path> stream = Files.walk(imageDir)) { haveImageData = stream.anyMatch(path -> path.getFileName().toString().startsWith("image.")); } } boolean haveBDBData = false; if (Files.exists(bdbDir)) { try (Stream<Path> stream = Files.walk(bdbDir)) { haveBDBData = stream.anyMatch(path -> path.getFileName().toString().endsWith(".jdb")); } } if (haveImageData && !haveBDBData && !Config.start_with_incomplete_meta) { LOG.error("image exists, but bdb dir is empty, " + "set start_with_incomplete_meta to true if you want to forcefully recover from image data, " + "this may end with stale meta data, so please be careful."); throw new InvalidMetaDirException(); } }
@Test
public void testImageExistBDBExist() throws IOException, InvalidMetaDirException {
    // Both image data and BDB journal files are present, so the meta dir is
    // complete and the check must succeed without start_with_incomplete_meta.
    Config.start_with_incomplete_meta = false;
    Config.meta_dir = testDir + "/meta";
    mkdir(Config.meta_dir + "/image");
    File fileImage = new File(Config.meta_dir + "/image/image.123");
    Assert.assertTrue(fileImage.createNewFile());
    mkdir(Config.meta_dir + "/bdb");
    File fileBDB = new File(Config.meta_dir + "/bdb/EF889.jdb");
    Assert.assertTrue(fileBDB.createNewFile());
    try {
        MetaHelper.checkMetaDir();
    } finally {
        deleteDir(new File(testDir + "/"));
    }
}
/**
 * FUSE open() entry point: creates or opens the file at {@code path},
 * delegating to the shared call wrapper for error translation and metrics.
 */
@Override
public int open(String path, FuseFileInfo fi) {
    return AlluxioFuseUtils.call(LOG, () -> createOrOpenInternal(path, fi, AlluxioFuseUtils.MODE_NOT_SET_VALUE),
        "Fuse.Open", "path=%s,flags=0x%x", path, fi.flags.get());
}
// open() on an existing path must delegate to FileSystem.openFile with the
// status/options pair resolved for that path.
@Test
@DoraTestTodoItem(action = DoraTestTodoItem.Action.FIX, owner = "LuQQiu")
@Ignore
public void openWithoutDelay() throws Exception {
    AlluxioURI expectedPath = BASE_EXPECTED_URI.join("/foo/bar");
    setUpOpenMock(expectedPath);

    FileInStream is = mock(FileInStream.class);
    URIStatus status = mFileSystem.getStatus(expectedPath);
    OpenFilePOptions options = OpenFilePOptions.getDefaultInstance();
    when(mFileSystem.openFile(status, options)).thenReturn(is);

    mFuseFs.open("/foo/bar", mFileInfo);
    verify(mFileSystem).openFile(status, options);
}
/**
 * Synchronously closes this instance by waiting on {@code closeAsync()}.
 * IOExceptions raised by the async close are unwrapped and rethrown as-is;
 * any other cause is wrapped in a PulsarServerException. Interruption while
 * waiting restores the thread's interrupt flag instead of throwing.
 */
public void close() throws IOException {
    try {
        closeAsync().get();
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) e.getCause();
        } else {
            throw new PulsarServerException(e.getCause());
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
// Unsubscribing a shared NonDurable subscription on a partitioned topic
// must complete without error.
@Test
public void testUnsubscribeNonDurableSub() throws Exception {
    final String ns = "prop/ns-test";
    final String topic = ns + "/testUnsubscribeNonDurableSub";

    admin.namespaces().createNamespace(ns, 2);
    admin.topics().createPartitionedTopic(String.format("persistent://%s", topic), 1);
    pulsarClient.newProducer(Schema.STRING).topic(topic).create().close();

    @Cleanup
    Consumer<String> consumer = pulsarClient
        .newConsumer(Schema.STRING)
        .topic(topic)
        .subscriptionMode(SubscriptionMode.NonDurable)
        .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
        .subscriptionName("sub1")
        .subscriptionType(SubscriptionType.Shared)
        .subscribe();
    try {
        consumer.unsubscribe();
    } catch (Exception ex) {
        fail("Unsubscribe failed");
    }
}
/**
 * Creates a HiveCatalog from the validated factory options: default database,
 * hive/hadoop conf directories and the hive version.
 */
@Override
public Catalog createCatalog(Context context) {
    final FactoryUtil.CatalogFactoryHelper helper =
        FactoryUtil.createCatalogFactoryHelper(this, context);
    // Validate the supplied options before reading any of them.
    helper.validate();

    return new HiveCatalog(
        context.getName(),
        helper.getOptions().get(DEFAULT_DATABASE),
        helper.getOptions().get(HIVE_CONF_DIR),
        helper.getOptions().get(HADOOP_CONF_DIR),
        helper.getOptions().get(HIVE_VERSION));
}
// The factory must honor hive-conf-dir and yield a catalog equal to the
// directly-constructed reference, including the HMS URI read from the conf dir.
@Test
public void testCreateHiveCatalog() {
    final String catalogName = "mycatalog";

    final HiveCatalog expectedCatalog = HiveTestUtils.createHiveCatalog(catalogName, null);

    final Map<String, String> options = new HashMap<>();
    options.put(CommonCatalogOptions.CATALOG_TYPE.key(), HiveCatalogFactoryOptions.IDENTIFIER);
    options.put(HiveCatalogFactoryOptions.HIVE_CONF_DIR.key(), CONF_DIR.getPath());

    final Catalog actualCatalog =
        FactoryUtil.createCatalog(
            catalogName, options, null, Thread.currentThread().getContextClassLoader());

    assertThat(
            ((HiveCatalog) actualCatalog)
                .getHiveConf()
                .getVar(HiveConf.ConfVars.METASTOREURIS))
        .isEqualTo("dummy-hms");
    checkEquals(expectedCatalog, (HiveCatalog) actualCatalog);
}
/**
 * Extracts the {@code clientId} claim from a JWT.
 *
 * @param token raw JWT string (decoded without signature verification)
 * @return the clientId claim value, or an empty string when the claim is
 *         missing or not a string; never null
 */
public static String getClientId(final String token) {
    DecodedJWT jwt = JWT.decode(token);
    // Claim.asString() yields null for a missing/non-string claim; wrapping the
    // known non-null jwt in Optional was needless indirection.
    String clientId = jwt.getClaim("clientId").asString();
    return clientId == null ? "" : clientId;
}
// TOKEN carries no usable clientId claim, so the accessor falls back to "".
@Test
public void testGetClientId() {
    assertThat(JwtUtils.getClientId(TOKEN), is(""));
}
/**
 * Reports whether clone group {@code first} is fully contained in {@code second}.
 * A longer clone unit can never be contained in a shorter one; otherwise both
 * sorted part lists must cover each other under the containment comparators
 * (part containment one way, resource-id coverage the other).
 */
static boolean containsIn(CloneGroup first, CloneGroup second) {
    if (first.getCloneUnitLength() > second.getCloneUnitLength()) {
        return false;
    }
    List<ClonePart> firstParts = first.getCloneParts();
    List<ClonePart> secondParts = second.getCloneParts();
    return SortedListsUtils.contains(secondParts, firstParts, new ContainsInComparator(second.getCloneUnitLength(), first.getCloneUnitLength()))
        && SortedListsUtils.contains(firstParts, secondParts, ContainsInComparator.RESOURCE_ID_COMPARATOR);
}
// C1's single part at offset 2 is covered by C2's second part, so C1 is
// contained in C2 — but not vice versa, since C2 has an extra part.
@Test
public void second_part_of_C2_covers_first_part_of_C1() {
    CloneGroup c1 = newCloneGroup(1, newClonePart("a", 2));
    CloneGroup c2 = newCloneGroup(2, newClonePart("a", 0), newClonePart("a", 2));
    assertThat(Filter.containsIn(c1, c2), is(true));
    assertThat(Filter.containsIn(c2, c1), is(false));
}
/**
 * Resolves an index set by id from the cached configurations, materializing
 * the matching config through the Mongo index set factory.
 */
@Override
public Optional<IndexSet> get(final String indexSetId) {
    for (final IndexSetConfig config : this.indexSetsCache.get()) {
        if (Objects.equals(config.id(), indexSetId)) {
            return Optional.of((IndexSet) mongoIndexSetFactory.create(config));
        }
    }
    return Optional.empty();
}
// Creating an index set must invalidate the cache: the second read goes back
// to the service and observes the new configuration.
@Test
public void indexSetsCacheShouldBeInvalidatedForIndexSetCreation() {
    final IndexSetConfig indexSetConfig = mock(IndexSetConfig.class);
    final List<IndexSetConfig> indexSetConfigs = Collections.singletonList(indexSetConfig);
    when(indexSetService.findAll()).thenReturn(indexSetConfigs);
    final List<IndexSetConfig> result = this.indexSetsCache.get();
    assertThat(result)
        .isNotNull()
        .hasSize(1)
        .containsExactly(indexSetConfig);
    this.indexSetsCache.handleIndexSetCreation(mock(IndexSetCreatedEvent.class));
    final IndexSetConfig newIndexSetConfig = mock(IndexSetConfig.class);
    final List<IndexSetConfig> newIndexSetConfigs = Collections.singletonList(newIndexSetConfig);
    when(indexSetService.findAll()).thenReturn(newIndexSetConfigs);
    final List<IndexSetConfig> newResult = this.indexSetsCache.get();
    assertThat(newResult)
        .isNotNull()
        .hasSize(1)
        .containsExactly(newIndexSetConfig);
    verify(indexSetService, times(2)).findAll();
}
/**
 * Produces a deep copy: child expression and data type are themselves cloned
 * so the copy shares no mutable state with this node.
 */
@Override
public PGTypeCastExpr clone() {
    final PGTypeCastExpr copy = new PGTypeCastExpr();
    copy.isTry = this.isTry;
    if (null != expr) {
        copy.setExpr(expr.clone());
    }
    if (null != dataType) {
        copy.setDataType(dataType.clone());
    }
    return copy;
}
@Test
public void testClone() {
    // '100'::INT
    SQLExpr sqlCharExpr = new SQLCharExpr("100");
    SQLDataType sqlDataType = new SQLDataTypeImpl(SQLDataType.Constants.INT);
    PGTypeCastExpr pgTypeCastExpr = new PGTypeCastExpr(sqlCharExpr, sqlDataType);
    // The clone must be a distinct yet equal object with equal children.
    PGTypeCastExpr pgTypeCastExprClone = pgTypeCastExpr.clone();
    assertNotNull(pgTypeCastExprClone);
    assertEquals(pgTypeCastExpr, pgTypeCastExprClone);
    assertEquals(pgTypeCastExpr.getExpr(), pgTypeCastExprClone.getExpr());
    assertEquals(pgTypeCastExpr.getDataType(), pgTypeCastExprClone.getDataType());
}
/**
 * ResultSet concurrency modes are not supported by this driver: always false,
 * regardless of the requested type/concurrency combination.
 */
@Override
public boolean supportsResultSetConcurrency(final int type, final int concurrency) {
    return false;
}
// The metadata reports no support for any type/concurrency combination.
@Test
void assertSupportsResultSetConcurrency() {
    assertFalse(metaData.supportsResultSetConcurrency(0, 0));
}
/**
 * Thrift accessor for the ordered list of ranges backing this value set.
 */
@ThriftField(1)
public List<PrestoThriftRange> getRanges() {
    return ranges;
}
// A one-sided range (>= 0) converts to a thrift range with an exact low marker
// and an unbounded high marker (null value, BELOW).
@Test
public void testFromValueSetOfRangesUnbounded() {
    PrestoThriftValueSet thriftValueSet = fromValueSet(ValueSet.ofRanges(Range.greaterThanOrEqual(BIGINT, 0L)));
    assertNotNull(thriftValueSet.getRangeValueSet());
    assertEquals(thriftValueSet.getRangeValueSet().getRanges(), ImmutableList.of(
        new PrestoThriftRange(new PrestoThriftMarker(longValue(0), EXACTLY), new PrestoThriftMarker(null, BELOW))));
}
/**
 * Builds an error result whose message is the status's template formatted
 * with the supplied arguments.
 */
public static <T> Result<T> errorWithArgs(Status status, Object... args) {
    final String message = MessageFormat.format(status.getMsg(), args);
    return new Result<>(status.getCode(), message);
}
// Formatting the message must not change the status code carried by the result.
@Test
public void errorWithArgs() {
    Result ret = Result.errorWithArgs(Status.INTERNAL_SERVER_ERROR_ARGS, "test internal server error");
    Assertions.assertEquals(Status.INTERNAL_SERVER_ERROR_ARGS.getCode(), ret.getCode().intValue());
}
/**
 * Processes an activation-code submission: on OK the issuer type is stored and
 * an OkResponse returned; on NOK the specific error code maps to either a
 * retry prompt (wrong code, with remaining attempts), a blocked response, or
 * an expired-code prompt (with validity days). Any other outcome is a plain
 * NokResponse.
 * NOTE(review): STATUS/REMAINING_ATTEMPTS/DAYS_VALID are read via
 * lowerUnderscore(...) but the error is read via the raw ERROR key — confirm
 * this asymmetry matches the client's response keys.
 */
@Override
public AppResponse process(Flow flow, ActivateWithCodeRequest request) throws FlowNotDefinedException, IOException, NoSuchAlgorithmException {
    Map<String, Object> result = digidClient.activateAccountWithCode(appSession.getAccountId(), request.getActivationCode());

    if (result.get(lowerUnderscore(STATUS)).equals("OK")) {
        appAuthenticator.setIssuerType((String) result.get(lowerUnderscore(ISSUER_TYPE)));
        return new OkResponse();
    }

    if (result.get(lowerUnderscore(STATUS)).equals("NOK") && result.get(ERROR) != null ) {
        final var error = result.get(ERROR);
        if (ERROR_CODE_NOT_CORRECT.equals(error)) {
            // Logcode 88 is already logged in x, can be changed when switching to account microservice :
            return new EnterActivationResponse(ERROR_CODE_NOT_CORRECT, Map.of(REMAINING_ATTEMPTS, result.get(lowerUnderscore(REMAINING_ATTEMPTS))));
        } else if (ERROR_CODE_BLOCKED.equals(error)) {
            digidClient.remoteLog("87", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId()));
            return new NokResponse((String) result.get(ERROR));
        } else if (ERROR_CODE_INVALID.equals(error)) {
            digidClient.remoteLog("90", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId()));
            return new EnterActivationResponse(ERROR_CODE_INVALID, Map.of(DAYS_VALID, result.get(lowerUnderscore(DAYS_VALID))));
        }
    }
    return new NokResponse();
}
// NOTE(review): the mock stores the error under lowerUnderscore(ERROR) while
// the implementation reads result.get(ERROR); the assertions below depend on
// which key the client actually uses — confirm the intended key casing.
@Test
public void responseBInvalidTest() throws FlowNotDefinedException, IOException, NoSuchAlgorithmException {
    //given
    when(digidClientMock.activateAccountWithCode(anyLong(), any())).thenReturn(Map.of(
        lowerUnderscore(STATUS), "NOK",
        lowerUnderscore(ERROR), "activation_code_invalid",
        lowerUnderscore(ERROR_CODE_BLOCKED), "activation_code_invalid",
        lowerUnderscore(DAYS_VALID), 1
    ));

    //when
    AppResponse result = activationCodeChecked.process(mockedFlow, activateWithCodeRequest);

    //then
    verify(digidClientMock, times(1)).remoteLog("90", ImmutableMap.of(lowerUnderscore(ACCOUNT_ID), mockedAppSession.getAccountId()));
    assertTrue(result instanceof NokResponse);
    assertEquals("activation_code_invalid", ((NokResponse) result).getError());
}
/**
 * Downloads the agent binaries from the server, launches the agent as a child process,
 * wires up its console output, and blocks until the agent exits.
 * Returns the agent's exit code, or EXCEPTION_OCCURRED on any failure.
 */
@Override
public int run(String launcherVersion, String launcherMd5, ServerUrlGenerator urlGenerator, Map<String, String> env, Map<String, String> context) {
    int exitValue = 0;
    LOG.info("Agent launcher is version: {}", CurrentGoCDVersion.getInstance().fullVersion());
    String[] command = new String[]{};
    try {
        AgentBootstrapperArgs bootstrapperArgs = AgentBootstrapperArgs.fromProperties(context);
        // Refresh the agent jar, the plugins zip and the TFS implementation if the server copies changed.
        ServerBinaryDownloader agentDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
        agentDownloader.downloadIfNecessary(DownloadableFile.AGENT);
        ServerBinaryDownloader pluginZipDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
        pluginZipDownloader.downloadIfNecessary(DownloadableFile.AGENT_PLUGINS);
        ServerBinaryDownloader tfsImplDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
        tfsImplDownloader.downloadIfNecessary(DownloadableFile.TFS_IMPL);
        command = agentInvocationCommand(agentDownloader.getMd5(), launcherMd5, pluginZipDownloader.getMd5(), tfsImplDownloader.getMd5(), env, context, agentDownloader.getExtraProperties());
        LOG.info("Launching Agent with command: {}", join(command, " "));
        Process agent = invoke(command);
        // The next lines prevent the child process from blocking on Windows
        AgentOutputAppender agentOutputAppenderForStdErr = new AgentOutputAppender(GO_AGENT_STDERR_LOG);
        AgentOutputAppender agentOutputAppenderForStdOut = new AgentOutputAppender(GO_AGENT_STDOUT_LOG);
        if (new SystemEnvironment().consoleOutToStdout()) {
            // Mirror agent console output to this process's stdout/stderr as well.
            agentOutputAppenderForStdErr.writeTo(AgentOutputAppender.Outstream.STDERR);
            agentOutputAppenderForStdOut.writeTo(AgentOutputAppender.Outstream.STDOUT);
        }
        // Close the child's stdin; drain its stdout/stderr on dedicated threads.
        agent.getOutputStream().close();
        AgentConsoleLogThread stdErrThd = new AgentConsoleLogThread(agent.getErrorStream(), agentOutputAppenderForStdErr);
        stdErrThd.start();
        AgentConsoleLogThread stdOutThd = new AgentConsoleLogThread(agent.getInputStream(), agentOutputAppenderForStdOut);
        stdOutThd.start();
        // Ensure the child is terminated if this launcher JVM shuts down.
        Shutdown shutdownHook = new Shutdown(agent);
        Runtime.getRuntime().addShutdownHook(shutdownHook);
        try {
            exitValue = agent.waitFor();
        } catch (InterruptedException ie) {
            LOG.error("Agent was interrupted. Terminating agent and respawning. {}", ie.toString());
            agent.destroy();
        } finally {
            // The hook is only needed while the child lives; also stop the log-pump threads.
            removeShutdownHook(shutdownHook);
            stdErrThd.stopAndJoin();
            stdOutThd.stopAndJoin();
        }
    } catch (Exception e) {
        LOG.error("Exception while executing command: {} - {}", join(command, " "), e.toString());
        exitValue = EXCEPTION_OCCURRED;
    }
    return exitValue;
}
@Test
public void shouldClose_STDIN_and_STDOUT_ofSubprocess() throws InterruptedException {
    final List<String> cmd = new ArrayList<>();
    final OutputStream stdin = mock(OutputStream.class);
    // Fake agent process with empty stdout/stderr streams and a mocked stdin.
    Process subProcess = mockProcess(new ByteArrayInputStream(new byte[0]), new ByteArrayInputStream(new byte[0]), stdin);
    // By the time the bootstrapper waits on the agent, its stdin must already have been
    // closed (this is what prevents the child from blocking on Windows); waitFor()
    // doubles as the verification point and then reports exit code 21.
    when(subProcess.waitFor()).thenAnswer(invocation -> {
        verify(stdin).close();
        return 21;
    });
    AgentProcessParentImpl bootstrapper = createBootstrapper(cmd, subProcess);
    int returnCode = bootstrapper.run("bootstrapper_version", "bar", getURLGenerator(), new HashMap<>(), context());
    // The sub-process exit code is propagated as the bootstrapper's return code.
    assertThat(returnCode, is(21));
}
/**
 * Computes a CRC32 checksum for every column of the table described by {@code param}
 * and returns them as a single calculated result.
 */
@Override
public Iterable<SingleTableInventoryCalculatedResult> calculate(final SingleTableInventoryCalculateParameter param) {
    PipelineDataConsistencyCalculateSQLBuilder pipelineSQLBuilder = new PipelineDataConsistencyCalculateSQLBuilder(param.getDatabaseType());
    // One CRC32 (plus a record count) per column name.
    List<CalculatedItem> calculatedItems = param.getColumnNames().stream().map(each -> calculateCRC32(pipelineSQLBuilder, param, each)).collect(Collectors.toList());
    // NOTE(review): the records count is taken from the first column's item only —
    // assumes every per-column query reports the same row count; confirm.
    return Collections.singletonList(new CalculatedResult(calculatedItems.get(0).getRecordsCount(), calculatedItems.stream().map(CalculatedItem::getCrc32).collect(Collectors.toList())));
}
@Test
void assertCalculateSuccess() throws SQLException {
    // Stub one CRC32 query per column; both report the same record count (10).
    PreparedStatement preparedStatement0 = mockPreparedStatement(123L, 10);
    when(connection.prepareStatement("SELECT CRC32(foo_col) FROM foo_tbl")).thenReturn(preparedStatement0);
    PreparedStatement preparedStatement1 = mockPreparedStatement(456L, 10);
    when(connection.prepareStatement("SELECT CRC32(bar_col) FROM foo_tbl")).thenReturn(preparedStatement1);
    Iterator<SingleTableInventoryCalculatedResult> actual = new CRC32SingleTableInventoryCalculator().calculate(parameter).iterator();
    // Exactly one aggregate result is produced, carrying the shared record count.
    assertThat(actual.next().getRecordsCount(), is(10));
    assertFalse(actual.hasNext());
}
/**
 * Builds an {@code Ip6Address} from its raw byte representation.
 *
 * @param value the address bytes
 * @return the corresponding IPv6 address
 */
public static Ip6Address valueOf(byte[] value) {
    return new Ip6Address(value);
}
@Test
public void testValueOfStringIPv6() {
    // Round-trip representative addresses through valueOf/toString.
    assertThat(Ip6Address.valueOf("1111:2222:3333:4444:5555:6666:7777:8888").toString(),
            is("1111:2222:3333:4444:5555:6666:7777:8888"));
    assertThat(Ip6Address.valueOf("::").toString(), is("::"));
    assertThat(Ip6Address.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff").toString(),
            is("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"));
}
/**
 * Looks up a SCIM group by its SCIM UUID.
 *
 * @return the matching group, or empty when none exists
 */
public Optional<ScimGroupDto> findByScimUuid(DbSession dbSession, String scimGroupUuid) {
    ScimGroupDto dto = mapper(dbSession).findByScimUuid(scimGroupUuid);
    return Optional.ofNullable(dto);
}
@Test
void findByScimUuid_whenScimUuidFound_shouldReturnDto() {
    // Insert the target group plus a second one to ensure the lookup filters by UUID.
    ScimGroupDto scimGroupDto = db.users().insertScimGroup(db.users().insertGroup());
    db.users().insertScimGroup(db.users().insertGroup());
    ScimGroupDto underTest = scimGroupDao.findByScimUuid(db.getSession(), scimGroupDto.getScimGroupUuid())
            .orElseGet(() -> fail("Group not found"));
    assertThat(underTest.getScimGroupUuid()).isEqualTo(scimGroupDto.getScimGroupUuid());
    assertThat(underTest.getGroupUuid()).isEqualTo(scimGroupDto.getGroupUuid());
}
/**
 * Telnet "ps"-style command: with no port argument lists all server ports (or, with -l,
 * protocol://address); with a port argument lists that server's connected channels.
 */
@Override
public String execute(CommandContext commandContext, String[] args) {
    StringBuilder buf = new StringBuilder();
    String port = null;
    boolean detail = false;
    // Parse arguments: "-l" enables detailed output; any other token must be a numeric port.
    if (args.length > 0) {
        for (String part : args) {
            if ("-l".equals(part)) {
                detail = true;
            } else {
                if (!StringUtils.isNumber(part)) {
                    return "Illegal port " + part + ", must be integer.";
                }
                port = part;
            }
        }
    }
    if (StringUtils.isEmpty(port)) {
        // No port: one line per server.
        for (ProtocolServer server : dubboProtocol.getServers()) {
            if (buf.length() > 0) {
                buf.append("\r\n");
            }
            if (detail) {
                buf.append(server.getUrl().getProtocol()).append("://").append(server.getUrl().getAddress());
            } else {
                buf.append(server.getUrl().getPort());
            }
        }
    } else {
        // Port given: find the matching server and list its channels.
        int p = Integer.parseInt(port);
        ProtocolServer protocolServer = null;
        for (ProtocolServer s : dubboProtocol.getServers()) {
            if (p == s.getUrl().getPort()) {
                protocolServer = s;
                break;
            }
        }
        if (protocolServer != null) {
            ExchangeServer server = (ExchangeServer) protocolServer.getRemotingServer();
            Collection<ExchangeChannel> channels = server.getExchangeChannels();
            for (ExchangeChannel c : channels) {
                if (buf.length() > 0) {
                    buf.append("\r\n");
                }
                if (detail) {
                    buf.append(c.getRemoteAddress()).append(" -> ").append(c.getLocalAddress());
                } else {
                    buf.append(c.getRemoteAddress());
                }
            }
        } else {
            buf.append("No such port ").append(port);
        }
    }
    return buf.toString();
}
@Test
void testListAllPort() throws RemotingException {
    // With no arguments the command prints the single available server port.
    String result = port.execute(mockCommandContext, new String[0]);
    // Was `"" + availablePort + ""` — a redundant double concatenation.
    assertEquals(String.valueOf(availablePort), result);
}
/**
 * Injects the ConnectionResult code that the shadow will report from
 * {@code isGooglePlayServicesAvailable()}.
 */
public void setIsGooglePlayServicesAvailable(int availabilityCode) {
    this.availabilityCode = availabilityCode;
}
@Test
public void setIsGooglePlayServicesAvailable() {
    // Given an expected and injected ConnectionResult code
    final ShadowGoogleApiAvailability shadowGoogleApiAvailability = Shadows.shadowOf(GoogleApiAvailability.getInstance());
    final int expectedCode = ConnectionResult.SUCCESS;
    shadowGoogleApiAvailability.setIsGooglePlayServicesAvailable(expectedCode);
    // When getting the actual ConnectionResult code
    final int actualCode = GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(roboContext);
    // Then verify that we got back our expected code and not the default one.
    assertThat(actualCode).isEqualTo(expectedCode);
}
/**
 * Returns the currently loaded staining rule.
 * May be null when no rule has been parsed yet — TODO confirm against initialization path.
 */
public StainingRule getStainingRule() {
    return stainingRule;
}
@Test
public void testNormalRule() {
    // Point the manager at a mocked config file containing one well-formed rule.
    RuleStainingProperties ruleStainingProperties = new RuleStainingProperties();
    ruleStainingProperties.setNamespace(testNamespace);
    ruleStainingProperties.setGroup(testGroup);
    ruleStainingProperties.setFileName(testFileName);
    ConfigFile configFile = Mockito.mock(ConfigFile.class);
    when(configFile.getContent()).thenReturn("{\n" +
            "    \"rules\":[\n" +
            "        {\n" +
            "            \"conditions\":[\n" +
            "                {\n" +
            "                    \"key\":\"${http.query.uid}\",\n" +
            "                    \"values\":[\"1000\"],\n" +
            "                    \"operation\":\"EQUALS\"\n" +
            "                }\n" +
            "            ],\n" +
            "            \"labels\":[\n" +
            "                {\n" +
            "                    \"key\":\"env\",\n" +
            "                    \"value\":\"blue\"\n" +
            "                }\n" +
            "            ]\n" +
            "        }\n" +
            "    ]\n" +
            "}");
    when(configFileService.getConfigFile(testNamespace, testGroup, testFileName)).thenReturn(configFile);
    StainingRuleManager stainingRuleManager = new StainingRuleManager(ruleStainingProperties, configFileService);
    // The manager should have parsed exactly one rule with one condition and one label.
    StainingRule stainingRule = stainingRuleManager.getStainingRule();
    assertThat(stainingRule).isNotNull();
    assertThat(stainingRule.getRules().size()).isEqualTo(1);
    StainingRule.Rule rule = stainingRule.getRules().get(0);
    assertThat(rule.getConditions().size()).isEqualTo(1);
    assertThat(rule.getLabels().size()).isEqualTo(1);
}
@Override public void updateBrand(ProductBrandUpdateReqVO updateReqVO) { // 校验存在 validateBrandExists(updateReqVO.getId()); validateBrandNameUnique(updateReqVO.getId(), updateReqVO.getName()); // 更新 ProductBrandDO updateObj = ProductBrandConvert.INSTANCE.convert(updateReqVO); brandMapper.updateById(updateObj); }
@Test
public void testUpdateBrand_success() {
    // mock data
    ProductBrandDO dbBrand = randomPojo(ProductBrandDO.class);
    brandMapper.insert(dbBrand); // @Sql: insert an existing record first
    // prepare request
    ProductBrandUpdateReqVO reqVO = randomPojo(ProductBrandUpdateReqVO.class, o -> {
        o.setId(dbBrand.getId()); // target the existing brand's ID
    });
    // invoke
    brandService.updateBrand(reqVO);
    // verify the update took effect
    ProductBrandDO brand = brandMapper.selectById(reqVO.getId()); // fetch latest state
    assertPojoEquals(reqVO, brand);
}
/**
 * Acquires a handler permit for the given nameservice, falling back to the
 * default pool when the nameservice has no dedicated allocation.
 */
@Override
public boolean acquirePermit(String nsId) {
    final String pool = contains(nsId) ? nsId : DEFAULT_NS;
    return super.acquirePermit(pool);
}
@Test
public void testAllocationHandlersGreaterThanCount() {
    // 40 total handlers; ns1 and ns2 each request 80%, the concurrent pool 100% —
    // over-subscription on purpose.
    Configuration conf = createConf(40);
    conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + "ns1", 0.8);
    conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + "ns2", 0.8);
    conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + CONCURRENT_NS, 1);
    RouterRpcFairnessPolicyController routerRpcFairnessPolicyController =
        FederationUtil.newFairnessPolicyController(conf);
    // ns1: 32 permits allocated (0.8 * 40)
    // ns2: 32 permits allocated
    for (int i = 0; i < 32; i++) {
        assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns1"));
        assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns2"));
    }
    // CONCURRENT_NS: 40 permits allocated (1.0 * 40)
    for (int i = 0; i < 40; i++) {
        assertTrue(routerRpcFairnessPolicyController.acquirePermit(CONCURRENT_NS));
    }
}
/**
 * JAAS callback handler: supplies the configured user name and credential to the
 * callbacks a login module passes in, and routes text-output callbacks to the logger.
 *
 * @throws IOException on an unsupported text-output message type
 */
@Override
public void handle(final Callback[] callbacks) throws IOException, UnsupportedCallbackException {
    for (final Callback callback : callbacks) {
        if (callback instanceof NameCallback) {
            final NameCallback nc = (NameCallback) callback;
            nc.setName(getUserName());
        } else if (callback instanceof ObjectCallback) {
            // Jetty-style callback carrying the credential as an opaque object.
            final ObjectCallback oc = (ObjectCallback) callback;
            oc.setObject(getCredential());
        } else if (callback instanceof PasswordCallback) {
            // NOTE(review): assumes the credential is a String here — confirm callers
            // never configure a non-String credential alongside PasswordCallback.
            final PasswordCallback pc = (PasswordCallback) callback;
            pc.setPassword(((String) getCredential()).toCharArray());
        } else if (callback instanceof TextOutputCallback) {
            // Map the callback's message severity onto the logger.
            final TextOutputCallback toc = (TextOutputCallback) callback;
            switch (toc.getMessageType()) {
                case TextOutputCallback.ERROR:
                    log.error(toc.getMessage());
                    break;
                case TextOutputCallback.WARNING:
                    log.warn(toc.getMessage());
                    break;
                case TextOutputCallback.INFORMATION:
                    log.info(toc.getMessage());
                    break;
                default:
                    throw new IOException("Unsupported message type: " + toc.getMessageType());
            }
        } else {
            // We ignore unknown callback types - e.g. Jetty implementation might pass us Jetty specific
            // stuff which we can't deal with
        }
    }
}
@Test
public void shouldHandleObjectCallback() throws Exception {
    // When: a name and an object callback are handled in one batch
    callbackHandler.handle(new Callback[]{nameCallback, objectCallback});
    // Then: each callback received its corresponding credential part
    verify(nameCallback).setName(USERNAME);
    verify(objectCallback).setObject(PASSWORD);
}
@Override
public void resetAnonymousId() {
    // Intentionally a no-op in this implementation — presumably the disabled/empty
    // SDK variant; confirm against the interface contract.
}
@Test
public void resetAnonymousId() {
    mSensorsAPI.resetAnonymousId();
    // After a reset no anonymous id should be retained.
    Assert.assertNull(mSensorsAPI.getAnonymousId());
}
/**
 * Returns the list of bundles backing this namespace.
 * NOTE(review): the internal list is exposed directly — callers are expected not to mutate it.
 */
public List<NamespaceBundle> getBundles() {
    return bundles;
}
@Test
public void testSplitBundleByFixBoundary() throws Exception {
    NamespaceName nsname = NamespaceName.get("pulsar/global/ns1");
    NamespaceBundles bundles = factory.getBundles(nsname);
    NamespaceBundle bundleToSplit = bundles.getBundles().get(0);
    // Splitting at either existing endpoint is invalid and must be rejected.
    try {
        factory.splitBundles(bundleToSplit, 0, Collections.singletonList(bundleToSplit.getLowerEndpoint()));
    } catch (IllegalArgumentException e) {
        //No-op
    }
    try {
        factory.splitBundles(bundleToSplit, 0, Collections.singletonList(bundleToSplit.getUpperEndpoint()));
    } catch (IllegalArgumentException e) {
        //No-op
    }
    // A boundary strictly inside the bundle produces two child bundles.
    Long fixBoundary = bundleToSplit.getLowerEndpoint() + 10;
    Pair<NamespaceBundles, List<NamespaceBundle>> splitBundles =
        factory.splitBundles(bundleToSplit, 0, Collections.singletonList(fixBoundary)).join();
    assertEquals(splitBundles.getRight().get(0).getLowerEndpoint(), bundleToSplit.getLowerEndpoint());
    // NOTE(review): expected value is lower + fixBoundary where fixBoundary is itself
    // lower + 10 — looks like it should simply be fixBoundary; confirm intent.
    assertEquals(splitBundles.getRight().get(1).getLowerEndpoint().longValue(), bundleToSplit.getLowerEndpoint() + fixBoundary);
}
/**
 * Schedules a master election for the broker set in {@code request} and records the
 * outcome in the election metrics, labeled by cluster and broker set.
 */
@Override
public CompletableFuture<RemotingCommand> electMaster(final ElectMasterRequestHeader request) {
    // All elections are serialized through the controller's event scheduler.
    return this.scheduler.appendEvent("electMaster", () -> {
        ControllerResult<ElectMasterResponseHeader> electResult = this.replicasInfoManager.electMaster(request, this.electPolicy);
        AttributesBuilder attributesBuilder = ControllerMetricsManager.newAttributesBuilder()
            .put(LABEL_CLUSTER_NAME, request.getClusterName())
            .put(LABEL_BROKER_SET, request.getBrokerName());
        // Translate the response code into an election-result metric label.
        switch (electResult.getResponseCode()) {
            case ResponseCode.SUCCESS:
                ControllerMetricsManager.electionTotal.add(1, attributesBuilder.put(LABEL_ELECTION_RESULT, ControllerMetricsConstant.ElectionResult.NEW_MASTER_ELECTED.getLowerCaseName()).build());
                break;
            case ResponseCode.CONTROLLER_MASTER_STILL_EXIST:
                ControllerMetricsManager.electionTotal.add(1, attributesBuilder.put(LABEL_ELECTION_RESULT, ControllerMetricsConstant.ElectionResult.KEEP_CURRENT_MASTER.getLowerCaseName()).build());
                break;
            case ResponseCode.CONTROLLER_MASTER_NOT_AVAILABLE:
            case ResponseCode.CONTROLLER_ELECT_MASTER_FAILED:
                ControllerMetricsManager.electionTotal.add(1, attributesBuilder.put(LABEL_ELECTION_RESULT, ControllerMetricsConstant.ElectionResult.NO_MASTER_ELECTED.getLowerCaseName()).build());
                break;
            default:
                // Other response codes are not counted as elections.
                break;
        }
        return electResult;
    }, true);
}
@Test
public void testElectMaster() throws Exception {
    final DLedgerController leader = mockMetaData(false);
    final ElectMasterRequestHeader request = ElectMasterRequestHeader.ofControllerTrigger(DEFAULT_BROKER_NAME);
    // Exclude broker 1 from election so a different replica must win.
    setBrokerElectPolicy(leader, 1L);
    final RemotingCommand resp = leader.electMaster(request).get(10, TimeUnit.SECONDS);
    final ElectMasterResponseHeader response = (ElectMasterResponseHeader) resp.readCustomHeader();
    // A new election bumps the epoch and must not re-elect the excluded broker.
    assertEquals(2, response.getMasterEpoch().intValue());
    assertNotEquals(1L, response.getMasterBrokerId().longValue());
    assertNotEquals(DEFAULT_IP[0], response.getMasterAddress());
}
/** Returns the length of {@code input}, or null for a null input (SQL NULL semantics). */
@Udf
public Integer len(
    @UdfParameter(description = "The input string") final String input) {
  return input == null ? null : input.length();
}
@Test
public void shouldReturnZeroForEmptyInput() {
    // An empty (non-null) string has length zero.
    assertThat(udf.len(""), is(0));
}
/**
 * Contributes the JobRunr metrics beans when Micrometer metrics support is present.
 * Returns null (i.e. no build item) when metrics are unavailable; the background-job-server
 * metrics producer is only added when that server is enabled.
 */
@BuildStep
AdditionalBeanBuildItem addMetrics(Optional<MetricsCapabilityBuildItem> metricsCapability,
                                   JobRunrBuildTimeConfiguration jobRunrBuildTimeConfiguration) {
    boolean micrometerSupported = metricsCapability
            .map(capability -> capability.metricsSupported(MetricsFactory.MICROMETER))
            .orElse(false);
    if (!micrometerSupported) {
        return null;
    }
    AdditionalBeanBuildItem.Builder builder = AdditionalBeanBuildItem.builder()
            .setUnremovable()
            .addBeanClasses(JobRunrMetricsStarter.class)
            .addBeanClasses(JobRunrMetricsProducer.StorageProviderMetricsProducer.class);
    if (jobRunrBuildTimeConfiguration.backgroundJobServer().enabled()) {
        builder.addBeanClasses(JobRunrMetricsProducer.BackgroundJobServerMetricsProducer.class);
    }
    return builder.build();
}
@Test
void addMetricsDoesNotAddMetricsIfNotEnabled() {
    // Without a metrics capability no bean build item may be produced.
    assertThat(jobRunrExtensionProcessor.addMetrics(Optional.empty(), jobRunrBuildTimeConfiguration)).isNull();
}
/**
 * Coerces the left/right typed expressions of a comparison to compatible types.
 * Returns the (possibly rewritten) pair plus a flag telling whether the right-hand
 * side must be emitted as a static field (used for date/time literal coercions).
 *
 * @throws CoercedExpressionException when the two types cannot be compared at all
 */
public CoercedExpressionResult coerce() {
    final Class<?> leftClass = left.getRawClass();
    final Class<?> nonPrimitiveLeftClass = toNonPrimitiveType(leftClass);
    final Class<?> rightClass = right.getRawClass();
    final Class<?> nonPrimitiveRightClass = toNonPrimitiveType(rightClass);
    boolean sameClass = leftClass == rightClass;
    boolean isUnificationExpression = left instanceof UnificationTypedExpression || right instanceof UnificationTypedExpression;
    // Same type or unification expressions: nothing to coerce.
    if (sameClass || isUnificationExpression) {
        return new CoercedExpressionResult(left, right);
    }
    if (!canCoerce()) {
        throw new CoercedExpressionException(new InvalidExpressionErrorResult("Comparison operation requires compatible types. Found " + leftClass + " and " + rightClass));
    }
    // int/long compared to Double: widen the LEFT side to double via a cast.
    if ((nonPrimitiveLeftClass == Integer.class || nonPrimitiveLeftClass == Long.class) && nonPrimitiveRightClass == Double.class) {
        CastExpr castExpression = new CastExpr(PrimitiveType.doubleType(), this.left.getExpression());
        return new CoercedExpressionResult(
                new TypedExpression(castExpression, double.class, left.getType()),
                right,
                false);
    }
    final boolean leftIsPrimitive = leftClass.isPrimitive() || Number.class.isAssignableFrom( leftClass );
    final boolean canCoerceLiteralNumberExpr = canCoerceLiteralNumberExpr(leftClass);
    boolean rightAsStaticField = false;
    final Expression rightExpression = right.getExpression();
    final TypedExpression coercedRight;
    // Chain of right-hand-side coercions, checked in priority order.
    if (leftIsPrimitive && canCoerceLiteralNumberExpr && rightExpression instanceof LiteralStringValueExpr) {
        // Numeric literal on the right: rewrite the literal to the left's numeric type.
        final Expression coercedLiteralNumberExprToType = coerceLiteralNumberExprToType((LiteralStringValueExpr) right.getExpression(), leftClass);
        coercedRight = right.cloneWithNewExpression(coercedLiteralNumberExprToType);
        coercedRight.setType( leftClass );
    } else if (shouldCoerceBToString(left, right)) {
        coercedRight = coerceToString(right);
    } else if (isNotBinaryExpression(right) && canBeNarrowed(leftClass, rightClass) && right.isNumberLiteral()) {
        coercedRight = castToClass(leftClass);
    } else if (leftClass == long.class && rightClass == int.class) {
        // Widen int to long with an explicit cast.
        coercedRight = right.cloneWithNewExpression(new CastExpr(PrimitiveType.longType(), right.getExpression()));
    } else if (leftClass == Date.class && rightClass == String.class) {
        // String literals compared to date/time types become parsed static fields.
        coercedRight = coerceToDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDate.class && rightClass == String.class) {
        coercedRight = coerceToLocalDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDateTime.class && rightClass == String.class) {
        coercedRight = coerceToLocalDateTime(right);
        rightAsStaticField = true;
    } else if (shouldCoerceBToMap()) {
        coercedRight = castToClass(toNonPrimitiveType(leftClass));
    } else if (isBoolean(leftClass) && !isBoolean(rightClass)) {
        coercedRight = coerceBoolean(right);
    } else {
        coercedRight = right;
    }
    // Left side only ever changes for Character-vs-String comparisons.
    final TypedExpression coercedLeft;
    if (nonPrimitiveLeftClass == Character.class && shouldCoerceBToString(right, left)) {
        coercedLeft = coerceToString(left);
    } else {
        coercedLeft = left;
    }
    return new CoercedExpressionResult(coercedLeft, coercedRight, rightAsStaticField);
}
@Test
public void doNotCastNumberLiteralShort() {
    // A short literal compared against Object must be left untouched.
    final CoercedExpression.CoercedExpressionResult coerce =
            new CoercedExpression(expr("getValue()", java.lang.Object.class), expr("20", short.class), false).coerce();
    assertThat(coerce.getCoercedRight()).isEqualTo(expr("20", short.class));
}
/**
 * Marks the buffer as FAILED and releases its memory.
 * Idempotent with respect to terminal states: the transition only happens once,
 * and a fail after finish/destroy is ignored.
 */
@Override
public void fail() {
    // ignore fail if the buffer already in a terminal state.
    if (state.setIf(FAILED, oldState -> !oldState.isTerminal())) {
        memoryManager.setNoBlockOnFull();
        forceFreeMemory();
        // DO NOT destroy buffers or set no more pages. The coordinator manages the teardown of failed queries.
    }
}
@Test
public void testInvalidConstructorArg() {
    // A zero max buffer size must be rejected at construction time.
    // (The fail() messages previously said "Expected IllegalStateException",
    // contradicting the IllegalArgumentException actually caught below.)
    try {
        createArbitraryBuffer(createInitialEmptyOutputBuffers(ARBITRARY).withBuffer(FIRST, BROADCAST_PARTITION_ID).withNoMoreBufferIds(), new DataSize(0, BYTE));
        fail("Expected IllegalArgumentException");
    } catch (IllegalArgumentException ignored) {
    }
    try {
        createArbitraryBuffer(createInitialEmptyOutputBuffers(ARBITRARY), new DataSize(0, BYTE));
        fail("Expected IllegalArgumentException");
    } catch (IllegalArgumentException ignored) {
    }
}
/**
 * Imported-key (foreign key) metadata is not supported by this driver.
 * Always returns null regardless of the arguments.
 */
@Override
public ResultSet getImportedKeys(final String catalog, final String schema, final String table) {
    return null;
}
@Test
void assertGetImportedKeys() {
    // The driver does not expose imported keys, so the call yields null.
    ResultSet importedKeys = metaData.getImportedKeys("", "", "");
    assertNull(importedKeys);
}
/**
 * Creates a {@code MemberVersion}; the all-zero triple maps to the shared
 * {@link MemberVersion#UNKNOWN} sentinel instead of a fresh instance.
 */
public static MemberVersion of(int major, int minor, int patch) {
    boolean unknown = major == 0 && minor == 0 && patch == 0;
    return unknown ? MemberVersion.UNKNOWN : new MemberVersion(major, minor, patch);
}
@Test
public void testVersionOf_whenVersionIsUnknown() {
    // 0.0.0 is the sentinel for an unknown version.
    MemberVersion version = MemberVersion.of(0, 0, 0);
    assertEquals(MemberVersion.UNKNOWN, version);
}
/**
 * Fetches windowed rows for one key/partition whose window start AND end fall inside
 * the given bounds, via an interactive query against the Kafka Streams window store.
 * An optional position bound enforces read-your-writes consistency.
 *
 * @throws MaterializationException when the state-store query fails
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStartBounds,
    final Range<Instant> windowEndBounds,
    final Optional<Position> position
) {
    try {
        // Derive a single fetch range on window-start time that covers both bound sets;
        // exact bound checks are re-applied per row below.
        final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
        final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
        final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
            WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper);
        StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request =
            inStore(stateStore.getStateStoreName()).withQuery(query);
        if (position.isPresent()) {
            request = request.withPositionBound(PositionBound.at(position.get()));
        }
        final KafkaStreams streams = stateStore.getKafkaStreams();
        final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result =
            streams.query(request);
        final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult =
            result.getPartitionResults().get(partition);
        if (queryResult.isFailure()) {
            throw failedQueryException(queryResult);
        }
        // A null result means the store had nothing for this key/range.
        if (queryResult.getResult() == null) {
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                Collections.emptyIterator(), queryResult.getPosition());
        }
        try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it = queryResult.getResult()) {
            final Builder<WindowedRow> builder = ImmutableList.builder();
            while (it.hasNext()) {
                final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
                final Instant windowStart = Instant.ofEpochMilli(next.key);
                // Re-check the precise start bound (the fetch range may over-approximate).
                if (!windowStartBounds.contains(windowStart)) {
                    continue;
                }
                // Window end is derived from the fixed window size, then bound-checked.
                final Instant windowEnd = windowStart.plus(windowSize);
                if (!windowEndBounds.contains(windowEnd)) {
                    continue;
                }
                final TimeWindow window =
                    new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
                final WindowedRow row = WindowedRow.of(
                    stateStore.schema(),
                    new Windowed<>(key, window),
                    next.value.value(),
                    next.value.timestamp()
                );
                builder.add(row);
            }
            // Materialize eagerly so the store iterator can be closed here.
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                builder.build().iterator(), queryResult.getPosition());
        }
    } catch (final NotUpToBoundException | MaterializationException e) {
        throw e;
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}
@Test
@SuppressWarnings("unchecked")
public void shouldReturnValuesForClosedStartBounds() {
    // Given: a closed start-time range and a store iterator yielding one row exactly
    // on each endpoint — both must be included (closed bounds are inclusive).
    final Range<Instant> start = Range.closed(
        Instant.ofEpochMilli(System.currentTimeMillis()),
        NOW.plusSeconds(10)
    );
    final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> partitionResult = new StateQueryResult<>();
    final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result = QueryResult.forResult(fetchIterator);
    result.setPosition(POSITION);
    partitionResult.addResult(PARTITION, result);
    when(kafkaStreams.query(any(StateQueryRequest.class))).thenReturn(partitionResult);
    when(fetchIterator.hasNext()).thenReturn(true, true, false);
    // Throwing on a third next() guards against over-iteration.
    when(fetchIterator.next())
        .thenReturn(new KeyValue<>(start.lowerEndpoint().toEpochMilli(), VALUE_1))
        .thenReturn(new KeyValue<>(start.upperEndpoint().toEpochMilli(), VALUE_2))
        .thenThrow(new AssertionError());
    // When:
    final Iterator<WindowedRow> rowIterator = table.get(A_KEY, PARTITION, start, Range.all()).rowIterator;
    // Then: both boundary rows come back, in order.
    assertThat(rowIterator.hasNext(), is(true));
    final List<WindowedRow> resultList = Lists.newArrayList(rowIterator);
    assertThat(resultList, contains(
        WindowedRow.of(
            SCHEMA,
            windowedKey(start.lowerEndpoint()),
            VALUE_1.value(),
            VALUE_1.timestamp()
        ),
        WindowedRow.of(
            SCHEMA,
            windowedKey(start.upperEndpoint()),
            VALUE_2.value(),
            VALUE_2.timestamp()
        )
    ));
}
/**
 * Normalizes percent-encoded escape sequences in the URL to upper case
 * (e.g. {@code %c2} becomes {@code %C2}), per RFC 3986 normalization.
 *
 * @return this normalizer, for chaining
 */
public URLNormalizer upperCaseEscapeSequence() {
    if (!url.contains("%")) {
        return this;
    }
    final Matcher matcher = PATTERN_PERCENT_ENCODED_CHAR.matcher(url);
    final StringBuffer upperCased = new StringBuffer();
    while (matcher.find()) {
        matcher.appendReplacement(upperCased, matcher.group(1).toUpperCase());
    }
    url = matcher.appendTail(upperCased).toString();
    return this;
}
@Test
public void testUpperCaseEscapeSequence() {
    // Lower-case percent escapes must be upper-cased; other characters untouched.
    s = "http://www.example.com/a%c2%b1b";
    t = "http://www.example.com/a%C2%B1b";
    assertEquals(t, n(s).upperCaseEscapeSequence().toString());
}
@Udf(description = "Converts the number of days since 1970-01-01 00:00:00 UTC/GMT to a date "
    + "string using the given format pattern. The format pattern should be in the format"
    + " expected by java.time.format.DateTimeFormatter")
public String formatDate(
    @UdfParameter(
        description = "The date to convert") final Date date,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // SQL NULL in -> NULL out.
  if (date == null || formatPattern == null) {
    return null;
  }
  try {
    // Interpret the Date as a day count since the epoch and format it with a
    // cached DateTimeFormatter for the given pattern.
    final long epochDays = TimeUnit.MILLISECONDS.toDays(date.getTime());
    final DateTimeFormatter formatter = formatters.get(formatPattern);
    return LocalDate.ofEpochDay(epochDays).format(formatter);
  } catch (final ExecutionException | RuntimeException e) {
    throw new KsqlFunctionException("Failed to format date " + date
        + " with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
@Test public void shouldSupportEmbeddedChars() { // When: final Object result = udf.formatDate(Date.valueOf("2014-11-09"), "yyyy-dd-MM'Fred'"); // Then: assertThat(result, is("2014-09-11Fred")); }
/**
 * Reads the HotSpot {@code UseCompressedOops} VM option via the platform
 * HotSpotDiagnostic MBean. Returns the option value, or null when the MBean
 * is unavailable (e.g. non-HotSpot JVMs).
 */
@SuppressFBWarnings("NP_BOOLEAN_RETURN_NULL")
static Boolean isHotSpotCompressedOopsOrNull() {
    try {
        final MBeanServer platformServer = ManagementFactory.getPlatformMBeanServer();
        final ObjectName diagnosticBean = new ObjectName("com.sun.management:type=HotSpotDiagnostic");
        final CompositeDataSupport option = (CompositeDataSupport) platformServer.invoke(
                diagnosticBean,
                "getVMOption",
                new Object[]{"UseCompressedOops"},
                new String[]{"java.lang.String"});
        return Boolean.valueOf(option.get("value").toString());
    } catch (Exception e) {
        // Best-effort probe: fall through to null on any failure.
        getLogger(JVMUtil.class).fine("Failed to read HotSpot specific configuration: " + e.getMessage());
    }
    return null;
}
@Test
public void testIsHotSpotCompressedOopsOrNull() {
    // Smoke test: the probe must not throw on any JVM; the result itself
    // (Boolean or null) is JVM-dependent, so no assertion is made.
    JVMUtil.isHotSpotCompressedOopsOrNull();
}
/**
 * Exports an in-JVM service: registers the invoker under its service key so
 * local references can find it without any network transport.
 */
@Override
public <T> Exporter<T> export(Invoker<T> invoker) throws RpcException {
    final String serviceKey = invoker.getUrl().getServiceKey();
    return new InjvmExporter<>(invoker, serviceKey, exporterMap);
}
@Test
void testLocalProtocolWithToken() throws RemotingException {
    // Export a local service whose URL carries a token...
    DemoService service = new DemoServiceImpl();
    Invoker<?> invoker = proxy.getInvoker(
            service,
            DemoService.class,
            URL.valueOf("injvm://127.0.0.1/TestService?token=abc")
                    .addParameter(INTERFACE_KEY, DemoService.class.getName())
                    .setScopeModel(ApplicationModel.defaultModel().getDefaultModule()));
    assertTrue(invoker.isAvailable());
    Exporter<?> exporter = protocol.export(invoker);
    exporters.add(exporter);
    // ...then refer to it without the token: in-JVM calls must still succeed.
    service = proxy.getProxy(protocol.refer(
            DemoService.class,
            URL.valueOf("injvm://127.0.0.1/TestService")
                    .addParameter(INTERFACE_KEY, DemoService.class.getName())
                    .setScopeModel(ApplicationModel.defaultModel().getDefaultModule())));
    assertEquals(service.getSize(new String[] {"", "", ""}), 3);
}
/**
 * Hash code consistent with this class's equals: numerically equal numbers of
 * different types must hash alike (see the double-vs-BigDecimal test).
 */
@Override
public int hashCode() {
    // Arbitrary non-zero constant for the null value.
    if (value == null) {
        return 31;
    }
    // Using recommended hashing algorithm from Effective Java for longs and doubles
    if (isIntegral(this)) {
        // Integral numbers hash by their long value so e.g. 1 and 1L agree.
        long value = getAsNumber().longValue();
        return (int) (value ^ (value >>> 32));
    }
    if (value instanceof Number) {
        // Non-integral numbers hash by their double bit pattern.
        long value = Double.doubleToLongBits(getAsNumber().doubleValue());
        return (int) (value ^ (value >>> 32));
    }
    // Strings, booleans, etc. delegate to the wrapped value.
    return value.hashCode();
}
@Test
public void testDoubleEqualsBigDecimal() {
    // Numerically equal double and BigDecimal primitives must be equal and hash alike.
    JsonPrimitive doublePrimitive = new JsonPrimitive(10.25D);
    JsonPrimitive bigDecimalPrimitive = new JsonPrimitive(new BigDecimal("10.25"));
    assertThat(doublePrimitive).isEqualTo(bigDecimalPrimitive);
    assertThat(doublePrimitive.hashCode()).isEqualTo(bigDecimalPrimitive.hashCode());
}
/**
 * JPA attribute converter: serializes the attribute map to its JSON column value.
 */
@Override
public String convertToDatabaseColumn(Map<String, String> attribute) {
    final Map<String, String> toSerialize = attribute;
    return GSON.toJson(toSerialize);
}
@Test
void convertToDatabaseColumn_twoElement() throws IOException {
    // Insertion order must be preserved in the serialized JSON.
    Map<String, String> attribute = new LinkedHashMap<>(8);
    attribute.put("a", "1");
    attribute.put("disableCheck", "true");
    assertEquals(readAllContentOf("json/converter/element.2.json"),
            this.converter.convertToDatabaseColumn(attribute));
}
/**
 * Builds a {@code ProjectMeasuresQuery} from filter criteria, optionally
 * restricted to the given project UUIDs (null means no restriction).
 */
static ProjectMeasuresQuery newProjectMeasuresQuery(List<Criterion> criteria, @Nullable Set<String> projectUuids) {
    ProjectMeasuresQuery query = new ProjectMeasuresQuery();
    if (projectUuids != null) {
        query.setProjectUuids(projectUuids);
    }
    for (Criterion criterion : criteria) {
        processCriterion(criterion, query);
    }
    return query;
}
@Test
public void fail_when_no_value() {
    // A criterion with an operator but no value must be rejected.
    Criterion criterion = Criterion.builder().setKey("ncloc").setOperator(GT).setValue(null).build();
    assertThatThrownBy(() -> newProjectMeasuresQuery(singletonList(criterion), emptySet()))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Value cannot be null for 'ncloc'");
}