focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public FEELFnResult<List<Object>> invoke(@ParameterName("list") Object[] lists) { if ( lists == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "lists", "cannot be null")); } final Set<Object> resultSet = new LinkedHashSet<>(); for ( final Object list : lists ) { if ( list instanceof Collection ) { resultSet.addAll((Collection) list); } else { resultSet.add(list); } } // spec requires us to return a new list return FEELFnResult.ofResult( new ArrayList<>(resultSet) ); }
// Union of a scalar, a nested list and a duplicate scalar: expects duplicates removed
// and first-seen order preserved (5, 10, 4).
@Test void invokeListAndSingleObjectWithDuplicates() { FunctionTestUtil.assertResultList(unionFunction.invoke(new Object[]{5, Arrays.asList(10, 4, 5), 10}), Arrays.asList(5, 10, 4)); }
/**
 * Returns whether {@code path} contains a match for this node's {@code pathPattern}.
 * Uses {@code find()}, so a partial match anywhere in the string suffices.
 */
public boolean isValidatedPath(final String path) { return pathPattern.matcher(path).find(); }
// A well-formed metadata table path should match the node's path pattern.
@Test void assertIsValidatedPath() { assertTrue(nodePath.isValidatedPath("/metadata/foo_db/rules/foo/tables/foo_table")); }
/**
 * Deserializes a JSON byte array into an instance of {@code cls}.
 *
 * @param json JSON payload as bytes
 * @param cls  target class
 * @return the deserialized object
 * @throws NacosDeserializationException wrapping any parse/mapping failure
 */
public static <T> T toObj(byte[] json, Class<T> cls) { try { return mapper.readValue(json, cls); } catch (Exception e) { throw new NacosDeserializationException(cls, e); } }
// A null InputStream must surface as an exception.
// NOTE(review): this exercises the (InputStream, Type) overload, not the byte[] overload
// shown as the focal method — presumably intentional; confirm against JacksonUtils.
@Test void testToObject14() { assertThrows(Exception.class, () -> { JacksonUtils.toObj((InputStream) null, Object.class.getGenericSuperclass()); }); }
/**
 * Registers on the component model the headers declared by the endpoint's headers class.
 * Logs a debug message when the endpoint declares no headers class ({@code void.class})
 * or when the declared class yields no detectable headers.
 */
void addEndpointHeaders(ComponentModel componentModel, UriEndpoint uriEndpoint, String scheme) {
    final Class<?> headersClass = uriEndpoint.headersClass();
    if (headersClass != void.class) {
        final boolean headersDetected =
                addEndpointHeaders(componentModel, scheme, headersClass, uriEndpoint.headersNameProvider());
        if (!headersDetected) {
            getLog().debug(String.format("No headers have been detected in the headers class %s", headersClass.getName()));
        }
    } else {
        getLog().debug(String.format("The endpoint %s has not defined any headers class", uriEndpoint.scheme()));
    }
}
// An endpoint whose @UriEndpoint declares no headers class must contribute zero endpoint headers.
@Test void testEndpointWithoutHeadersAreIgnored() { mojo.addEndpointHeaders(model, SomeEndpointWithoutHeaders.class.getAnnotation(UriEndpoint.class), "some"); assertEquals(0, model.getEndpointHeaders().size()); }
/**
 * Extracts a human-readable percent-complete string from a single log line, trying
 * each known runner's pattern in a fixed order (JAR, Pig, Hive-on-Beeline, Hive MR,
 * Hive-on-Tez, Pig-on-Tez).
 *
 * @param line one log line
 * @return a progress string such as "50% complete", or null when no pattern matches
 */
public static String extractPercentComplete(String line) {
    Matcher jar = JAR_COMPLETE.matcher(line);
    if (jar.find()) return jar.group().trim();
    Matcher pig = PIG_COMPLETE.matcher(line);
    if (pig.find()) return pig.group().trim();
    Matcher beeline = HIVE_BEELINE_COMPLETE.matcher(line);
    if (beeline.find()) { return beeline.group(1).trim() + " complete"; }
    Matcher hive = HIVE_COMPLETE.matcher(line);
    if(hive.find()) { return "map " + hive.group(1) + " reduce " + hive.group(2); }
    Matcher hiveTez = HIVE_TEZ_COMPLETE.matcher(line);
    if(hiveTez.find()) {
        // Sum completed/total task counts across every vertex reported on the line.
        int totalTasks = 0;
        int completedTasks = 0;
        do {
            //here each group looks something like "Map 2: 2/4" "Reducer 3: 1(+2)/4"
            //just parse the numbers and ignore one from "Map 2" and from "(+2)" if it's there
            Matcher counts = TEZ_COUNTERS.matcher(hiveTez.group());
            List<String> items = new ArrayList<String>(4);
            while(counts.find()) { items.add(counts.group()); }
            // items: [vertexId, completed, (running,) total] — 3 or 4 numbers per vertex.
            // NOTE(review): assumes each vertex match yields exactly 3 or 4 numbers;
            // anything else throws IndexOutOfBoundsException — confirm TEZ_COUNTERS.
            completedTasks += Integer.parseInt(items.get(1));
            if(items.size() == 3) { totalTasks += Integer.parseInt(items.get(2)); } else { totalTasks += Integer.parseInt(items.get(3)); }
        } while(hiveTez.find());
        if(totalTasks == 0) { return "0% complete (0 total tasks)"; }
        return completedTasks * 100 / totalTasks + "% complete";
    }
    Matcher pigTez = PIG_TEZ_COMPLETE.matcher(line);
    if(pigTez.find()) {
        int totalTasks = Integer.parseInt(pigTez.group(1));
        int completedTasks = Integer.parseInt(pigTez.group(2));
        if(totalTasks == 0) { return "0% complete (0 total tasks)"; }
        return completedTasks * 100 / totalTasks + "% complete";
    }
    // No known progress pattern in this line.
    return null;
}
// Non-progress lines return null; a Pig MR launcher line yields "50% complete";
// a Tez VERTICES summary line yields "70% complete" via the beeline pattern.
@Test public void testExtractPercentComplete() { Assert.assertNull(TempletonUtils.extractPercentComplete("fred")); for (String line : CONTROLLER_LINES) { Assert.assertNull(TempletonUtils.extractPercentComplete(line)); } String fifty = "2011-12-15 18:12:36,333 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 50% complete"; Assert.assertEquals("50% complete", TempletonUtils.extractPercentComplete(fifty)); String beeline = "VERTICES: 01/02 [==========================>>] 70% ELAPSED TIME: 3.79 s"; Assert.assertEquals("70% complete", TempletonUtils.extractPercentComplete(beeline)); }
/**
 * Builds the canonical config-file name {@code appId+clusterName+namespace}, appending
 * the ".properties" extension only for the Properties format; every other format is
 * expected to already carry its extension inside the namespace value.
 */
public static String toFilename(
    final String appId,
    final String clusterName,
    final String namespace,
    final ConfigFileFormat configFileFormat
) {
    final String suffix = ConfigFileFormat.Properties.equals(configFileFormat)
        ? "." + ConfigFileFormat.Properties.getValue()
        : "";
    return appId + "+" + clusterName + "+" + namespace + suffix;
}
// Properties format appends ".properties"; YML adds no suffix because the namespace
// already ends with ".yml".
@Test public void toFilename() { final String propertiesFilename0 = ConfigFileUtils.toFilename("123", "default", "application", ConfigFileFormat.Properties); logger.info("propertiesFilename0 {}", propertiesFilename0); assertEquals("123+default+application.properties", propertiesFilename0); final String ymlFilename0 = ConfigFileUtils.toFilename("666", "none", "cc.yml", ConfigFileFormat.YML); logger.info("ymlFilename0 {}", ymlFilename0); assertEquals("666+none+cc.yml", ymlFilename0); }
/** Returns the material URL exactly as the user originally entered it (unresolved/unmasked). */
@Override public String getUrl() { return url.originalArgument(); }
// toJson must populate scmType, the original location URL, and the "Modified" action.
@Test void shouldBeAbleToConvertToJson() { Map<String, Object> json = new LinkedHashMap<>(); final GitMaterial git = new GitMaterial("http://0.0.0.0"); git.toJson(json, new StringRevision("123")); assertThatJson(json) .node("scmType").isEqualTo("Git") .node("location").isEqualTo(git.getUrl()) .node("action").isEqualTo("Modified"); }
/**
 * Returns whether every element of the given array is non-null.
 * Delegates to {@code hasNull}; this is simply its negation.
 *
 * @param array elements to check (varargs)
 * @return true when no element is null
 */
@SuppressWarnings("unchecked")
public static <T> boolean isAllNotNull(T... array) {
    // Idiomatic logical negation instead of the original "false == ..." comparison.
    return !hasNull(array);
}
// All-non-null array passes; a single null element (even among duplicates) fails.
@Test public void isAllNotNullTest() { String[] allNotNull = {"aa", "bb", "cc", "dd", "bb", "dd"}; assertTrue(ArrayUtil.isAllNotNull(allNotNull)); String[] hasNull = {"aa", "bb", "cc", null, "bb", "dd"}; assertFalse(ArrayUtil.isAllNotNull(hasNull)); }
/**
 * SQL CAST(REAL AS TINYINT): decodes the REAL (float bits stored in a long),
 * rounds HALF_UP to an int, then range-checks into a signed byte.
 *
 * @throws PrestoException INVALID_CAST_ARGUMENT when the value is NaN/Infinity or
 *         outside the tinyint range
 */
@ScalarOperator(CAST)
@SqlType(StandardTypes.TINYINT)
public static long castToTinyint(@SqlType(StandardTypes.REAL) long value)
{
    // Decode once; the original repeated intBitsToFloat in both the try and catch paths.
    float floatValue = intBitsToFloat((int) value);
    try {
        return SignedBytes.checkedCast(DoubleMath.roundToInt(floatValue, HALF_UP));
    }
    catch (ArithmeticException | IllegalArgumentException e) {
        throw new PrestoException(INVALID_CAST_ARGUMENT, format("Unable to cast %s to tinyint", floatValue), e);
    }
}
// Covers HALF_UP rounding at both byte extremes, negative zero, and the invalid
// cases (NaN, +/-Infinity, out-of-range) which must raise INVALID_CAST_ARGUMENT.
@Test public void testCastToTinyint() { assertFunction("CAST(REAL'127.45' AS TINYINT)", TINYINT, (byte) 127); assertFunction("CAST(REAL'-128.234' AS TINYINT)", TINYINT, (byte) -128); assertFunction("CAST(REAL'9.99' AS TINYINT)", TINYINT, (byte) 10); assertFunction("CAST(REAL'-0.0' AS TINYINT)", TINYINT, (byte) 0); assertInvalidFunction("CAST(cast(nan() AS REAL) as TINYINT)", INVALID_CAST_ARGUMENT, "Unable to cast NaN to tinyint"); assertInvalidFunction("CAST(cast(infinity() AS REAL) as TINYINT)", INVALID_CAST_ARGUMENT, "Unable to cast Infinity to tinyint"); assertInvalidFunction("CAST(cast(-infinity() AS REAL) as TINYINT)", INVALID_CAST_ARGUMENT, "Unable to cast -Infinity to tinyint"); assertInvalidFunction("CAST(REAL '" + (Byte.MAX_VALUE + 0.6) + "' as TINYINT)", INVALID_CAST_ARGUMENT, "Unable to cast 127.6 to tinyint"); }
/** SQL is_nan(DOUBLE): thin wrapper over {@link Double#isNaN(double)}. */
@Description("test if value is not-a-number") @ScalarFunction("is_nan") @SqlType(StandardTypes.BOOLEAN) public static boolean isNaN(@SqlType(StandardTypes.DOUBLE) double num) { return Double.isNaN(num); }
/** Exercises is_nan over NaN-producing expressions, finite values, REAL coercion and NULL. */
@Test
public void testIsNaN()
{
    assertFunction("is_nan(0.0E0 / 0.0E0)", BOOLEAN, true);
    assertFunction("is_nan(0.0E0 / 1.0E0)", BOOLEAN, false);
    assertFunction("is_nan(infinity() / infinity())", BOOLEAN, true);
    assertFunction("is_nan(nan())", BOOLEAN, true);
    assertFunction("is_nan(REAL '0.0' / REAL '0.0')", BOOLEAN, true);
    assertFunction("is_nan(REAL '0.0' / 1.0E0)", BOOLEAN, false);
    // The original repeated the infinity()/infinity() and nan() assertions verbatim;
    // the duplicates added no coverage and are removed.
    assertFunction("is_nan(NULL)", BOOLEAN, null);
}
/**
 * Builds the "pick any" aggregate operation: the accumulator retains some single item
 * from the input, combining and exporting via the PickAnyAccumulator's own methods.
 *
 * @param <T> item (and result) type
 */
@Nonnull
@SuppressWarnings("checkstyle:needbraces")
public static <T> AggregateOperation1<T, PickAnyAccumulator<T>, T> pickAny() {
    // Wire each stage of the aggregate lifecycle to the accumulator's methods:
    // create -> accumulate -> combine -> export/finish (get).
    return AggregateOperation
        .withCreate(PickAnyAccumulator<T>::new)
        .<T>andAccumulate(PickAnyAccumulator::accumulate)
        .andCombine(PickAnyAccumulator::combine)
        .andExportFinish(PickAnyAccumulator::get);
}
// Finishing a freshly created accumulator (no items accumulated) must yield null.
@Test public void when_pickAny_noInput_then_nullResult() { // Given AggregateOperation1<Object, PickAnyAccumulator<Object>, Object> aggrOp = pickAny(); // When Object result = aggrOp.finishFn().apply(aggrOp.createFn().get()); // Then assertNull(result); }
/**
 * Detects whether a usable native git client is available: locates the git binary,
 * runs "git --version", and checks the output is a compatible "git version ..." string.
 * Any failure along the way disables native git (returns false) rather than throwing.
 * Side effect: stores the located binary path in {@code this.gitCommand}.
 */
public boolean checkIfEnabled() {
    try {
        this.gitCommand = locateDefaultGit();
        MutableString stdOut = new MutableString();
        // The line consumer overwrites on each call, so stdOut ends up holding the last output line.
        this.processWrapperFactory.create(null, l -> stdOut.string = l, gitCommand, "--version").execute();
        return stdOut.string != null && stdOut.string.startsWith("git version") && isCompatibleGitVersion(stdOut.string);
    } catch (Exception e) {
        // Missing binary or process failure: log at debug and report git as unavailable.
        LOG.debug("Failed to find git native client", e);
        return false;
    }
}
// When "git --version" produces an error string instead of "git version ...",
// native git must be reported as disabled.
@Test public void git_should_not_be_enabled_if_version_command_is_not_found() { ProcessWrapperFactory mockedCmd = mockGitVersionCommand("error: unknown option `version'"); NativeGitBlameCommand blameCommand = new NativeGitBlameCommand(System2.INSTANCE, mockedCmd); assertThat(blameCommand.checkIfEnabled()).isFalse(); }
/**
 * Submits the callable to the delegate executor, first marking the "submitted" meter
 * and wrapping the task so its execution is also instrumented.
 */
@Override public <T> Future<T> submit(Callable<T> task) { submitted.mark(); return delegate.submit(new InstrumentedCallable<>(task)); }
// Verifies the metric lifecycle around submit(Callable): all meters start at zero,
// inside the task "submitted" and "running" are 1 while "completed"/"duration" are
// still 0, and after the future resolves "completed" and "duration" reach 1 with
// the schedule-specific meters untouched throughout.
@Test public void testSubmitCallable() throws Exception { assertThat(submitted.getCount()).isZero(); assertThat(running.getCount()).isZero(); assertThat(completed.getCount()).isZero(); assertThat(duration.getCount()).isZero(); assertThat(scheduledOnce.getCount()).isZero(); assertThat(scheduledRepetitively.getCount()).isZero(); assertThat(scheduledOverrun.getCount()).isZero(); assertThat(percentOfPeriod.getCount()).isZero(); final Object obj = new Object(); Future<Object> theFuture = instrumentedScheduledExecutor.submit(() -> { assertThat(submitted.getCount()).isEqualTo(1); assertThat(running.getCount()).isEqualTo(1); assertThat(completed.getCount()).isZero(); assertThat(duration.getCount()).isZero(); assertThat(scheduledOnce.getCount()).isZero(); assertThat(scheduledRepetitively.getCount()).isZero(); assertThat(scheduledOverrun.getCount()).isZero(); assertThat(percentOfPeriod.getCount()).isZero(); return obj; }); assertThat(theFuture.get()).isEqualTo(obj); assertThat(submitted.getCount()).isEqualTo(1); assertThat(running.getCount()).isZero(); assertThat(completed.getCount()).isEqualTo(1); assertThat(duration.getCount()).isEqualTo(1); assertThat(duration.getSnapshot().size()).isEqualTo(1); assertThat(scheduledOnce.getCount()).isZero(); assertThat(scheduledRepetitively.getCount()).isZero(); assertThat(scheduledOverrun.getCount()).isZero(); assertThat(percentOfPeriod.getCount()).isZero(); }
/**
 * Telnet "invoke" command: parses "service.method(jsonArgs)", locates a matching
 * provider method, invokes it reflectively, and appends the JSON-encoded result and
 * elapsed time to the reply. Falls back to the channel's current default service
 * (set by the "cd" command) when no service prefix is given. When several overloads
 * match, the candidates are stored on the channel and the user is asked to select.
 */
@Override
public String execute(CommandContext commandContext, String[] args) {
    // No argument: print usage examples.
    if (ArrayUtils.isEmpty(args)) {
        return "Please input method name, eg: \r\ninvoke xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})\r\n"
                + "invoke XxxService.xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})\r\n"
                + "invoke com.xxx.XxxService.xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})";
    }
    Channel channel = commandContext.getRemote();
    // Default service previously selected via the "cd" telnet command, if any.
    String service = channel.attr(ChangeTelnet.SERVICE_KEY) != null
            ? channel.attr(ChangeTelnet.SERVICE_KEY).get()
            : null;
    String message = args[0];
    int i = message.indexOf("(");
    if (i < 0 || !message.endsWith(")")) {
        return "Invalid parameters, format: service.method(args)";
    }
    String method = message.substring(0, i).trim();
    String param = message.substring(i + 1, message.length() - 1).trim();
    // A dot before the method name means an explicit service was supplied.
    i = method.lastIndexOf(".");
    if (i >= 0) {
        service = method.substring(0, i).trim();
        method = method.substring(i + 1).trim();
    }
    if (StringUtils.isEmpty(service)) {
        return "If you want to invoke like [invoke sayHello(\"xxxx\")], please execute cd command first,"
                + " or you can execute it like [invoke IHelloService.sayHello(\"xxxx\")]";
    }
    List<Object> list;
    try {
        // Wrap the raw argument text in brackets so it parses as a JSON array.
        list = JsonUtils.toJavaList("[" + param + "]", Object.class);
    } catch (Throwable t) {
        return "Invalid json argument, cause: " + t.getMessage();
    }
    StringBuilder buf = new StringBuilder();
    Method invokeMethod = null;
    ProviderModel selectedProvider = null;
    if (isInvokedSelectCommand(channel)) {
        // A previous "invoke" asked the user to pick among overloads; reuse that selection.
        selectedProvider = channel.attr(INVOKE_METHOD_PROVIDER_KEY).get();
        invokeMethod = channel.attr(SelectTelnet.SELECT_METHOD_KEY).get();
    } else {
        for (ProviderModel provider : frameworkModel.getServiceRepository().allProviderModels()) {
            if (!isServiceMatch(service, provider)) {
                continue;
            }
            selectedProvider = provider;
            List<Method> methodList = findSameSignatureMethod(provider.getAllMethods(), method, list);
            if (CollectionUtils.isEmpty(methodList)) {
                break;
            }
            if (methodList.size() == 1) {
                invokeMethod = methodList.get(0);
            } else {
                List<Method> matchMethods = findMatchMethods(methodList, list);
                if (CollectionUtils.isEmpty(matchMethods)) {
                    break;
                }
                if (matchMethods.size() == 1) {
                    invokeMethod = matchMethods.get(0);
                } else {
                    // exist overridden method
                    channel.attr(INVOKE_METHOD_PROVIDER_KEY).set(provider);
                    channel.attr(INVOKE_METHOD_LIST_KEY).set(matchMethods);
                    channel.attr(INVOKE_MESSAGE_KEY).set(message);
                    printSelectMessage(buf, matchMethods);
                    return buf.toString();
                }
            }
            break;
        }
    }
    if (!StringUtils.isEmpty(service)) {
        buf.append("Use default service ").append(service).append('.');
    }
    if (selectedProvider == null) {
        buf.append("\r\nNo such service ").append(service);
        return buf.toString();
    }
    if (invokeMethod == null) {
        buf.append("\r\nNo such method ")
                .append(method)
                .append(" in service ")
                .append(service);
        return buf.toString();
    }
    try {
        // Convert the parsed JSON values into the method's declared parameter types.
        Object[] array = realize(list.toArray(), invokeMethod.getParameterTypes(), invokeMethod.getGenericParameterTypes());
        long start = System.currentTimeMillis();
        AppResponse result = new AppResponse();
        try {
            Object o = invokeMethod.invoke(selectedProvider.getServiceInstance(), array);
            boolean setValueDone = false;
            if (RpcContext.getServerAttachment().isAsyncStarted()) {
                // Async provider: block on the internal future instead of the direct return value.
                AsyncContext asyncContext = RpcContext.getServerAttachment().getAsyncContext();
                if (asyncContext instanceof AsyncContextImpl) {
                    CompletableFuture<Object> internalFuture = ((AsyncContextImpl) asyncContext).getInternalFuture();
                    result.setValue(internalFuture.get());
                    setValueDone = true;
                }
            }
            if (!setValueDone) {
                result.setValue(o);
            }
        } catch (Throwable t) {
            // Invocation failures become part of the response rather than aborting the command.
            result.setException(t);
            if (t instanceof InterruptedException) {
                Thread.currentThread().interrupt();
            }
        } finally {
            RpcContext.removeContext();
        }
        long end = System.currentTimeMillis();
        buf.append("\r\nresult: ");
        buf.append(JsonUtils.toJson(result.recreate()));
        buf.append("\r\nelapsed: ");
        buf.append(end - start);
        buf.append(" ms.");
    } catch (Throwable t) {
        return "Failed to invoke method " + invokeMethod.getName() + ", cause: " + StringUtils.toString(t);
    }
    return buf.toString();
}
// Invoking "method(args)" without a service prefix and with no default service
// selected must produce the guidance message about running "cd" first.
@Test void testInvokeWithoutServicePrefixAndWithoutDefaultService() throws RemotingException { registerProvider(DemoService.class.getName(), new DemoServiceImpl(), DemoService.class); String result = invoke.execute(mockCommandContext, new String[] {"echo(\"ok\")"}); assertTrue(result.contains( "If you want to invoke like [invoke sayHello(\"xxxx\")], please execute cd command first," + " or you can execute it like [invoke IHelloService.sayHello(\"xxxx\")]")); }
/** Builder setter: stores the metadata-report password and returns this builder for chaining. */
public MetadataReportBuilder password(String password) { this.password = password; return getThis(); }
// The password set on the builder must round-trip through build() into the config object.
@Test void password() { MetadataReportBuilder builder = new MetadataReportBuilder(); builder.password("password"); Assertions.assertEquals("password", builder.build().getPassword()); }
/**
 * Validates a sharding rule configuration: first that every declared sharding and
 * key-generator algorithm is well formed, then that each table, the default key-generate
 * strategy, the default audit strategy, and both default sharding strategies reference
 * only algorithm names declared in the configuration.
 */
@Override
public void check(final String databaseName, final ShardingRuleConfiguration ruleConfig, final Map<String, DataSource> dataSourceMap, final Collection<ShardingSphereRule> builtRules) {
    checkShardingAlgorithms(ruleConfig.getShardingAlgorithms().values());
    checkKeyGeneratorAlgorithms(ruleConfig.getKeyGenerators().values());
    // The declared algorithm names below act as the reference universe for all strategy checks.
    Collection<String> keyGenerators = ruleConfig.getKeyGenerators().keySet();
    Collection<String> auditors = ruleConfig.getAuditors().keySet();
    Collection<String> shardingAlgorithms = ruleConfig.getShardingAlgorithms().keySet();
    checkTables(databaseName, ruleConfig.getTables(), ruleConfig.getAutoTables(), keyGenerators, auditors, shardingAlgorithms);
    checkKeyGenerateStrategy(databaseName, ruleConfig.getDefaultKeyGenerateStrategy(), keyGenerators);
    checkAuditStrategy(databaseName, ruleConfig.getDefaultAuditStrategy(), auditors);
    checkShardingStrategy(databaseName, ruleConfig.getDefaultDatabaseShardingStrategy(), shardingAlgorithms);
    checkShardingStrategy(databaseName, ruleConfig.getDefaultTableShardingStrategy(), shardingAlgorithms);
}
// A fully consistent rule configuration (tables and auto-tables referencing declared
// algorithms/auditors/key generators) must pass the SPI-loaded checker without throwing.
@SuppressWarnings("unchecked") @Test void assertCheckSuccess() { ShardingRuleConfiguration ruleConfig = createRuleConfiguration(); ShardingAuditStrategyConfiguration shardingAuditStrategyConfig = new ShardingAuditStrategyConfiguration(Collections.singletonList("foo_audit"), false); ShardingStrategyConfiguration shardingStrategyConfig = createShardingStrategyConfiguration(); ruleConfig.setTables(Collections.singleton(createShardingTableRuleConfiguration(shardingStrategyConfig, shardingAuditStrategyConfig, ruleConfig.getDefaultKeyGenerateStrategy()))); ruleConfig.setAutoTables(Collections.singleton(createShardingAutoTableRuleConfiguration(shardingStrategyConfig, shardingAuditStrategyConfig, ruleConfig.getDefaultKeyGenerateStrategy()))); RuleConfigurationChecker<ShardingRuleConfiguration> checker = OrderedSPILoader.getServicesByClass( RuleConfigurationChecker.class, Collections.singleton(ruleConfig.getClass())).get(ruleConfig.getClass()); checker.check("foo_db", ruleConfig, Collections.emptyMap(), Collections.emptyList()); }
static String getUnresolvedSchemaName(final Schema schema) { if (!isUnresolvedSchema(schema)) { throw new IllegalArgumentException("Not a unresolved schema: " + schema); } return schema.getProp(UR_SCHEMA_ATTR); }
// A plain record schema (lacking the unresolved-name property) must be rejected.
@Test void isUnresolvedSchemaError1() { assertThrows(IllegalArgumentException.class, () -> { // No "org.apache.avro.compiler.idl.unresolved.name" property Schema s = SchemaBuilder.record("R").fields().endRecord(); SchemaResolver.getUnresolvedSchemaName(s); }); }
/**
 * Parses a date string carrying millisecond precision using the DATE_FORMAT_MILLS_TIME
 * pattern; thin wrapper over {@code strToDate}.
 *
 * @throws ParseException when the string does not match the millisecond format
 */
public static Date millisStrToDate(String millisDateStr) throws ParseException { return strToDate(millisDateStr, DATE_FORMAT_MILLS_TIME); }
// Parses millisecond-precision strings and compares against epoch values adjusted by
// the default time zone's offset (the format presumably parses in local time —
// the offset subtraction converts the expected UTC instants accordingly).
@Test public void millisStrToDate() throws Exception { long d0 = 0l; long d1 = 1501127802975l; // 2017-07-27 11:56:42:975 +8 long d2 = 1501127835658l; // 2017-07-27 11:57:15:658 +8 TimeZone timeZone = TimeZone.getDefault(); Date date0 = new Date(d0 - timeZone.getOffset(d0)); Date date1 = new Date(d1 - timeZone.getOffset(d1)); Date date2 = new Date(d2 - timeZone.getOffset(d2)); String s0 = "1970-01-01 00:00:00.000"; String s1 = "2017-07-27 03:56:42.975"; String s2 = "2017-07-27 03:57:15.658"; Assert.assertEquals(DateUtils.millisStrToDate(s0).getTime(), date0.getTime()); Assert.assertEquals(DateUtils.millisStrToDate(s1).getTime(), date1.getTime()); Assert.assertEquals(DateUtils.millisStrToDate(s2).getTime(), date2.getTime()); }
/**
 * Builds the full docker command line: the base command from the superclass, then the
 * image name, then any override command/arguments, joined with single spaces.
 */
@Override
public String getCommandWithArguments() {
    List<String> argList = new ArrayList<>();
    argList.add(super.getCommandWithArguments());
    argList.add(image);
    if (overrideCommandWithArgs != null) {
        argList.addAll(overrideCommandWithArgs);
    }
    // java.lang.String.join replaces the third-party StringUtils.join with identical output.
    return String.join(" ", argList);
}
// NOTE(review): despite its name, this test asserts getCommandOption() ("run"),
// not getCommandWithArguments() — confirm whether full command-line coverage exists elsewhere.
@Test public void getCommandWithArguments() { assertEquals("run", dockerRunCommand.getCommandOption()); }
/** Covariant override: delegates to the superclass setter and narrows the return type to MapSettings for fluent use. */
@Override public MapSettings setProperty(String key, String value) { return (MapSettings) super.setProperty(key, value); }
// A String[] value set on a multi-value property must round-trip through getStringArray.
@Test public void setStringArray() { Settings settings = new MapSettings(definitions); settings.setProperty("multi_values", new String[]{"A", "B"}); String[] array = settings.getStringArray("multi_values"); assertThat(array).isEqualTo(new String[]{"A", "B"}); }
@Benchmark @Threads(16) // Use several threads since we expect contention during logging public void testLoggingWithAllOptionalParameters( ManyExpectedCallsLoggingClientAndService client, ManageExecutionState executionState) throws Exception { BeamFnLoggingMDC.setInstructionId("instruction id"); try (Closeable state = executionState.executionStateTracker.enterState(executionState.simpleExecutionState)) { LOG.warn("log me"); } BeamFnLoggingMDC.setInstructionId(null); }
// Smoke test: runs the benchmark body once with real service/state fixtures and tears
// both down; passes as long as no exception escapes.
@Test public void testLoggingWithAllOptionalParameters() throws Exception { ManyExpectedCallsLoggingClientAndService service = new ManyExpectedCallsLoggingClientAndService(); ManageExecutionState state = new ManageExecutionState(); new BeamFnLoggingClientBenchmark().testLoggingWithAllOptionalParameters(service, state); state.tearDown(); service.tearDown(); }
/** JDBC getClob by column index: delegates to the merged result set, requesting a Clob-typed value. */
@Override public Clob getClob(final int columnIndex) throws SQLException { return (Clob) mergeResultSet.getValue(columnIndex, Clob.class); }
// getClob(1) must return exactly the Clob produced by the underlying merge result set.
@Test void assertGetClobWithColumnIndex() throws SQLException { Clob clob = mock(Clob.class); when(mergeResultSet.getValue(1, Clob.class)).thenReturn(clob); assertThat(shardingSphereResultSet.getClob(1), is(clob)); }
/**
 * Executes a CREATE CONNECTOR statement: first honors IF NOT EXISTS handling, then
 * asks the Connect client to create the connector and maps the outcome to an entity
 * (success) or a KsqlRestException carrying the Connect error (failure).
 *
 * @throws KsqlRestException when the Connect cluster reports an error
 * @throws IllegalStateException when the Connect response has neither datum nor error
 */
public static StatementExecutorResponse execute(
        final ConfiguredStatement<CreateConnector> statement,
        final SessionProperties sessionProperties,
        final KsqlExecutionContext executionContext,
        final ServiceContext serviceContext
) {
    final CreateConnector createConnector = statement.getStatement();
    final ConnectClient client = serviceContext.getConnectClient();
    // IF NOT EXISTS short-circuit: a present entity means the statement was already
    // handled without attempting creation (presumably "connector exists" — confirm
    // handleIfNotExists semantics).
    final Optional<KsqlEntity> connectorsResponse = handleIfNotExists(statement, createConnector, client);
    if (connectorsResponse.isPresent()) {
        return StatementExecutorResponse.handled(connectorsResponse);
    }
    final ConnectResponse<ConnectorInfo> response = client.create(createConnector.getName(), buildConnectorConfig(createConnector));
    if (response.datum().isPresent()) {
        return StatementExecutorResponse.handled(Optional.of(
            new CreateConnectorEntity(
                statement.getMaskedStatementText(),
                response.datum().get()
            )
        ));
    }
    if (response.error().isPresent()) {
        // Propagate the Connect cluster's HTTP status code and error text to the caller.
        final String errorMsg = "Failed to create connector: " + response.error().get();
        throw new KsqlRestException(EndpointResponse.create()
            .status(response.httpCode())
            .entity(new KsqlErrorMessage(Errors.toErrorCode(response.httpCode()), errorMsg))
            .build()
        );
    }
    throw new IllegalStateException("Either response.datum() or response.error() must be present");
}
// On successful creation the executor must return a present CreateConnectorEntity.
@Test public void shouldReturnConnectorInfoEntityOnSuccess() { // Given: givenCreationSuccess(); // When: final Optional<KsqlEntity> entity = ConnectExecutor .execute(CREATE_CONNECTOR_CONFIGURED, mock(SessionProperties.class), null, serviceContext).getEntity(); // Then: assertThat("Expected non-empty response", entity.isPresent()); assertThat(entity.get(), instanceOf(CreateConnectorEntity.class)); }
/**
 * Validates that every department id exists and its department is enabled.
 * An empty/null id collection is accepted as-is.
 *
 * @throws exception DEPT_NOT_FOUND when an id has no department
 * @throws exception DEPT_NOT_ENABLE when a department's status is not ENABLE
 */
@Override
public void validateDeptList(Collection<Long> ids) {
    if (CollUtil.isEmpty(ids)) {
        return;
    }
    // Load the department records for the requested ids in one batch.
    Map<Long, DeptDO> deptMap = getDeptMap(ids);
    // Validate each id: it must resolve to a department, and that department must be enabled.
    ids.forEach(id -> {
        DeptDO dept = deptMap.get(id);
        if (dept == null) {
            throw exception(DEPT_NOT_FOUND);
        }
        if (!CommonStatusEnum.ENABLE.getStatus().equals(dept.getStatus())) {
            throw exception(DEPT_NOT_ENABLE, dept.getName());
        }
    });
}
// An enabled, persisted department id must pass validation without throwing.
@Test
public void testValidateDeptList_success() {
    // Mock data: insert an enabled department.
    DeptDO deptDO = randomPojo(DeptDO.class).setStatus(CommonStatusEnum.ENABLE.getStatus());
    deptMapper.insert(deptDO);
    // Prepare parameters.
    List<Long> ids = singletonList(deptDO.getId());
    // Call; no assertion needed (success == no exception).
    deptService.validateDeptList(ids);
}
/**
 * Maps a publication/claim position to a human-readable status: non-negative positions
 * are normal ("NONE"), the well-known negative error codes map to their names, and any
 * other negative value is "UNKNOWN".
 */
public static String errorString(final long position) {
    if (0 <= position) {
        return "NONE";
    }
    if (position < MAX_POSITION_EXCEEDED) {
        return "UNKNOWN";
    }
    // position is now within [MAX_POSITION_EXCEEDED, -1]: decode the error code.
    switch ((int)position) {
        case (int)NOT_CONNECTED:
            return "NOT_CONNECTED";
        case (int)BACK_PRESSURED:
            return "BACK_PRESSURED";
        case (int)ADMIN_ACTION:
            return "ADMIN_ACTION";
        case (int)CLOSED:
            return "CLOSED";
        case (int)MAX_POSITION_EXCEEDED:
            return "MAX_POSITION_EXCEEDED";
        default:
            return "UNKNOWN";
    }
}
// Each known error code (-1..-5) maps to its name; non-negative positions are "NONE";
// anything more negative than the known codes is "UNKNOWN".
@Test void shouldReturnErrorMessages() { assertEquals("NOT_CONNECTED", errorString(-1L)); assertEquals("BACK_PRESSURED", errorString(-2L)); assertEquals("ADMIN_ACTION", errorString(-3L)); assertEquals("CLOSED", errorString(-4L)); assertEquals("MAX_POSITION_EXCEEDED", errorString(-5L)); assertEquals("NONE", errorString(0L)); assertEquals("NONE", errorString(1L)); assertEquals("UNKNOWN", errorString(-6L)); assertEquals("UNKNOWN", errorString(Long.MIN_VALUE)); }
/**
 * Builds a rack-aware graph constructor for the configured assignment strategy.
 * The task sets are ordered deterministically by wrapping the map in a TreeMap
 * (sorted by subtopology) before extracting its values.
 */
static <T> RackAwareGraphConstructor<T> create(final AssignmentConfigs assignmentConfigs, final Map<Subtopology, Set<TaskId>> tasksForTopicGroup) { return create(assignmentConfigs.rackAwareAssignmentStrategy(), new ArrayList<>(new TreeMap<>(tasksForTopicGroup).values())); }
// The MIN_TRAFFIC strategy must produce a MinTrafficGraphConstructor.
// NOTE(review): the local uses the raw RackAwareGraphConstructor type — harmless here
// since only instanceOf is asserted.
@Test public void shouldReturnMinCostConstructor() { final AssignmentConfigs config = new AssignorConfiguration( new StreamsConfig(configProps(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)).originals()).assignmentConfigs(); final RackAwareGraphConstructor constructor = RackAwareGraphConstructorFactory.create(config, mkMap()); assertThat(constructor, instanceOf(MinTrafficGraphConstructor.class)); }
@GET @Path("{path:.*}") @Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8}) public Response get(@PathParam("path") String path, @Context UriInfo uriInfo, @QueryParam(OperationParam.NAME) OperationParam op, @Context Parameters params, @Context HttpServletRequest request) throws IOException, FileSystemAccessException { // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) && (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) && accessMode == AccessMode.WRITEONLY) { return Response.status(Response.Status.FORBIDDEN).build(); } UserGroupInformation user = HttpUserGroupInformation.get(); Response response; path = makeAbsolute(path); MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name()); MDC.put("hostname", request.getRemoteAddr()); switch (op.value()) { case OPEN: { Boolean noRedirect = params.get( NoRedirectParam.NAME, NoRedirectParam.class); if (noRedirect) { URI redirectURL = createOpenRedirectionURL(uriInfo); final String js = JsonUtil.toJsonString("Location", redirectURL); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } else { //Invoking the command directly using an unmanaged FileSystem that is // released by the FileSystemReleaseFilter final FSOperations.FSOpen command = new FSOperations.FSOpen(path); final FileSystem fs = createFileSystem(user); InputStream is = null; UserGroupInformation ugi = UserGroupInformation .createProxyUser(user.getShortUserName(), UserGroupInformation.getLoginUser()); try { is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() { @Override public InputStream run() throws Exception { return command.execute(fs); } }); } catch (InterruptedException ie) { LOG.warn("Open interrupted.", ie); Thread.currentThread().interrupt(); } Long offset = params.get(OffsetParam.NAME, OffsetParam.class); Long len = params.get(LenParam.NAME, LenParam.class); 
AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[] { path, offset, len }); InputStreamEntity entity = new InputStreamEntity(is, offset, len); response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM) .build(); } break; } case GETFILESTATUS: { FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path); Map json = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case LISTSTATUS: { String filter = params.get(FilterParam.NAME, FilterParam.class); FSOperations.FSListStatus command = new FSOperations.FSListStatus(path, filter); Map json = fsExecute(user, command); AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-"); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETHOMEDIRECTORY: { enforceRootPath(op.value(), path); FSOperations.FSHomeDir command = new FSOperations.FSHomeDir(); JSONObject json = fsExecute(user, command); AUDIT_LOG.info("Home Directory for [{}]", user); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case INSTRUMENTATION: { enforceRootPath(op.value(), path); Groups groups = HttpFSServerWebApp.get().get(Groups.class); Set<String> userGroups = groups.getGroupsSet(user.getShortUserName()); if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) { throw new AccessControlException( "User not in HttpFSServer admin group"); } Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class); Map snapshot = instrumentation.getSnapshot(); response = Response.ok(snapshot).build(); break; } case GETCONTENTSUMMARY: { FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path); Map json = fsExecute(user, command); AUDIT_LOG.info("Content summary for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETQUOTAUSAGE: { FSOperations.FSQuotaUsage command = new 
FSOperations.FSQuotaUsage(path); Map json = fsExecute(user, command); AUDIT_LOG.info("Quota Usage for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETFILECHECKSUM: { FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path); Boolean noRedirect = params.get( NoRedirectParam.NAME, NoRedirectParam.class); AUDIT_LOG.info("[{}]", path); if (noRedirect) { URI redirectURL = createOpenRedirectionURL(uriInfo); final String js = JsonUtil.toJsonString("Location", redirectURL); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } else { Map json = fsExecute(user, command); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); } break; } case GETFILEBLOCKLOCATIONS: { long offset = 0; long len = Long.MAX_VALUE; Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class); Long lenParam = params.get(LenParam.NAME, LenParam.class); AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam); if (offsetParam != null && offsetParam > 0) { offset = offsetParam; } if (lenParam != null && lenParam > 0) { len = lenParam; } FSOperations.FSFileBlockLocations command = new FSOperations.FSFileBlockLocations(path, offset, len); @SuppressWarnings("rawtypes") Map locations = fsExecute(user, command); final String json = JsonUtil.toJsonString("BlockLocations", locations); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETACLSTATUS: { FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path); Map json = fsExecute(user, command); AUDIT_LOG.info("ACL status for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETXATTRS: { List<String> xattrNames = params.getValues(XAttrNameParam.NAME, XAttrNameParam.class); XAttrCodec encoding = params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class); FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path, xattrNames, 
encoding); @SuppressWarnings("rawtypes") Map json = fsExecute(user, command); AUDIT_LOG.info("XAttrs for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case LISTXATTRS: { FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path); @SuppressWarnings("rawtypes") Map json = fsExecute(user, command); AUDIT_LOG.info("XAttr names for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case LISTSTATUS_BATCH: { String startAfter = params.get( HttpFSParametersProvider.StartAfterParam.NAME, HttpFSParametersProvider.StartAfterParam.class); byte[] token = HttpFSUtils.EMPTY_BYTES; if (startAfter != null) { token = startAfter.getBytes(StandardCharsets.UTF_8); } FSOperations.FSListStatusBatch command = new FSOperations .FSListStatusBatch(path, token); @SuppressWarnings("rawtypes") Map json = fsExecute(user, command); AUDIT_LOG.info("[{}] token [{}]", path, token); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETTRASHROOT: { FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path); JSONObject json = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETALLSTORAGEPOLICY: { FSOperations.FSGetAllStoragePolicies command = new FSOperations.FSGetAllStoragePolicies(); JSONObject json = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETSTORAGEPOLICY: { FSOperations.FSGetStoragePolicy command = new FSOperations.FSGetStoragePolicy(path); JSONObject json = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETSNAPSHOTDIFF: { String oldSnapshotName = params.get(OldSnapshotNameParam.NAME, OldSnapshotNameParam.class); String snapshotName = params.get(SnapshotNameParam.NAME, 
SnapshotNameParam.class); FSOperations.FSGetSnapshotDiff command = new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName, snapshotName); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETSNAPSHOTDIFFLISTING: { String oldSnapshotName = params.get(OldSnapshotNameParam.NAME, OldSnapshotNameParam.class); String snapshotName = params.get(SnapshotNameParam.NAME, SnapshotNameParam.class); String snapshotDiffStartPath = params .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME, HttpFSParametersProvider.SnapshotDiffStartPathParam.class); Integer snapshotDiffIndex = params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME, HttpFSParametersProvider.SnapshotDiffIndexParam.class); FSOperations.FSGetSnapshotDiffListing command = new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName, snapshotName, snapshotDiffStartPath, snapshotDiffIndex); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETSNAPSHOTTABLEDIRECTORYLIST: { FSOperations.FSGetSnapshottableDirListing command = new FSOperations.FSGetSnapshottableDirListing(); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", "/"); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETSNAPSHOTLIST: { FSOperations.FSGetSnapshotListing command = new FSOperations.FSGetSnapshotListing(path); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", "/"); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETSERVERDEFAULTS: { FSOperations.FSGetServerDefaults command = new FSOperations.FSGetServerDefaults(); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", "/"); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case CHECKACCESS: { String mode = params.get(FsActionParam.NAME, 
FsActionParam.class); FsActionParam fsparam = new FsActionParam(mode); FSOperations.FSAccess command = new FSOperations.FSAccess(path, FsAction.getFsAction(fsparam.value())); fsExecute(user, command); AUDIT_LOG.info("[{}]", "/"); response = Response.ok().build(); break; } case GETECPOLICY: { FSOperations.FSGetErasureCodingPolicy command = new FSOperations.FSGetErasureCodingPolicy(path); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETECPOLICIES: { FSOperations.FSGetErasureCodingPolicies command = new FSOperations.FSGetErasureCodingPolicies(); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETECCODECS: { FSOperations.FSGetErasureCodingCodecs command = new FSOperations.FSGetErasureCodingCodecs(); Map json = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GET_BLOCK_LOCATIONS: { long offset = 0; long len = Long.MAX_VALUE; Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class); Long lenParam = params.get(LenParam.NAME, LenParam.class); AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam); if (offsetParam != null && offsetParam > 0) { offset = offsetParam; } if (lenParam != null && lenParam > 0) { len = lenParam; } FSOperations.FSFileBlockLocationsLegacy command = new FSOperations.FSFileBlockLocationsLegacy(path, offset, len); @SuppressWarnings("rawtypes") Map locations = fsExecute(user, command); final String json = JsonUtil.toJsonString("LocatedBlocks", locations); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETFILELINKSTATUS: { FSOperations.FSFileLinkStatus command = new FSOperations.FSFileLinkStatus(path); @SuppressWarnings("rawtypes") Map js = fsExecute(user, command); AUDIT_LOG.info("[{}]", 
path); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETSTATUS: { FSOperations.FSStatus command = new FSOperations.FSStatus(path); @SuppressWarnings("rawtypes") Map js = fsExecute(user, command); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETTRASHROOTS: { Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class); FSOperations.FSGetTrashRoots command = new FSOperations.FSGetTrashRoots(allUsers); Map json = fsExecute(user, command); AUDIT_LOG.info("allUsers [{}]", allUsers); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } default: { throw new IOException( MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value())); } } return response; }
/**
 * Verifies that HttpFS file creation honors the "unmasked permission"
 * parameter: without it, the mask derived from the requested permission's
 * group bits (0 for 700) reduces the inherited default-ACL entry to NONE;
 * with an unmasked permission of 777 the inherited entry keeps its
 * READ_WRITE effective permission.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testCreateFileWithUnmaskedPermissions() throws Exception {
  createHttpFSServer(false, false);
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  // Create a folder with a default acl default:user2:rw-
  fs.mkdirs(new Path("/tmp"));
  AclEntry acl = new org.apache.hadoop.fs.permission.AclEntry.Builder()
      .setType(AclEntryType.USER)
      .setScope(AclEntryScope.DEFAULT)
      .setName("user2")
      .setPermission(FsAction.READ_WRITE)
      .build();
  fs.setAcl(new Path("/tmp"), new ArrayList<AclEntry>(Arrays.asList(acl)));

  String notUnmaskedFile = "/tmp/notUnmasked";
  String unmaskedFile = "/tmp/unmasked";

  // Create a file inside the folder. It should inherit the default acl
  // but the mask should affect the ACL permissions. The mask is controlled
  // by the group permissions, which are 0, and hence the mask will make
  // the effective permission of the inherited ACL be NONE.
  createWithHttp(notUnmaskedFile, "700");

  // Pull the relevant ACL from the FS object and check the mask has affected
  // its permissions.
  AclStatus aclStatus = fs.getAclStatus(new Path(notUnmaskedFile));
  AclEntry theAcl = findAclWithName(aclStatus, "user2");
  Assert.assertNotNull(theAcl);
  Assert.assertEquals(FsAction.NONE,
      aclStatus.getEffectivePermission(theAcl));

  // Create another file, this time pass a mask of 777. Now the inherited
  // permissions should be as expected
  createWithHttp(unmaskedFile, "700", "777");
  aclStatus = fs.getAclStatus(new Path(unmaskedFile));
  theAcl = findAclWithName(aclStatus, "user2");
  Assert.assertNotNull(theAcl);
  Assert.assertEquals(FsAction.READ_WRITE,
      aclStatus.getEffectivePermission(theAcl));
}
public static String initEndpoint(final NacosClientProperties properties) { if (properties == null) { return ""; } // Whether to enable domain name resolution rules String isUseEndpointRuleParsing = properties.getProperty(PropertyKeyConst.IS_USE_ENDPOINT_PARSING_RULE, properties.getProperty(SystemPropertyKeyConst.IS_USE_ENDPOINT_PARSING_RULE, String.valueOf(ParamUtil.USE_ENDPOINT_PARSING_RULE_DEFAULT_VALUE))); boolean isUseEndpointParsingRule = Boolean.parseBoolean(isUseEndpointRuleParsing); String endpointUrl; if (isUseEndpointParsingRule) { // Get the set domain name information endpointUrl = ParamUtil.parsingEndpointRule(properties.getProperty(PropertyKeyConst.ENDPOINT)); if (StringUtils.isBlank(endpointUrl)) { return ""; } } else { endpointUrl = properties.getProperty(PropertyKeyConst.ENDPOINT); } if (StringUtils.isBlank(endpointUrl)) { return ""; } String endpointPort = TemplateUtils .stringEmptyAndThenExecute(properties.getProperty(PropertyKeyConst.SystemEnv.ALIBABA_ALIWARE_ENDPOINT_PORT), () -> properties.getProperty(PropertyKeyConst.ENDPOINT_PORT)); endpointPort = TemplateUtils.stringEmptyAndThenExecute(endpointPort, () -> DEFAULT_END_POINT_PORT); return endpointUrl + ":" + endpointPort; }
/**
 * initEndpoint must return "endpoint:port" verbatim when the endpoint
 * parsing rule is disabled through the system property.
 */
@Test
void testInitEndpointFromPropertiesWithoutCloudParsing() {
    System.setProperty(SystemPropertyKeyConst.IS_USE_ENDPOINT_PARSING_RULE, "false");
    try {
        final NacosClientProperties properties = NacosClientProperties.PROTOTYPE.derive();
        String endpoint = "1.1.1.1";
        String endpointPort = "1234";
        properties.setProperty(PropertyKeyConst.ENDPOINT, endpoint);
        properties.setProperty(PropertyKeyConst.ENDPOINT_PORT, endpointPort);
        String actual = InitUtils.initEndpoint(properties);
        assertEquals(endpoint + ":" + endpointPort, actual);
    } finally {
        // Restore global JVM state so the disabled-parsing flag cannot leak
        // into other tests running in the same JVM.
        System.clearProperty(SystemPropertyKeyConst.IS_USE_ENDPOINT_PARSING_RULE);
    }
}
/**
 * Writes {@code cmd} to a temporary file inside the NM-private directory of
 * the container's application, using an ini-style layout: a
 * "[docker-command-execution]" section header followed by one
 * " key=value" line per docker argument (multi-valued arguments are
 * comma-joined). For run commands that carry environment variables, a
 * companion env file is written and referenced through an "environ=" entry.
 *
 * @param cmd docker command to serialize
 * @param containerId container the command belongs to; its string form is
 *        used as the file prefix and its application id as the directory
 * @param nmContext node manager context supplying the local dirs handler
 * @return absolute path of the generated command file
 * @throws ContainerExecutionException if the context or dirs handler is
 *         missing, if a key contains '=' or a value contains '\n' (either
 *         would corrupt the key=value file format), or if writing fails
 */
public String writeCommandToTempFile(DockerCommand cmd,
    ContainerId containerId, Context nmContext)
    throws ContainerExecutionException {
  String filePrefix = containerId.toString();
  ApplicationId appId = containerId.getApplicationAttemptId()
      .getApplicationId();
  File dockerCommandFile;
  File cmdDir = null;

  // Without a dirs handler there is no writable NM-private location.
  if(nmContext == null || nmContext.getLocalDirsHandler() == null) {
    throw new ContainerExecutionException(
        "Unable to write temporary docker command");
  }

  try {
    // Per-container directory: <nm-private>/<appId>/<containerId>/
    String cmdDirPath = nmContext.getLocalDirsHandler().getLocalPathForWrite(
        ResourceLocalizationService.NM_PRIVATE_DIR + Path.SEPARATOR + appId
        + Path.SEPARATOR + filePrefix + Path.SEPARATOR).toString();
    cmdDir = new File(cmdDirPath);
    // mkdirs may return false if another thread created it; exists() covers that.
    if (!cmdDir.mkdirs() && !cmdDir.exists()) {
      throw new IOException("Cannot create container private directory "
          + cmdDir);
    }
    dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
        TMP_FILE_SUFFIX, cmdDir);
    try (
      Writer writer = new OutputStreamWriter(
          new FileOutputStream(dockerCommandFile.toString()),
          StandardCharsets.UTF_8);
      PrintWriter printWriter = new PrintWriter(writer);
    ) {
      printWriter.println("[docker-command-execution]");
      for (Map.Entry<String, List<String>> entry :
          cmd.getDockerCommandWithArguments().entrySet()) {
        // '=' in a key would be parsed as part of the value separator.
        if (entry.getKey().contains("=")) {
          throw new ContainerExecutionException(
              "'=' found in entry for docker command file, key = " + entry
                  .getKey() + "; value = " + entry.getValue());
        }
        String value = StringUtils.join(",", entry.getValue());
        // A newline would terminate the key=value line prematurely.
        if (value.contains("\n")) {
          throw new ContainerExecutionException(
              "'\\n' found in entry for docker command file, key = " + entry
                  .getKey() + "; value = " + value);
        }
        printWriter.println(" " + entry.getKey() + "=" + value);
      }
      if (cmd instanceof DockerRunCommand) {
        DockerRunCommand runCommand = (DockerRunCommand) cmd;
        // Env vars are written to a separate file referenced by "environ=".
        if (runCommand.containsEnv()) {
          String path = writeEnvFile(runCommand, filePrefix, cmdDir);
          printWriter.println(" environ=" + path);
        }
      }
      return dockerCommandFile.toString();
    }
  } catch (IOException e) {
    LOG.warn("Unable to write docker command to " + cmdDir);
    throw new ContainerExecutionException(e);
  }
}
/**
 * End-to-end check of DockerClient#writeCommandToTempFile: with a real,
 * started LocalDirsHandlerService pointed at the test root, the command
 * file must actually be created on disk.
 */
@Test
public void testWriteCommandToTempFile() throws Exception {
  String absRoot = TEST_ROOT_DIR.getAbsolutePath();
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId cid = ContainerId.newContainerId(attemptId, 1);
  DockerCommand dockerCmd = new DockerInspectCommand(cid.toString());

  // Point every NM directory (local, log, tmp) at the test root.
  Configuration conf = new Configuration();
  conf.set("hadoop.tmp.dir", absRoot);
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, absRoot);
  conf.set(YarnConfiguration.NM_LOG_DIRS, absRoot);

  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  Context mockContext = mock(Context.class);
  doReturn(conf).when(mockContext).getConf();
  doReturn(dirsHandler).when(mockContext).getLocalDirsHandler();

  DockerClient dockerClient = new DockerClient();
  // The dirs handler must be initialized and started before
  // getLocalPathForWrite can hand out a writable path.
  dirsHandler.init(conf);
  dirsHandler.start();
  String tmpPath = dockerClient.writeCommandToTempFile(dockerCmd, cid,
      mockContext);
  dirsHandler.stop();
  File tmpFile = new File(tmpPath);
  assertTrue(tmpFile + " was not created", tmpFile.exists());
}
/**
 * Logs a snapshot of every registered metric (gauges, counters, histograms,
 * meters, timers) through the configured logger proxy and marker. Nothing is
 * formatted or logged when the target level/marker is disabled.
 */
@Override
public void report(SortedMap<MetricName, Gauge> gauges,
                   SortedMap<MetricName, Counter> counters,
                   SortedMap<MetricName, Histogram> histograms,
                   SortedMap<MetricName, Meter> meters,
                   SortedMap<MetricName, Timer> timers) {
    // Skip all per-metric work when logging is disabled for this marker.
    if (!loggerProxy.isEnabled(marker)) {
        return;
    }
    gauges.forEach(this::logGauge);
    counters.forEach(this::logCounter);
    histograms.forEach(this::logHistogram);
    meters.forEach(this::logMeter);
    timers.forEach(this::logTimer);
}
/**
 * When ERROR logging is enabled for the marker, reporting a gauge must emit
 * one parameterized error-level log line with type, name and value.
 */
@Test
public void reportsGaugeValuesAtError() throws Exception {
    when(logger.isErrorEnabled(marker)).thenReturn(true);
    // Report a single gauge; all other metric maps are empty.
    errorReporter.report(map("gauge", gauge("value")),
            this.map(),
            this.map(),
            this.map(),
            this.map());

    verify(logger).error(marker, "type={}, name={}, value={}", "GAUGE", "gauge", "value");
}
/**
 * Stores a value under {@code key}. If a node already exists for the key,
 * its value is replaced in place and a copy of the node carrying the
 * previous value is returned; otherwise a fresh root-less node is inserted
 * and {@code null} is returned.
 */
@Override
public TreeEntryNode<K, V> putNode(K key, V value) {
    final TreeEntryNode<K, V> existing = nodes.get(key);
    if (ObjectUtil.isNotNull(existing)) {
        // Key already present: overwrite in place and hand back a snapshot
        // of the node that still carries the previous value.
        final V previous = existing.getValue();
        existing.setValue(value);
        return existing.copy(previous);
    }
    nodes.put(key, new TreeEntryNode<>(null, key, value));
    return null;
}
/**
 * putNode must insert a new entry on first use of a key and overwrite the
 * stored value on subsequent calls for the same key.
 */
@Test
public void putNodeTest() {
    final ForestMap<String, String> map = new LinkedForestMap<>(false);
    assertNull(map.get("a"));

    // First put: key absent, a node is created.
    map.putNode("a", "aaa");
    assertNotNull(map.get("a"));
    assertEquals("aaa", map.get("a").getValue());

    // Second put: same key, the value is replaced.
    map.putNode("a", "aaaa");
    assertNotNull(map.get("a"));
    assertEquals("aaaa", map.get("a").getValue());
}
/**
 * Converts an array of Spark V2 predicates into a single Iceberg expression
 * by AND-ing each converted predicate onto an always-true seed. Fails with
 * IllegalArgumentException when any predicate cannot be converted.
 */
public static Expression convert(Predicate[] predicates) {
    Expression result = Expressions.alwaysTrue();
    for (int i = 0; i < predicates.length; i++) {
        Predicate predicate = predicates[i];
        Expression converted = convert(predicate);
        // Every predicate must be convertible; otherwise the combined filter
        // would silently drop a condition.
        Preconditions.checkArgument(
            converted != null, "Cannot convert Spark predicate to Iceberg expression: %s", predicate);
        result = Expressions.and(result, converted);
    }
    return result;
}
/**
 * A Spark ">" predicate over a DateType column (whose literal is encoded as
 * days since the epoch) must convert to the equivalent Iceberg greaterThan
 * expression.
 */
@Test
public void testDateFilterConversion() {
    LocalDate localDate = LocalDate.parse("2018-10-18");
    long epochDay = localDate.toEpochDay();

    NamedReference namedReference = FieldReference.apply("x");
    // Spark represents DateType literals as epoch-day longs.
    LiteralValue ts = new LiteralValue(epochDay, DataTypes.DateType);
    org.apache.spark.sql.connector.expressions.Expression[] attrAndValue =
        new org.apache.spark.sql.connector.expressions.Expression[] {namedReference, ts};

    Predicate predicate = new Predicate(">", attrAndValue);
    Expression dateExpression = SparkV2Filters.convert(predicate);
    Expression rawExpression = Expressions.greaterThan("x", epochDay);

    Assert.assertEquals(
        "Generated date expression should be correct",
        rawExpression.toString(),
        dateExpression.toString());
}
/**
 * Converts a JPMML {@link Row} into its KiePMML counterpart by copying the
 * row's data into a plain map (see {@code getRowDataMap}).
 */
static KiePMMLRow getKiePMMLRow(final Row row) {
    return new KiePMMLRow(getRowDataMap(row));
}
/**
 * Conversion must carry the source row's data over to the KiePMML model,
 * both for a plain random row and for a row that contains cells.
 */
@Test
void getKiePMMLRow() {
    Row toConvert = getRandomRow();
    KiePMMLRow retrieved = KiePMMLRowInstanceFactory.getKiePMMLRow(toConvert);
    commonVerifyKiePMMLRow(retrieved, toConvert);
    // Repeat with a row that actually contains cells.
    toConvert = getRandomRowWithCells();
    retrieved = KiePMMLRowInstanceFactory.getKiePMMLRow(toConvert);
    commonVerifyKiePMMLRowWithCells(retrieved, toConvert);
}
/**
 * Queries all config entries for export. When {@code ids} is non-empty it is
 * the only filter applied; otherwise the query is filtered by tenant plus
 * any non-blank dataId (fuzzy LIKE match), group and appName.
 *
 * @throws CannotGetJdbcConnectionException re-thrown after being logged when
 *         the database connection cannot be obtained
 */
@Override
public List<ConfigAllInfo> findAllConfigInfo4Export(final String dataId, final String group, final String tenant,
        final String appName, final List<Long> ids) {
    String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
    ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.CONFIG_INFO);
    MapperContext context = new MapperContext();
    if (!CollectionUtils.isEmpty(ids)) {
        // An explicit id list takes precedence; all other filters are ignored.
        context.putWhereParameter(FieldConstant.IDS, ids);
    } else {
        context.putWhereParameter(FieldConstant.TENANT_ID, tenantTmp);
        // Consistent isNotBlank idiom for all optional filters (previously
        // dataId used the equivalent but inconsistent !isBlank form).
        if (StringUtils.isNotBlank(dataId)) {
            // dataId supports fuzzy matching, hence the LIKE argument.
            context.putWhereParameter(FieldConstant.DATA_ID, generateLikeArgument(dataId));
        }
        if (StringUtils.isNotBlank(group)) {
            context.putWhereParameter(FieldConstant.GROUP_ID, group);
        }
        if (StringUtils.isNotBlank(appName)) {
            context.putWhereParameter(FieldConstant.APP_NAME, appName);
        }
    }
    MapperResult mapperResult = configInfoMapper.findAllConfigInfo4Export(context);
    try {
        return this.jt.query(mapperResult.getSql(), mapperResult.getParamList().toArray(), CONFIG_ALL_INFO_ROW_MAPPER);
    } catch (CannotGetJdbcConnectionException e) {
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
/**
 * findAllConfigInfo4Export is exercised two ways: by explicit id list (only
 * the ids become SQL arguments) and by tenant/dataId/group/appName when ids
 * are absent. Also verifies that a JDBC connection failure propagates
 * unchanged to the caller.
 */
@Test
void testFindAllConfigInfo4Export() {
    //mock select config state
    List<ConfigAllInfo> mockConfigs = new ArrayList<>();
    mockConfigs.add(createMockConfigAllInfo(0));
    mockConfigs.add(createMockConfigAllInfo(1));
    mockConfigs.add(createMockConfigAllInfo(2));
    String dataId = "dataId1324";
    String group = "group23546";
    String tenant = "tenant13245";
    String appName = "appName1243";
    List<Long> ids = Arrays.asList(132L, 1343L, 245L);
    // Id-based query: only the ids are bound as SQL arguments.
    when(jdbcTemplate.query(anyString(), eq(new Object[] {132L, 1343L, 245L}),
            eq(CONFIG_ALL_INFO_ROW_MAPPER))).thenReturn(mockConfigs);
    //execute return mock obj
    List<ConfigAllInfo> configAllInfosIds = externalConfigInfoPersistService
            .findAllConfigInfo4Export(dataId, group, tenant, appName, ids);
    //expect check
    assertEquals(mockConfigs, configAllInfosIds);

    // Without ids, the filter columns become the SQL arguments.
    when(jdbcTemplate.query(anyString(), eq(new Object[] {tenant, dataId, group, appName}),
            eq(CONFIG_ALL_INFO_ROW_MAPPER))).thenReturn(
            mockConfigs);
    //execute return mock obj
    List<ConfigAllInfo> configAllInfosWithDataId = externalConfigInfoPersistService
            .findAllConfigInfo4Export(dataId, group, tenant, appName, null);
    //expect check
    assertEquals(mockConfigs, configAllInfosWithDataId);

    //mock CannotGetJdbcConnectionException
    when(jdbcTemplate.query(anyString(), eq(new Object[] {132L, 1343L, 245L}),
            eq(CONFIG_ALL_INFO_ROW_MAPPER))).thenThrow(
            new CannotGetJdbcConnectionException("mock exp11"));
    //expect throw exception.
    try {
        externalConfigInfoPersistService.findAllConfigInfo4Export(dataId, group, tenant, appName, ids);
        // Should not reach here: forces a failure when no exception is thrown.
        assertFalse(true);
    } catch (Exception e) {
        assertTrue(e instanceof CannotGetJdbcConnectionException);
        assertTrue(e.getMessage().endsWith("mock exp11"));
    }
}
public static Map<String, Object> convert(FormData data) { Map<String, Object> map = new HashMap<>(); for (String key : data) { if (data.get(key).size() == 1) { // If the form data is file, read it as FileItem, else read as String. if (data.getFirst(key).getFileName() == null) { String value = data.getFirst(key).getValue(); map.put(key, value); } else { FormData.FileItem value = data.getFirst(key).getFileItem(); map.put(key, value); } } else if (data.get(key).size() > 1) { List<Object> list = new ArrayList<>(); for (FormData.FormValue value : data.get(key)) { // If the form data is file, read it as FileItem, else read as String. if (value.getFileName() == null) { list.add(value.getValue()); } else { list.add(value.getFileItem()); } } map.put(key, list); } // ignore size == 0 } return map; }
/**
 * Converting a FormData instance that holds no fields must yield an
 * empty map.
 */
@Test
public void shouldToGetEmptyMapWhenFormDataIsEmpty() {
    final FormData emptyForm = new FormData(99);
    final Map<String, Object> converted = BodyConverter.convert(emptyForm);
    Assert.assertEquals(0, converted.size());
}
/**
 * Transfers this buffer's data starting at {@code index} into the whole of
 * {@code dst}; delegates to the four-argument overload with destination
 * offset 0 and length {@code dst.length}, then returns this buffer for
 * chaining.
 */
@Override
public ByteBuf getBytes(int index, byte[] dst) {
    getBytes(index, dst, 0, dst.length);
    return this;
}
/**
 * A ranged getBytes on a released buffer must fail with
 * IllegalReferenceCountException rather than returning stale data.
 */
@Test
public void testGetBytesAfterRelease5() {
    assertThrows(IllegalReferenceCountException.class, new Executable() {
        @Override
        public void execute() {
            // Any read on a buffer whose refCnt dropped to 0 is illegal.
            releasedBuffer().getBytes(0, new byte[8], 0, 1);
        }
    });
}
public static List<MetricsPublisher> getMetricsPublishers( List<JavaInformations> javaInformationsList) { assert javaInformationsList != null && !javaInformationsList.isEmpty(); final StringBuilder sb = new StringBuilder(); for (final JavaInformations javaInformations : javaInformationsList) { if (sb.length() != 0) { sb.append('_'); } sb.append(javaInformations.getHost().replaceFirst("@.*", "")); } String contextPath = Parameter.APPLICATION_NAME.getValue(); if (contextPath == null) { contextPath = javaInformationsList.get(0).getContextPath(); } if (contextPath == null) { // for NodesCollector in Jenkins, contextPath is null contextPath = "NA"; } else if (contextPath.isEmpty()) { // for CloudWatch, InfluxDB, Datadog, a tag/dimension is not supposed to be empty contextPath = "/"; } final String hosts = sb.toString(); return getMetricsPublishers(contextPath, hosts); }
/**
 * With no publisher parameters configured, no publishers are created; after
 * setting Graphite, StatsD, CloudWatch (plus its required region), InfluxDB
 * and Datadog parameters, exactly five publishers must be returned.
 */
@Test
public void test() {
    final List<JavaInformations> javaInformationsList = new ArrayList<>();
    javaInformationsList.add(new JavaInformations(null, false));
    javaInformationsList.add(new JavaInformations(null, false));
    // No publisher parameters set yet -> empty result.
    assertEquals("getMetricsPublishers", 0,
            MetricsPublisher.getMetricsPublishers(javaInformationsList).size());
    setProperty(Parameter.GRAPHITE_ADDRESS, "localhost:2003");
    setProperty(Parameter.STATSD_ADDRESS, "localhost:8125");
    setProperty(Parameter.CLOUDWATCH_NAMESPACE, "MyCompany/MyAppDomain");
    // NOTE(review): the AWS region appears to be required for the CloudWatch
    // publisher to initialize — confirm against its implementation.
    System.setProperty("aws.region", "us-west-1");
    setProperty(Parameter.INFLUXDB_URL, "http://localhost:8086/write?db=mydb");
    setProperty(Parameter.DATADOG_API_KEY, "9775a026f1ca7d1c6c5af9d94d9595a4");
    assertEquals("getMetricsPublishers", 5,
            MetricsPublisher.getMetricsPublishers(javaInformationsList).size());
}
/**
 * Runs the example: builds a session from the default classpath-based KIE
 * container, exposes {@code out} to the rules via the "out" global, inserts
 * Dave's greeting and fires all rules.
 */
public void go(PrintStream out) {
    final KieServices kieServices = KieServices.Factory.get();
    final KieContainer container = kieServices.getKieClasspathContainer();
    final KieSession session = container.newKieSession();

    // The rules print their output through this stream.
    session.setGlobal("out", out);
    session.insert(new Message("Dave", "Hello, HAL. Do you read me, HAL?"));
    session.fireAllRules();
}
/**
 * Running the example against a captured stream must produce exactly the
 * two-line Dave/HAL dialogue.
 */
@Test
public void testGo() {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintStream ps = new PrintStream(baos);
    new DefaultKieSessionExample().go(ps);
    ps.close();

    String actual = baos.toString();
    String expected = "" +
            "Dave: Hello, HAL. Do you read me, HAL?" + NL +
            "HAL: Dave. I read you." + NL;
    assertEquals(expected, actual);
}
/**
 * No-op: intentionally ignores the timer-end request, so no event is built
 * or dispatched. NOTE(review): presumably this is the disabled/empty variant
 * of the tracking API — confirm against the enabled implementation.
 */
@Override
public void trackTimerEnd(final String eventName, JSONObject properties) {

}
/**
 * Ending a timer must not emit any event in this configuration: the track
 * callback fails the test if any event reaches it.
 */
@Test
public void testTrackTimerEnd() {
    mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            // Reaching here means an event was tracked, which must not happen.
            Assert.fail();
            return false;
        }
    });
    mSensorsAPI.trackTimerStart("TestTimerEvent");
    mSensorsAPI.trackTimerEnd("TestTimerEvent", new JSONObject());
}
/**
 * Converts a persisted live measure into a domain {@link Measure}, dispatched
 * on the metric's declared value type. The metric is mandatory; a missing
 * measure row yields an empty Optional.
 *
 * @throws NullPointerException if {@code metric} is null
 * @throws IllegalArgumentException for an unsupported value type
 */
public Optional<Measure> toMeasure(@Nullable LiveMeasureDto measureDto, Metric metric) {
    requireNonNull(metric);
    if (measureDto == null) {
        return Optional.empty();
    }
    // A live measure row carries its payload either as a numeric value or as
    // string data; each type-specific builder picks what it needs.
    Double rawValue = measureDto.getValue();
    String rawData = measureDto.getDataAsString();
    switch (metric.getType().getValueType()) {
        case INT:
            return toIntegerMeasure(rawValue, rawData);
        case LONG:
            return toLongMeasure(rawValue, rawData);
        case DOUBLE:
            return toDoubleMeasure(rawValue, rawData);
        case BOOLEAN:
            return toBooleanMeasure(rawValue, rawData);
        case STRING:
            return toStringMeasure(rawData);
        case LEVEL:
            return toLevelMeasure(rawData);
        case NO_VALUE:
            return toNoValueMeasure();
        default:
            throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
    }
}
/**
 * A null metric must fail fast with an NPE, even when the measure is also
 * null (only the measure argument is nullable).
 */
@Test
public void toMeasure_throws_NPE_if_both_arguments_are_null() {
    assertThatThrownBy(() -> underTest.toMeasure(null, null))
        .isInstanceOf(NullPointerException.class);
}
@Override public ProviderInfo doSelect(SofaRequest request, List<ProviderInfo> providerInfos) { String interfaceId = request.getInterfaceName(); String method = request.getMethodName(); String key = interfaceId + "#" + method; int hashcode = providerInfos.hashCode(); // 判断是否同样的服务列表 Selector selector = selectorCache.get(key); if (selector == null // 原来没有 || selector.getHashCode() != hashcode) { // 或者服务列表已经变化 selector = new Selector(interfaceId, method, providerInfos, hashcode); selectorCache.put(key, selector); } return selector.select(request); }
@Test public void doSelect() throws Exception { ConsistentHashLoadBalancer loadBalancer = new ConsistentHashLoadBalancer(null); Map<Integer, Integer> cnt = new HashMap<Integer, Integer>(); int size = 20; int total = 100000; SofaRequest request = new SofaRequest(); request.setInterfaceName(ConsistentHashLoadBalancerTest.class.getName()); request.setMethod(ConsistentHashLoadBalancerTest.class.getMethod("doSelect")); for (int i = 0; i < size; i++) { cnt.put(9000 + i, 0); } List<ProviderInfo> providers = buildSameWeightProviderList(size); long start = System.currentTimeMillis(); for (int i = 0; i < total; i++) { ProviderInfo provider = loadBalancer.doSelect(request, providers); int port = provider.getPort(); cnt.put(port, cnt.get(port) + 1); } long end = System.currentTimeMillis(); LOGGER.info("elapsed" + (end - start) + "ms"); LOGGER.info("avg " + (end - start) * 1000 * 1000 / total + "ns"); int count = 0; for (int i = 0; i < size; i++) { if (cnt.get(9000 + i) > 0) { count++; } } Assert.assertTrue(count == 1);// 应该落在一台机器上 }
@Override public ProxyInvocationHandler parserInterfaceToProxy(Object target, String objectName) { // eliminate the bean without two phase annotation. Set<String> methodsToProxy = this.tccProxyTargetMethod(target); if (methodsToProxy.isEmpty()) { return null; } // register resource and enhance with interceptor DefaultResourceRegisterParser.get().registerResource(target, objectName); return new TccActionInterceptorHandler(target, methodsToProxy); }
/**
 * Nested TCC scenario: the outer NestTccAction delegates to an inner
 * TccAction proxy. Committing the global transaction after a successful
 * prepare must drive the commit phase of BOTH the nested action and the
 * inner action.
 */
@Test
public void testNestTcc_should_commit() throws Exception {
    //given
    RootContext.unbind();
    DefaultResourceManager.get();
    // Replace the real TCC branch resource manager with the mocked one.
    DefaultResourceManager.mockResourceManager(BranchType.TCC, resourceManager);
    TransactionManagerHolder.set(transactionManager);
    TccActionImpl tccAction = new TccActionImpl();
    TccAction tccActionProxy = ProxyUtil.createProxy(tccAction);
    Assertions.assertNotNull(tccActionProxy);
    NestTccActionImpl nestTccAction = new NestTccActionImpl();
    nestTccAction.setTccAction(tccActionProxy);

    //when
    ProxyInvocationHandler proxyInvocationHandler = DefaultInterfaceParser.get()
            .parserInterfaceToProxy(nestTccAction, nestTccAction.getClass().getName());

    //then
    Assertions.assertNotNull(proxyInvocationHandler);

    //when
    NestTccAction nestTccActionProxy = ProxyUtil.createProxy(nestTccAction);

    //then
    Assertions.assertNotNull(nestTccActionProxy);

    // transaction commit test
    GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
    try {
        tx.begin(60000, "testBiz");
        boolean result = nestTccActionProxy.prepare(null, 2);
        Assertions.assertTrue(result);
        if (result) {
            tx.commit();
        } else {
            tx.rollback();
        }
    } catch (Exception exx) {
        // Any unexpected failure rolls the global transaction back.
        tx.rollback();
        throw exx;
    }
    Assertions.assertTrue(nestTccAction.isCommit());
    Assertions.assertTrue(tccAction.isCommit());
}
/**
 * Builds a time-limiter configuration holder from a Commons Configuration
 * source: shared defaults and per-instance settings are read from their
 * respective prefixes. Any failure is wrapped in a ConfigParseException.
 */
public static CommonsConfigurationTimeLimiterConfiguration of(final Configuration configuration)
        throws ConfigParseException {
    final CommonsConfigurationTimeLimiterConfiguration result =
            new CommonsConfigurationTimeLimiterConfiguration();
    try {
        // Shared defaults and instance overrides live under separate prefixes.
        result.getConfigs().putAll(
                result.getProperties(configuration.subset(TIME_LIMITER_CONFIGS_PREFIX)));
        result.getInstances().putAll(
                result.getProperties(configuration.subset(TIME_LIMITER_INSTANCES_PREFIX)));
    } catch (Exception ex) {
        throw new ConfigParseException("Error creating timelimiter configuration", ex);
    }
    return result;
}
/**
 * Loading the shared resilience YAML must yield both the named time-limiter
 * default configs and the per-instance overrides.
 */
@Test
public void testFromYamlFile() throws ConfigurationException {
    Configuration config = CommonsConfigurationUtil.getConfiguration(YAMLConfiguration.class,
            TestConstants.RESILIENCE_CONFIG_YAML_FILE_NAME);
    CommonsConfigurationTimeLimiterConfiguration timeLimiterConfiguration =
            CommonsConfigurationTimeLimiterConfiguration.of(config);
    assertConfigs(timeLimiterConfiguration.getConfigs());
    assertInstances(timeLimiterConfiguration.getInstances());
}
/**
 * Entry point for writing to MongoDB. Returns a {@code Write} transform
 * pre-configured with the defaults: 60s max connection idle time, batches of
 * 1024 documents, SSL disabled (certificate check bypass and invalid-hostname
 * tolerance off), and ordered bulk writes.
 */
public static Write write() {
    return new AutoValue_MongoDbIO_Write.Builder()
        .setMaxConnectionIdleTime(60000)
        .setBatchSize(1024L)
        .setSslEnabled(false)
        .setIgnoreSSLCertificate(false)
        .setSslInvalidHostNameAllowed(false)
        .setOrdered(true)
        .build();
}
/**
 * With ordered=false a duplicate-key failure must not abort the bulk write:
 * of two identical documents (same _id) exactly one ends up in the
 * collection.
 */
@Test
public void testWriteUnordered() {
    final String collectionName = "testWriteUnordered";

    // Two documents with the same _id: the second insert will conflict.
    Document doc =
        Document.parse("{\"_id\":\"521df3a4300466f1f2b5ae82\",\"scientist\":\"Test %s\"}");
    pipeline
        .apply(Create.of(doc, doc))
        .apply(
            MongoDbIO.write()
                .withUri("mongodb://localhost:" + port)
                .withDatabase(DATABASE_NAME)
                .withOrdered(false)
                .withCollection(collectionName));
    pipeline.run();

    assertEquals(1, countElements(collectionName));
}
/**
 * Validates the HTTP timeout system property: if set, it must parse as a
 * non-negative integer (delegates to the shared numeric-property check).
 *
 * @throws NumberFormatException if the property is set but not a valid
 *         non-negative number
 */
public static void checkHttpTimeoutProperty() throws NumberFormatException {
    checkNumericSystemProperty(HTTP_TIMEOUT, Range.atLeast(0));
}
/**
 * An unset HTTP timeout property is valid: the check must complete without
 * throwing.
 */
@Test
public void testCheckHttpTimeoutProperty_okWhenUndefined() throws NumberFormatException {
    System.clearProperty(JibSystemProperties.HTTP_TIMEOUT);
    JibSystemProperties.checkHttpTimeoutProperty();
}
/**
 * Parses a raw User-Agent header string into a structured {@link UserAgent};
 * thin facade over {@link UserAgentParser#parse(String)}.
 */
public static UserAgent parse(String userAgentString) {
    return UserAgentParser.parse(userAgentString);
}
/**
 * Edge on a Windows Phone device (Lumia 950 XL) must be detected as mobile
 * MSEdge 15.x on the Windows Phone platform with the Webkit engine.
 */
@Test
public void parseEdgeOnLumia950XLTest() {
    final String uaStr = "Mozilla/5.0 (Windows Phone 10.0; Android 6.0.1; Microsoft; Lumia 950XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Mobile Safari/537.36 Edge/15.14900";
    final UserAgent ua = UserAgentUtil.parse(uaStr);

    assertEquals("MSEdge", ua.getBrowser().toString());
    assertEquals("15.14900", ua.getVersion());
    assertEquals("Webkit", ua.getEngine().toString());
    assertEquals("537.36", ua.getEngineVersion());
    assertEquals("Windows Phone", ua.getOs().toString());
    assertEquals("10.0", ua.getOsVersion());
    assertEquals("Windows Phone", ua.getPlatform().toString());
    assertTrue(ua.isMobile());
}
/**
 * Convenience overload: resolves the local namenode's nameservice id from
 * the configuration and delegates to the two-argument variant.
 */
public static List<RemoteNameNodeInfo> getRemoteNameNodes(Configuration conf)
    throws IOException {
  return getRemoteNameNodes(conf, DFSUtil.getNamenodeNameServiceId(conf));
}
/**
 * The nsId-less getRemoteNameNodes overload must resolve the local
 * nameservice from the configuration itself and return the same list as
 * the explicit-nameservice overload.
 */
@Test
public void testParseMultipleNameNodes() throws Exception {
  // start with an empty configuration
  Configuration conf = new Configuration(false);

  // add in keys for each of the NNs
  String nameservice = "ns1";
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf(nameservice)
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
          .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002))
          .addNN(new MiniDFSNNTopology.NNConf("nn3").setIpcPort(10003)));

  // add the configurations of the NNs to the passed conf, so we can parse it back out
  MiniDFSCluster.configureNameNodes(topology, false, conf);

  // set the 'local' one as nn1
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
  List<RemoteNameNodeInfo> nns = RemoteNameNodeInfo.getRemoteNameNodes(conf);

  // make sure it matches when we pass in the nameservice
  List<RemoteNameNodeInfo> nns2 = RemoteNameNodeInfo.getRemoteNameNodes(conf,
      nameservice);
  assertEquals(nns, nns2);
}
/**
 * Asynchronously clears the given alarm: builds the (possibly scripted)
 * alarm details from the message, then asks the alarm service to clear the
 * alarm at the current timestamp. On success the result reflects the
 * service's cleared flag and updated alarm; on failure it reports
 * "not cleared" with the original alarm.
 */
private ListenableFuture<TbAlarmResult> clearAlarm(TbContext ctx, TbMsg msg, Alarm alarm) {
    ctx.logJsEvalRequest();
    ListenableFuture<JsonNode> detailsFuture = buildAlarmDetails(msg, alarm.getDetails());
    return Futures.transform(detailsFuture, alarmDetails -> {
        ctx.logJsEvalResponse();
        AlarmApiCallResult clearResult = ctx.getAlarmService()
                .clearAlarm(ctx.getTenantId(), alarm.getId(), System.currentTimeMillis(), alarmDetails);
        return clearResult.isSuccessful()
                ? new TbAlarmResult(false, false, clearResult.isCleared(), clearResult.getAlarm())
                : new TbAlarmResult(false, false, false, alarm);
    }, ctx.getDbCallbackExecutor());
}
@Test
void alarmCanBeClearedWithAlarmOriginator() {
    // Node configured with a clear-alarm script; incoming telemetry addressed to the
    // alarm originator should clear the active alarm and route to the "Cleared" chain.
    initWithClearAlarmScript();
    metadata.putValue("key", "value");
    TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, alarmOriginator, metadata, "{\"temperature\": 50}");
    long oldEndDate = System.currentTimeMillis();
    AlarmId id = new AlarmId(alarmOriginator.getId());
    Alarm activeAlarm = Alarm.builder().type("SomeType").tenantId(tenantId).originator(msgOriginator).severity(AlarmSeverity.WARNING).endTs(oldEndDate).build();
    activeAlarm.setId(id);
    // Expected post-clear alarm: same identity/endTs, cleared flag set.
    Alarm expectedAlarm = Alarm.builder()
            .tenantId(tenantId)
            .originator(msgOriginator)
            .cleared(true)
            .severity(AlarmSeverity.WARNING)
            .propagate(false)
            .type("SomeType")
            .details(null)
            .endTs(oldEndDate)
            .build();
    expectedAlarm.setId(id);
    when(alarmDetailsScriptMock.executeJsonAsync(msg)).thenReturn(Futures.immediateFuture(null));
    when(alarmServiceMock.findAlarmById(tenantId, id)).thenReturn(activeAlarm);
    when(alarmServiceMock.clearAlarm(eq(activeAlarm.getTenantId()), eq(activeAlarm.getId()), anyLong(), nullable(JsonNode.class)))
            .thenReturn(AlarmApiCallResult.builder()
                    .successful(true)
                    .cleared(true)
                    .alarm(new AlarmInfo(expectedAlarm))
                    .build());
    node.onMsg(ctxMock, msg);
    // The node enqueues the outbound message; run the success callback to trigger routing.
    verify(ctxMock).enqueue(any(), successCaptor.capture(), failureCaptor.capture());
    successCaptor.getValue().run();
    verify(ctxMock).tellNext(any(), eq("Cleared"));
    // Capture the transformed outbound message and verify type, originator, metadata and body.
    ArgumentCaptor<TbMsg> msgCaptor = ArgumentCaptor.forClass(TbMsg.class);
    ArgumentCaptor<TbMsgType> typeCaptor = ArgumentCaptor.forClass(TbMsgType.class);
    ArgumentCaptor<EntityId> originatorCaptor = ArgumentCaptor.forClass(EntityId.class);
    ArgumentCaptor<TbMsgMetaData> metadataCaptor = ArgumentCaptor.forClass(TbMsgMetaData.class);
    ArgumentCaptor<String> dataCaptor = ArgumentCaptor.forClass(String.class);
    verify(ctxMock).transformMsg(msgCaptor.capture(), typeCaptor.capture(), originatorCaptor.capture(), metadataCaptor.capture(), dataCaptor.capture());
    assertThat(TbMsgType.ALARM).isEqualTo(typeCaptor.getValue());
    assertThat(alarmOriginator).isEqualTo(originatorCaptor.getValue());
    assertThat("value").isEqualTo(metadataCaptor.getValue().getValue("key"));
    assertThat(Boolean.TRUE.toString()).isEqualTo(metadataCaptor.getValue().getValue(DataConstants.IS_CLEARED_ALARM));
    // Metadata must be a copy, not the same instance that was passed in.
    assertThat(metadata).isNotSameAs(metadataCaptor.getValue());
    Alarm actualAlarm = JacksonUtil.fromBytes(dataCaptor.getValue().getBytes(), Alarm.class);
    assertThat(actualAlarm).isEqualTo(expectedAlarm);
}
@SuppressWarnings({
  "FutureReturnValueIgnored",
  "nullness" // TODO(https://github.com/apache/beam/issues/21068)
})
// Update data in persistent store
public void asyncClose() throws Exception {
  // Guard against double-close; the state object is unusable once closed.
  checkState(
      !isClosed,
      "Multimap user state is no longer usable because it is closed for %s",
      keysStateRequest.getStateKey());
  isClosed = true;
  // No mutations necessary
  if (!isCleared && pendingRemoves.isEmpty() && pendingAdds.isEmpty()) {
    return;
  }
  // Flush pending clears/removes/adds to the state API, then refresh the local cache
  // to reflect the persisted state.
  startStateApiWrites();
  updateCache();
}
@Test
public void testAsyncClose() throws Exception {
  // Seed the fake state client with keys A0/A1 and their values, mutate the multimap,
  // then verify asyncClose() flushed the pending removes/puts to the state backend.
  FakeBeamFnStateClient fakeClient =
      new FakeBeamFnStateClient(
          ImmutableMap.of(
              createMultimapKeyStateKey(), KV.of(ByteArrayCoder.of(), asList(A0, A1)),
              createMultimapValueStateKey(A0), KV.of(StringUtf8Coder.of(), asList("V1")),
              createMultimapValueStateKey(A1), KV.of(StringUtf8Coder.of(), asList("V1", "V2"))));
  MultimapUserState<byte[], String> userState =
      new MultimapUserState<>(
          Caches.noop(), fakeClient, "instructionId", createMultimapKeyStateKey(), ByteArrayCoder.of(), StringUtf8Coder.of());
  userState.remove(A0);
  userState.put(A1, "V3");
  userState.put(A2, "V1");
  // A3 is added and removed before close, so it must not appear in persisted state.
  userState.put(A3, "V1");
  userState.remove(A3);
  userState.asyncClose();
  Map<StateKey, ByteString> data = fakeClient.getData();
  // A0 removed entirely; A1 keeps its original values plus the appended V3; A2 is new.
  assertNull(data.get(createMultimapValueStateKey(A0)));
  assertEquals(encode("V1", "V2", "V3"), data.get(createMultimapValueStateKey(A1)));
  assertEquals(encode("V1"), data.get(createMultimapValueStateKey(A2)));
}
/**
 * Resolves the SQL type of the given expression with no lambda type mappings in scope.
 *
 * @param expression expression to type-check
 * @return resolved SQL type of {@code expression}
 */
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
@Test
public void shouldFailIfWhenIsNotBoolean() {
    // Given: a searched CASE whose WHEN operand is BIGINT arithmetic, not a boolean.
    final Expression expression = new SearchedCaseExpression(
        ImmutableList.of(
            new WhenClause(
                new ArithmeticBinaryExpression(Operator.ADD, TestExpressions.COL0, new IntegerLiteral(10)),
                new StringLiteral("foo")
            )
        ),
        Optional.empty()
    );
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> expressionTypeManager.getExpressionSqlType(expression)
    );
    // Then: the error names the offending operand and its actual type.
    assertThat(e.getMessage(), containsString(
        "WHEN operand type should be boolean."
            + System.lineSeparator()
            + "Type for '(COL0 + 10)' is BIGINT"
    ));
}
/**
 * Updates a typed configuration property from a {@code SET DIST VARIABLE} statement.
 *
 * @param sqlStatement statement carrying the variable name and the new value
 * @param contextManager context manager used to apply the configuration change
 * @throws SQLException if applying the configuration change fails — TODO confirm against
 *     handleConfigurationProperty's contract
 * @throws UnsupportedVariableException if the variable name does not resolve to a typed property key
 */
@Override
public void executeUpdate(final SetDistVariableStatement sqlStatement, final ContextManager contextManager) throws SQLException {
    // Resolve the enum constant once; the original resolved it twice (once for the
    // precondition and once for the handler call).
    Object enumType = getEnumType(sqlStatement.getName());
    ShardingSpherePreconditions.checkState(enumType instanceof TypedPropertyKey, () -> new UnsupportedVariableException(sqlStatement.getName()));
    handleConfigurationProperty(contextManager, (TypedPropertyKey) enumType, sqlStatement.getValue());
}
@Test
void assertExecuteWithWrongSystemLogLevel() {
    // Setting system_log_level to an unknown value must be rejected with
    // InvalidVariableValueException.
    ContextManager contextManager = mockContextManager();
    SetDistVariableStatement statement = new SetDistVariableStatement("system_log_level", "invalid");
    assertThrows(InvalidVariableValueException.class, () -> executor.executeUpdate(statement, contextManager));
}
/**
 * Returns a new stage whose value is the result of applying {@code fn} to this
 * stage's value, by mapping the underlying ParSeq task.
 *
 * @param fn function applied to this stage's result
 * @return the next stage in the chain, backed by the mapped task
 */
@Override
public <U> ParSeqBasedCompletionStage<U> thenApply(Function<? super T, ? extends U> fn) {
  return nextStageByComposingTask(_task.map("thenApply", value -> fn.apply(value)));
}
@Test
public void testThenApply() throws Exception {
    // Applying a mapping function to a completed stage should yield a stage that
    // resolves to the mapped value.
    CompletionStage<String> stage2 = createTestStage(TESTVALUE1).thenApply(v -> TESTVALUE2);
    Assert.assertEquals(TESTVALUE2, stage2.toCompletableFuture().get());
}
/**
 * Returns the ids of all streams the given permissions allow reading, excluding the
 * well-known non-message streams (see {@code NON_MESSAGE_STREAM_IDS}).
 *
 * @param streamPermissions per-stream read permission check
 * @return immutable set of readable message-stream ids
 */
public ImmutableSet<String> loadAllMessageStreams(final StreamPermissions streamPermissions) {
    return allStreamsProvider.get()
            // Unless explicitly queried, exclude event and failure indices by default
            // Having these indices in every search, makes sorting almost impossible
            // because it triggers https://github.com/Graylog2/graylog2-server/issues/6378
            // TODO: this filter could be removed, once we implement https://github.com/Graylog2/graylog2-server/issues/6490
            .filter(id -> !NON_MESSAGE_STREAM_IDS.contains(id))
            // Keep only streams the caller is permitted to read.
            .filter(streamPermissions::canReadStream)
            .collect(ImmutableSet.toImmutableSet());
}
@Test
public void filtersOutNonPermittedStreams() {
    // Three candidate streams exist, but only "gsuffa" is readable under the
    // supplied permission check.
    final PermittedStreams sut = new PermittedStreams(() -> java.util.stream.Stream.of("oans", "zwoa", "gsuffa"));
    final ImmutableSet<String> permitted = sut.loadAllMessageStreams("gsuffa"::equals);
    assertThat(permitted).containsExactly("gsuffa");
}
/**
 * Configures this serde wrapper by delegating straight to the wrapped inner serde.
 *
 * @param configs configuration properties, passed through unchanged
 * @param isKey whether this serde is used for record keys
 */
@Override
public void configure(final Map<String, ?> configs, final boolean isKey) {
    inner.configure(configs, isKey);
}
@Test
public void shouldConfigureInnerSerializerOnConfigure() {
    // When: configuring the wrapping serializer as a key serializer.
    serializer.configure(SOME_CONFIG, true);
    // Then: the call is forwarded unchanged to the inner serializer.
    verify(innerSerializer).configure(SOME_CONFIG, true);
}
/**
 * Translates this pipeline, stages the portable pipeline proto and artifacts, and submits
 * the job to the Dataflow service, returning a {@link DataflowPipelineJob} handle. When
 * {@code --templateLocation} is set, the job spec is written to the template location and a
 * {@code DataflowTemplateJob} is returned instead (no job is launched).
 *
 * <p>Visible side effects: mutates {@code options} (experiments, streaming flag, pipeline
 * URL) and, for non-v2 submissions, mutates the pipeline via {@code replaceV1Transforms}.
 *
 * @param pipeline the pipeline to submit
 * @return handle for the submitted (or templated) job
 * @throws DataflowJobAlreadyExistsException if the service matched the request to a
 *     previously created job (client_request_id mismatch, non-update case)
 * @throws DataflowJobAlreadyUpdatedException same mismatch in the update case
 */
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
  // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
  // to Runner v2.
  if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
    List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
    if (!experiments.contains("use_runner_v2")) {
      LOG.info(
          "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
              + " transforms or pipeline needed a transform upgrade.");
      options.setExperiments(
          ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
    }
  }
  if (useUnifiedWorker(options)) {
    if (hasExperiment(options, "disable_runner_v2")
        || hasExperiment(options, "disable_runner_v2_until_2023")
        || hasExperiment(options, "disable_prime_runner_v2")) {
      throw new IllegalArgumentException(
          "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
    }
    List<String> experiments =
        new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
    if (!experiments.contains("use_runner_v2")) {
      experiments.add("use_runner_v2");
    }
    if (!experiments.contains("use_unified_worker")) {
      experiments.add("use_unified_worker");
    }
    if (!experiments.contains("beam_fn_api")) {
      experiments.add("beam_fn_api");
    }
    if (!experiments.contains("use_portable_job_submission")) {
      experiments.add("use_portable_job_submission");
    }
    options.setExperiments(ImmutableList.copyOf(experiments));
  }
  logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
  logWarningIfBigqueryDLQUnused(pipeline);
  if (shouldActAsStreaming(pipeline)) {
    options.setStreaming(true);
    if (useUnifiedWorker(options)) {
      options.setEnableStreamingEngine(true);
      List<String> experiments =
          new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
      if (!experiments.contains("enable_streaming_engine")) {
        experiments.add("enable_streaming_engine");
      }
      if (!experiments.contains("enable_windmill_service")) {
        experiments.add("enable_windmill_service");
      }
    }
  }
  if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
    ProjectionPushdownOptimizer.optimize(pipeline);
  }
  LOG.info(
      "Executing pipeline on the Dataflow Service, which will have billing implications "
          + "related to Google Compute Engine usage and other Google Cloud Services.");
  DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
  String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
  // This incorrectly puns the worker harness container image (which implements v1beta3 API)
  // with the SDK harness image (which implements Fn API).
  //
  // The same Environment is used in different and contradictory ways, depending on whether
  // it is a v1 or v2 job submission.
  RunnerApi.Environment defaultEnvironmentForDataflow =
      Environments.createDockerEnvironment(workerHarnessContainerImageURL);
  // The SdkComponents for portable an non-portable job submission must be kept distinct. Both
  // need the default environment.
  SdkComponents portableComponents = SdkComponents.create();
  portableComponents.registerEnvironment(
      defaultEnvironmentForDataflow
          .toBuilder()
          .addAllDependencies(getDefaultArtifacts())
          .addAllCapabilities(Environments.getJavaCapabilities())
          .build());
  RunnerApi.Pipeline portablePipelineProto =
      PipelineTranslation.toProto(pipeline, portableComponents, false);
  // Note that `stageArtifacts` has to be called before `resolveArtifact` because
  // `resolveArtifact` updates local paths to staged paths in pipeline proto.
  portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
  List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
  portablePipelineProto = resolveArtifacts(portablePipelineProto);
  portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "Portable pipeline proto:\n{}",
        TextFormat.printer().printToString(portablePipelineProto));
  }
  // Stage the portable pipeline proto, retrieving the staged pipeline path, then update
  // the options on the new job
  // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
  LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
  byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
  DataflowPackage stagedPipeline =
      options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
  dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
  if (useUnifiedWorker(options)) {
    LOG.info("Skipping v1 transform replacements since job will run on v2.");
  } else {
    // Now rewrite things to be as needed for v1 (mutates the pipeline)
    // This way the job submitted is valid for v1 and v2, simultaneously
    replaceV1Transforms(pipeline);
  }
  // Capture the SdkComponents for look up during step translations
  SdkComponents dataflowV1Components = SdkComponents.create();
  dataflowV1Components.registerEnvironment(
      defaultEnvironmentForDataflow
          .toBuilder()
          .addAllDependencies(getDefaultArtifacts())
          .addAllCapabilities(Environments.getJavaCapabilities())
          .build());
  // No need to perform transform upgrading for the Runner v1 proto.
  RunnerApi.Pipeline dataflowV1PipelineProto =
      PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "Dataflow v1 pipeline proto:\n{}",
        TextFormat.printer().printToString(dataflowV1PipelineProto));
  }
  // Set a unique client_request_id in the CreateJob request.
  // This is used to ensure idempotence of job creation across retried
  // attempts to create a job. Specifically, if the service returns a job with
  // a different client_request_id, it means the returned one is a different
  // job previously created with the same job name, and that the job creation
  // has been effectively rejected. The SDK should return
  // Error::Already_Exists to user in that case.
  int randomNum = new Random().nextInt(9000) + 1000;
  String requestId =
      DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
              .withZone(DateTimeZone.UTC)
              .print(DateTimeUtils.currentTimeMillis())
          + "_"
          + randomNum;
  JobSpecification jobSpecification =
      translator.translate(
          pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
  if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
    List<String> experiments =
        firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
    if (!experiments.contains("use_staged_dataflow_worker_jar")) {
      dataflowOptions.setExperiments(
          ImmutableList.<String>builder()
              .addAll(experiments)
              .add("use_staged_dataflow_worker_jar")
              .build());
    }
  }
  Job newJob = jobSpecification.getJob();
  try {
    newJob
        .getEnvironment()
        .setSdkPipelineOptions(
            MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
  } catch (IOException e) {
    throw new IllegalArgumentException(
        "PipelineOptions specified failed to serialize to JSON.", e);
  }
  newJob.setClientRequestId(requestId);
  DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
  String version = dataflowRunnerInfo.getVersion();
  checkState(
      !"${pom.version}".equals(version),
      "Unable to submit a job to the Dataflow service with unset version ${pom.version}");
  LOG.info("Dataflow SDK version: {}", version);
  newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
  // The Dataflow Service may write to the temporary directory directly, so
  // must be verified.
  if (!isNullOrEmpty(options.getGcpTempLocation())) {
    newJob
        .getEnvironment()
        .setTempStoragePrefix(
            dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
  }
  newJob.getEnvironment().setDataset(options.getTempDatasetId());
  if (options.getWorkerRegion() != null) {
    newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
  }
  if (options.getWorkerZone() != null) {
    newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
  }
  if (options.getFlexRSGoal()
      == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
    newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
  } else if (options.getFlexRSGoal()
      == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
    newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
  }
  // Represent the minCpuPlatform pipeline option as an experiment, if not already present.
  if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
    List<String> experiments =
        firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
    List<String> minCpuFlags =
        experiments.stream()
            .filter(p -> p.startsWith("min_cpu_platform"))
            .collect(Collectors.toList());
    if (minCpuFlags.isEmpty()) {
      dataflowOptions.setExperiments(
          ImmutableList.<String>builder()
              .addAll(experiments)
              .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
              .build());
    } else {
      LOG.warn(
          "Flag min_cpu_platform is defined in both top level PipelineOption, "
              + "as well as under experiments. Proceed using {}.",
          minCpuFlags.get(0));
    }
  }
  newJob
      .getEnvironment()
      .setExperiments(
          ImmutableList.copyOf(
              firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
  // Set the Docker container image that executes Dataflow worker harness, residing in Google
  // Container Registry. Translator is guaranteed to create a worker pool prior to this point.
  // For runner_v1, only worker_harness_container is set.
  // For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
  // value.
  String containerImage = getContainerImageForJob(options);
  for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
    workerPool.setWorkerHarnessContainerImage(containerImage);
  }
  configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
  newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
  if (hooks != null) {
    hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
  }
  // enable upload_graph when the graph is too large
  byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
  int jobGraphByteSize = jobGraphBytes.length;
  if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
      && !hasExperiment(options, "upload_graph")
      && !useUnifiedWorker(options)) {
    List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
    options.setExperiments(
        ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
    LOG.info(
        "The job graph size ({} in bytes) is larger than {}. Automatically add "
            + "the upload_graph option to experiments.",
        jobGraphByteSize,
        CREATE_JOB_REQUEST_LIMIT_BYTES);
  }
  if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
    ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
    while (experiments.remove("upload_graph")) {}
    options.setExperiments(experiments);
    LOG.warn(
        "The upload_graph experiment was specified, but it does not apply "
            + "to runner v2 jobs. Option has been automatically removed.");
  }
  // Upload the job to GCS and remove the graph object from the API call. The graph
  // will be downloaded from GCS by the service.
  if (hasExperiment(options, "upload_graph")) {
    DataflowPackage stagedGraph =
        options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
    newJob.getSteps().clear();
    newJob.setStepsLocation(stagedGraph.getLocation());
  }
  if (!isNullOrEmpty(options.getDataflowJobFile())
      || !isNullOrEmpty(options.getTemplateLocation())) {
    boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
    if (isTemplate) {
      checkArgument(
          isNullOrEmpty(options.getDataflowJobFile()),
          "--dataflowJobFile and --templateLocation are mutually exclusive.");
    }
    String fileLocation =
        firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
    checkArgument(
        fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
        "Location must be local or on Cloud Storage, got %s.",
        fileLocation);
    ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
    String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
    try (PrintWriter printWriter =
        new PrintWriter(
            new BufferedWriter(
                new OutputStreamWriter(
                    Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
                    UTF_8)))) {
      printWriter.print(workSpecJson);
      LOG.info("Printed job specification to {}", fileLocation);
    } catch (IOException ex) {
      String error = String.format("Cannot create output file at %s", fileLocation);
      if (isTemplate) {
        throw new RuntimeException(error, ex);
      } else {
        LOG.warn(error, ex);
      }
    }
    if (isTemplate) {
      LOG.info("Template successfully created.");
      return new DataflowTemplateJob();
    }
  }
  String jobIdToUpdate = null;
  if (options.isUpdate()) {
    jobIdToUpdate = getJobIdFromName(options.getJobName());
    newJob.setTransformNameMapping(options.getTransformNameMapping());
    newJob.setReplaceJobId(jobIdToUpdate);
  }
  if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
    newJob.setTransformNameMapping(options.getTransformNameMapping());
    newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
  }
  Job jobResult;
  try {
    jobResult = dataflowClient.createJob(newJob);
  } catch (GoogleJsonResponseException e) {
    String errorMessages = "Unexpected errors";
    if (e.getDetails() != null) {
      if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
        errorMessages =
            "The size of the serialized JSON representation of the pipeline "
                + "exceeds the allowable limit. "
                + "For more information, please see the documentation on job submission:\n"
                + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
      } else {
        errorMessages = e.getDetails().getMessage();
      }
    }
    throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
  } catch (IOException e) {
    throw new RuntimeException("Failed to create a workflow job", e);
  }
  // Use a raw client for post-launch monitoring, as status calls may fail
  // regularly and need not be retried automatically.
  DataflowPipelineJob dataflowPipelineJob =
      new DataflowPipelineJob(
          DataflowClient.create(options),
          jobResult.getId(),
          options,
          jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
          portablePipelineProto);
  // If the service returned client request id, the SDK needs to compare it
  // with the original id generated in the request, if they are not the same
  // (i.e., the returned job is not created by this request), throw
  // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
  // depending on whether this is a reload or not.
  if (jobResult.getClientRequestId() != null
      && !jobResult.getClientRequestId().isEmpty()
      && !jobResult.getClientRequestId().equals(requestId)) {
    // If updating a job.
    if (options.isUpdate()) {
      throw new DataflowJobAlreadyUpdatedException(
          dataflowPipelineJob,
          String.format(
              "The job named %s with id: %s has already been updated into job id: %s "
                  + "and cannot be updated again.",
              newJob.getName(), jobIdToUpdate, jobResult.getId()));
    } else {
      throw new DataflowJobAlreadyExistsException(
          dataflowPipelineJob,
          String.format(
              "There is already an active job named %s with id: %s. If you want to submit a"
                  + " second job, try again by setting a different name using --jobName.",
              newJob.getName(), jobResult.getId()));
    }
  }
  LOG.info(
      "To access the Dataflow monitoring console, please navigate to {}",
      MonitoringUtil.getJobMonitoringPageURL(
          options.getProject(), options.getRegion(), jobResult.getId()));
  LOG.info("Submitted job: {}", jobResult.getId());
  LOG.info(
      "To cancel the job using the 'gcloud' tool, run:\n> {}",
      MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
  return dataflowPipelineJob;
}
@Test
public void testStreamingOnSuccessMatcherWhenPipelineSucceeds() throws Exception {
    // Streaming pipeline whose mocked Dataflow job finishes in DONE; the on-success
    // matcher registered on TestPipelineOptions must be exercised by runner.run.
    options.setStreaming(true);
    Pipeline p = TestPipeline.create(options);
    PCollection<Integer> pc = p.apply(Create.of(1, 2, 3));
    PAssert.that(pc).containsInAnyOrder(1, 2, 3);
    final DataflowPipelineJob mockJob = Mockito.mock(DataflowPipelineJob.class);
    when(mockJob.getState()).thenReturn(State.DONE);
    when(mockJob.getProjectId()).thenReturn("test-project");
    when(mockJob.getJobId()).thenReturn("test-job");
    DataflowRunner mockRunner = Mockito.mock(DataflowRunner.class);
    when(mockRunner.run(any(Pipeline.class))).thenReturn(mockJob);
    TestDataflowRunner runner = TestDataflowRunner.fromOptionsAndClient(options, mockClient);
    options.as(TestPipelineOptions.class).setOnSuccessMatcher(new TestSuccessMatcher(mockJob, 1));
    when(mockJob.waitUntilFinish(any(Duration.class), any(JobMessagesHandler.class)))
        .thenReturn(State.DONE);
    when(mockClient.getJobMetrics(anyString()))
        .thenReturn(generateMockMetricResponse(true /* success */, true /* tentative */));
    runner.run(p, mockRunner);
}
/**
 * Wraps a plain JDBC connection into a driver-specific XA connection by first unwrapping
 * it to the vendor connection class required by the driver's XA factory.
 *
 * @param xaDataSource XA data source; not used here — the XA connection is created from the
 *     unwrapped physical connection (NOTE(review): confirm this is intentional)
 * @param connection JDBC connection to wrap
 * @return XA connection backed by the unwrapped vendor connection
 * @throws SQLException if unwrapping or XA connection creation fails
 */
@Override
public XAConnection wrap(final XADataSource xaDataSource, final Connection connection) throws SQLException {
    return createXAConnection(connection.unwrap(jdbcConnectionClass));
}
@Test
void assertWrap() throws SQLException {
    // Wrapping a plain JDBC connection must yield a PostgreSQL XA connection whose
    // XAResource is the driver's PGXAConnection implementation.
    XAConnection actual = DatabaseTypedSPILoader.getService(XAConnectionWrapper.class, databaseType).wrap(createXADataSource(), mockConnection());
    assertThat(actual.getXAResource(), instanceOf(PGXAConnection.class));
}
/**
 * Computes the MD5 digest of the given string and returns it as a hex string, using
 * the pooled MD5 digest instance.
 *
 * @param string input text to hash
 * @return hex-encoded MD5 digest of {@code string}
 */
public static String md5Hex(String string) {
    return compute(string, DigestObjectPools.MD5);
}
@Test
public void shouldComputeForAGivenStreamUsingMD5() {
    // One megabyte of random bytes; the stream-based digest must match Commons'
    // reference MD5 of the same payload.
    final byte[] payload = new byte[1024 * 1024];
    new Random().nextBytes(payload);
    final String expected = DigestUtils.md5Hex(payload);
    assertEquals(expected, md5Hex(new ByteArrayInputStream(payload)));
}
public static String columnIndexToColumnLabel(int index) { checkArgument(index >= 0, "index must be non-negative"); // Fast path if (index < RADIX) { // A to Z return ALPHABETS[index]; } else if (index < RADIX * RADIX + RADIX) { // AA to ZZ return ALPHABETS[index / RADIX - 1] + ALPHABETS[index % RADIX]; } // Slow path int power = RADIX * RADIX; int sum = RADIX * RADIX + RADIX; int width = 3; int position = -1; while (index >= sum) { power = multiplyExact(power, RADIX); int max = addExact(power, sum); if (index < max) { position = index - sum; break; } sum = max; width += 1; } if (position == -1) { throw new IllegalStateException("Unexpected position value"); } String[] chars = new String[width]; for (int i = width - 1; i >= 0; i--) { chars[i] = ALPHABETS[position % RADIX]; position /= RADIX; } return String.join("", chars); }
@Test
public void testColumnIndexToColumnLabel() {
    // Boundary cases around each width change: 1-letter (A..Z), 2-letter (AA..ZZ),
    // 3-letter (AAA..ZZZ) and the start of the 4-letter range.
    assertEquals(columnIndexToColumnLabel(0), "A");
    assertEquals(columnIndexToColumnLabel(1), "B");
    assertEquals(columnIndexToColumnLabel(RADIX - 1), "Z");
    assertEquals(columnIndexToColumnLabel(RADIX), "AA");
    assertEquals(columnIndexToColumnLabel(RADIX + 1), "AB");
    assertEquals(columnIndexToColumnLabel(RADIX + RADIX - 1), "AZ");
    assertEquals(columnIndexToColumnLabel(RADIX + RADIX), "BA");
    assertEquals(columnIndexToColumnLabel(RADIX + RADIX + 1), "BB");
    assertEquals(columnIndexToColumnLabel(RADIX * RADIX + RADIX - 1), "ZZ");
    assertEquals(columnIndexToColumnLabel(RADIX * RADIX + RADIX), "AAA");
    assertEquals(columnIndexToColumnLabel(RADIX * RADIX + RADIX + 1), "AAB");
    assertEquals(columnIndexToColumnLabel(RADIX * RADIX * RADIX + RADIX * RADIX + RADIX - 1), "ZZZ");
    assertEquals(columnIndexToColumnLabel(RADIX * RADIX * RADIX + RADIX * RADIX + RADIX), "AAAA");
    assertEquals(columnIndexToColumnLabel(RADIX * RADIX * RADIX + RADIX * RADIX + RADIX + 1), "AAAB");
}
/**
 * Prepares the result of SHOW TABLES: picks the target database (explicit FROM clause
 * if present, otherwise the session's current database), then builds the result
 * metadata and the merged query result for it.
 *
 * @param connectionSession session used to resolve the default database
 */
@Override
public void execute(final ConnectionSession connectionSession) {
    final String databaseName = sqlStatement.getFromDatabase()
            .map(schema -> schema.getDatabase().getIdentifier().getValue())
            .orElseGet(() -> connectionSession.getUsedDatabaseName());
    queryResultMetaData = createQueryResultMetaData(databaseName);
    mergedResult = new TransparentMergedResult(getQueryResult(databaseName));
}
@Test
void assertShowTablesExecutorWithLowerCase() throws SQLException {
    // SHOW TABLES LIKE 't_test' (lower case) must match the stored upper-cased
    // table name T_TEST.
    MySQLShowTablesStatement showTablesStatement = new MySQLShowTablesStatement();
    ShowFilterSegment showFilterSegment = mock(ShowFilterSegment.class);
    when(showFilterSegment.getLike()).thenReturn(Optional.of(new ShowLikeSegment(0, 10, "t_test")));
    showTablesStatement.setFilter(showFilterSegment);
    ShowTablesExecutor executor = new ShowTablesExecutor(showTablesStatement, TypedSPILoader.getService(DatabaseType.class, "MySQL"));
    Map<String, ShardingSphereDatabase> databases = getDatabases();
    ContextManager contextManager = mockContextManager(databases);
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    executor.execute(mockConnectionSession());
    // Single-column result set containing exactly the matched table name.
    assertThat(executor.getQueryResultMetaData().getColumnCount(), is(1));
    executor.getMergedResult().next();
    assertThat(executor.getMergedResult().getValue(1, Object.class), is("T_TEST"));
    assertFalse(executor.getMergedResult().next());
}
/** Finds the username bound to a verified phone number; anonymous access only. */
@GetMapping("/find/username")
@PreAuthorize("isAnonymous()")
public ResponseEntity<?> findUsername(@Validated PhoneVerificationDto.VerifyCodeReq request) {
    // Same 200 response as before, built via the ok() builder form.
    return ResponseEntity.ok().body(SuccessResponse.from("user", authCheckUseCase.findUsername(request)));
}
@Test
@DisplayName("일반 회원이 아닌 휴대폰 번호로 아이디를 찾을 때 404 응답을 반환한다.")
void findUsernameIfUserNotFound() throws Exception {
    // given: the use case reports no user registered under this phone number
    String phone = "010-1111-1111";
    given(authCheckUseCase.findUsername(new PhoneVerificationDto.VerifyCodeReq(phone, code))).willThrow(new UserErrorException(UserErrorCode.NOT_FOUND));
    // when: requesting username lookup for that phone/code
    ResultActions resultActions = findUsernameRequest(phone, code);
    // then: 404 with the code/message taken from UserErrorCode.NOT_FOUND
    resultActions
            .andExpect(status().isNotFound())
            .andExpect(jsonPath("$.code").value(UserErrorCode.NOT_FOUND.causedBy().getCode()))
            .andExpect(jsonPath("$.message").value(UserErrorCode.NOT_FOUND.getExplainError()));
}
/**
 * Uploads the local file to S3 using multipart upload, resuming an existing incomplete
 * multipart upload when appending. Parts are submitted concurrently to a bounded thread
 * pool and the upload is completed server-side; for AWS endpoints the combined ETag is
 * verified against an MD5-of-concatenated-part-MD5s checksum (skipped for client-side
 * encrypted files).
 *
 * @param file remote target path
 * @param local local source file
 * @param throttle bandwidth throttle applied to part uploads
 * @param listener stream listener notified of transferred bytes
 * @param status transfer status carrying offset/length; receives the response attributes
 * @param callback connection prompt callback
 * @return storage object with ETag, size, storage class and metadata of the completed upload
 * @throws BackgroundException on mapped service errors or ETag/checksum mismatch
 */
@Override
public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle,
                            final StreamListener listener, final TransferStatus status,
                            final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    try {
        MultipartUpload multipart = null;
        try {
            // When appending, try to resume the most recent incomplete multipart upload.
            if(status.isAppend()) {
                final List<MultipartUpload> list = multipartService.find(file);
                if(!list.isEmpty()) {
                    multipart = list.iterator().next();
                }
            }
        }
        catch(AccessDeniedException | InteroperabilityException e) {
            log.warn(String.format("Ignore failure listing incomplete multipart uploads. %s", e));
        }
        final Path bucket = containerService.getContainer(file);
        final List<MultipartPart> completed = new ArrayList<>();
        // Not found or new upload
        if(null == multipart) {
            if(log.isInfoEnabled()) {
                log.info("No pending multipart upload found");
            }
            final S3Object object = new S3WriteFeature(session, acl).getDetails(file, status);
            // ID for the initiated multipart upload.
            multipart = session.getClient().multipartStartUpload(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), object);
            if(log.isDebugEnabled()) {
                log.debug(String.format("Multipart upload started for %s with ID %s", multipart.getObjectKey(), multipart.getUploadId()));
            }
        }
        else {
            if(status.isAppend()) {
                // Add already completed parts
                completed.addAll(multipartService.list(multipart));
            }
        }
        // Full size of file
        final long size = status.getOffset() + status.getLength();
        final List<Future<MultipartPart>> parts = new ArrayList<>();
        long remaining = status.getLength();
        long offset = 0;
        for(int partNumber = 1; remaining > 0; partNumber++) {
            boolean skip = false;
            if(status.isAppend()) {
                if(log.isInfoEnabled()) {
                    log.info(String.format("Determine if part number %d can be skipped", partNumber));
                }
                // Skip parts already uploaded in a previous attempt, advancing the offset.
                for(MultipartPart c : completed) {
                    if(c.getPartNumber().equals(partNumber)) {
                        if(log.isInfoEnabled()) {
                            log.info(String.format("Skip completed part number %d", partNumber));
                        }
                        skip = true;
                        offset += c.getSize();
                        break;
                    }
                }
            }
            if(!skip) {
                // Last part can be less than 5 MB. Adjust part size.
                final long length = Math.min(Math.max((size / (S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
                // Submit to queue
                parts.add(this.submit(pool, file, local, throttle, listener, status, multipart, partNumber, offset, length, callback));
                remaining -= length;
                offset += length;
            }
        }
        // Wait for all submitted part uploads to finish.
        completed.addAll(Interruptibles.awaitAll(parts));
        // Combining all the given parts into the final object. Processing of a Complete Multipart Upload request
        // could take several minutes to complete. Because a request could fail after the initial 200 OK response
        // has been sent, it is important that you check the response body to determine whether the request succeeded.
        multipart.setBucketName(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName());
        final MultipartCompleted complete = session.getClient().multipartCompleteUpload(multipart, completed);
        if(log.isInfoEnabled()) {
            log.info(String.format("Completed multipart upload for %s with %d parts and checksum %s", complete.getObjectKey(), completed.size(), complete.getEtag()));
        }
        if(file.getType().contains(Path.Type.encrypted)) {
            log.warn(String.format("Skip checksum verification for %s with client side encryption enabled", file));
        }
        else {
            if(S3Session.isAwsHostname(session.getHost().getHostname())) {
                // AWS multipart ETag is md5(concat(part MD5s))-<partCount>; verify against it.
                completed.sort(new MultipartPart.PartNumberComparator());
                final StringBuilder concat = new StringBuilder();
                for(MultipartPart part : completed) {
                    concat.append(part.getEtag());
                }
                final String expected = String.format("%s-%d", ChecksumComputeFactory.get(HashAlgorithm.md5).compute(concat.toString()), completed.size());
                final String reference = StringUtils.remove(complete.getEtag(), "\"");
                if(!StringUtils.equalsIgnoreCase(expected, reference)) {
                    throw new ChecksumException(MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), file.getName()),
                            MessageFormat.format("Mismatch between MD5 hash {0} of uploaded data and ETag {1} returned by the server", expected, reference));
                }
            }
        }
        final StorageObject object = new StorageObject(containerService.getKey(file));
        object.setETag(complete.getEtag());
        object.setContentLength(size);
        object.setStorageClass(multipart.getStorageClass());
        if(multipart.getMetadata() != null) {
            object.addAllMetadata(multipart.getMetadata());
        }
        // Mark parent status as complete
        status.withResponse(new S3AttributesAdapter(session.getHost()).toAttributes(object)).setComplete();
        return object;
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
    }
    finally {
        // Cancel future tasks
        pool.shutdown(false);
    }
}
@Test
public void testUploadSinglePart() throws Exception {
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    // Part size 5 MiB with concurrency 2; the 1021-byte payload below fits in a single part.
    final S3MultipartUploadService service = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl, 5 * 1024L * 1024L, 2);
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    // Leading space in the filename exercises key encoding on the server side.
    final String name = String.format(" %s.txt", UUID.randomUUID());
    final Path test = new Path(container, name, EnumSet.of(Path.Type.file));
    final Local local = new Local(System.getProperty("java.io.tmpdir"), name);
    final byte[] random = RandomUtils.nextBytes(1021);
    IOUtils.write(random, local.getOutputStream(false));
    final TransferStatus status = new TransferStatus();
    status.setLength(random.length);
    status.setStorageClass(S3Object.STORAGE_CLASS_REDUCED_REDUNDANCY);
    final BytecountStreamListener count = new BytecountStreamListener();
    service.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), count, status, new DisabledLoginCallback());
    // All bytes must have been reported to the listener and the status marked complete.
    assertEquals(random.length, count.getSent());
    assertSame(Checksum.NONE, status.getResponse().getChecksum());
    assertTrue(status.isComplete());
    assertNotSame(PathAttributes.EMPTY, status.getResponse());
    assertEquals(random.length, status.getResponse().getSize());
    assertTrue(new S3FindFeature(session, acl).find(test));
    // Attributes fetched from the server must match what the upload reported back.
    final PathAttributes attr = new S3AttributesFinderFeature(session, acl).find(test);
    assertEquals(status.getResponse().getETag(), attr.getETag());
    assertEquals(status.getResponse().getChecksum(), attr.getChecksum());
    assertEquals(random.length, attr.getSize());
    assertEquals(Checksum.NONE, attr.getChecksum());
    assertNotNull(attr.getETag());
    // d2b77e21aa68ebdcbfb589124b9f9192-1
    // Multipart ETags ("<md5>-<parts>") are not parseable as a plain checksum.
    assertEquals(Checksum.NONE, Checksum.parse(attr.getETag()));
    assertEquals(S3Object.STORAGE_CLASS_REDUCED_REDUNDANCY, new S3StorageClassFeature(session, acl).getClass(test));
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    local.delete();
}
@Override
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    // Delegate to the shared header-retrieval path, counted against the
    // XATTR_GET_MAP invocation statistic.
    final Map<String, byte[]> attributes = retrieveHeaders(path, INVOCATION_XATTR_GET_MAP);
    return attributes;
}
@Test
public void testGetAllXAttrs() throws Throwable {
    // The magic path must expose the retrieved-xattrs marker key among its header attributes.
    Map<String, byte[]> xAttrs = headerProcessing.getXAttrs(MAGIC_PATH);
    Assertions.assertThat(xAttrs.keySet())
        .describedAs("Attribute keys")
        .contains(RETRIEVED_XATTRS);
}
public void shutdown() { LOG.info("Shutting down anomaly detector."); synchronized (_shutdownLock) { _shutdown = true; } // SHUTDOWN_ANOMALY is a broker failure with detection time set to 0ms. Here we expect it is added to the front of the // priority queue and notify anomaly handler immediately. _anomalies.add(SHUTDOWN_ANOMALY); _maintenanceEventDetector.shutdown(); _detectorScheduler.shutdown(); try { _detectorScheduler.awaitTermination(SCHEDULER_SHUTDOWN_TIMEOUT_MS, TimeUnit.MILLISECONDS); if (!_detectorScheduler.isTerminated()) { LOG.warn("The sampling scheduler failed to shutdown in " + SCHEDULER_SHUTDOWN_TIMEOUT_MS + " ms."); } } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for anomaly detector to shutdown."); } _brokerFailureDetector.shutdown(); _anomalyLoggerExecutor.shutdownNow(); LOG.info("Anomaly detector shutdown completed."); }
@Test
public void testShutdown() throws InterruptedException {
    // Build the manager with nice mocks for every detector so shutdown() only
    // exercises queue/scheduler teardown.
    PriorityBlockingQueue<Anomaly> anomalies = new PriorityBlockingQueue<>(ANOMALY_DETECTOR_INITIAL_QUEUE_SIZE, anomalyComparator());
    AnomalyNotifier mockAnomalyNotifier = EasyMock.createNiceMock(AnomalyNotifier.class);
    AbstractBrokerFailureDetector mockBrokerFailureDetector = EasyMock.createNiceMock(ZKBrokerFailureDetector.class);
    GoalViolationDetector mockGoalViolationDetector = EasyMock.createNiceMock(GoalViolationDetector.class);
    MetricAnomalyDetector mockMetricAnomalyDetector = EasyMock.createNiceMock(MetricAnomalyDetector.class);
    TopicAnomalyDetector mockTopicAnomalyDetector = EasyMock.createNiceMock(TopicAnomalyDetector.class);
    MaintenanceEventDetector mockMaintenanceEventDetector = EasyMock.createNiceMock(MaintenanceEventDetector.class);
    DiskFailureDetector mockDiskFailureDetector = EasyMock.createNiceMock(DiskFailureDetector.class);
    KafkaCruiseControl mockKafkaCruiseControl = EasyMock.createNiceMock(KafkaCruiseControl.class);
    ScheduledExecutorService detectorScheduler = Executors.newScheduledThreadPool(2, new KafkaCruiseControlThreadFactory("AnomalyDetector", false, null));
    AnomalyDetectorManager anomalyDetectorManager = new AnomalyDetectorManager(anomalies, MOCK_ANOMALY_DETECTION_INTERVAL_MS, mockKafkaCruiseControl,
                                                                               mockAnomalyNotifier, mockGoalViolationDetector, mockBrokerFailureDetector,
                                                                               mockMetricAnomalyDetector, mockDiskFailureDetector, mockTopicAnomalyDetector,
                                                                               mockMaintenanceEventDetector, detectorScheduler);
    anomalyDetectorManager.shutdown();
    // Shutting down a second time from another thread must be safe (idempotent) and not hang.
    Thread t = new Thread(anomalyDetectorManager::shutdown);
    t.start();
    t.join(TimeUnit.SECONDS.toMillis(30));
    // No self-healing activity should have occurred and the scheduler must be terminated.
    assertEquals(0, anomalyDetectorManager.numSelfHealingStarted());
    assertEquals(0, anomalyDetectorManager.numCheckedWithDelay());
    assertTrue(detectorScheduler.isTerminated());
}
@Override
public boolean tryMarkSlotActive(JobID jobId, AllocationID allocationId) {
    // Only a slot that exists and is currently allocated to exactly this
    // job/allocation pair may be promoted to active.
    final TaskSlot<T> slot = getTaskSlot(allocationId);
    if (slot == null || !slot.isAllocated(jobId, allocationId)) {
        return false;
    }
    return markExistingSlotActive(slot);
}
@Test
void testTryMarkSlotActive() throws Exception {
    final TaskSlotTableImpl<?> taskSlotTable = createTaskSlotTableAndStart(3);
    try {
        // Two slots for job 1 and one slot for an unrelated job 2.
        final JobID jobId1 = new JobID();
        final AllocationID allocationId1 = new AllocationID();
        taskSlotTable.allocateSlot(0, jobId1, allocationId1, SLOT_TIMEOUT);
        final AllocationID allocationId2 = new AllocationID();
        taskSlotTable.allocateSlot(1, jobId1, allocationId2, SLOT_TIMEOUT);
        final AllocationID allocationId3 = new AllocationID();
        final JobID jobId2 = new JobID();
        taskSlotTable.allocateSlot(2, jobId2, allocationId3, SLOT_TIMEOUT);
        taskSlotTable.markSlotActive(allocationId1);
        assertThat(taskSlotTable.isAllocated(0, jobId1, allocationId1)).isTrue();
        assertThat(taskSlotTable.isAllocated(1, jobId1, allocationId2)).isTrue();
        assertThat(taskSlotTable.isAllocated(2, jobId2, allocationId3)).isTrue();
        assertThat(taskSlotTable.getActiveTaskSlotAllocationIdsPerJob(jobId1))
                .isEqualTo(Sets.newHashSet(allocationId1));
        // tryMarkSlotActive succeeds for the job's own allocations (including an
        // already-active one) but must refuse an allocation owned by another job.
        assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId1)).isTrue();
        assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId2)).isTrue();
        assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId3)).isFalse();
        assertThat(taskSlotTable.getActiveTaskSlotAllocationIdsPerJob(jobId1))
                .isEqualTo(new HashSet<>(Arrays.asList(allocationId2, allocationId1)));
    } finally {
        taskSlotTable.close();
        assertThat(taskSlotTable.isClosed()).isTrue();
    }
}
@Subscribe
public void onChatMessage(ChatMessage e)
{
	// Compost feedback arrives either as a game message or a spam message.
	if (e.getType() != ChatMessageType.GAMEMESSAGE && e.getType() != ChatMessageType.SPAM)
	{
		return;
	}

	final CompostState compostUsed = determineCompostUsed(e.getMessage());
	if (compostUsed == null)
	{
		return;
	}

	// Drop timed-out pending actions before trying to match this message.
	this.expirePendingActions();

	// Attribute the message to the first pending action whose patch the player
	// is standing beside, then consume that pending action.
	for (PendingCompost pending : pendingCompostActions.values())
	{
		if (playerIsBesidePatch(pending))
		{
			setCompostState(pending.getFarmingPatch(), compostUsed);
			pendingCompostActions.remove(pending.getFarmingPatch());
			break;
		}
	}
}
@Test
public void onChatMessage_handlesInspectMessages() {
    // A SPAM-typed patch-inspect message naming ultracompost must resolve the
    // pending action and persist ULTRACOMPOST for the patch.
    ChatMessage chatEvent = mock(ChatMessage.class);
    when(chatEvent.getType()).thenReturn(ChatMessageType.SPAM);
    when(chatEvent.getMessage()).thenReturn("This is a tree patch. The soil has been treated with ultracompost. The patch is empty and weeded.");
    compostTracker.pendingCompostActions.put(farmingPatch, new CompostTracker.PendingCompost(Instant.MAX, worldPoint, farmingPatch));
    compostTracker.onChatMessage(chatEvent);
    verify(configManager).setRSProfileConfiguration("timetracking", "MOCK.compost", CompostState.ULTRACOMPOST);
}
public FEELFnResult<BigDecimal> invoke(@ParameterName("from") String from, @ParameterName("grouping separator") String group, @ParameterName("decimal separator") String decimal) { if ( from == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } if ( group != null && !group.equals( " " ) && !group.equals( "." ) && !group.equals( "," ) ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "group", "not a valid one, can only be one of: dot ('.'), comma (','), space (' ') ")); } if ( decimal != null ) { if (!decimal.equals( "." ) && !decimal.equals( "," )) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "not a valid one, can only be one of: dot ('.'), comma (',') ")); } else if (group != null && decimal.equals( group )) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "cannot be the same as parameter 'group' ")); } } if ( group != null ) { from = from.replaceAll( "\\" + group, "" ); } if ( decimal != null ) { from = from.replaceAll( "\\" + decimal, "." ); } BigDecimal result = NumberEvalHelper.getBigDecimalOrNull(from ); if( from != null && result == null ) { // conversion failed return FEELFnResult.ofError( new InvalidParametersEvent(Severity.ERROR, "unable to calculate final number result" ) ); } else { return FEELFnResult.ofResult( result ); } }
@Test
void invokeInvalidGroup() {
    // "test" is not one of the accepted grouping separators (space, dot, comma),
    // so the function must report an InvalidParametersEvent error.
    FunctionTestUtil.assertResultError(numberFunction.invoke("1 000", "test", null), InvalidParametersEvent.class);
}
@Override
public HttpHeaders add(HttpHeaders headers) {
    // Fast path: when the source is also a DefaultHttpHeaders we can merge the
    // backing stores directly; otherwise fall back to the generic copy.
    if (!(headers instanceof DefaultHttpHeaders)) {
        return super.add(headers);
    }
    this.headers.add(((DefaultHttpHeaders) headers).headers);
    return this;
}
@Test
public void addCharSequences() {
    // Adding a list of CharSequence values must store all of them under the single header name.
    final DefaultHttpHeaders headers = newDefaultDefaultHttpHeaders();
    headers.add(HEADER_NAME, HeaderValue.THREE.asList());
    assertDefaultValues(headers, HeaderValue.THREE);
}
public String toJsonString(Object object) {
    // toJson may yield null; String.valueOf renders that as the string "null".
    final Object json = toJson(object);
    return String.valueOf(json);
}
@Test
public void testSimple() {
    // The obfuscator must keep ordinary values in its JSON output while masking secrets.
    String output = _obfuscator.toJsonString(_map);
    Assert.assertTrue(output.contains(VALUE));
    Assert.assertFalse(output.contains(SECRET));
}
/**
 * Generates the type stubs for the IR: the shared meta-attribute enum first,
 * then one artifact per top-level enum, set (bit set) or composite type.
 */
public void generateTypeStubs() throws IOException
{
    generateMetaAttributeEnum();

    for (final List<Token> typeTokens : ir.types())
    {
        // The first token of each type declares what kind of type it is.
        final Token beginToken = typeTokens.get(0);
        switch (beginToken.signal())
        {
            case BEGIN_ENUM:
                generateEnum(typeTokens);
                break;

            case BEGIN_SET:
                generateBitSet(typeTokens);
                break;

            case BEGIN_COMPOSITE:
                generateComposite(typeTokens);
                break;

            default:
                // Other signals carry no stub to generate.
                break;
        }
    }
}
@Test
void shouldGenerateCompositeEncoder() throws Exception {
    // Field offsets within the Engine composite relative to the wrap offset.
    final int bufferOffset = 64;
    final int capacityFieldOffset = bufferOffset;
    final int numCylindersOffset = bufferOffset + 2;
    final int expectedEngineCapacity = 2000;
    final int manufacturerCodeOffset = bufferOffset + 3;
    final byte[] manufacturerCode = { 'A', 'B', 'C' };
    final String className = "EngineEncoder";
    final String fqClassName = ir.applicableNamespace() + "." + className;
    when(mockBuffer.getShort(capacityFieldOffset, BYTE_ORDER))
        .thenReturn((short)expectedEngineCapacity);
    generateTypeStubs();
    // Compile the generated encoder and drive it reflectively against a mocked buffer.
    final Class<?> clazz = compile(fqClassName);
    assertNotNull(clazz);
    final Object flyweight = clazz.getConstructor().newInstance();
    wrap(bufferOffset, flyweight, mockBuffer, BUFFER_CLASS);
    final short numCylinders = (short)4;
    clazz.getDeclaredMethod("numCylinders", short.class).invoke(flyweight, numCylinders);
    clazz.getDeclaredMethod("putManufacturerCode", byte[].class, int.class)
        .invoke(flyweight, manufacturerCode, 0);
    // Writes must land at the offsets dictated by the composite layout.
    verify(mockBuffer).putByte(numCylindersOffset, (byte)numCylinders);
    verify(mockBuffer).putBytes(manufacturerCodeOffset, manufacturerCode, 0, manufacturerCode.length);
}
@Override
public void close() throws IOException {
    // Closing this stream only closes the wrapped delegate; no other
    // resources are held here.
    delegate.close();
}
@Test
public void testClose() throws IOException {
    // Closing the compression wrapper must succeed and delegate to the underlying stream.
    CompressionProvider provider = inStream.getCompressionProvider();
    ByteArrayInputStream in = createTestInputStream();
    inStream = new DummyCompressionIS( in, provider );
    inStream.close();
}
/**
 * Stops watching the given etcd key and closes the associated watcher, if any.
 *
 * @param key the etcd key to stop watching; unknown keys are a no-op
 */
@Override
public void unwatch(final String key) {
    // Remove first and null-check the result: a single atomic operation on the
    // concurrent map. The previous containsKey()/remove() pair could race with a
    // concurrent unwatch of the same key and NPE on the second remove's result.
    final Watch.Watcher watcher = watchCache.remove(key);
    if (watcher != null) {
        watcher.close();
        LOGGER.info("Unwatched etcd key: {}", key);
    }
}
@Test
public void testUnWatch() throws NoSuchFieldException, IllegalAccessException {
    final String key = "key";
    etcdDiscoveryServiceUnderTest.unwatch(key);
    // Inspect the private watchCache via reflection to prove the key was evicted.
    final Field watchCacheField = etcdDiscoveryServiceUnderTest.getClass().getDeclaredField("watchCache");
    watchCacheField.setAccessible(true);
    ConcurrentMap<String, Watch.Watcher> o = (ConcurrentMap) watchCacheField.get(etcdDiscoveryServiceUnderTest);
    assertFalse(o.containsKey(key));
}
/**
 * Merges a command-line option into {@code props} under {@code key}: an
 * explicitly supplied option always wins; otherwise the option's default is
 * applied only when the property is not already set.
 */
public static <T> void maybeMergeOptions(Properties props, String key, OptionSet options, OptionSpec<T> spec) {
    final boolean explicitlySet = options.has(spec);
    // Keep an existing property when the option was not given on the command line.
    if (!explicitlySet && props.containsKey(key)) {
        return;
    }
    final T value = options.valueOf(spec);
    if (value == null) {
        // An optional-argument option supplied without a value clears the property.
        props.remove(key);
    } else {
        props.put(key, value.toString());
    }
}
@Test
public void testMaybeMergeOptionsOverwriteExisting() {
    setUpOptions();
    // Pre-populate every key; explicitly supplied command-line values must overwrite them.
    props.put("skey", "existing-string");
    props.put("ikey", "300");
    props.put("sokey", "existing-string-2");
    props.put("iokey", "400");
    props.put("sondkey", "existing-string-3");
    props.put("iondkey", "500");
    OptionSet options = parser.parse(
        "--str", "some-string",
        "--int", "600",
        "--str-opt", "some-string-2",
        "--int-opt", "700",
        "--str-opt-nodef", "some-string-3",
        "--int-opt-nodef", "800"
    );
    CommandLineUtils.maybeMergeOptions(props, "skey", options, stringOpt);
    CommandLineUtils.maybeMergeOptions(props, "ikey", options, intOpt);
    CommandLineUtils.maybeMergeOptions(props, "sokey", options, stringOptOptionalArg);
    CommandLineUtils.maybeMergeOptions(props, "iokey", options, intOptOptionalArg);
    CommandLineUtils.maybeMergeOptions(props, "sondkey", options, stringOptOptionalArgNoDefault);
    CommandLineUtils.maybeMergeOptions(props, "iondkey", options, intOptOptionalArgNoDefault);
    // Every property must now reflect the command-line value, not the pre-existing one.
    assertEquals("some-string", props.get("skey"));
    assertEquals("600", props.get("ikey"));
    assertEquals("some-string-2", props.get("sokey"));
    assertEquals("700", props.get("iokey"));
    assertEquals("some-string-3", props.get("sondkey"));
    assertEquals("800", props.get("iondkey"));
}
@Override
public boolean isIndexed(QueryContext queryContext) {
    final Index index = queryContext.matchIndex(attributeName, QueryContext.IndexMatchHint.PREFER_ORDERED);
    if (index == null || !index.isOrdered()) {
        return false;
    }
    // Only patterns that translate to an index prefix can be served by the
    // ordered index.
    return expressionCanBeUsedAsIndexPrefix();
}
@Test
public void likePredicateIsNotIndexed_whenPercentWildcardIsEscaped() {
    QueryContext queryContext = mock(QueryContext.class);
    when(queryContext.matchIndex("this", QueryContext.IndexMatchHint.PREFER_ORDERED)).thenReturn(createIndex(IndexType.SORTED));
    // An escaped '%' is a literal character rather than a wildcard, so these
    // patterns are not usable as an index prefix despite the sorted index.
    assertFalse(new LikePredicate("this", "sub\\%").isIndexed(queryContext));
    assertFalse(new LikePredicate("this", "sub\\\\\\%").isIndexed(queryContext));
    assertFalse(new LikePredicate("this", "sub\\%string\\%").isIndexed(queryContext));
    assertFalse(new LikePredicate("this", "sub\\str\\%").isIndexed(queryContext));
}
/**
 * Creates a schema for one of the primitive types. Named and complex types
 * must be built through their dedicated factory methods instead.
 *
 * @throws AvroRuntimeException if {@code type} is not a primitive type
 */
public static Schema create(Type type) {
    if (type == Type.STRING) {
        return new StringSchema();
    }
    if (type == Type.BYTES) {
        return new BytesSchema();
    }
    if (type == Type.INT) {
        return new IntSchema();
    }
    if (type == Type.LONG) {
        return new LongSchema();
    }
    if (type == Type.FLOAT) {
        return new FloatSchema();
    }
    if (type == Type.DOUBLE) {
        return new DoubleSchema();
    }
    if (type == Type.BOOLEAN) {
        return new BooleanSchema();
    }
    if (type == Type.NULL) {
        return new NullSchema();
    }
    throw new AvroRuntimeException("Can't create a: " + type);
}
@Test
void intAsDoubleDefaultValue() {
    // An int default (1) supplied for a DOUBLE field must surface as the double 1.0.
    Schema.Field field = new Schema.Field("myField", Schema.create(Schema.Type.DOUBLE), "doc", 1);
    assertTrue(field.hasDefaultValue());
    assertEquals(1.0d, field.defaultVal());
    assertEquals(1.0d, GenericData.get().getDefaultValue(field));
}
/**
 * Collects the telemetry client instance ids of all clients owned by this
 * KafkaStreams instance (admin, per-thread consumers and producers, and the
 * global consumer, if any) within the given timeout.
 *
 * <p>Requests are fanned out to all threads first, then collected, so the
 * timeout is shared across all clients rather than applied sequentially.
 *
 * @param timeout maximum time to wait for all ids; must not be negative
 * @throws IllegalArgumentException if the timeout is negative
 * @throws IllegalStateException if this instance was never started or has been stopped
 * @throws TimeoutException if any id cannot be obtained within the remaining time
 * @throws StreamsException if retrieving the admin client id fails for another reason
 */
public synchronized ClientInstanceIds clientInstanceIds(final Duration timeout) {
    if (timeout.isNegative()) {
        throw new IllegalArgumentException("The timeout cannot be negative.");
    }
    if (state().hasNotStarted()) {
        throw new IllegalStateException("KafkaStreams has not been started, you can retry after calling start().");
    }
    if (state().isShuttingDown() || state.hasCompletedShutdown()) {
        throw new IllegalStateException("KafkaStreams has been stopped (" + state + ").");
    }

    final Timer remainingTime = time.timer(timeout.toMillis());
    final ClientInstanceIdsImpl clientInstanceIds = new ClientInstanceIdsImpl();

    // (1) fan-out calls to threads

    // StreamThread for main/restore consumers and producer(s)
    final Map<String, KafkaFuture<Uuid>> consumerFutures = new HashMap<>();
    final Map<String, KafkaFuture<Map<String, KafkaFuture<Uuid>>>> producerFutures = new HashMap<>();
    // Guarded by changeThreadCount so the thread list cannot change mid-iteration.
    synchronized (changeThreadCount) {
        for (final StreamThread streamThread : threads) {
            consumerFutures.putAll(streamThread.consumerClientInstanceIds(timeout));
            producerFutures.put(streamThread.getName(), streamThread.producersClientInstanceIds(timeout));
        }
    }

    // GlobalThread
    KafkaFuture<Uuid> globalThreadFuture = null;
    if (globalStreamThread != null) {
        globalThreadFuture = globalStreamThread.globalConsumerInstanceId(timeout);
    }

    // (2) get admin client instance id in a blocking fashion, while Stream/GlobalThreads work in parallel
    try {
        clientInstanceIds.setAdminInstanceId(adminClient.clientInstanceId(timeout));
        remainingTime.update(time.milliseconds());
    } catch (final IllegalStateException telemetryDisabledError) {
        // swallow -- a disabled-telemetry admin client simply contributes no id
        log.debug("Telemetry is disabled on the admin client.");
    } catch (final TimeoutException timeoutException) {
        throw timeoutException;
    } catch (final Exception error) {
        throw new StreamsException("Could not retrieve admin client instance id.", error);
    }

    // (3) collect client instance ids from threads

    // (3a) collect consumers from StreamsThread
    for (final Map.Entry<String, KafkaFuture<Uuid>> consumerFuture : consumerFutures.entrySet()) {
        final Uuid instanceId = getOrThrowException(
            consumerFuture.getValue(),
            remainingTime.remainingMs(),
            () -> String.format(
                "Could not retrieve consumer instance id for %s.",
                consumerFuture.getKey()
            )
        );
        remainingTime.update(time.milliseconds());

        // could be `null` if telemetry is disabled on the consumer itself
        if (instanceId != null) {
            clientInstanceIds.addConsumerInstanceId(
                consumerFuture.getKey(),
                instanceId
            );
        } else {
            log.debug(String.format("Telemetry is disabled for %s.", consumerFuture.getKey()));
        }
    }

    // (3b) collect producers from StreamsThread
    for (final Map.Entry<String, KafkaFuture<Map<String, KafkaFuture<Uuid>>>> threadProducerFuture : producerFutures.entrySet()) {
        // The outer future resolves to one future per producer of that thread.
        final Map<String, KafkaFuture<Uuid>> streamThreadProducerFutures = getOrThrowException(
            threadProducerFuture.getValue(),
            remainingTime.remainingMs(),
            () -> String.format(
                "Could not retrieve producer instance id for %s.",
                threadProducerFuture.getKey()
            )
        );
        remainingTime.update(time.milliseconds());

        for (final Map.Entry<String, KafkaFuture<Uuid>> producerFuture : streamThreadProducerFutures.entrySet()) {
            final Uuid instanceId = getOrThrowException(
                producerFuture.getValue(),
                remainingTime.remainingMs(),
                () -> String.format(
                    "Could not retrieve producer instance id for %s.",
                    producerFuture.getKey()
                )
            );
            remainingTime.update(time.milliseconds());

            // could be `null` if telemetry is disabled on the producer itself
            if (instanceId != null) {
                clientInstanceIds.addProducerInstanceId(
                    producerFuture.getKey(),
                    instanceId
                );
            } else {
                log.debug(String.format("Telemetry is disabled for %s.", producerFuture.getKey()));
            }
        }
    }

    // (3c) collect from GlobalThread
    if (globalThreadFuture != null) {
        final Uuid instanceId = getOrThrowException(
            globalThreadFuture,
            remainingTime.remainingMs(),
            () -> "Could not retrieve global consumer client instance id."
        );
        remainingTime.update(time.milliseconds());

        // could be `null` if telemetry is disabled on the client itself
        if (instanceId != null) {
            clientInstanceIds.addConsumerInstanceId(
                globalStreamThread.getName(),
                instanceId
            );
        } else {
            log.debug("Telemetry is disabled for the global consumer.");
        }
    }

    return clientInstanceIds;
}
@Test
public void shouldThrowOnClientInstanceIdsWhenNotStarted() {
    prepareStreams();
    prepareStreamThread(streamThreadOne, 1);
    prepareStreamThread(streamThreadTwo, 2);
    try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
        // clientInstanceIds() is only legal after start(); before that it must fail fast.
        final IllegalStateException error = assertThrows(
            IllegalStateException.class,
            () -> streams.clientInstanceIds(Duration.ZERO)
        );
        assertThat(
            error.getMessage(),
            equalTo("KafkaStreams has not been started, you can retry after calling start().")
        );
    }
}
/**
 * Validates that an imported job carries a description of at least
 * {@code minLength} characters, producing approval or error feedback.
 */
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
  final List<ImportValidationFeedback> feedback = new ArrayList<>();

  // A disabled rule, or a subject that is not a job, yields no feedback at all.
  if ( !isEnabled() || !( subject instanceof JobMeta ) ) {
    return feedback;
  }

  final JobMeta jobMeta = (JobMeta) subject;
  final String description = jobMeta.getDescription();
  final boolean longEnough = description != null && description.length() >= minLength;

  if ( longEnough ) {
    feedback.add( new ImportValidationFeedback( this, ImportValidationResultType.APPROVAL, "A description is present" ) );
  } else {
    feedback.add( new ImportValidationFeedback( this, ImportValidationResultType.ERROR, "A description is not present or too short" ) );
  }

  return feedback;
}
@Test
public void testVerifyRule_NullParameter_DisabledRule() {
    // A disabled rule must ignore its subject entirely -- even a null one --
    // and return an empty (non-null) feedback list.
    JobHasDescriptionImportRule importRule = getImportRule( 10, false );
    List<ImportValidationFeedback> feedbackList = importRule.verifyRule( null );
    assertNotNull( feedbackList );
    assertTrue( feedbackList.isEmpty() );
}
public static void sortMessages(Message[] messages, final SortTerm[] sortTerm) {
    // Expand any REVERSE markers into per-term descending flags, then delegate
    // to the list-based overload.
    sortMessages(messages, getSortTermsWithDescending(sortTerm));
}
@Test
public void testSortMessagesReverse() {
    Message[] expected = new Message[] { MESSAGES[2], MESSAGES[1], MESSAGES[0] };
    // Sort using all the terms. Message order should be the same no matter what term is used
    for (SortTerm term : POSSIBLE_TERMS) {
        Message[] actual = MESSAGES.clone();
        MailSorter.sortMessages(actual, new SortTerm[] { SortTerm.REVERSE, term });
        try {
            assertArrayEquals(actual, expected);
        } catch (AssertionError ex) {
            // Re-throw with the failing term so the report identifies which one broke.
            throw new AssertionError("Term: " + term.toString(), ex);
        }
    }
}
@Override
public int hashCode() {
    // Same seed (17) and multiplier (31) as before, so hash values are unchanged.
    final HashCodeBuilder builder = new HashCodeBuilder(17, 31);
    builder.append(nameserviceId);
    builder.append(clusterId);
    builder.append(blockPoolId);
    return builder.toHashCode();
}
@Test
public void testHashCode() {
    // Namespace infos differing in block-pool id must be distinct set members.
    Set<FederationNamespaceInfo> set = new TreeSet<>();
    // set an empty bpId first
    set.add(new FederationNamespaceInfo("", "nn1", "ns1"));
    set.add(new FederationNamespaceInfo("bp1", "nn2", "ns1"));
    assertThat(set).hasSize(2);
}
/**
 * Starts building a table descriptor for the given connector identifier.
 *
 * @param connector connector factory identifier; must not be null
 */
public static Builder forConnector(String connector) {
    // Fail fast: a descriptor without a connector is meaningless.
    Preconditions.checkNotNull(connector, "Table descriptors require a connector identifier.");
    final Builder builder = new Builder();
    builder.option(FactoryUtil.CONNECTOR, connector);
    return builder;
}
@Test
void testFormatDescriptorWithPrefix() {
    // Options supplied through FormatDescriptor must not already carry the
    // "<format>." prefix; the descriptor builder has to reject such keys.
    assertThatThrownBy(
            () -> TableDescriptor.forConnector("test-connector")
                    .schema(Schema.newBuilder().build())
                    .format(
                            FormatDescriptor.forFormat("test-format")
                                    .option("test-format.a", "A")
                                    .build())
                    .build())
            .as(
                    "Format options set using #format(FormatDescriptor) should not contain the prefix 'test-format.', but found 'test-format.a'.")
            .isInstanceOf(ValidationException.class);
}
public TopicConnection topicConnection(TopicConnection connection) { // It is common to implement both interfaces if (connection instanceof XATopicConnection) { return xaTopicConnection((XATopicConnection) connection); } return TracingConnection.create(connection, this); }
@Test
void topicConnection_wrapsInput() {
    // A plain (non-XA) TopicConnection must come back wrapped for tracing.
    assertThat(jmsTracing.topicConnection(mock(TopicConnection.class)))
        .isInstanceOf(TracingConnection.class);
}
/**
 * Returns true when the two colors are visually similar, i.e. their Euclidean
 * distance in RGB space is below {@code COLOR_DIFF_THRESHOLD}. The alpha
 * channel is ignored, as before.
 */
public boolean colorSimilarCheck(int color1, int color2) {
    // Hoist the per-channel deltas and square by multiplication; Math.pow(x, 2)
    // on these small integer deltas produces the exact same double, so the
    // result is unchanged while being clearer and cheaper.
    final int dr = Color.red(color1) - Color.red(color2);
    final int dg = Color.green(color1) - Color.green(color2);
    final int db = Color.blue(color1) - Color.blue(color2);
    final double colorDiff = Math.sqrt(dr * dr + dg * dg + db * db);
    return colorDiff < COLOR_DIFF_THRESHOLD;
}
@Test
public void colorSimilarCheck() {
    // Parameterized fixture: mColor1/mColor2 with the expected similarity verdict.
    ColorUtils colorUtils = ColorUtils.getInstance();
    Assert.assertEquals(
            mIsColorSimilar,
            colorUtils.colorSimilarCheck(mColor1, mColor2)
    );
}
public String getMessage() {
    // Prefix the bare message with the originating check's name, e.g. "[Foo] ...".
    return "[" + checkName + "] " + getMessageWithoutCheckName();
}
@Test
public void customLink() {
    // A checker declaring a custom link must have it appended after the message.
    Description description = new CustomLinkChecker()
        .buildDescription((DiagnosticPosition) new MockTree())
        .setMessage("custom message")
        .build();
    assertThat(description.getMessage())
        .isEqualTo("[CustomLinkChecker] custom message\n (see https://www.google.com/)");
}
/**
 * Lists all roles. Access is restricted to callers holding the
 * "system:role:list" permission (enforced by the annotation).
 */
@GetMapping("/getAllRoles")
@RequiresPermissions("system:role:list")
public ShenyuAdminResult selectAll() {
    return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, roleService.selectAll());
}
@Test
public void testSelectAll() throws Exception {
    given(roleService.selectAll()).willReturn(Collections.emptyList());
    // GET /role/getAllRoles must answer 200 with the standard query-success message.
    this.mockMvc.perform(MockMvcRequestBuilders.get("/role/getAllRoles"))
            .andExpect(status().isOk())
            .andExpect(jsonPath("$.message", is(ShenyuResultMessage.QUERY_SUCCESS)))
            .andReturn();
}
@Override
public void open() throws Exception {
    // Register the processing-time timer service under the void namespace
    // before the parent open() runs.
    this.timerService = getInternalTimerService("processing timer", VoidNamespaceSerializer.INSTANCE, this);
    // Fresh set of keys observed by this operator instance.
    this.keySet = new HashSet<>();
    super.open();
}
@Test
void testProcessRecord() throws Exception {
    // The process function emits record + 1 for every keyed input element.
    KeyedProcessOperator<Integer, Integer, Integer> processOperator =
            new KeyedProcessOperator<>(
                    new OneInputStreamProcessFunction<Integer, Integer>() {
                        @Override
                        public void processRecord(
                                Integer record, Collector<Integer> output, PartitionedContext ctx) {
                            output.collect(record + 1);
                        }
                    });
    // Key each element by its own value; the harness closes the operator for us.
    try (KeyedOneInputStreamOperatorTestHarness<Integer, Integer, Integer> testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    processOperator,
                    (KeySelector<Integer, Integer>) value -> value,
                    Types.INT)) {
        testHarness.open();
        testHarness.processElement(new StreamRecord<>(1));
        testHarness.processElement(new StreamRecord<>(2));
        testHarness.processElement(new StreamRecord<>(3));
        Collection<StreamRecord<Integer>> recordOutput = testHarness.getRecordOutput();
        assertThat(recordOutput)
                .containsExactly(
                        new StreamRecord<>(2), new StreamRecord<>(3), new StreamRecord<>(4));
    }
}
public boolean hasOobLog(String secretString) { // making a blocking call to get result Optional<PollingResult> result = sendPollingRequest(secretString); if (result.isPresent()) { // In the future we may refactor hasOobLog() to return finer grained info about what kind // of oob is logged return result.get().getHasDnsInteraction() || result.get().getHasHttpInteraction(); } else { // we may choose to retry sendPollingRequest() if oob interactions do arrive late. return false; } }
@Test
public void isVulnerable_validLogRecordWithHttpLogged_returnsTrue() throws IOException {
    // Serve a polling result flagging an HTTP interaction from a local mock server.
    PollingResult log = PollingResult.newBuilder().setHasHttpInteraction(true).build();
    String body = JsonFormat.printer().preservingProtoFieldNames().print(log);
    MockWebServer mockWebServer = new MockWebServer();
    mockWebServer.enqueue(new MockResponse().setResponseCode(HttpStatus.OK.code()).setBody(body));
    client = new TcsClient(VALID_DOMAIN, VALID_PORT, mockWebServer.url("/").toString(), httpClient);
    boolean detectionResult = client.hasOobLog(SECRET);
    assertThat(detectionResult).isTrue();
    mockWebServer.shutdown();
}
@Override
public ParseResult parsePath(String path) {
    if (path.isEmpty()) {
        return emptyPath();
    }
    // Rejects illegal characters by throwing before any splitting happens.
    checkValid(path);
    final String root;
    if (path.startsWith("/")) {
        root = "/";
    } else {
        root = null;
    }
    return new ParseResult(root, splitter().split(path));
}
@Test
public void testUnix_illegalCharacters() {
    // NUL is never allowed in a unix path; the reported index must point at it.
    try {
        PathType.unix().parsePath("/foo/bar\0");
        fail();
    } catch (InvalidPathException expected) {
        assertEquals(8, expected.getIndex());
    }
    try {
        PathType.unix().parsePath("/\u00001/foo");
        fail();
    } catch (InvalidPathException expected) {
        assertEquals(1, expected.getIndex());
    }
}
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (!(obj instanceof NiciraEncapEthDst)) {
        return false;
    }
    // Two extensions are equal iff they carry the same destination MAC.
    final NiciraEncapEthDst that = (NiciraEncapEthDst) obj;
    return Objects.equals(encapEthDst, that.encapEthDst);
}
@Test
public void testEquals() {
    // Same MAC -> equal group; different MAC -> distinct group; EqualsTester
    // also verifies the hashCode contract.
    final NiciraEncapEthDst encapEthDst1 = new NiciraEncapEthDst(mac1);
    final NiciraEncapEthDst sameAsEncapEthDst1 = new NiciraEncapEthDst(mac1);
    final NiciraEncapEthDst encapEthDst2 = new NiciraEncapEthDst(mac2);
    new EqualsTester().addEqualityGroup(encapEthDst1, sameAsEncapEthDst1).addEqualityGroup(encapEthDst2)
        .testEquals();
}
public String render(Object o) {
    // Pre-size to the template length; substituted output is usually close to it.
    final StringBuilder out = new StringBuilder(template.length());
    render(o, out);
    return out.toString();
}
@Test
public void valuesSubstitutedIntoTemplateDontNeedToBeStrings() {
    // Non-string values are rendered via their string form (here as "3").
    Template template = new Template("Hello {{nonStringValue}} ");
    assertEquals("Hello 3 ", template.render(foo));
}
/**
 * Two-argument overload: generates the inclusive series from {@code start} to
 * {@code end} with an implicit step of +1 (ascending) or -1 (descending/equal).
 */
@Udf
public List<Long> generateSeriesLong(
    @UdfParameter(description = "The beginning of the series") final long start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final long end
) {
  // Use Long.compare for the direction test: the previous (end - start) > 0
  // subtraction can overflow for extreme bounds and flip the step's sign.
  return generateSeriesLong(start, end, Long.compare(end, start) > 0 ? 1 : -1);
}
@Test
public void shouldComputeIntRangeWithNegativeEvenStepLong() {
    // 9 down to 0 with step -2 yields 9,7,5,3,1; the end bound is only included
    // when the step lands on it exactly.
    final List<Long> range = rangeUdf.generateSeriesLong(9, 0, -2);
    assertThat(range, hasSize(5));
    long val = 9;
    for (final long i : range) {
        assertThat(val, is(i));
        val -= 2;
    }
}
@Override
public DbEntitiesCatalog get() {
    // Time the classpath scan so catalog construction cost shows up in the logs.
    final Stopwatch timer = Stopwatch.createStarted();
    final DbEntitiesCatalog catalog = scan(packagesToScan, packagesToExclude, chainingClassLoader);
    timer.stop();
    LOG.info("{} entities have been scanned and added to DB Entity Catalog, it took {}", catalog.size(), timer);
    return catalog;
}
@Test
void testExcludingPackagesWorkCorrectly() {
    // "org.graylog2.indexer.indexset" is excluded from the scan, so its
    // "index_sets" collection must be absent from the resulting catalog.
    DbEntitiesScanner scanner = new DbEntitiesScanner(
            new String[]{"org.graylog2.indexer"},
            new String[]{"org.graylog2.indexer.indexset"}
    );
    final DbEntitiesCatalog dbEntitiesCatalog = scanner.get();
    final Optional<DbEntityCatalogEntry> entryByCollectionName = dbEntitiesCatalog.getByCollectionName("index_sets");
    assertFalse(entryByCollectionName.isPresent());
}
@Override
public String toString() {
    // Pattern sources render as the pattern alone; single-file (sub)range
    // sources append the offset range from the superclass.
    final String spec = fileOrPatternSpec.toString();
    switch (mode) {
        case FILEPATTERN:
            return spec;
        case SINGLE_FILE_OR_SUBRANGE:
            return spec + " range " + super.toString();
        default:
            throw new IllegalStateException("Unexpected mode: " + mode);
    }
}
@Test
public void testToStringFile() throws Exception {
    // A single-file source renders as "<path> range [start, end)".
    File f = createFileWithData("foo", Collections.emptyList());
    Metadata metadata = FileSystems.matchSingleFileSpec(f.getPath());
    TestFileBasedSource source = new TestFileBasedSource(metadata, 1, 0, 10, null);
    assertEquals(String.format("%s range [0, 10)", f.getAbsolutePath()), source.toString());
}
/**
 * Instantiates the {@link PlacementRule} implementation named by
 * {@code ruleStr}, configured with {@code conf}.
 *
 * @throws ClassNotFoundException if no class with that name is on the classpath
 */
public static PlacementRule getPlacementRule(String ruleStr, Configuration conf)
    throws ClassNotFoundException {
  // asSubclass enforces at load time that the class really is a PlacementRule.
  final Class<? extends PlacementRule> clazz =
      Class.forName(ruleStr).asSubclass(PlacementRule.class);
  LOG.info("Using PlacementRule implementation - " + clazz);
  return ReflectionUtils.newInstance(clazz, conf);
}
@Test
public void testGetExistRuleText() {
    // Resolving an existing rule class by canonical name must succeed and
    // yield a rule whose name matches that class name.
    final String exists = DefaultPlacementRule.class.getCanonicalName();
    PlacementRule rule = null;
    try {
        rule = PlacementFactory.getPlacementRule(exists, null);
    } catch (ClassNotFoundException cnfe) {
        fail("Class should have been found");
    }
    assertNotNull("Rule object is null", rule);
    assertEquals("Names not equal", rule.getName(), exists);
}
/**
 * Returns true when the given string is a valid URL according to this
 * validator's configuration. Validation parses the value as a URI and then
 * applies scheme, authority, path, query and fragment checks in turn.
 */
public boolean isValid(String value) {
    // Null is never a valid URL.
    if (value == null) {
        return false;
    }

    URI uri; // ensure value is a valid URI
    try {
        uri = new URI(value);
    } catch (URISyntaxException e) {
        return false;
    }
    // OK, perform additional validation
    String scheme = uri.getScheme();
    if (!isValidScheme(scheme)) {
        return false;
    }
    String authority = uri.getRawAuthority();
    if ("file".equals(scheme) && (authority == null || "".equals(authority))) {
        // Special case - file: allows an empty authority
        return true; // this is a local file - nothing more to do here
    } else if ("file".equals(scheme) && authority != null && authority.contains(":")) {
        // file: authorities must not contain a port-like colon.
        return false;
    } else {
        // Validate the authority
        if (!isValidAuthority(authority)) {
            return false;
        }
    }
    if (!isValidPath(uri.getRawPath())) {
        return false;
    }
    if (!isValidQuery(uri.getRawQuery())) {
        return false;
    }
    if (!isValidFragment(uri.getRawFragment())) {
        return false;
    }
    return true;
}
/** VALIDATOR-202: a hostname made of many single-character labels must validate. */
@Test
public void testValidator202() {
  final String[] allowedSchemes = {"http", "https"};
  final UrlValidator validator = new UrlValidator(allowedSchemes, UrlValidator.NO_FRAGMENTS);
  final String deeplyDottedHost =
      "http://l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.l.org";
  assertTrue(validator.isValid(deeplyDottedHost));
}
/**
 * Exposes this stage as a {@link CompletableFuture} by bridging through the
 * wrapped task's CompletionStage view.
 */
@Override
public CompletableFuture<T> toCompletableFuture() {
  return _task.toCompletionStage()
      .toCompletableFuture();
}
/** A stage built from an already-running value task must complete with that value. */
@Test
public void testCreateStageFromTask() throws Exception {
  final String expected = "testCreateStageFromTask";
  final Task<String> task = Task.value(expected);
  _engine.run(task);
  final ParSeqBasedCompletionStage<String> stage =
      _parSeqBasedCompletionStageFactory.buildStageFromTask(task);
  Assert.assertEquals(expected, stage.toCompletableFuture().get());
}
/**
 * Populates the per-request context (protocol, target, encoding, addresses)
 * before delegating down the filter chain, and always clears the context
 * afterwards so it cannot leak across pooled threads.
 */
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse,
    FilterChain filterChain) throws IOException, ServletException {
  final RequestContext context = RequestContextHolder.getContext();
  try {
    context.getBasicContext().setRequestProtocol(BasicContext.HTTP_PROTOCOL);
    final HttpServletRequest httpRequest = (HttpServletRequest) servletRequest;
    setRequestTarget(httpRequest, context);
    setEncoding(httpRequest, context);
    setAddressContext(httpRequest, context);
    setOtherBasicContext(httpRequest, context);
    filterChain.doFilter(servletRequest, servletResponse);
  } finally {
    // Clear even on exception; the thread may be reused for another request.
    RequestContextHolder.removeContext();
  }
}
/** With an empty request encoding, the filter should fall back to UTF-8. */
@Test
public void testDoFilterWithoutEncoding() throws Exception {
  when(servletRequest.getCharacterEncoding()).thenReturn("");
  final MockNextFilter downstream = new MockNextFilter("testApp", "UTF-8");
  filter.doFilter(servletRequest, servletResponse, new MockFilterChain(servlet, downstream));
  // Assertions run inside the downstream filter; rethrow any recorded failure.
  if (null != downstream.error) {
    throw downstream.error;
  }
}
/**
 * Acquires the lock, blocking indefinitely until it is available.
 * <p>
 * Delegates to {@code lockInterruptibly(-1, null)} (no lease timeout). Since
 * {@link java.util.concurrent.locks.Lock#lock()} cannot throw a checked
 * exception, an interruption is translated into {@link IllegalStateException}.
 */
@Override
public void lock() {
  try {
    lockInterruptibly(-1, null);
  } catch (InterruptedException e) {
    // Fix: restore the thread's interrupt flag so callers can still observe
    // the interruption, and keep the original exception as the cause instead
    // of silently discarding it.
    Thread.currentThread().interrupt();
    throw new IllegalStateException(e);
  }
}
/**
 * Unlocking a spin lock owned by a different thread must throw
 * {@link IllegalMonitorStateException}.
 */
@Test
public void testUnlockFail() {
    Assertions.assertThrows(IllegalMonitorStateException.class, () -> {
        RLock lock = redisson.getSpinLock("lock");
        Thread t = new Thread() {
            public void run() {
                // Owner thread: acquire the lock, hold it ~1s, then release.
                RLock lock = redisson.getSpinLock("lock");
                lock.lock();
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }
                lock.unlock();
            }
            ;
        };
        t.start();
        // Wait long enough (400ms) for the owner thread to have acquired the
        // lock, but shorter than its 1s hold so it is still the owner.
        t.join(400);
        try {
            // This thread never acquired the lock, so unlock() must throw.
            lock.unlock();
        } catch (IllegalMonitorStateException e) {
            // Let the owner thread finish cleanly before propagating the
            // expected exception to assertThrows.
            t.join();
            throw e;
        }
    });
}
/**
 * Re-trains a model from the trainer and dataset configurations recovered
 * from the stored provenance. Note this trains a brand-new model rather than
 * copying the original.
 *
 * @return a freshly trained model built from the reproduced trainer/dataset
 * @throws ClassNotFoundException if a class named in the provenance cannot be loaded
 */
public Model<T> reproduceFromProvenance() throws ClassNotFoundException {
    // Until now the object only holds the configuration for these objects, the following
    // functions will actually re-instantiate them.
    Trainer<T> newTrainer = recoverTrainer();
    Dataset<T> newDataset = recoverDataset();
    // Exposing the configuration manager means there could be an edge case were
    // the invocation count is changed before the model is trained.
    // Pass through a desired invocation count to prevent this behavior
    // TODO: does not apply to inner trainers, figure out how to address this or if it needs to be addressed
    // NOTE(review): trainedInvocationCount is read from provenance but never
    // passed to the train() call below — presumably it was intended to pin the
    // trainer's invocation count; confirm intent before removing or wiring it in.
    int trainedInvocationCount = (int) this.modelProvenance
        .getTrainerProvenance()
        .getInstanceValues()
        .get("train-invocation-count")
        .getValue();
    // This function actually re-trains a model rather than copy the original
    return newTrainer.train(newDataset);
}
/**
 * Reproducing a model from provenance (no train/test splitter involved) must
 * yield identical weights to the original.
 */
@Test
public void testReproduceFromProvenanceNoSplitter() throws URISyntaxException, ClassNotFoundException {
  final CSVDataSource<Label> source = getCSVDataSource();
  final MutableDataset<Label> dataset = new MutableDataset<>(source);
  final LogisticRegressionTrainer trainer = new LogisticRegressionTrainer();
  // Train three times so the provenance records an invocation count above one.
  LinearSGDModel original = (LinearSGDModel) trainer.train(dataset);
  original = (LinearSGDModel) trainer.train(dataset);
  original = (LinearSGDModel) trainer.train(dataset);
  final ReproUtil<Label> repro = new ReproUtil<>(original.getProvenance(), Label.class);
  final LinearSGDModel reproduced = (LinearSGDModel) repro.reproduceFromProvenance();
  assertEquals(reproduced.getWeightsCopy(), original.getWeightsCopy());
}
/**
 * Applies the first matching {@code KiePMMLTarget} to the predicted value of
 * the result's target field, replacing the stored prediction in place.
 * No-op unless the result code is OK.
 *
 * @param toModify      result whose target-field value may be rewritten
 * @param processingDTO supplies the candidate targets
 */
static void executeTargets(final PMML4Result toModify, final ProcessingDTO processingDTO) {
    logger.debug("executeTargets {} {}", toModify, processingDTO);
    // Targets only apply to successful evaluations.
    if (!toModify.getResultCode().equals(OK.getName())) {
        return;
    }
    final String targetFieldName = toModify.getResultObjectName();
    final Map<String, Object> resultVariables = toModify.getResultVariables();
    // Only the first target whose field matches the result's target field is
    // applied (findFirst); any further matching targets are ignored.
    processingDTO.getKiePMMLTargets()
            .stream()
            .filter(kiePMMLTarget -> kiePMMLTarget.getField() != null && kiePMMLTarget.getField().equals(targetFieldName))
            .findFirst()
            .ifPresent(kiePMMLTarget -> {
                Object prediction = resultVariables.get(targetFieldName);
                logger.debug("Original prediction {}", prediction);
                Object modifiedPrediction = kiePMMLTarget.modifyPrediction(resultVariables.get(targetFieldName));
                logger.debug("Modified prediction {}", modifiedPrediction);
                // Overwrite the raw prediction with the target-adjusted value.
                resultVariables.put(targetFieldName, modifiedPrediction);
            });
}
/**
 * Verifies executeTargets only rewrites the prediction when (a) the result
 * code is OK and (b) the result object name matches a target's field.
 */
@Test
void executeTargets() {
    // Build model
    String TARGET_NAME = "TARGET_NAME";
    String FIELD_NAME = "FIELD_NAME";
    // Target with min value 4.34: predictions below it get clamped up to 4.34.
    TargetField targetField = new TargetField(Collections.emptyList(), null, FIELD_NAME, null, 4.34, null, null, null);
    KiePMMLTarget kiePMMLTarget = KiePMMLTarget.builder(TARGET_NAME, Collections.emptyList(), targetField)
            .build();
    // Second target with a non-matching field, to confirm only the matching one applies.
    List<KiePMMLTarget> kiePMMLTargets = Arrays.asList(kiePMMLTarget, KiePMMLTarget.builder("NEW_TARGET", Collections.emptyList(), new TargetField(Collections.emptyList(), null, "NEW_TARGET", null, null, null, null, null)).build());
    KiePMMLTestingModel model = KiePMMLTestingModel.builder("FILENAME", "TESTINGMODEL", Collections.emptyList(), MINING_FUNCTION.REGRESSION)
            .withKiePMMLTargets(kiePMMLTargets)
            .build();
    // Build PMML4Result
    PMML4Result toModify = new PMML4Result();
    toModify.setResultCode(ResultCode.FAIL.getName());
    toModify.addResultVariable(FIELD_NAME, 4.33);
    assertThat(toModify.getResultVariables().get(FIELD_NAME)).isEqualTo(4.33);
    ProcessingDTO processingDTO = getProcessingDTO(model, new ArrayList<>());
    // 1) FAIL result code: nothing is modified.
    PostProcess.executeTargets(toModify, processingDTO);
    assertThat(toModify.getResultVariables().get(FIELD_NAME)).isEqualTo(4.33);
    toModify.setResultCode(ResultCode.OK.getName());
    // 2) OK code but no result object name set: still nothing is modified.
    PostProcess.executeTargets(toModify, processingDTO);
    assertThat(toModify.getResultVariables().get(FIELD_NAME)).isEqualTo(4.33);
    toModify.setResultObjectName(FIELD_NAME);
    // 3) OK code and matching object name: prediction is clamped to the target min.
    PostProcess.executeTargets(toModify, processingDTO);
    assertThat(toModify.getResultVariables().get(FIELD_NAME)).isEqualTo(4.34);
}
/**
 * Deserializes a Bitcoin {@code version} message from the given buffer.
 * Fields are consumed strictly in wire order; the addr_from field and the
 * local host nonce are skipped rather than parsed.
 *
 * @param payload buffer positioned at the start of the message payload
 * @return the parsed version message
 * @throws BufferUnderflowException if the buffer is shorter than the message
 * @throws ProtocolException if the advertised client version is below the
 *         minimum supported protocol version
 */
public static VersionMessage read(ByteBuffer payload) throws BufferUnderflowException, ProtocolException {
    int clientVersion = (int) ByteUtils.readUint32(payload);
    // Reject peers speaking a protocol older than we support.
    check(clientVersion >= ProtocolVersion.MINIMUM.intValue(), ProtocolException::new);
    Services localServices = Services.read(payload);
    Instant time = Instant.ofEpochSecond(ByteUtils.readInt64(payload));
    Services receivingServices = Services.read(payload);
    // addr_recv: 16-byte IP (IPv4-mapped or IPv6) followed by a big-endian port.
    InetAddress receivingInetAddress = PeerAddress.getByAddress(Buffers.readBytes(payload, 16));
    int receivingPort = ByteUtils.readUint16BE(payload);
    InetSocketAddress receivingAddr = new InetSocketAddress(receivingInetAddress, receivingPort);
    Buffers.skipBytes(payload, NETADDR_BYTES); // addr_from
    // uint64 localHostNonce (random data)
    // We don't care about the localhost nonce. It's used to detect connecting back to yourself in cases where
    // there are NATs and proxies in the way. However we don't listen for inbound connections so it's
    // irrelevant.
    Buffers.skipBytes(payload, 8);
    // string subVer (currently "")
    String subVer = Buffers.readLengthPrefixedString(payload);
    // int bestHeight (size of known block chain).
    long bestHeight = ByteUtils.readUint32(payload);
    // The relay flag only exists for peers that support BLOOM_FILTER; older
    // peers implicitly relay all transactions.
    boolean relayTxesBeforeFilter = clientVersion >= ProtocolVersion.BLOOM_FILTER.intValue() ? payload.get() != 0 : true;
    return new VersionMessage(clientVersion, localServices, time, receivingServices, receivingAddr, subVer, bestHeight, relayTxesBeforeFilter);
}
/** Serializing and re-reading a version message with an IPv6 receiving address must round-trip. */
@Test
public void roundTrip_ipv6() throws Exception {
  final VersionMessage original = new VersionMessage(TESTNET, 1234);
  original.time = Instant.ofEpochSecond(23456);
  original.subVer = "/bitcoinj/";
  original.localServices = Services.of(1);
  original.receivingAddr =
      new InetSocketAddress(InetAddress.getByName("2002:db8:85a3:0:0:8a2e:370:7335"), 8333);
  final VersionMessage decoded = VersionMessage.read(ByteBuffer.wrap(original.serialize()));
  assertEquals(1234, decoded.bestHeight);
  assertEquals(Instant.ofEpochSecond(23456), decoded.time);
  assertEquals("/bitcoinj/", decoded.subVer);
  assertEquals(ProtocolVersion.CURRENT.intValue(), decoded.clientVersion);
  assertEquals(1, decoded.localServices.bits());
  assertEquals("2002:db8:85a3:0:0:8a2e:370:7335", decoded.receivingAddr.getHostName());
  assertEquals(8333, decoded.receivingAddr.getPort());
}
/**
 * Returns a dynamic proxy implementing the given configuration interface,
 * routing all method calls through this manager's invocation handler.
 *
 * @param clazz public configuration interface to proxy
 * @return a proxy instance implementing {@code clazz}
 * @throws RuntimeException if {@code clazz} is not public (the JDK proxy
 *         cannot invoke default methods on non-public interfaces)
 */
public <T extends Config> T getConfig(Class<T> clazz) {
  if (!Modifier.isPublic(clazz.getModifiers())) {
    throw new RuntimeException("Non-public configuration classes can't have default methods invoked");
  }
  // Safe: Proxy.newProxyInstance returns an instance of the single requested
  // interface, so the cast to T cannot fail at runtime. Suppress at the
  // smallest possible scope rather than leaving the warning unaddressed.
  @SuppressWarnings("unchecked")
  T proxy = (T) Proxy.newProxyInstance(clazz.getClassLoader(), new Class<?>[] { clazz }, handler);
  return proxy;
}
/** Writing a value through the config proxy must be readable back immediately. */
@Test
public void testSetConfig() throws IOException {
  final TestConfig config = manager.getConfig(TestConfig.class);
  final String updated = "new value";
  config.key(updated);
  Assert.assertEquals(updated, config.key());
}