focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public Deserializer deserializer(String topic, Target type) {
  // Build a deserializer that resolves the schema id embedded in each message
  // and delegates rendering to the formatter registered for that schema type.
  return (headers, data) -> {
    var schemaId = extractSchemaIdFromMsg(data);
    SchemaType format = getMessageFormatBySchemaId(schemaId);
    MessageFormatter formatter = schemaRegistryFormatters.get(format);
    String renderedJson = formatter.format(topic, data);
    Map<String, Object> additionalProperties = Map.of(
        "schemaId", schemaId,
        "type", format.name()
    );
    return new DeserializeResult(renderedJson, DeserializeResult.Type.JSON, additionalProperties);
  };
}
@Test
void deserializeReturnsJsonAvroMsgJsonRepresentation() throws RestClientException, IOException {
    // Register an Avro record schema with a string field and an int field.
    AvroSchema schema = new AvroSchema(
        "{" + "  \"type\": \"record\"," + "  \"name\": \"TestAvroRecord1\"," + "  \"fields\": ["
            + "    {" + "      \"name\": \"field1\"," + "      \"type\": \"string\"" + "    },"
            + "    {" + "      \"name\": \"field2\"," + "      \"type\": \"int\"" + "    }" + "  ]"
            + "}"
    );
    String jsonValue = "{ \"field1\":\"testStr\", \"field2\": 123 }";
    String topic = "test";

    // Encode the JSON value as a schema-registry wire-format message
    // (magic byte + schema id + Avro body).
    int schemaId = registryClient.register(topic + "-value", schema);
    byte[] data = toBytesWithMagicByteAndSchemaId(schemaId, jsonValue, schema);

    var result = serde.deserializer(topic, Serde.Target.VALUE).deserialize(null, data);

    // The serde must render the Avro payload back to the equivalent JSON and
    // report the schema id and type in the additional properties.
    assertJsonsEqual(jsonValue, result.getResult());
    assertThat(result.getType()).isEqualTo(DeserializeResult.Type.JSON);
    assertThat(result.getAdditionalProperties())
        .contains(Map.entry("type", "AVRO"))
        .contains(Map.entry("schemaId", schemaId));
}
@GET
@Path(GET_INFO)
@ApiOperation(value = "Get general Alluxio Master service information",
    response = alluxio.wire.AlluxioMasterInfo.class)
public Response getInfo(@QueryParam(QUERY_RAW_CONFIGURATION) final Boolean rawConfiguration) {
  // TODO(jiri): Add a mechanism for retrieving only a subset of the fields.
  return RestUtils.call(() -> {
    // The query parameter is optional; default to the resolved (non-raw) configuration.
    boolean rawConfig = false;
    if (rawConfiguration != null) {
      rawConfig = rawConfiguration;
    }
    // Assemble a full snapshot of master state: capacity, configuration, worker lists,
    // metrics, mount points, addresses, timing, tier/UFS capacity and version info.
    return new AlluxioMasterInfo().setCapacity(getCapacityInternal())
        .setConfiguration(getConfigurationInternal(rawConfig))
        .setLostWorkers(mBlockMaster.getLostWorkersInfoList()).setMetrics(getMetricsInternal())
        .setMountPoints(getMountPointsInternal())
        .setRpcAddress(mMasterProcess.getRpcAddress().toString())
        .setStartTimeMs(mMasterProcess.getStartTimeMs())
        .setTierCapacity(getTierCapacityInternal()).setUfsCapacity(getUfsCapacityInternal())
        .setUptimeMs(mMasterProcess.getUptimeMs())
        .setVersion(RuntimeConstants.VERSION).setRevision(ProjectConstants.REVISION)
        .setWorkers(mBlockMaster.getWorkerInfoList());
  }, Configuration.global());
}
@Test
public void getMasterInfo() {
  // Mock for rpc address
  when(mMasterProcess.getRpcAddress()).thenReturn(new InetSocketAddress("localhost", 8080));
  // Mock for metrics
  final int FILES_PINNED_TEST_VALUE = 100;
  String filesPinnedProperty = MetricKey.MASTER_FILES_PINNED.getName();
  Gauge<Integer> filesPinnedGauge = () -> FILES_PINNED_TEST_VALUE;
  MetricSet mockMetricsSet = mock(MetricSet.class);
  Map<String, Metric> map = new HashMap<>();
  map.put(filesPinnedProperty, filesPinnedGauge);
  when(mockMetricsSet.getMetrics()).thenReturn(map);
  MetricsSystem.METRIC_REGISTRY.registerAll(mockMetricsSet);
  // Mock for start time
  when(mMasterProcess.getStartTimeMs()).thenReturn(101L);
  // Mock for up time
  when(mMasterProcess.getUptimeMs()).thenReturn(102L);

  Response response = mHandler.getInfo(false);
  try {
    assertNotNull("Response must be not null!", response);
    assertNotNull("Response must have a entry!", response.getEntity());
    assertTrue("Entry must be an AlluxioMasterInfo!",
        (response.getEntity() instanceof AlluxioMasterInfo));
    AlluxioMasterInfo info = (AlluxioMasterInfo) response.getEntity();

    // Validate configuration
    assertNotNull("Configuration must be not null", info.getConfiguration());
    assertFalse("Properties Map must be not empty!", (info.getConfiguration().isEmpty()));

    // Validate rpc address
    assertEquals("localhost/127.0.0.1:8080", info.getRpcAddress());

    // Validate metrics
    Map<String, Long> metricsMap = info.getMetrics();
    assertFalse("Metrics Map must be not empty!", (metricsMap.isEmpty()));
    assertTrue("Map must contain key " + filesPinnedProperty + "!",
        metricsMap.containsKey(filesPinnedProperty));
    assertEquals(FILES_PINNED_TEST_VALUE, metricsMap.get(filesPinnedProperty).longValue());

    // Validate StartTimeMs
    assertEquals(101L, info.getStartTimeMs());

    // Validate UptimeMs
    assertEquals(102L, info.getUptimeMs());

    // Validate version
    assertEquals(RuntimeConstants.VERSION, info.getVersion());

    // Validate capacity bytes: expected total is the sum over both workers' tiers.
    Capacity cap = info.getCapacity();
    long sumCapacityBytes = 0;
    for (Map.Entry<String, Long> entry1 : WORKER1_TOTAL_BYTES_ON_TIERS.entrySet()) {
      Long totalBytes = entry1.getValue();
      sumCapacityBytes += totalBytes;
    }
    for (Map.Entry<String, Long> entry1 : WORKER2_TOTAL_BYTES_ON_TIERS.entrySet()) {
      Long totalBytes = entry1.getValue();
      sumCapacityBytes += totalBytes;
    }
    assertEquals(sumCapacityBytes, cap.getTotal());

    // Validate used bytes: likewise summed over both workers' tiers.
    long sumUsedBytes = 0;
    for (Map.Entry<String, Long> entry1 : WORKER1_USED_BYTES_ON_TIERS.entrySet()) {
      Long totalBytes = entry1.getValue();
      sumUsedBytes += totalBytes;
    }
    for (Map.Entry<String, Long> entry1 : WORKER2_USED_BYTES_ON_TIERS.entrySet()) {
      Long totalBytes = entry1.getValue();
      sumUsedBytes += totalBytes;
    }
    assertEquals(sumUsedBytes, cap.getUsed());

    // Validate UFS capacity
    Capacity ufsCapacity = info.getUfsCapacity();
    assertEquals(UFS_SPACE_TOTAL, ufsCapacity.getTotal());
    assertEquals(UFS_SPACE_USED, ufsCapacity.getUsed());

    // Validate workers: both registered workers must appear (order-insensitive).
    List<WorkerInfo> workers = info.getWorkers();
    assertEquals(2, workers.size());
    long worker1 = mBlockMaster.getWorkerId(NET_ADDRESS_1);
    long worker2 = mBlockMaster.getWorkerId(NET_ADDRESS_2);
    Set<Long> expectedWorkers = new HashSet<>();
    expectedWorkers.add(worker1);
    expectedWorkers.add(worker2);
    Set<Long> actualWorkers = new HashSet<>();
    for (WorkerInfo w : workers) {
      actualWorkers.add(w.getId());
    }
    assertEquals(expectedWorkers, actualWorkers);
  } finally {
    response.close();
  }
}
public String getDomainObjectName() { if (stringHasValue(domainObjectName)) { return domainObjectName; } String finalDomainObjectName; if (stringHasValue(runtimeTableName)) { finalDomainObjectName = JavaBeansUtil.getCamelCaseString(runtimeTableName, true); } else { finalDomainObjectName = JavaBeansUtil.getCamelCaseString(introspectedTableName, true); } if (domainObjectRenamingRule != null) { Pattern pattern = Pattern.compile(domainObjectRenamingRule.getSearchString()); String replaceString = domainObjectRenamingRule.getReplaceString(); replaceString = replaceString == null ? "" : replaceString; //$NON-NLS-1$ Matcher matcher = pattern.matcher(finalDomainObjectName); finalDomainObjectName = JavaBeansUtil.getFirstCharacterUppercase(matcher.replaceAll(replaceString)); } return finalDomainObjectName; }
@Test
void testRenamingRuleNoUnderscore() {
    // A rule that strips the "Sys" prefix from the derived domain object name.
    DomainObjectRenamingRule rule = new DomainObjectRenamingRule();
    rule.setSearchString("^Sys");
    rule.setReplaceString("");

    FullyQualifiedTable table = new FullyQualifiedTable(null, "myschema", "sysmytable", null,
            null, false, null, null, null, false, rule, null);

    // "sysmytable" camel-cases to "Sysmytable"; the rule then yields "Mytable".
    assertThat(table.getDomainObjectName()).isEqualTo("Mytable");
}
/**
 * Loads the {@code CheckpointStorage} selected by the checkpoint-storage option, either one of
 * the built-in storages or a user factory class loaded reflectively.
 *
 * @param config the configuration to read the storage name and settings from (non-null)
 * @param classLoader class loader used to resolve a custom factory class (non-null)
 * @param logger optional logger for diagnostics; may be null
 * @return the configured storage, or {@code Optional.empty()} when none is configured
 * @throws DynamicCodeLoadingException if a custom factory class cannot be found or instantiated
 */
public static Optional<CheckpointStorage> fromConfig(
        ReadableConfig config, ClassLoader classLoader, @Nullable Logger logger)
        throws IllegalStateException, DynamicCodeLoadingException {
    Preconditions.checkNotNull(config, "config");
    Preconditions.checkNotNull(classLoader, "classLoader");

    final String storageName = config.get(CheckpointingOptions.CHECKPOINT_STORAGE);
    if (storageName == null) {
        if (logger != null) {
            // Fixed typo: "has not be set" -> "has not been set".
            logger.debug(
                    "The configuration {} has not been set in the current"
                            + " sessions config.yaml. Falling back to a default CheckpointStorage"
                            + " type. Users are strongly encouraged explicitly set this configuration"
                            + " so they understand how their applications are checkpointing"
                            + " snapshots for fault-tolerance.",
                    CheckpointingOptions.CHECKPOINT_STORAGE.key());
        }
        return Optional.empty();
    }

    // Locale-independent lowering: with a Turkish default locale, toLowerCase()
    // would map 'I' to a dotless i and break matching of built-in storage names.
    switch (storageName.toLowerCase(java.util.Locale.ROOT)) {
        case JOB_MANAGER_STORAGE_NAME:
            return Optional.of(createJobManagerCheckpointStorage(config, classLoader, logger));
        case FILE_SYSTEM_STORAGE_NAME:
            return Optional.of(createFileSystemCheckpointStorage(config, classLoader, logger));
        default:
            if (logger != null) {
                logger.info("Loading state backend via factory '{}'", storageName);
            }
            CheckpointStorageFactory<?> factory;
            try {
                @SuppressWarnings("rawtypes")
                Class<? extends CheckpointStorageFactory> clazz =
                        Class.forName(storageName, false, classLoader)
                                .asSubclass(CheckpointStorageFactory.class);
                // Class#newInstance is deprecated (it propagates constructor exceptions
                // unchecked); instantiate via the explicit no-arg constructor instead.
                factory = clazz.getDeclaredConstructor().newInstance();
            } catch (ClassNotFoundException e) {
                throw new DynamicCodeLoadingException(
                        "Cannot find configured state backend factory class: " + storageName, e);
            } catch (ClassCastException | ReflectiveOperationException e) {
                // ReflectiveOperationException covers InstantiationException,
                // IllegalAccessException, NoSuchMethodException and InvocationTargetException.
                throw new DynamicCodeLoadingException(
                        "The class configured under '"
                                + CheckpointingOptions.CHECKPOINT_STORAGE.key()
                                + "' is not a valid checkpoint storage factory ("
                                + storageName
                                + ')',
                        e);
            }
            return Optional.of(factory.createFromConfig(config, classLoader));
    }
}
@Test
void testLoadFileSystemCheckpointStorage() throws Exception {
    // Fresh temp directories for checkpoints and savepoints.
    final String checkpointDir = new Path(TempDirUtils.newFolder(tmp).toURI()).toString();
    final String savepointDir = new Path(TempDirUtils.newFolder(tmp).toURI()).toString();
    final Path expectedCheckpointsPath = new Path(checkpointDir);
    final Path expectedSavepointsPath = new Path(savepointDir);
    final MemorySize threshold = MemorySize.parse("900kb");
    final int minWriteBufferSize = 1024;

    // we configure with the explicit string (rather than
    // AbstractStateBackend#X_STATE_BACKEND_NAME)
    // to guard against config-breaking changes of the name
    final Configuration config1 = new Configuration();
    config1.set(CheckpointingOptions.CHECKPOINT_STORAGE, "filesystem");
    config1.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir);
    config1.set(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir);
    config1.set(CheckpointingOptions.FS_SMALL_FILE_THRESHOLD, threshold);
    config1.set(CheckpointingOptions.FS_WRITE_BUFFER_SIZE, minWriteBufferSize);

    CheckpointStorage storage1 = CheckpointStorageLoader.fromConfig(config1, cl, null).get();

    // The loader must pick the filesystem storage and carry over all settings.
    assertThat(storage1).isInstanceOf(FileSystemCheckpointStorage.class);
    FileSystemCheckpointStorage fs1 = (FileSystemCheckpointStorage) storage1;
    assertThat(fs1.getCheckpointPath()).is(matching(normalizedPath(expectedCheckpointsPath)));
    assertThat(fs1.getSavepointPath()).is(matching(normalizedPath(expectedSavepointsPath)));
    assertThat(fs1.getMinFileSizeThreshold()).isEqualTo(threshold.getBytes());
    // The write buffer is sized to at least the small-file threshold.
    assertThat(fs1.getWriteBufferSize())
            .isEqualTo(Math.max(threshold.getBytes(), minWriteBufferSize));
}
@Override
public Coder<SqsCheckpointMark> getCheckpointMarkCoder() {
  // Checkpoint marks are plain serializable values, so Java serialization suffices.
  final Coder<SqsCheckpointMark> checkpointCoder = SerializableCoder.of(SqsCheckpointMark.class);
  return checkpointCoder;
}
@Test
public void testCheckpointCoderIsSane() {
    // Seed the embedded queue with one message so the source has data to track.
    final AmazonSQS sqs = embeddedSqsRestServer.getClient();
    final String url = embeddedSqsRestServer.getQueueUrl();
    sqs.sendMessage(url, DATA);

    SqsUnboundedSource source =
        new SqsUnboundedSource(
            SqsIO.read().withQueueUrl(url).withMaxNumRecords(1),
            new SqsConfiguration(pipeline.getOptions().as(AwsOptions.class)),
            SqsMessageCoder.of());

    // The checkpoint-mark coder must round-trip through Java serialization.
    CoderProperties.coderSerializable(source.getCheckpointMarkCoder());
}
/**
 * Translates an HTTP status code into the matching exception and throws it.
 * Note: this method never returns normally; the declared return type exists
 * so callers can write {@code throw toException(...)}.
 */
public static Exception toException(int code, String msg) throws Exception {
    if (code == Response.Status.NOT_FOUND.getStatusCode()) {
        throw new NotFoundException(msg);
    }
    if (code == Response.Status.NOT_IMPLEMENTED.getStatusCode()) {
        throw new ClassNotFoundException(msg);
    }
    if (code == Response.Status.BAD_REQUEST.getStatusCode()) {
        throw new InvalidRequestException(msg);
    }
    if (code == Response.Status.CONFLICT.getStatusCode()) {
        throw new RequestConflictException(msg);
    }
    // Any unrecognized status code surfaces as an unchecked exception.
    throw new RuntimeException(msg);
}
@Test
public void testToExceptionSerializationException() {
    // HTTP 400 must translate into an InvalidRequestException.
    final int badRequestCode = Response.Status.BAD_REQUEST.getStatusCode();
    assertThrows(InvalidRequestException.class,
        () -> RestExceptionMapper.toException(badRequestCode, "Bad Request"));
}
/**
 * Maps a protobuf namenode role onto the internal {@code NamenodeRole} enum.
 * Unrecognized values yield {@code null}, matching the original switch fall-through.
 */
public static NamenodeRole convert(NamenodeRoleProto role) {
  if (role == NamenodeRoleProto.NAMENODE) {
    return NamenodeRole.NAMENODE;
  }
  if (role == NamenodeRoleProto.BACKUP) {
    return NamenodeRole.BACKUP;
  }
  if (role == NamenodeRoleProto.CHECKPOINT) {
    return NamenodeRole.CHECKPOINT;
  }
  return null;
}
@Test
public void testConvertBlockRecoveryCommand() {
    // Two local datanodes shared by both recovering blocks.
    DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };

    // Recovering blocks with ids 1 and 2, recovery generation stamp 3.
    List<RecoveringBlock> blks = ImmutableList.of(
        new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
        new RecoveringBlock(getExtendedBlock(2), dnInfo, 3)
    );
    BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
    BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
    // The proto must carry the block ids in order.
    assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
    assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());

    // Round-trip back and verify the command is reconstructed unchanged.
    BlockRecoveryCommand cmd2 = PBHelper.convert(proto);

    List<RecoveringBlock> cmd2Blks = Lists.newArrayList(
        cmd2.getRecoveringBlocks());
    assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
    assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
    assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
    assertEquals(cmd.toString(), cmd2.toString());
}
@NonNull
public Client authenticate(@NonNull Request request) {
  // https://datatracker.ietf.org/doc/html/rfc7521#section-4.2
  try {
    // Only the private_key_jwt client assertion type is supported.
    if (!CLIENT_ASSERTION_TYPE_PRIVATE_KEY_JWT.equals(request.clientAssertionType())) {
      throw new AuthenticationException(
          "unsupported client_assertion_type='%s', expected '%s'"
              .formatted(request.clientAssertionType(), CLIENT_ASSERTION_TYPE_PRIVATE_KEY_JWT));
    }

    // Verify the assertion's signature against the client's JWK source,
    // accepting RS256 or ES256.
    var processor = new DefaultJWTProcessor<>();
    var keySelector =
        new JWSVerificationKeySelector<>(
            Set.of(JWSAlgorithm.RS256, JWSAlgorithm.ES256), jwkSource);
    processor.setJWSKeySelector(keySelector);

    // Require jti/exp/iss/sub claims and an audience equal to this server's
    // base URI; the default verifier also rejects expired tokens.
    processor.setJWTClaimsSetVerifier(
        new DefaultJWTClaimsVerifier<>(
            new JWTClaimsSet.Builder().audience(baseUri.toString()).build(),
            Set.of(
                JWTClaimNames.JWT_ID,
                JWTClaimNames.EXPIRATION_TIME,
                JWTClaimNames.ISSUER,
                JWTClaimNames.SUBJECT)));

    var claims = processor.process(request.clientAssertion(), null);

    // Cross-check the asserted client id against the request's client id.
    var clientId = clientIdFromAssertion(request.clientId(), claims);
    return new Client(clientId);
  } catch (ParseException e) {
    throw new AuthenticationException("failed to parse client assertion", e);
  } catch (BadJOSEException | JOSEException e) {
    throw new AuthenticationException("failed to verify client assertion", e);
  }
}
@Test
void authenticate_expired() throws JOSEException {
  var key = generateKey();
  var jwkSource = new StaticJwkSource<>(key);

  // Build a client assertion whose expiration time is already in the past.
  var inThePast = Date.from(Instant.now().minusSeconds(60));
  var claims =
      new JWTClaimsSet.Builder()
          .audience(RP_ISSUER.toString())
          .subject("not the right client")
          .issuer(CLIENT_ID)
          .expirationTime(inThePast)
          .build();
  var signed = signJwt(claims, key);

  var authenticator = new ClientAuthenticator(jwkSource, RP_ISSUER);

  // when & then: an expired assertion must be rejected.
  assertThrows(
      AuthenticationException.class,
      () ->
          authenticator.authenticate(
              new Request(
                  CLIENT_ID,
                  ClientAuthenticator.CLIENT_ASSERTION_TYPE_PRIVATE_KEY_JWT,
                  signed)));
}
@Override
public GetApplicationReportResponse getApplicationReport(
    GetApplicationReportRequest request) throws YarnException, IOException {
  // Validate the request; a missing application id is audited and rethrown.
  if (request == null || request.getApplicationId() == null) {
    routerMetrics.incrAppsFailedRetrieved();
    String errMsg = "Missing getApplicationReport request or applicationId information.";
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_APP_REPORT, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, errMsg);
    RouterServerUtil.logAndThrowException(errMsg, null);
  }

  long startTime = clock.getTime();
  // Resolve which sub-cluster is home to this application.
  SubClusterId subClusterId = null;
  try {
    subClusterId = federationFacade
        .getApplicationHomeSubCluster(request.getApplicationId());
  } catch (YarnException e) {
    routerMetrics.incrAppsFailedRetrieved();
    String errMsg = String.format("Application %s does not exist in FederationStateStore.",
        request.getApplicationId());
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_APP_REPORT, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, errMsg, request.getApplicationId());
    RouterServerUtil.logAndThrowException(errMsg, e);
  }

  // Forward the request to the home sub-cluster's ResourceManager.
  ApplicationClientProtocol clientRMProxy = getClientRMProxyForSubCluster(subClusterId);

  GetApplicationReportResponse response = null;
  try {
    response = clientRMProxy.getApplicationReport(request);
  } catch (Exception e) {
    routerMetrics.incrAppsFailedRetrieved();
    String errMsg = String.format("Unable to get the application report for %s to SubCluster %s.",
        request.getApplicationId(), subClusterId.getId());
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_APP_REPORT, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, errMsg, request.getApplicationId(), subClusterId);
    RouterServerUtil.logAndThrowException(errMsg, e);
  }

  // A null response is logged but still returned to the caller unchanged.
  if (response == null) {
    LOG.error("No response when attempting to retrieve the report of "
        + "the application {} to SubCluster {}.",
        request.getApplicationId(), subClusterId.getId());
  }

  long stopTime = clock.getTime();
  routerMetrics.succeededAppsRetrieved(stopTime - startTime);
  RouterAuditLogger.logSuccess(user.getShortUserName(), GET_APP_REPORT,
      TARGET_CLIENT_RM_SERVICE, request.getApplicationId());
  return response;
}
@Test
public void testGetApplicationNotExists() throws Exception {
    LOG.info("Test ApplicationClientProtocol: Get Application Report - Not Exists.");

    // An application id that was never registered with the federation store.
    ApplicationId unknownAppId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    GetApplicationReportRequest reportRequest = GetApplicationReportRequest.newInstance(unknownAppId);

    // The interceptor must surface the state-store miss as a YarnException.
    LambdaTestUtils.intercept(YarnException.class,
        "Application " + unknownAppId + " does not exist in FederationStateStore.",
        () -> interceptor.getApplicationReport(reportRequest));
}
@Override
public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) {
    String propertyTypeName = getTypeName(node);
    JType type;

    // Dispatch on the JSON Schema "type" keyword; an inline "properties"
    // declaration also forces object generation even without type=object.
    if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) {
        type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
    } else if (node.has("existingJavaType")) {
        // An explicit existing Java type overrides schema-derived mapping.
        String typeName = node.path("existingJavaType").asText();
        if (isPrimitive(typeName, jClassContainer.owner())) {
            type = primitiveType(typeName, jClassContainer.owner());
        } else {
            type = resolveType(jClassContainer, typeName);
        }
    } else if (propertyTypeName.equals("string")) {
        type = jClassContainer.owner().ref(String.class);
    } else if (propertyTypeName.equals("number")) {
        type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("integer")) {
        type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("boolean")) {
        type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("array")) {
        type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
    } else {
        // Unknown or absent type falls back to Object.
        type = jClassContainer.owner().ref(Object.class);
    }

    // "format" (any type) or "media" (strings only) may refine the mapped type,
    // but only when no explicit Java type was requested.
    if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) {
        type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema);
    } else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) {
        type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema);
    }

    return type;
}
@Test public void applyGeneratesBigDecimalOverridingDouble() { JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName()); ObjectNode objectNode = new ObjectMapper().createObjectNode(); objectNode.put("type", "number"); //this shows that isUseBigDecimals overrides isUseDoubleNumbers when(config.isUseDoubleNumbers()).thenReturn(true); when(config.isUseBigDecimals()).thenReturn(true); JType result = rule.apply("fooBar", objectNode, null, jpackage, null); assertThat(result.fullName(), is(BigDecimal.class.getName())); }
public void print(PrintStream out, String prefix) {
    // Convenience overload: dump the entire backing buffer.
    final byte[] buffer = data;
    print(out, prefix, buffer, buffer.length);
}
@Test
public void testPrintNonPrintable() {
    // A buffer full of non-printable bytes (EOT, 0x04) should print without error.
    byte[] payload = new byte[12];
    Arrays.fill(payload, (byte) 0x04);
    ZData zdata = new ZData(payload);
    zdata.print(System.out, "ZData: ");
}
@VisibleForTesting
public File getStorageLocation(@Nullable JobID jobId, BlobKey key) throws IOException {
    // Delegate to BlobUtils after dereferencing the storage directory handle.
    final File baseDirectory = storageDir.deref();
    return BlobUtils.getStorageLocation(baseDirectory, jobId, key);
}
@Test
void transientBlobCacheTimesOutRecoveredBlobs(@TempDir Path storageDirectory) throws Exception {
    final JobID jobId = new JobID();
    // Pre-populate the storage directory with a transient blob, simulating a
    // blob left over from a previous run that the cache "recovers" on startup.
    final TransientBlobKey transientBlobKey =
            TestingBlobUtils.writeTransientBlob(
                    storageDirectory, jobId, new byte[] {1, 2, 3, 4});
    final File blobFile =
            BlobUtils.getStorageLocation(storageDirectory.toFile(), jobId, transientBlobKey);

    // Use a very short cleanup interval so the TTL sweep runs quickly.
    final Configuration configuration = new Configuration();
    final long cleanupInterval = 1L;
    configuration.set(BlobServerOptions.CLEANUP_INTERVAL, cleanupInterval);

    // The cache's periodic cleanup must eventually delete the recovered blob.
    try (final TransientBlobCache transientBlobCache =
            new TransientBlobCache(configuration, storageDirectory.toFile(), null)) {
        CommonTestUtils.waitUntilCondition(() -> !blobFile.exists());
    }
}
public String mapSrv(String responsibleTag) {
    // Snapshot the healthy member list so size and index stay consistent.
    final List<String> members = healthyList;

    // Without distro enabled (or without any healthy member) this node answers locally.
    if (CollectionUtils.isEmpty(members) || !switchDomain.isDistroEnabled()) {
        return EnvUtil.getLocalAddress();
    }

    try {
        // Hash the tag onto one of the healthy members.
        return members.get(distroHash(responsibleTag) % members.size());
    } catch (Throwable e) {
        // Any failure (e.g. an out-of-range index) falls back to the local address.
        Loggers.SRV_LOG
                .warn("[NACOS-DISTRO] distro mapper failed, return localhost: " + EnvUtil.getLocalAddress(), e);
        return EnvUtil.getLocalAddress();
    }
}
@Test
void testMapSrv() {
    // The service name must deterministically hash onto the expected member.
    String mappedServer = distroMapper.mapSrv(serviceName);
    assertEquals(mappedServer, ip4);
}
/**
 * Formats a socket address, returning the empty string for {@code null}.
 * Non-null addresses are assumed to be {@link InetSocketAddress} instances
 * and delegated to the typed overload.
 */
public static String toStringAddress(SocketAddress address) {
    return address == null
            ? StringUtils.EMPTY
            : toStringAddress((InetSocketAddress) address);
}
@Test
public void testToStringAddress() {
    // An unresolved InetSocketAddress carries no InetAddress, so formatting it
    // is expected to fail with a NullPointerException.
    try {
        NetUtil.toStringAddress(InetSocketAddress.createUnresolved("127.0.0.1", 9828));
        // Bug fix: the original test passed silently when no exception was
        // thrown. Fail explicitly so a behavior change is detected.
        throw new AssertionError("expected a NullPointerException for an unresolved address");
    } catch (NullPointerException expected) {
        assertThat(expected).isInstanceOf(NullPointerException.class);
    }
}
/**
 * Asserts that the expression holds; otherwise throws the exception produced
 * by the given supplier.
 *
 * @param expression the condition that must be true
 * @param supplier produces the throwable to raise on failure
 * @throws X when {@code expression} is false
 */
public static <X extends Throwable> void isTrue(boolean expression, Supplier<? extends X> supplier) throws X {
    if (!expression) {
        throw supplier.get();
    }
}
@Test public void isTrueTest() { Assertions.assertThrows(IllegalArgumentException.class, () -> { int i = 0; //noinspection ConstantConditions cn.hutool.core.lang.Assert.isTrue(i > 0, IllegalArgumentException::new); }); }
@Override
public String key() {
  // The validation key is the STRING property type's enum name.
  final String validationKey = PropertyType.STRING.name();
  return validationKey;
}
@Test
public void key() {
    // The validation must report the STRING property type name as its key.
    final String expectedKey = "STRING";
    assertThat(validation.key()).isEqualTo(expectedKey);
}
@NonNull
@Override
public ConnectionFileName toPvfsFileName( @NonNull FileName providerFileName, @NonNull T details )
  throws KettleException {
  // Determine the part of provider file name following the connection "root".
  // Use the transformer to generate the connection root provider uri.
  // Both uris are assumed to be normalized.
  // Examples:
  // - connectionRootProviderUri: "hcp://domain.my:443/root/path/" | "s3://" | "local://"
  // - providerUri: "hcp://domain.my:443/root/path/rest/path" | "s3://rest/path"

  // Example: "pvfs://my-connection"
  String connectionRootProviderUri = getConnectionRootProviderUriPrefix( details );
  String providerUri = providerFileName.getURI();

  // Reject provider uris that do not live under this connection's root.
  if ( !connectionFileNameUtils.isDescendantOrSelf( providerUri, connectionRootProviderUri ) ) {
    throw new IllegalArgumentException(
      String.format( "Provider file name '%s' is not a descendant of the connection root '%s'.",
        providerUri, connectionRootProviderUri ) );
  }

  // The remainder after the root prefix becomes the PVFS path.
  String restUriPath = providerUri.substring( connectionRootProviderUri.length() );
  // Examples: "/rest/path" or "rest/path"

  return buildPvfsFileName( details, restUriPath, providerFileName.getType() );
}
@Test( expected = IllegalArgumentException.class ) public void testToPvfsFileNameThrowsIfProviderUriIsNotDescendantOfConnectionRootUri() throws Exception { mockDetailsWithDomain( details1, "my-domain:8080" ); mockDetailsWithRootPath( details1, "my/root/path" ); // Note the `another-domain` which is different from the `my-domain` of the connection. String connectionRootProviderUriPrefix = "scheme1://another-domain:8080/my/root/path"; String restPath = "/rest/path"; FileName providerFileName = mockFileNameWithUri( FileName.class, connectionRootProviderUriPrefix + restPath ); transformer.toPvfsFileName( providerFileName, details1 ); }
@Override
public String toString() {
    // Render the result set as a text table: two header rows (column names,
    // then column data types) followed by one row per data row.
    int numColumns = getColumnCount();
    TextTable table = new TextTable();
    String[] columnNames = new String[numColumns];
    String[] columnDataTypes = new String[numColumns];
    for (int c = 0; c < numColumns; c++) {
        columnNames[c] = _columnNamesArray.get(c).asText();
        columnDataTypes[c] = _columnDataTypesArray.get(c).asText();
    }
    table.addHeader(columnNames);
    table.addHeader(columnDataTypes);
    int numRows = getRowCount();
    for (int r = 0; r < numRows; r++) {
        String[] columnValues = new String[numColumns];
        for (int c = 0; c < numColumns; c++) {
            try {
                columnValues[c] = getString(r, c);
            } catch (Exception e) {
                // Bug fix: the original wrote "ERROR" into columnNames (whose
                // header row was already rendered above), leaving this cell
                // null. Mark the failing cell itself instead.
                columnValues[c] = "ERROR";
            }
        }
        table.addRow(columnValues);
    }
    return table.toString();
}
@Test public void testToString() { // Run the test final String result = _resultTableResultSetUnderTest.toString(); // Verify the results assertNotEquals("", result); }
/**
 * Stores the Jersey client configuration and forwards it to the underlying
 * Apache HTTP client builder. Returns this builder for chaining.
 */
public JerseyClientBuilder using(JerseyClientConfiguration configuration) {
    apacheHttpClientBuilder.using(configuration);
    this.configuration = configuration;
    return this;
}
@Test
void usesACustomHostnameVerifier() {
    // The verifier handed to the Jersey builder must be forwarded to the
    // underlying Apache HTTP client builder.
    final HostnameVerifier verifier = new NoopHostnameVerifier();
    builder.using(verifier);
    verify(apacheHttpClientBuilder).using(verifier);
}
static String convertEnvVars(String input){ // check for any non-alphanumeric chars and convert to underscore // convert to upper case if (input == null) { return null; } return input.replaceAll("[^A-Za-z0-9]", "_").toUpperCase(); }
@Test
public void testConvertEnvVarsUsingEmptyString() {
    // An empty key converts to an empty environment variable name.
    final String converted = ConfigInjection.convertEnvVars("");
    Assert.assertEquals("", converted);
}
/**
 * Counts appender log records whose thread name contains the given fragment.
 */
public static int findThread(String expectedThread) {
    int matches = 0;
    for (Log record : DubboAppender.logList) {
        if (record.getLogThread().contains(expectedThread)) {
            matches++;
        }
    }
    return matches;
}
@Test
void testFindThread() {
    // A single record on "thread-1" must be counted exactly once.
    Log record = mock(Log.class);
    when(record.getLogThread()).thenReturn("thread-1");
    DubboAppender.logList.add(record);
    assertThat(LogUtil.findThread("thread-1"), equalTo(1));
}
@Override
public void keyPressed(KeyEvent e) {
    // Remapping only applies while the game chatbox has focus.
    if (!plugin.chatboxFocused()) {
        return;
    }

    if (!plugin.isTyping()) {
        // Not typing: translate remapped keys into their target key codes.
        int mappedKeyCode = KeyEvent.VK_UNDEFINED;

        if (config.cameraRemap()) {
            if (config.up().matches(e)) {
                mappedKeyCode = KeyEvent.VK_UP;
            } else if (config.down().matches(e)) {
                mappedKeyCode = KeyEvent.VK_DOWN;
            } else if (config.left().matches(e)) {
                mappedKeyCode = KeyEvent.VK_LEFT;
            } else if (config.right().matches(e)) {
                mappedKeyCode = KeyEvent.VK_RIGHT;
            }
        }

        // In addition to the above checks, the F-key remapping shouldn't
        // activate when dialogs are open which listen for number keys
        // to select options
        if (config.fkeyRemap() && !plugin.isDialogOpen()) {
            if (config.f1().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F1;
            } else if (config.f2().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F2;
            } else if (config.f3().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F3;
            } else if (config.f4().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F4;
            } else if (config.f5().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F5;
            } else if (config.f6().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F6;
            } else if (config.f7().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F7;
            } else if (config.f8().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F8;
            } else if (config.f9().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F9;
            } else if (config.f10().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F10;
            } else if (config.f11().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F11;
            } else if (config.f12().matches(e)) {
                mappedKeyCode = KeyEvent.VK_F12;
            } else if (config.esc().matches(e)) {
                mappedKeyCode = KeyEvent.VK_ESCAPE;
            }
        }

        // Do not remap to space key when the options dialog is open, since the options dialog never
        // listens for space, and the remapped key may be one of keys it listens for.
        if (plugin.isDialogOpen() && !plugin.isOptionsDialogOpen() && config.space().matches(e)) {
            mappedKeyCode = KeyEvent.VK_SPACE;
        }

        if (config.control().matches(e)) {
            mappedKeyCode = KeyEvent.VK_CONTROL;
        }

        // Rewrite the event in place, remembering the original key code so the
        // corresponding key-release can be rewritten too (via `modified`).
        if (mappedKeyCode != KeyEvent.VK_UNDEFINED && mappedKeyCode != e.getKeyCode()) {
            final char keyChar = e.getKeyChar();
            modified.put(e.getKeyCode(), mappedKeyCode);
            e.setKeyCode(mappedKeyCode);
            // arrow keys and fkeys do not have a character
            e.setKeyChar(KeyEvent.CHAR_UNDEFINED);
            if (keyChar != KeyEvent.CHAR_UNDEFINED) {
                // If this key event has a valid key char then a key typed event may be received next,
                // we must block it
                blockedChars.add(keyChar);
            }
        }

        // Keys that begin chat input switch the plugin into typing mode.
        switch (e.getKeyCode()) {
            case KeyEvent.VK_ENTER:
            case KeyEvent.VK_SLASH:
            case KeyEvent.VK_COLON:
                // refocus chatbox
                plugin.setTyping(true);
                clientThread.invoke(plugin::unlockChat);
                break;
        }
    } else {
        // Typing: handle keys that leave typing mode.
        switch (e.getKeyCode()) {
            case KeyEvent.VK_ESCAPE:
                // When exiting typing mode, block the escape key
                // so that it doesn't trigger the in-game hotkeys
                e.consume();
                plugin.setTyping(false);
                clientThread.invoke(() -> {
                    client.setVarcStrValue(VarClientStr.CHATBOX_TYPED_TEXT, "");
                    plugin.lockChat();
                });
                break;
            case KeyEvent.VK_ENTER:
                plugin.setTyping(false);
                clientThread.invoke(plugin::lockChat);
                break;
            case KeyEvent.VK_BACK_SPACE:
                // Only lock chat on backspace when the typed text is now empty
                if (Strings.isNullOrEmpty(client.getVarcStrValue(VarClientStr.CHATBOX_TYPED_TEXT))) {
                    plugin.setTyping(false);
                    clientThread.invoke(plugin::lockChat);
                }
                break;
        }
    }
}
@Test public void testControlRemap() { when(keyRemappingConfig.control()).thenReturn(new ModifierlessKeybind(KeyEvent.VK_NUMPAD1, 0)); when(keyRemappingPlugin.chatboxFocused()).thenReturn(true); KeyEvent event = mock(KeyEvent.class); when(event.getExtendedKeyCode()).thenReturn(KeyEvent.VK_NUMPAD1); // for keybind matches() keyRemappingListener.keyPressed(event); verify(event).setKeyCode(KeyEvent.VK_CONTROL); }
/**
 * Resolves the Nacos home directory: an already-resolved path wins, then the
 * -D system property, then a default built from the home-directory property
 * plus the additional path segment.
 */
public static String getNacosHome() {
    if (!StringUtils.isBlank(nacosHomePath)) {
        return nacosHomePath;
    }
    String nacosHome = System.getProperty(NACOS_HOME_KEY);
    if (!StringUtils.isBlank(nacosHome)) {
        return nacosHome;
    }
    // NOTE(review): NACOS_HOME_PROPERTY presumably names "user.home" — confirm.
    return Paths.get(System.getProperty(NACOS_HOME_PROPERTY), NACOS_HOME_ADDITIONAL_FILEPATH)
            .toString();
}
@Test
void test() {
    try {
        // Without nacos.home set, the default is <user.home>/nacos.
        String nacosHome = EnvUtils.getNacosHome();
        assertEquals(System.getProperty("user.home") + File.separator + "nacos", nacosHome);
        // An explicit -Dnacos.home overrides the default.
        System.setProperty("nacos.home", "test");
        String testHome = EnvUtils.getNacosHome();
        assertEquals("test", testHome);
    } finally {
        // Bug fix: clear the property so this test does not leak global state
        // into other tests that resolve the Nacos home.
        System.clearProperty("nacos.home");
    }
}
@Override
public RecordCursor cursor() {
    // Each call opens a fresh cursor over this split's rows.
    final RecordCursor freshCursor = new JdbcRecordCursor(jdbcClient, session, split, columnHandles);
    return freshCursor;
}
@Test
public void testCursorSimple() {
    // Project three columns: two varchar columns and one bigint column.
    RecordSet recordSet = new JdbcRecordSet(jdbcClient, session, split, ImmutableList.of(
            columnHandles.get("text"),
            columnHandles.get("text_short"),
            columnHandles.get("value")));
    try (RecordCursor cursor = recordSet.cursor()) {
        // Declared column types must match the handles.
        assertEquals(cursor.getType(0), VARCHAR);
        assertEquals(cursor.getType(1), createVarcharType(32));
        assertEquals(cursor.getType(2), BIGINT);

        Map<String, Long> data = new LinkedHashMap<>();
        while (cursor.advanceNextPosition()) {
            data.put(cursor.getSlice(0).toStringUtf8(), cursor.getLong(2));
            // "text" and "text_short" hold identical values in the fixture.
            assertEquals(cursor.getSlice(0), cursor.getSlice(1));
            assertFalse(cursor.isNull(0));
            assertFalse(cursor.isNull(1));
            assertFalse(cursor.isNull(2));
        }
        // The cursor must yield exactly the fixture's rows.
        assertEquals(data, ImmutableMap.<String, Long>builder()
                .put("one", 1L)
                .put("two", 2L)
                .put("three", 3L)
                .put("ten", 10L)
                .put("eleven", 11L)
                .put("twelve", 12L)
                .build());
    }
}
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    // Roots and containers carry no retrievable metadata of their own.
    if (file.isRoot() || containerService.isContainer(file)) {
        return PathAttributes.EMPTY;
    }
    // Fetch object details and map them onto path attributes.
    return this.toAttributes(this.details(file));
}
@Test
public void testReadAtSignInKey() throws Exception {
    // Object keys ending in '@' must survive touch + attribute lookup.
    final Path container = new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir(
            new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    container.attributes().setRegion("us-east-1");
    final Path file = new Path(container, String.format("%s@", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file));
    new SpectraTouchFeature(session).touch(file, new TransferStatus());
    // Throws if the '@' in the key breaks the attributes lookup.
    new SpectraAttributesFinderFeature(session).find(file);
    new SpectraDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Parses a formatted time-of-day string into a {@link Time} value.
 *
 * <p>Returns {@code null} when either argument is {@code null}. Rejects
 * patterns containing any date-based field, since TIME carries only a
 * time-of-day. Parse failures are wrapped in {@link KsqlFunctionException}.
 */
@Udf(description = "Converts a string representation of a time in the given format"
    + " into the TIME value.")
public Time parseTime(
    @UdfParameter(
        description = "The string representation of a time.") final String formattedTime,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // Fix: use short-circuit || — the original bitwise | evaluated both operands
  // unconditionally, which is non-idiomatic for a null guard.
  if (formattedTime == null || formatPattern == null) {
    return null;
  }
  try {
    final TemporalAccessor ta = formatters.get(formatPattern).parse(formattedTime);
    // Reject any pattern that resolved a date-based field: TIME is time-of-day only.
    final Optional<ChronoField> dateField = Arrays.stream(ChronoField.values())
        .filter(ChronoField::isDateBased)
        .filter(ta::isSupported)
        .findFirst();
    if (dateField.isPresent()) {
      throw new KsqlFunctionException("Time format contains date field.");
    }
    // Convert nanos-of-day to millis for java.sql.Time.
    return new Time(TimeUnit.NANOSECONDS.toMillis(LocalTime.from(ta).toNanoOfDay()));
  } catch (ExecutionException | RuntimeException e) {
    throw new KsqlFunctionException("Failed to parse time '" + formattedTime
        + "' with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
@Test
public void shouldSupportEmbeddedChars() {
    // When: the pattern embeds a quoted literal ("Fred") after the time fields.
    final Time result = udf.parseTime("000105.000Fred", "HHmmss.SSS'Fred'");

    // Then: 00:01:05.000 == 65000 ms since midnight.
    assertThat(result.getTime(), is(65000L));
}
/**
 * Builds the BookKeeper {@link ClientConfiguration} from the broker's
 * {@link ServiceConfiguration}: authentication, TLS, timeouts, health
 * checks, metadata-store sharing, and pass-through {@code bookkeeper_*}
 * properties.
 */
@VisibleForTesting
ClientConfiguration createBkClientConfiguration(MetadataStoreExtended store, ServiceConfiguration conf) {
    ClientConfiguration bkConf = new ClientConfiguration();
    // Client-side authentication plugin, only when configured and non-blank.
    if (conf.getBookkeeperClientAuthenticationPlugin() != null
            && conf.getBookkeeperClientAuthenticationPlugin().trim().length() > 0) {
        bkConf.setClientAuthProviderFactoryClass(conf.getBookkeeperClientAuthenticationPlugin());
        bkConf.setProperty(conf.getBookkeeperClientAuthenticationParametersName(),
                conf.getBookkeeperClientAuthenticationParameters());
    }
    // TLS key/trust material for mutual authentication with bookies.
    if (conf.isBookkeeperTLSClientAuthentication()) {
        bkConf.setTLSClientAuthentication(true);
        bkConf.setTLSCertificatePath(conf.getBookkeeperTLSCertificateFilePath());
        bkConf.setTLSKeyStore(conf.getBookkeeperTLSKeyFilePath());
        bkConf.setTLSKeyStoreType(conf.getBookkeeperTLSKeyFileType());
        bkConf.setTLSKeyStorePasswordPath(conf.getBookkeeperTLSKeyStorePasswordPath());
        bkConf.setTLSProviderFactoryClass(conf.getBookkeeperTLSProviderFactoryClass());
        bkConf.setTLSTrustStore(conf.getBookkeeperTLSTrustCertsFilePath());
        bkConf.setTLSTrustStoreType(conf.getBookkeeperTLSTrustCertTypes());
        bkConf.setTLSTrustStorePasswordPath(conf.getBookkeeperTLSTrustStorePasswordPath());
        bkConf.setTLSCertFilesRefreshDurationSeconds(conf.getBookkeeperTlsCertFilesRefreshDurationSeconds());
    }
    // Threading, throttling and timeouts.
    bkConf.setBusyWaitEnabled(conf.isEnableBusyWait());
    bkConf.setNumWorkerThreads(conf.getBookkeeperClientNumWorkerThreads());
    bkConf.setThrottleValue(conf.getBookkeeperClientThrottleValue());
    bkConf.setAddEntryTimeout((int) conf.getBookkeeperClientTimeoutInSeconds());
    bkConf.setReadEntryTimeout((int) conf.getBookkeeperClientTimeoutInSeconds());
    bkConf.setSpeculativeReadTimeout(conf.getBookkeeperClientSpeculativeReadTimeoutInMillis());
    bkConf.setNumChannelsPerBookie(conf.getBookkeeperNumberOfChannelsPerBookie());
    bkConf.setUseV2WireProtocol(conf.isBookkeeperUseV2WireProtocol());
    bkConf.setEnableDigestTypeAutodetection(true);
    bkConf.setStickyReadsEnabled(conf.isBookkeeperEnableStickyReads());
    // Frame size must fit the broker's max message plus protocol padding.
    bkConf.setNettyMaxFrameSizeBytes(conf.getMaxMessageSize() + Commands.MESSAGE_SIZE_FRAME_PADDING);
    bkConf.setDiskWeightBasedPlacementEnabled(conf.isBookkeeperDiskWeightBasedPlacementEnabled());
    bkConf.setMetadataServiceUri(conf.getBookkeeperMetadataStoreUrl());
    bkConf.setLimitStatsLogging(conf.isBookkeeperClientLimitStatsLogging());
    if (!conf.isBookkeeperMetadataStoreSeparated()) {
        // If we're connecting to the same metadata service, with same config, then
        // let's share the MetadataStore instance
        bkConf.setProperty(AbstractMetadataDriver.METADATA_STORE_INSTANCE, store);
    }
    // Optional bookie health checking and quarantine behaviour.
    if (conf.isBookkeeperClientHealthCheckEnabled()) {
        bkConf.enableBookieHealthCheck();
        bkConf.setBookieHealthCheckInterval((int) conf.getBookkeeperClientHealthCheckIntervalSeconds(),
                TimeUnit.SECONDS);
        bkConf.setBookieErrorThresholdPerInterval(conf.getBookkeeperClientHealthCheckErrorThresholdPerInterval());
        bkConf.setBookieQuarantineTime((int) conf.getBookkeeperClientHealthCheckQuarantineTimeInSeconds(),
                TimeUnit.SECONDS);
        bkConf.setBookieQuarantineRatio(conf.getBookkeeperClientQuarantineRatio());
    }
    bkConf.setReorderReadSequenceEnabled(conf.isBookkeeperClientReorderReadSequenceEnabled());
    bkConf.setExplictLacInterval(conf.getBookkeeperExplicitLacIntervalInMills());
    bkConf.setGetBookieInfoIntervalSeconds(
            conf.getBookkeeperClientGetBookieInfoIntervalSeconds(), TimeUnit.SECONDS);
    bkConf.setGetBookieInfoRetryIntervalSeconds(
            conf.getBookkeeperClientGetBookieInfoRetryIntervalSeconds(), TimeUnit.SECONDS);
    bkConf.setNumIOThreads(conf.getBookkeeperClientNumIoThreads());
    // Forward any "bookkeeper_"-prefixed broker property verbatim to the client.
    PropertiesUtils.filterAndMapProperties(conf.getProperties(), "bookkeeper_")
            .forEach((key, value) -> {
                log.info("Applying BookKeeper client configuration setting {}={}", key, value);
                bkConf.setProperty(key, value);
            });
    return bkConf;
}
@Test
public void testOpportunisticStripingConfiguration() {
    BookKeeperClientFactoryImpl factory = new BookKeeperClientFactoryImpl();
    ServiceConfiguration conf = new ServiceConfiguration();
    // default value
    assertFalse(factory.createBkClientConfiguration(mock(MetadataStoreExtended.class), conf)
            .getOpportunisticStriping());
    // "bookkeeper_"-prefixed broker properties must pass through to the client config.
    conf.getProperties().setProperty("bookkeeper_opportunisticStriping", "true");
    assertTrue(factory.createBkClientConfiguration(mock(MetadataStoreExtended.class), conf)
            .getOpportunisticStriping());
    conf.getProperties().setProperty("bookkeeper_opportunisticStriping", "false");
    assertFalse(factory.createBkClientConfiguration(mock(MetadataStoreExtended.class), conf)
            .getOpportunisticStriping());
}
/**
 * Decrypts data with an RSA private key supplied as a Base64-encoded
 * PKCS#8 blob.
 *
 * @param key         Base64-encoded PKCS#8 private key
 * @param encryptData ciphertext bytes
 * @return the decrypted plaintext as a String
 * @throws Exception on key decoding or cipher failure
 */
@Override
public String decrypt(final String key, final byte[] encryptData) throws Exception {
    // Rebuild the private key from its Base64-encoded PKCS#8 form.
    final byte[] keyBytes = Base64.getDecoder().decode(key);
    final PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(keyBytes);
    final PrivateKey privateKey = KeyFactory.getInstance(RSA).generatePrivate(keySpec);
    final Cipher cipher = Cipher.getInstance(RSA);
    cipher.init(Cipher.DECRYPT_MODE, privateKey);
    // NOTE(review): decodes plaintext with the platform default charset —
    // confirm the encrypt side uses the same charset before changing this.
    return new String(cipher.doFinal(encryptData));
}
@Test
public void testDecrypt() throws Exception {
    // Decrypting the fixture ciphertext with the private key restores the plaintext.
    assertThat(cryptorStrategy.decrypt(decKey, encryptedData), is(decryptedData));
}
/**
 * Creates a MilliPct from a percentage value, scaled by 1000 and truncated
 * toward zero (e.g. {@code 63.563f} becomes {@code 63563}).
 *
 * @param value percentage value
 * @return the equivalent milli-percent instance
 */
public static MilliPct ofPercent(float value) {
    // A plain cast truncates toward zero exactly like Float#intValue(),
    // without the intermediate boxing of Float.valueOf(...).intValue().
    return new MilliPct((int) (value * 1000f));
}
@Test
public void testOfPercent() {
    // 1 percent == 1000 milli-percent; truncation is symmetric around zero.
    assertEquals(63563, MilliPct.ofPercent(63.563f).intValue());
    assertEquals(-63563, MilliPct.ofPercent(-63.563f).intValue());
}
/**
 * Routes a precise (equality) sharding value by evaluating the configured
 * inline expression with the sharding column bound to the value.
 */
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
    // A null sharding value cannot be routed.
    ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
    String columnName = shardingValue.getColumnName();
    // The expression must reference the sharding column, otherwise routing could never depend on the value.
    ShardingSpherePreconditions.checkState(algorithmExpression.contains(columnName),
            () -> new MismatchedInlineShardingAlgorithmExpressionAndColumnException(algorithmExpression, columnName));
    try {
        // Evaluate the inline (Groovy-style) expression with the column bound to the actual value.
        return InlineExpressionParserFactory.newInstance(algorithmExpression).evaluateWithArgs(Collections.singletonMap(columnName, shardingValue.getValue()));
    } catch (final MissingMethodException ignored) {
        // Raised when the expression applies an operation the value's type does not support.
        throw new MismatchedInlineShardingAlgorithmExpressionAndColumnException(algorithmExpression, columnName);
    }
}
@Test
void assertDoShardingWithLargeValues() {
    // Values beyond long range (BigInteger) must route the same as equivalent longs.
    List<String> availableTargetNames = Lists.newArrayList("t_order_0", "t_order_1", "t_order_2", "t_order_3");
    assertThat(inlineShardingAlgorithm.doSharding(availableTargetNames,
            new PreciseShardingValue<>("t_order", "order_id", DATA_NODE_INFO, 787694822390497280L)), is("t_order_0"));
    assertThat(inlineShardingAlgorithm.doSharding(availableTargetNames,
            new PreciseShardingValue<>("t_order", "order_id", DATA_NODE_INFO, new BigInteger("787694822390497280787694822390497280"))), is("t_order_0"));
    // The simplified expression form must behave identically.
    assertThat(inlineShardingAlgorithmWithSimplified.doSharding(availableTargetNames,
            new PreciseShardingValue<>("t_order", "order_id", DATA_NODE_INFO, 787694822390497280L)), is("t_order_0"));
    assertThat(inlineShardingAlgorithmWithSimplified.doSharding(availableTargetNames,
            new PreciseShardingValue<>("t_order", "order_id", DATA_NODE_INFO, new BigInteger("787694822390497280787694822390497280"))), is("t_order_0"));
}
/**
 * Deserializes JSON bytes from the given topic into the configured target
 * type, coercing the parsed tree to the connect schema. A null payload
 * (e.g. a tombstone) yields null.
 */
@Override
public T deserialize(final String topic, final byte[] bytes) {
  try {
    if (bytes == null) {
      return null;
    }
    // don't use the JsonSchemaConverter to read this data because
    // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
    // which is not currently available in the standard converters
    final JsonNode value = isJsonSchema
        ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
        : MAPPER.readTree(bytes);
    // Coerce the parsed tree to the schema, starting at the root path "$".
    final Object coerced = enforceFieldType(
        "$",
        new JsonValueContext(value, schema)
    );
    if (LOG.isTraceEnabled()) {
      LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
    }
    return SerdeUtils.castToTargetType(coerced, targetType);
  } catch (final Exception e) {
    // Clear location in order to avoid logging data, for security reasons
    if (e instanceof JsonParseException) {
      ((JsonParseException) e).clearLocation();
    }
    throw new SerializationException(
        "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
  }
}
@Test
public void shouldDeserializeDecimalsWithoutStrippingTrailingZeros() {
    // Given: a DECIMAL(3,1) target schema and the JSON number 10.0.
    final KsqlJsonDeserializer<BigDecimal> deserializer =
        givenDeserializerForSchema(DecimalUtil.builder(3, 1).build(), BigDecimal.class);
    final byte[] bytes = addMagic("10.0".getBytes(UTF_8));

    // When:
    final Object result = deserializer.deserialize(SOME_TOPIC, bytes);

    // Then: the scale is preserved ("10.0", not "10" or "1E+1").
    assertThat(result, is(new BigDecimal("10.0")));
}
/**
 * Wraps the proxied writer in a Triple-Crypt encrypting stream using the
 * transfer's file key, generating a fresh key when the status carries none.
 */
@Override
public StatusOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final ObjectReader reader = session.getClient().getJSON().getContext(null).readerFor(FileKey.class);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Read file key for file %s", file));
        }
        if(null == status.getFilekey()) {
            // No key negotiated for this transfer yet - generate a per-file key.
            status.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey());
        }
        final FileKey fileKey = reader.readValue(status.getFilekey().array());
        // Encrypt on the fly while delegating the actual upload to the proxied writer.
        return new TripleCryptEncryptingOutputStream(session, nodeid, proxy.write(file, status, callback),
            Crypto.createFileEncryptionCipher(TripleCryptConverter.toCryptoPlainFileKey(fileKey)), status
        );
    }
    catch(CryptoSystemException | UnknownVersionException e) {
        throw new TripleCryptExceptionMappingService().map("Upload {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
}
@Test
public void testWrite() throws Exception {
    // Set up an encrypted room, then round-trip content through the
    // Triple-Crypt write and read features.
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final EncryptRoomRequest encrypt = new EncryptRoomRequest().isEncrypted(true);
    new NodesApi(session.getClient()).encryptRoom(encrypt, Long.parseLong(new SDSNodeIdProvider(session).getVersionId(room)), StringUtils.EMPTY, null);
    room.attributes().withCustom(KEY_ENCRYPTED, String.valueOf(true));
    final TripleCryptWriteFeature writer = new TripleCryptWriteFeature(session, nodeid, new SDSDirectS3MultipartWriteFeature(session, nodeid));
    // 32769 bytes: one byte past a 32 KiB boundary to exercise chunking.
    final byte[] content = RandomUtils.nextBytes(32769);
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    status.setChecksum(new MD5ChecksumCompute().compute(new ByteArrayInputStream(content), new TransferStatus()));
    final Path test = new Path(room, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final SDSEncryptionBulkFeature bulk = new SDSEncryptionBulkFeature(session, nodeid);
    // The bulk pre-step prepares the per-file encryption key.
    bulk.pre(Transfer.Type.upload, Collections.singletonMap(new TransferItem(test), status), new DisabledConnectionCallback());
    final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
    assertNotNull(out);
    new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
    assertNotNull(test.attributes().getVersionId());
    assertTrue(new DefaultFindFeature(session).find(test));
    assertEquals(content.length, new SDSAttributesFinderFeature(session, nodeid).find(test).getSize());
    final byte[] compare = new byte[content.length];
    // Read back through the decrypting feature, supplying the vault passphrase.
    final InputStream stream = new TripleCryptReadFeature(session, nodeid, new SDSReadFeature(session, nodeid)).read(test, new TransferStatus(), new DisabledConnectionCallback() {
        @Override
        public void warn(final Host bookmark, final String title, final String message, final String defaultButton, final String cancelButton, final String preference) {
            //
        }

        @Override
        public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
            return new VaultCredentials("eth[oh8uv4Eesij");
        }
    });
    IOUtils.readFully(stream, compare);
    stream.close();
    // The decrypted bytes must equal the original plaintext.
    assertArrayEquals(content, compare);
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Dispatches consumer-management requests to their specific handlers.
 * Unrecognized request codes yield {@code null}.
 */
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request)
    throws RemotingCommandException {
    final int code = request.getCode();
    if (code == RequestCode.GET_CONSUMER_LIST_BY_GROUP) {
        return this.getConsumerListByGroup(ctx, request);
    }
    if (code == RequestCode.UPDATE_CONSUMER_OFFSET) {
        return this.updateConsumerOffset(ctx, request);
    }
    if (code == RequestCode.QUERY_CONSUMER_OFFSET) {
        return this.queryConsumerOffset(ctx, request);
    }
    // This processor does not handle any other request code.
    return null;
}
@Test
public void testUpdateConsumerOffset_GroupNotExist() throws Exception {
    // Updating an offset for an unknown subscription group must be rejected.
    RemotingCommand request = buildUpdateConsumerOffsetRequest("NotExistGroup", topic, 0, 0);
    RemotingCommand response = consumerManageProcessor.processRequest(handlerContext, request);
    assertThat(response).isNotNull();
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
}
/**
 * Compiles the parsed node list into a linked chain of converters,
 * returning the head of the chain. Unknown keywords become literal
 * "%PARSER_ERROR[...]" converters rather than failing compilation.
 */
Converter<E> compile() {
    head = tail = null;
    for (Node n = top; n != null; n = n.next) {
        switch (n.type) {
        case Node.LITERAL:
            // Plain text between conversion words.
            addToList(new LiteralConverter<E>((String) n.getValue()));
            break;
        case Node.COMPOSITE_KEYWORD:
            CompositeNode cn = (CompositeNode) n;
            CompositeConverter<E> compositeConverter = createCompositeConverter(cn);
            if (compositeConverter == null) {
                addError("Failed to create converter for [%" + cn.getValue() + "] keyword");
                addToList(new LiteralConverter<E>("%PARSER_ERROR[" + cn.getValue() + "]"));
                break;
            }
            compositeConverter.setFormattingInfo(cn.getFormatInfo());
            compositeConverter.setOptionList(cn.getOptions());
            // Recursively compile the nested pattern into a child chain.
            Compiler<E> childCompiler = new Compiler<E>(cn.getChildNode(), converterMap);
            childCompiler.setContext(context);
            Converter<E> childConverter = childCompiler.compile();
            compositeConverter.setChildConverter(childConverter);
            addToList(compositeConverter);
            break;
        case Node.SIMPLE_KEYWORD:
            SimpleKeywordNode kn = (SimpleKeywordNode) n;
            DynamicConverter<E> dynaConverter = createConverter(kn);
            if (dynaConverter != null) {
                dynaConverter.setFormattingInfo(kn.getFormatInfo());
                dynaConverter.setOptionList(kn.getOptions());
                addToList(dynaConverter);
            } else {
                // if the appropriate dynaconverter cannot be found, then replace
                // it with a dummy LiteralConverter indicating an error.
                Converter<E> errConveter = new LiteralConverter<E>("%PARSER_ERROR[" + kn.getValue() + "]");
                addStatus(new ErrorStatus("[" + kn.getValue() + "] is not a valid conversion word", this));
                addToList(errConveter);
            }
            // No break needed: SIMPLE_KEYWORD is the last case of the switch.
        }
    }
    return head;
}
@Test
public void testComposite() throws Exception {
    // {
    // Parser<Object> p = new Parser<Object>("%(ABC)");
    // p.setContext(context);
    // Node t = p.parse();
    // Converter<Object> head = p.compile(t, converterMap);
    // String result = write(head, new Object());
    // assertEquals("ABC", result);
    // }
    {
        // Composite pattern compiled with a fresh context.
        Context c = new ContextBase();
        Parser<Object> p = new Parser<Object>("%(ABC %hello)");
        p.setContext(c);
        Node t = p.parse();
        Converter<Object> head = p.compile(t, converterMap);
        String result = write(head, new Object());
        // StatusPrinter.print(c);
        assertEquals("ABC Hello", result);
    }
    {
        // Same pattern compiled with the shared test context.
        Parser<Object> p = new Parser<Object>("%(ABC %hello)");
        p.setContext(context);
        Node t = p.parse();
        Converter<Object> head = p.compile(t, converterMap);
        String result = write(head, new Object());
        assertEquals("ABC Hello", result);
    }
}
/**
 * Parses game/spam chat messages to keep tracked jewellery charge counts in
 * sync and to fire break/activation notifications. Exactly one branch of the
 * chain below handles each message.
 */
@Subscribe
public void onChatMessage(ChatMessage event) {
    if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM) {
        String message = Text.removeTags(event.getMessage());
        // Pre-build all matchers; the branch chain below consumes at most one.
        Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
        Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
        Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
        Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
        Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
        Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
        Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
        Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
        Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
        Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
        Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
        Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
        Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
        Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
        Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
        Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
        Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
        Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
        Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
        Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);

        if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE)) {
            notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
        }
        // Dodgy necklace: break resets the count, check/protect set it directly.
        else if (dodgyBreakMatcher.find()) {
            notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
            updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
        } else if (dodgyCheckMatcher.find()) {
            updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
        } else if (dodgyProtectMatcher.find()) {
            updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
        }
        // Amulet of chemistry: "one" is spelled out, so map the word to 1.
        else if (amuletOfChemistryCheckMatcher.find()) {
            updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
        } else if (amuletOfChemistryUsedMatcher.find()) {
            final String match = amuletOfChemistryUsedMatcher.group(1);
            int charges = 1;
            if (!match.equals("one")) {
                charges = Integer.parseInt(match);
            }
            updateAmuletOfChemistryCharges(charges);
        } else if (amuletOfChemistryBreakMatcher.find()) {
            notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
            updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
        }
        // Amulet of bounty.
        else if (amuletOfBountyCheckMatcher.find()) {
            updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
        } else if (amuletOfBountyUsedMatcher.find()) {
            updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
        } else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT)) {
            updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
        }
        // Binding necklace.
        else if (message.contains(BINDING_BREAK_TEXT)) {
            notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);

            // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
            updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
        } else if (bindingNecklaceUsedMatcher.find()) {
            // NOTE(review): equipment is dereferenced without a null check here,
            // unlike the ring-of-forging branch below — confirm it cannot be null
            // when this message fires.
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
            if (equipment.contains(ItemID.BINDING_NECKLACE)) {
                updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
            }
        } else if (bindingNecklaceCheckMatcher.find()) {
            final String match = bindingNecklaceCheckMatcher.group(1);

            int charges = 1;
            if (!match.equals("one")) {
                charges = Integer.parseInt(match);
            }

            updateBindingNecklaceCharges(charges);
        }
        // Ring of forging.
        else if (ringOfForgingCheckMatcher.find()) {
            final String match = ringOfForgingCheckMatcher.group(1);

            int charges = 1;
            if (!match.equals("one")) {
                charges = Integer.parseInt(match);
            }

            updateRingOfForgingCharges(charges);
        } else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY)) {
            final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);

            // Determine if the player smelted with a Ring of Forging equipped.
            if (equipment == null) {
                return;
            }

            if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1)) {
                int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
                updateRingOfForgingCharges(charges);
            }
        } else if (message.equals(RING_OF_FORGING_BREAK_TEXT)) {
            notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");

            // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
            updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
        }
        // Chronicle teleport charges.
        else if (chronicleAddMatcher.find()) {
            final String match = chronicleAddMatcher.group(1);

            if (match.equals("one")) {
                setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
            } else {
                setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
            }
        } else if (chronicleUseAndCheckMatcher.find()) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
        } else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT)) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
        } else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT)) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
        } else if (message.equals(CHRONICLE_FULL_TEXT)) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
        }
        // Slayer bracelets: the activate pattern's group is absent on a break message.
        else if (slaughterActivateMatcher.find()) {
            final String found = slaughterActivateMatcher.group(1);
            if (found == null) {
                updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
                notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
            } else {
                updateBraceletOfSlaughterCharges(Integer.parseInt(found));
            }
        } else if (slaughterCheckMatcher.find()) {
            updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
        } else if (expeditiousActivateMatcher.find()) {
            final String found = expeditiousActivateMatcher.group(1);
            if (found == null) {
                updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
                notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
            } else {
                updateExpeditiousBraceletCharges(Integer.parseInt(found));
            }
        } else if (expeditiousCheckMatcher.find()) {
            updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
        }
        // Blood essence.
        else if (bloodEssenceCheckMatcher.find()) {
            updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
        } else if (bloodEssenceExtractMatcher.find()) {
            updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
        } else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT)) {
            updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
        }
        // Bracelet of clay.
        else if (braceletOfClayCheckMatcher.find()) {
            updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
        } else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN)) {
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);

            // Determine if the player mined with a Bracelet of Clay equipped.
            if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY)) {
                final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);

                // Charge is not used if only 1 inventory slot is available when mining in Prifddinas
                boolean ignore = inventory != null
                        && inventory.count() == 27
                        && message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);

                if (!ignore) {
                    int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
                    updateBraceletOfClayCharges(charges);
                }
            }
        } else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT)) {
            notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
            updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
        }
    }
}
@Test
public void testSlaughterRegenerate() {
    // A regenerate message must reset the bracelet of slaughter to 30 charges.
    ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", REGENERATE_BRACELET_OF_SLAUGHTER, "", 0);
    itemChargePlugin.onChatMessage(chatMessage);
    verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_BRACELET_OF_SLAUGHTER, 30);
}
/**
 * Tells whether this prefix lies inside the reserved multicast range of
 * its IP version.
 *
 * @return true if the prefix is contained in the multicast prefix
 */
public boolean isMulticast() {
    if (isIp4()) {
        return IPV4_MULTICAST_PREFIX.contains(this.getIp4Prefix());
    }
    return IPV6_MULTICAST_PREFIX.contains(this.getIp6Prefix());
}
@Test
public void testIsMulticast() {
    // IPv4 fixtures: a /2 merely overlapping the multicast range is not multicast.
    IpPrefix v4Unicast = IpPrefix.valueOf("10.0.0.1/16");
    IpPrefix v4Multicast = IpPrefix.valueOf("224.0.0.1/4");
    IpPrefix v4Overlap = IpPrefix.valueOf("192.0.0.0/2");
    // IPv6 fixtures: a /4 merely overlapping ff00:: is not multicast either.
    IpPrefix v6Unicast = IpPrefix.valueOf("1000::1/8");
    IpPrefix v6Multicast = IpPrefix.valueOf("ff02::1/8");
    IpPrefix v6Overlap = IpPrefix.valueOf("ff00::1/4");
    assertFalse(v4Unicast.isMulticast());
    assertTrue(v4Multicast.isMulticast());
    assertFalse(v4Overlap.isMulticast());
    assertFalse(v6Unicast.isMulticast());
    assertTrue(v6Multicast.isMulticast());
    assertFalse(v6Overlap.isMulticast());
}
/**
 * Handles the ETH_SYNCING operation: queries the node's syncing state and,
 * when the response carries no error, stores the boolean flag in the
 * message body.
 */
@InvokeOnHeader(Web3jConstants.ETH_SYNCING)
void ethSyncing(Message message) throws IOException {
    final Request<?, EthSyncing> request = web3j.ethSyncing();
    // Tag the message with the request id so the reply can be correlated.
    setRequestId(message, request);
    final EthSyncing response = request.send();
    // Only populate the body when no error was recorded on the message.
    if (!checkForError(message, response)) {
        message.setBody(response.isSyncing());
    }
}
@Test
public void ethSyncingTest() throws Exception {
    // Stub the web3j client to report that the node is syncing.
    EthSyncing response = Mockito.mock(EthSyncing.class);
    Mockito.when(mockWeb3j.ethSyncing()).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.isSyncing()).thenReturn(Boolean.TRUE);

    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_SYNCING);
    template.send(exchange);
    // The producer must copy the syncing flag into the message body.
    Boolean body = exchange.getIn().getBody(Boolean.class);
    assertTrue(body);
}
/**
 * Intersects this domain with a compatible one: value sets are intersected
 * and null remains allowed only when both operands allow it.
 */
public Domain intersect(Domain other) {
    checkCompatibility(other);
    final boolean nullAllowed = this.isNullAllowed() && other.isNullAllowed();
    return new Domain(values.intersect(other.getValues()), nullAllowed);
}
@Test
public void testIntersect() {
    // Identity and annihilator cases.
    assertEquals(
            Domain.all(BIGINT).intersect(Domain.all(BIGINT)),
            Domain.all(BIGINT));
    assertEquals(
            Domain.none(BIGINT).intersect(Domain.none(BIGINT)),
            Domain.none(BIGINT));
    assertEquals(
            Domain.all(BIGINT).intersect(Domain.none(BIGINT)),
            Domain.none(BIGINT));
    // Disjoint null-ness intersects to the empty domain.
    assertEquals(
            Domain.notNull(BIGINT).intersect(Domain.onlyNull(BIGINT)),
            Domain.none(BIGINT));
    assertEquals(
            Domain.singleValue(BIGINT, 0L).intersect(Domain.all(BIGINT)),
            Domain.singleValue(BIGINT, 0L));
    assertEquals(
            Domain.singleValue(BIGINT, 0L).intersect(Domain.onlyNull(BIGINT)),
            Domain.none(BIGINT));
    // Disjoint values but shared null-allowance leaves only null.
    assertEquals(
            Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L)), true).intersect(Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 2L)), true)),
            Domain.onlyNull(BIGINT));
    // Overlapping values intersect; null allowed only when both sides allow it.
    assertEquals(
            Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L)), true).intersect(Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L)), false)),
            Domain.singleValue(BIGINT, 1L));
}
/**
 * Returns a builder for a custom batch source named {@code name}.
 * {@code createFn} creates the per-processor context object of type
 * {@code C} when the source is initialized.
 */
@Nonnull
public static <C> SourceBuilder<C>.Batch<Void> batch(
        @Nonnull String name,
        @Nonnull FunctionEx<? super Processor.Context, ? extends C> createFn
) {
    // Batch is a non-static inner class, hence the qualified "outer.new Inner()" form.
    return new SourceBuilder<C>(name, createFn).new Batch<>();
}
@Test
public void stream_socketSource_distributed() throws IOException {
    // Given: a local server that feeds each connecting reader the same lines.
    try (ServerSocket serverSocket = new ServerSocket(0)) {
        startServer(serverSocket);

        // When: a distributed batch source with one reader per processor.
        int localPort = serverSocket.getLocalPort();
        BatchSource<String> socketSource = SourceBuilder
                .batch("distributed-socket-source", ctx -> socketReader(localPort))
                .<String>fillBufferFn((in, buf) -> {
                    String line = in.readLine();
                    if (line != null) {
                        buf.add(line);
                    } else {
                        buf.close();
                    }
                })
                .destroyFn(BufferedReader::close)
                .distributed(PREFERRED_LOCAL_PARALLELISM)
                .build();

        // Then: each line appears once per processor instance across all members.
        Pipeline p = Pipeline.create();
        p.readFrom(socketSource)
                .writeTo(sinkList());
        hz().getJet().newJob(p).join();

        Map<String, Integer> expected = IntStream.range(0, itemCount)
                .boxed()
                .collect(Collectors.toMap(i -> "line" + i, i -> PREFERRED_LOCAL_PARALLELISM * MEMBER_COUNT));
        assertEquals(expected, sinkToBag());
    }
}
/**
 * Serializes an object to its JSON string form.
 *
 * @param obj object to serialize (a null argument produces the string "null")
 * @return JSON text
 * @throws NacosSerializationException if Jackson fails to serialize the object
 */
public static String toJson(Object obj) {
    try {
        return mapper.writeValueAsString(obj);
    } catch (JsonProcessingException e) {
        // Wrap the checked Jackson exception in the project's unchecked type,
        // preserving the original cause.
        throw new NacosSerializationException(obj.getClass(), e);
    }
}
@Test
void testToJson1() {
    // Primitive and collection values.
    assertEquals("null", JacksonUtils.toJson(null));
    assertEquals("\"string\"", JacksonUtils.toJson("string"));
    assertEquals("30", JacksonUtils.toJson(new BigDecimal(30)));
    assertEquals("{\"key\":\"value\"}", JacksonUtils.toJson(Collections.singletonMap("key", "value")));
    assertEquals("[{\"key\":\"value\"}]", JacksonUtils.toJson(Collections.singletonList(Collections.singletonMap("key", "value"))));
    // Atomic wrappers serialize as their plain values.
    assertEquals("{\"aLong\":0,\"aInteger\":1,\"aBoolean\":false}", JacksonUtils.toJson(new TestOfAtomicObject()));
    // Dates serialize as epoch milliseconds.
    assertEquals("{\"date\":1626192000000}", JacksonUtils.toJson(new TestOfDate()));
    // only public
    assertEquals("{\"publicAccessModifier\":\"public\"}", JacksonUtils.toJson(new TestOfAccessModifier()));
    // getter is also recognized
    assertEquals("{\"value\":\"value\",\"key\":\"key\"}", JacksonUtils.toJson(new TestOfGetter()));
    // annotation available
    assertEquals(
            "{\"@type\":\"JacksonUtilsTest$TestOfAnnotationSub\",\"date\":\"2021-07-14\",\"subField\":\"subField\","
                    + "\"camelCase\":\"value\"}",
            JacksonUtils.toJson(new TestOfAnnotationSub()));
}
/**
 * Computes the area under the curve defined by the points (x[i], y[i])
 * using the trapezoidal rule.
 *
 * @param x the non-decreasing x coordinates
 * @param y the y coordinates, same length as x
 * @return the trapezoidal-rule area under the sampled curve
 * @throws IllegalArgumentException if the arrays differ in length
 * @throws IllegalStateException if x decreases by more than 1e-12
 */
public static double auc(double[] x, double[] y) {
    if (x.length != y.length) {
        throw new IllegalArgumentException("x and y must be the same length, x.length = " + x.length + ", y.length = " + y.length);
    }
    if (x.length == 0) {
        return 0.0;
    }
    double area = 0.0;
    double prevX = x[0];
    double prevY = y[0];
    for (int i = 1; i < x.length; i++) {
        double curX = x[i];
        double curY = y[i];
        double xDiff = curX - prevX;
        // Tolerate tiny negative steps from floating point noise, reject real decreases.
        if (xDiff < -1e-12) {
            throw new IllegalStateException(String.format("X is not increasing, x[%d]=%f, x[%d]=%f", i, curX, i - 1, prevX));
        }
        // Trapezoid: mean height times width.
        area += ((curY + prevY) * xDiff) / 2.0;
        prevX = curX;
        prevY = curY;
    }
    return area;
}
@Test
public void testAUC() {
    double output;
    // Mismatched array lengths must be rejected.
    try {
        output = Util.auc(new double[]{0.0,1.0},new double[]{0.0,1.0,2.0});
        fail("Exception not thrown for mismatched lengths.");
    } catch (IllegalArgumentException e) { }
    // Decreasing x values must be rejected.
    try {
        output = Util.auc(new double[]{0.0,1.0,2.0,1.5,3.0}, new double[]{1.0,1.0,1.0,1.0,1.0});
        fail("Exception not thrown for non-increasing x.");
    } catch (IllegalStateException e) { }
    // Trapezoid areas for simple shapes.
    output = Util.auc(new double[]{4,6,8},new double[]{1,2,3});
    assertEquals(8.0,output,DELTA);
    output = Util.auc(new double[]{0,1},new double[]{0,1});
    assertEquals(0.5,output,DELTA);
    // A repeated x value (zero-width segment) contributes nothing extra.
    output = Util.auc(new double[]{0,0,1},new double[]{1,1,0});
    assertEquals(0.5,output,DELTA);
    output = Util.auc(new double[]{0,1},new double[]{1,1});
    assertEquals(1,output,DELTA);
    output = Util.auc(new double[]{0,0.5,1},new double[]{0,0.5,1});
    assertEquals(0.5,output,DELTA);
}
// Updates a notification template after validating existence and code uniqueness,
// then evicts the whole template cache (a code change can invalidate old cache keys).
@Override @CacheEvict(cacheNames = RedisKeyConstants.NOTIFY_TEMPLATE, allEntries = true) // allEntries: clear the whole cache, since the code field may change and targeted eviction is impractical public void updateNotifyTemplate(NotifyTemplateSaveReqVO updateReqVO) { // validate the template exists validateNotifyTemplateExists(updateReqVO.getId()); // validate the notification code is not duplicated validateNotifyTemplateCodeDuplicate(updateReqVO.getId(), updateReqVO.getCode()); // perform the update NotifyTemplateDO updateObj = BeanUtils.toBean(updateReqVO, NotifyTemplateDO.class); updateObj.setParams(parseTemplateContentParams(updateObj.getContent())); notifyTemplateMapper.updateById(updateObj); }
// Verifies updating a non-existent template raises NOTIFY_TEMPLATE_NOT_EXISTS.
@Test public void testUpdateNotifyTemplate_notExists() { // prepare arguments NotifyTemplateSaveReqVO reqVO = randomPojo(NotifyTemplateSaveReqVO.class); // invoke and assert the expected exception assertServiceException(() -> notifyTemplateService.updateNotifyTemplate(reqVO), NOTIFY_TEMPLATE_NOT_EXISTS); }
// Annotation-processor round: filters the round's annotations down to the known scheduling
// triggers, groups annotated methods by their declaring type, and generates one module per type.
// Processing errors are reported via the messager; IO failures abort the round.
@Override public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) { var triggers = annotations.stream() .filter(te -> { for (var trigger : KoraSchedulingAnnotationProcessor.triggers) { if (te.getQualifiedName().contentEquals(trigger.canonicalName())) { return true; } } return false; }) .toArray(TypeElement[]::new); var scheduledMethods = roundEnv.getElementsAnnotatedWithAny(triggers); var scheduledTypes = scheduledMethods.stream().collect(Collectors.groupingBy(e -> { var type = (TypeElement) e.getEnclosingElement(); return type.getQualifiedName().toString(); })); for (var entry : scheduledTypes.entrySet()) { var methods = entry.getValue(); var type = (TypeElement) entry.getValue().get(0).getEnclosingElement(); try { this.generateModule(type, methods); } catch (ProcessingErrorException e) { e.printError(this.processingEnv); } catch (IOException e) { throw new RuntimeException(e); } // todo exceptions } return false; }
// Smoke test: annotation processing of the fixed-delay JDK scheduling sample must not throw.
@Test void testScheduledJdkAtFixedDelayTest() throws Exception { process(ScheduledJdkAtFixedDelayTest.class); }
/**
 * Computes the initial offset restriction for the given Kafka source descriptor.
 *
 * <p>A short-lived consumer is created (and closed via try-with-resources) to resolve the
 * start offset from, in priority order: an explicit start offset, a start read time, or the
 * consumer's current position. The end offset similarly comes from an explicit stop offset,
 * a stop read time, or defaults to {@code Long.MAX_VALUE} (unbounded).
 *
 * <p>Also records the bootstrap servers and topic as a lineage source.
 *
 * @param kafkaSourceDescriptor the topic partition plus optional start/stop bounds.
 * @return the offset range {@code [startOffset, endOffset)} to read.
 */
@GetInitialRestriction
public OffsetRange initialRestriction(@Element KafkaSourceDescriptor kafkaSourceDescriptor) {
  Map<String, Object> updatedConsumerConfig =
      overrideBootstrapServersConfig(consumerConfig, kafkaSourceDescriptor);
  TopicPartition partition = kafkaSourceDescriptor.getTopicPartition();
  LOG.info("Creating Kafka consumer for initial restriction for {}", partition);
  try (Consumer<byte[], byte[]> offsetConsumer = consumerFactoryFn.apply(updatedConsumerConfig)) {
    ConsumerSpEL.evaluateAssign(offsetConsumer, ImmutableList.of(partition));
    long startOffset;
    @Nullable Instant startReadTime = kafkaSourceDescriptor.getStartReadTime();
    if (kafkaSourceDescriptor.getStartReadOffset() != null) {
      startOffset = kafkaSourceDescriptor.getStartReadOffset();
    } else if (startReadTime != null) {
      startOffset = ConsumerSpEL.offsetForTime(offsetConsumer, partition, startReadTime);
    } else {
      startOffset = offsetConsumer.position(partition);
    }
    long endOffset = Long.MAX_VALUE;
    @Nullable Instant stopReadTime = kafkaSourceDescriptor.getStopReadTime();
    if (kafkaSourceDescriptor.getStopReadOffset() != null) {
      endOffset = kafkaSourceDescriptor.getStopReadOffset();
    } else if (stopReadTime != null) {
      endOffset = ConsumerSpEL.offsetForTime(offsetConsumer, partition, stopReadTime);
    }
    // (A previously constructed-and-discarded OffsetRange here was dead code; removed.)
    Lineage.getSources()
        .add(
            "kafka",
            ImmutableList.of(
                (String) updatedConsumerConfig.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG),
                MoreObjects.firstNonNull(kafkaSourceDescriptor.getTopic(), partition.topic())));
    return new OffsetRange(startOffset, endOffset);
  }
}
// Verifies initialRestriction propagates the KafkaException raised by the consumer's position lookup.
@Test public void testInitialRestrictionWithException() throws Exception { thrown.expect(KafkaException.class); thrown.expectMessage("PositionException"); exceptionDofnInstance.initialRestriction( KafkaSourceDescriptor.of(topicPartition, null, null, null, null, ImmutableList.of())); }
// Static factory: returns a fresh Builder for assembling an instance step by step.
public static Builder builder() { return new Builder(); }
// Verifies JSON deserialization and the fluent builder produce equal PromptExecutionSettings.
@Test void testJsonDeserializeAndBuilder() throws Exception { String json = "{" + "\"service_id\":\"custom-service\"," + "\"max_tokens\":512," + "\"temperature\":0.8," + "\"top_p\":0.5," + "\"presence_penalty\":0.2," + "\"frequency_penalty\":0.3," + "\"best_of\":3," + "\"results_per_prompt\":5," + "\"model_id\":\"custom-model\"," + "\"user\":\"custom-user\"" + "}"; PromptExecutionSettings settingsFromJson = new ObjectMapper().readValue(json, PromptExecutionSettings.class); PromptExecutionSettings settingsFromBuilder = PromptExecutionSettings.builder() .withServiceId("custom-service") .withMaxTokens(512) .withTemperature(0.8) .withTopP(0.5) .withPresencePenalty(0.2) .withFrequencyPenalty(0.3) .withBestOf(3) .withResultsPerPrompt(5) .withModelId("custom-model") .withUser("custom-user") .build(); assertEquals(settingsFromBuilder, settingsFromJson); }
// Combines scalar fields via Objects.hash, then folds in the array-typed fields with
// Arrays.hashCode using the conventional 31-multiplier accumulation; mirrors equals.
@Override public int hashCode() { int result = Objects.hash(method, paramDesc, returnClass, methodName, generic, attributeMap); result = 31 * result + Arrays.hashCode(compatibleParamSignatures); result = 31 * result + Arrays.hashCode(parameterClasses); result = 31 * result + Arrays.hashCode(returnTypes); return result; }
// Verifies two descriptors of the same reflected method with the same attribute share a hash code.
@Test void testHashCode() { try { MethodDescriptor method2 = new ReflectionMethodDescriptor(DemoService.class.getDeclaredMethod("sayHello", String.class)); method.addAttribute("attr", "attr"); method2.addAttribute("attr", "attr"); Assertions.assertEquals(method.hashCode(), method2.hashCode()); } catch (NoSuchMethodException e) { throw new IllegalStateException(e); } }
/**
 * Sets the executor pool size.
 *
 * @param poolSize the number of executor threads; must be greater than zero.
 * @return this config instance, for call chaining.
 * @throws IllegalArgumentException if {@code poolSize} is zero or negative.
 */
public ExecutorConfig setPoolSize(final int poolSize) {
    if (poolSize > 0) {
        this.poolSize = poolSize;
        return this;
    }
    throw new IllegalArgumentException("poolSize must be positive");
}
// Verifies setPoolSize rejects a negative pool size with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void shouldNotAcceptNegativeCorePoolSize() { new ExecutorConfig().setPoolSize(-1); }
// Demonstrates the Prototype pattern: a base character prototype feeds class prototypes
// (mage/warrior/rogue), which in turn seed concrete player characters; one player is
// even cloned from another player instance.
public static void main(String[] args) { /* set up */ var charProto = new Character(); charProto.set(Stats.STRENGTH, 10); charProto.set(Stats.AGILITY, 10); charProto.set(Stats.ARMOR, 10); charProto.set(Stats.ATTACK_POWER, 10); var mageProto = new Character(Type.MAGE, charProto); mageProto.set(Stats.INTELLECT, 15); mageProto.set(Stats.SPIRIT, 10); var warProto = new Character(Type.WARRIOR, charProto); warProto.set(Stats.RAGE, 15); warProto.set(Stats.ARMOR, 15); // boost default armor for warrior var rogueProto = new Character(Type.ROGUE, charProto); rogueProto.set(Stats.ENERGY, 15); rogueProto.set(Stats.AGILITY, 15); // boost default agility for rogue /* usage */ var mag = new Character("Player_1", mageProto); mag.set(Stats.ARMOR, 8); LOGGER.info(mag.toString()); var warrior = new Character("Player_2", warProto); LOGGER.info(warrior.toString()); var rogue = new Character("Player_3", rogueProto); LOGGER.info(rogue.toString()); var rogueDouble = new Character("Player_4", rogue); rogueDouble.set(Stats.ATTACK_POWER, 12); LOGGER.info(rogueDouble.toString()); }
// Smoke test: App.main must complete without throwing.
@Test void shouldExecuteWithoutException() { assertDoesNotThrow(() -> App.main(new String[]{})); }
// Returns a page of instance configs for the release key whose last modification is
// after the validity cutoff date (stale configs are filtered out by the repository query).
public Page<InstanceConfig> findActiveInstanceConfigsByReleaseKey(String releaseKey, Pageable pageable) { return instanceConfigRepository.findByReleaseKeyAndDataChangeLastModifiedTimeAfter(releaseKey, getValidInstanceConfigDate(), pageable); }
// Verifies only configs modified within the validity window are returned for a release key:
// a config modified two days ago is excluded, a fresh one is included.
@Test @Rollback public void testFindActiveInstanceConfigs() throws Exception { long someInstanceId = 1; long anotherInstanceId = 2; String someConfigAppId = "someConfigAppId"; String someConfigClusterName = "someConfigClusterName"; String someConfigNamespaceName = "someConfigNamespaceName"; Date someValidDate = new Date(); Pageable pageable = PageRequest.of(0, 10); String someReleaseKey = "someReleaseKey"; Calendar calendar = Calendar.getInstance(); calendar.add(Calendar.DATE, -2); Date someInvalidDate = calendar.getTime(); prepareInstanceConfigForInstance(someInstanceId, someConfigAppId, someConfigClusterName, someConfigNamespaceName, someReleaseKey, someValidDate); prepareInstanceConfigForInstance(anotherInstanceId, someConfigAppId, someConfigClusterName, someConfigNamespaceName, someReleaseKey, someInvalidDate); Page<InstanceConfig> validInstanceConfigs = instanceService .findActiveInstanceConfigsByReleaseKey(someReleaseKey, pageable); assertEquals(1, validInstanceConfigs.getContent().size()); assertEquals(someInstanceId, validInstanceConfigs.getContent().get(0).getInstanceId()); }
// Instance-level hook delegating to the static extractor; exists so subclasses/tests can override.
protected EventValuesWithLog extractEventParametersWithLog(Event event, Log log) { return staticExtractEventParametersWithLog(event, log); }
// Verifies extraction matches each event to the receipt log entry carrying its encoded topic.
@Test public void testExtractEventParametersWithLogGivenATransactionReceipt() { final java.util.function.Function<String, Event> eventFactory = name -> new Event(name, emptyList()); final BiFunction<Integer, Event, Log> logFactory = (logIndex, event) -> new Log( false, "" + logIndex, "0", "0x0", "0x0", "0", "0x" + logIndex, "", "", singletonList(EventEncoder.encode(event))); final Event testEvent1 = eventFactory.apply("TestEvent1"); final Event testEvent2 = eventFactory.apply("TestEvent2"); final List<Log> logs = Arrays.asList(logFactory.apply(0, testEvent1), logFactory.apply(1, testEvent2)); final TransactionReceipt transactionReceipt = new TransactionReceipt(); transactionReceipt.setLogs(logs); final List<Contract.EventValuesWithLog> eventValuesWithLogs1 = contract.extractEventParametersWithLog(testEvent1, transactionReceipt); assertEquals(eventValuesWithLogs1.size(), 1); assertEquals(eventValuesWithLogs1.get(0).getLog(), logs.get(0)); final List<Contract.EventValuesWithLog> eventValuesWithLogs2 = contract.extractEventParametersWithLog(testEvent2, transactionReceipt); assertEquals(eventValuesWithLogs2.size(), 1); assertEquals(eventValuesWithLogs2.get(0).getLog(), logs.get(1)); }
// Splits the dataset into nsplits train/test folds, optionally shuffling indices first.
// Fold sizes differ by at most one (the first nsamples % nsplits folds get the extra element);
// the returned iterator lazily materializes DatasetView pairs, advancing foldPtr/dataPtr state.
public Iterator<TrainTestFold<T>> split(Dataset<T> dataset, boolean shuffle) { int nsamples = dataset.size(); if (nsamples == 0) { throw new IllegalArgumentException("empty input data"); } if (nsplits > nsamples) { throw new IllegalArgumentException("cannot have nsplits > nsamples"); } int[] indices; if (shuffle) { indices = Util.randperm(nsamples,rng); } else { indices = IntStream.range(0, nsamples).toArray(); } int[] foldSizes = new int[nsplits]; Arrays.fill(foldSizes, nsamples/nsplits); for (int i = 0; i < (nsamples%nsplits); i++) { foldSizes[i] += 1; } return new Iterator<TrainTestFold<T>>() { int foldPtr = 0; int dataPtr = 0; @Override public boolean hasNext() { return foldPtr < foldSizes.length; } @Override public TrainTestFold<T> next() { int size = foldSizes[foldPtr]; foldPtr++; int start = dataPtr; int stop = dataPtr+size; dataPtr = stop; int[] holdOut = Arrays.copyOfRange(indices, start, stop); int[] rest = new int[indices.length - holdOut.length]; System.arraycopy(indices, 0, rest, 0, start); System.arraycopy(indices, stop, rest, start, nsamples-stop); return new TrainTestFold<>( new DatasetView<>(dataset, rest, "TrainFold(seed="+seed+","+foldPtr+" of " + nsplits+")"), new DatasetView<>(dataset, holdOut, "TestFold(seed="+seed+","+foldPtr+" of " + nsplits+")" ) ); } }; }
// Verifies a 2-fold split without shuffling yields distinct train/test index arrays in each fold.
@Test public void testKFolderTwoSplits() { Dataset<MockOutput> data = getData(50); KFoldSplitter<MockOutput> kf = new KFoldSplitter<>(2,1); Iterator<KFoldSplitter.TrainTestFold<MockOutput>> itr = kf.split(data,false); while (itr.hasNext()) { KFoldSplitter.TrainTestFold<MockOutput> fold = itr.next(); assertFalse(Arrays.equals(fold.train.getExampleIndices(),fold.test.getExampleIndices())); } }
/**
 * Computes the free capacity per tier as total bytes minus used bytes.
 *
 * @return a map from tier alias to free bytes on that tier.
 */
public Map<String, Long> getFreeBytesOnTiers() {
    Map<String, Long> freeBytes = new HashMap<>();
    // NOTE(review): assumes every tier present in the totals map also appears in the
    // used-bytes map; a missing key would unbox null and NPE — confirm with callers.
    mUsage.mTotalBytesOnTiers.forEach(
        (tier, totalBytes) -> freeBytes.put(tier, totalBytes - mUsage.mUsedBytesOnTiers.get(tier)));
    return freeBytes;
}
// Verifies free bytes per tier equal total minus used for the MEM and SSD tiers.
@Test public void getFreeBytesOnTiers() { assertEquals(ImmutableMap.of(Constants.MEDIUM_MEM, Constants.KB * 2L, Constants.MEDIUM_SSD, Constants.KB * 2L), mInfo.getFreeBytesOnTiers()); }
// Returns whether the given registry path addresses a table metadata node (case-insensitive).
// NOTE(review): the regex is recompiled on every call — consider caching the Pattern in a
// static final field if this turns out to be on a hot path.
public static boolean isTableNode(final String path) { return Pattern.compile(getMetaDataNode() + TABLES_PATTERN + TABLE_SUFFIX, Pattern.CASE_INSENSITIVE).matcher(path).find(); }
// Verifies a canonical table metadata path is recognized as a table node.
@Test void assertIsTableNode() { assertTrue(TableMetaDataNode.isTableNode("/metadata/foo_db/schemas/foo_schema/tables/foo_table")); }
// Registers the HTTP session (if any) before delegating down the chain and unregisters it
// afterwards; non-HTTP requests are forwarded untouched.
@Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { if (!(request instanceof HttpServletRequest)) { super.doFilter(request, response, chain); return; } final HttpServletRequest httpRequest = (HttpServletRequest) request; registerSessionIfNeeded(httpRequest); super.doFilter(request, response, chain); // if a logout happened, account for the session destruction immediately unregisterSessionIfNeeded(httpRequest); }
// Verifies a non-HTTP request is forwarded straight through the filter chain.
@Test public void testNoHttp() throws IOException, ServletException { final ServletRequest request = createNiceMock(ServletRequest.class); final ServletResponse response = createNiceMock(ServletResponse.class); final FilterChain chain = createNiceMock(FilterChain.class); replay(request); replay(response); replay(chain); pluginMonitoringFilter.doFilter(request, response, chain); verify(request); verify(response); verify(chain); }
// Prints the cluster ID reported by the admin client, or a hint when none is available
// (older brokers may report a null cluster ID).
static void clusterIdCommand(PrintStream stream, Admin adminClient) throws Exception { String clusterId = adminClient.describeCluster().clusterId().get(); if (clusterId != null) { stream.println("Cluster ID: " + clusterId); } else { stream.println("No cluster ID found. The Kafka version is probably too old."); } }
// Verifies clusterIdCommand prints the mocked cluster ID to the supplied stream.
@Test public void testPrintClusterId() throws Exception { Admin adminClient = new MockAdminClient.Builder(). clusterId("QtNwvtfVQ3GEFpzOmDEE-w"). build(); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ClusterTool.clusterIdCommand(new PrintStream(stream), adminClient); assertEquals("Cluster ID: QtNwvtfVQ3GEFpzOmDEE-w\n", stream.toString()); }
// Builds an AST Statement from the parse tree, first extracting the referenced sources.
public Statement buildStatement(final ParserRuleContext parseTree) { return build(Optional.of(getSources(parseTree)), parseTree); }
// Verifies CREATE STREAM AS SELECT with a LIMIT clause is rejected with the expected message.
@Test public void shouldFailOnPersistentQueryLimitClauseStream() { // Given: final SingleStatementContext stmt = givenQuery("CREATE STREAM X AS SELECT * FROM TEST1 LIMIT 5;"); // Then: Exception exception = assertThrows(KsqlException.class, () -> { builder.buildStatement(stmt); }); String expectedMessage = "CREATE STREAM AS SELECT statements don't support LIMIT clause."; String actualMessage = exception.getMessage(); assertEquals(expectedMessage, actualMessage); }
/**
 * Gets the connection details of all VFS connection providers registered in the given manager.
 *
 * <p>Providers that return a {@code null} details list contribute no details. (The previous
 * implementation returned {@code null} from the {@code flatMap} mapper, which only works due to
 * undocumented leniency in the JDK stream implementation; an explicit empty stream is used now.)
 *
 * @param manager the connection manager to query for providers.
 * @return the combined list of all providers' connection details; possibly empty, never null.
 */
@NonNull
public List<VFSConnectionDetails> getAllDetails( @NonNull ConnectionManager manager ) {
  return getProviders( manager )
    .stream()
    .flatMap( provider -> {
      List<VFSConnectionDetails> providerDetails = provider.getConnectionDetails( manager );
      // Fully qualified to avoid requiring a new import in the enclosing file.
      return providerDetails != null
        ? providerDetails.stream()
        : java.util.stream.Stream.<VFSConnectionDetails>empty();
    } )
    .collect( Collectors.toList() );
}
// Verifies details from all VFS providers are concatenated in provider order and that a
// provider returning null contributes nothing.
@Test public void testGetAllDetailsReturnsAllDetailsFromAllVFSProvidersFromManager() { ConnectionProvider<? extends ConnectionDetails> provider1 = (ConnectionProvider<? extends ConnectionDetails>) mock( VFSConnectionProvider.class ); ConnectionProvider<? extends ConnectionDetails> provider2 = (ConnectionProvider<? extends ConnectionDetails>) mock( VFSConnectionProvider.class ); ConnectionProvider<? extends ConnectionDetails> provider3 = (ConnectionProvider<? extends ConnectionDetails>) mock( VFSConnectionProvider.class ); doReturn( Arrays.asList( provider1, provider2, provider3 ) ) .when( connectionManager ) .getProvidersByType( VFSConnectionProvider.class ); VFSConnectionDetails details1 = mock( VFSConnectionDetails.class ); VFSConnectionDetails details2 = mock( VFSConnectionDetails.class ); VFSConnectionDetails details3 = mock( VFSConnectionDetails.class ); VFSConnectionDetails details4 = mock( VFSConnectionDetails.class ); doReturn( Arrays.asList( details3, details4 ) ).when( provider1 ).getConnectionDetails( connectionManager ); doReturn( null ).when( provider2 ).getConnectionDetails( connectionManager ); doReturn( Arrays.asList( details1, details2 ) ).when( provider3 ).getConnectionDetails( connectionManager ); List<VFSConnectionDetails> allDetails = vfsConnectionManagerHelper.getAllDetails( connectionManager ); assertNotNull( allDetails ); assertEquals( 4, allDetails.size() ); assertSame( details3, allDetails.get( 0 ) ); assertSame( details4, allDetails.get( 1 ) ); assertSame( details1, allDetails.get( 2 ) ); assertSame( details2, allDetails.get( 3 ) ); }
// Sets all key/value pairs. Inside a transaction or pipeline each SET is queued individually;
// otherwise all SETs are issued in one batched command execution. Always reports success.
@Override public Boolean mSet(Map<byte[], byte[]> tuple) { if (isQueueing() || isPipelined()) { for (Entry<byte[], byte[]> entry: tuple.entrySet()) { write(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue()); } return true; } CommandBatchService es = new CommandBatchService(executorService); for (Entry<byte[], byte[]> entry: tuple.entrySet()) { es.writeAsync(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue()); } es.execute(); return true; }
// Verifies mSet stores every key/value pair, each retrievable afterwards via get.
@Test public void testMSet() { Map<byte[], byte[]> map = new HashMap<>(); for (int i = 0; i < 10; i++) { map.put(("test" + i).getBytes(), ("test" + i*100).getBytes()); } connection.mSet(map); for (Map.Entry<byte[], byte[]> entry : map.entrySet()) { assertThat(connection.get(entry.getKey())).isEqualTo(entry.getValue()); } }
// Decrypts the message with the key pair's private key on the bounded-elastic scheduler.
// Key/algorithm problems surface as RuntimeException; malformed ciphertext or padding
// maps to InvalidEncryptedMessageException.
@Override public Mono<byte[]> decrypt(byte[] encryptedMessage) { return Mono.just(this.keyPair) .map(KeyPair::getPrivate) .flatMap(privateKey -> { try { var cipher = Cipher.getInstance(TRANSFORMATION); cipher.init(Cipher.DECRYPT_MODE, privateKey); return Mono.just(cipher.doFinal(encryptedMessage)); } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException e) { return Mono.error(new RuntimeException( "Failed to read private key or the key was invalid.", e )); } catch (IllegalBlockSizeException | BadPaddingException e) { return Mono.error(new InvalidEncryptedMessageException( "Invalid encrypted message." )); } }) .subscribeOn(Schedulers.boundedElastic()); }
// Verifies decrypting malformed bytes fails with InvalidEncryptedMessageException.
@Test void shouldFailToDecryptMessage() { StepVerifier.create(service.decrypt("invalid-bytes".getBytes())) .verifyError(InvalidEncryptedMessageException.class); }
// Evaluates each staining rule's conditions against the exchange; for every rule whose
// conditions all match, its labels are merged into the result (later rules overwrite
// earlier ones on key collision). Null or empty rule sets yield an empty map.
Map<String, String> execute(ServerWebExchange exchange, StainingRule stainingRule) { if (stainingRule == null) { return Collections.emptyMap(); } List<StainingRule.Rule> rules = stainingRule.getRules(); if (CollectionUtils.isEmpty(rules)) { return Collections.emptyMap(); } Map<String, String> parsedLabels = new HashMap<>(); for (StainingRule.Rule rule : rules) { List<Condition> conditions = rule.getConditions(); Set<String> keys = new HashSet<>(); conditions.forEach(condition -> keys.add(condition.getKey())); Map<String, String> actualValues = SpringWebExpressionLabelUtils.resolve(exchange, keys); if (!ConditionUtils.match(actualValues, conditions)) { continue; } parsedLabels.putAll(KVPairUtils.toMap(rule.getLabels())); } return parsedLabels; }
// Verifies a rule whose header and query conditions both match stains the request with its label.
@Test public void testMatchCondition() { Condition condition1 = new Condition(); condition1.setKey("${http.header.uid}"); condition1.setOperation(Operation.EQUALS.toString()); condition1.setValues(Collections.singletonList("1000")); Condition condition2 = new Condition(); condition2.setKey("${http.query.source}"); condition2.setOperation(Operation.IN.toString()); condition2.setValues(Collections.singletonList("wx")); StainingRule.Rule rule = new StainingRule.Rule(); rule.setConditions(Arrays.asList(condition1, condition2)); KVPair kvPair = new KVPair(); kvPair.setKey("env"); kvPair.setValue("blue"); rule.setLabels(Collections.singletonList(kvPair)); StainingRule stainingRule = new StainingRule(); stainingRule.setRules(Collections.singletonList(rule)); MockServerHttpRequest request = MockServerHttpRequest.get("/users") .queryParam("source", "wx") .header("uid", "1000").build(); MockServerWebExchange exchange = new MockServerWebExchange.Builder(request).build(); RuleStainingExecutor executor = new RuleStainingExecutor(); Map<String, String> stainedLabels = executor.execute(exchange, stainingRule); assertThat(stainedLabels).isNotNull(); assertThat(stainedLabels.size()).isEqualTo(1); assertThat(stainedLabels.get("env")).isEqualTo("blue"); }
// Parses the comma-separated plugin configuration property into Plugin instances and
// appends the publish plugin when one is configured.
@Override public List<Plugin> plugins() { List<Plugin> plugins = configurationParameters.get(PLUGIN_PROPERTY_NAME, s -> Arrays.stream(s.split(",")) .map(String::trim) .map(PluginOption::parse) .map(pluginOption -> (Plugin) pluginOption) .collect(Collectors.toList())) .orElseGet(ArrayList::new); getPublishPlugin() .ifPresent(plugins::add); return plugins; }
// Verifies configuring a publish token yields the PublishFormatter plugin carrying that token.
@Test void getPluginNamesWithPublishToken() { ConfigurationParameters config = new MapConfigurationParameters( Constants.PLUGIN_PUBLISH_TOKEN_PROPERTY_NAME, "some/token"); assertThat(new CucumberEngineOptions(config).plugins().stream() .map(Options.Plugin::pluginString) .collect(toList()), hasItem("io.cucumber.core.plugin.PublishFormatter:some/token")); }
/**
 * Applies the given action to every (key, value) pair currently held by this instance.
 *
 * @param action the callback invoked once per entry.
 */
public void forEach(BiConsumer<? super PropertyKey, ? super Object> action) {
    entrySet().forEach(entry -> action.accept(entry.getKey(), entry.getValue()));
}
// Verifies forEach visits all default keys and also picks up a newly added runtime key.
@Test public void forEach() { Set<PropertyKey> expected = new HashSet<>(PropertyKey.defaultKeys()); Set<PropertyKey> actual = Sets.newHashSet(); mProperties.forEach((key, value) -> actual.add(key)); assertThat(actual, is(expected)); PropertyKey newKey = stringBuilder("forEachNew").build(); mProperties.put(newKey, "value", Source.RUNTIME); Set<PropertyKey> actual2 = Sets.newHashSet(); mProperties.forEach((key, value) -> actual2.add(key)); expected.add(newKey); assertThat(actual2, is(expected)); }
// Removes the node from the timer wheel and clears its variable-order links so the node
// can later be rescheduled cleanly.
public void deschedule(Node<K, V> node) { unlink(node); node.setNextInVariableOrder(null); node.setPreviousInVariableOrder(null); }
// Verifies descheduling a timer that was never scheduled is a harmless no-op.
@Test(dataProvider = "clock") public void deschedule_notScheduled(long clock) { timerWheel.nanos = clock; timerWheel.deschedule(new Timer(clock + 100)); }
/**
 * Formats the elapsed time between the given date and now as a human-readable duration.
 *
 * @param then the date to measure against.
 * @return the formatted duration string.
 */
public String formatDuration(Date then) {
    return formatDuration(approximateDuration(then));
}
// Verifies a ten-minute-old Date and an equivalent offset both format as "10 minutes".
@Test public void testFormatDuration() throws Exception { long tenMinMillis = java.util.concurrent.TimeUnit.MINUTES.toMillis(10); Date tenMinAgo = new Date(System.currentTimeMillis() - tenMinMillis); PrettyTime t = new PrettyTime(); String result = t.formatDuration(tenMinAgo); Assert.assertEquals("10 minutes", result); result = t.formatDuration(now.minusMinutes(10)); Assert.assertEquals("10 minutes", result); }
// Resolves a child path against this (directory) resource id. Only RESOLVE_FILE and
// RESOLVE_DIRECTORY are supported, a file target must not end with '/', and the actual
// path arithmetic is delegated to an OS-specific helper (Windows vs. POSIX).
@Override public LocalResourceId resolve(String other, ResolveOptions resolveOptions) { checkState(isDirectory(), "Expected the path is a directory, but had [%s].", pathString); checkArgument( resolveOptions.equals(StandardResolveOptions.RESOLVE_FILE) || resolveOptions.equals(StandardResolveOptions.RESOLVE_DIRECTORY), "ResolveOptions: [%s] is not supported.", resolveOptions); checkArgument( !(resolveOptions.equals(StandardResolveOptions.RESOLVE_FILE) && other.endsWith("/")), "The resolved file: [%s] should not end with '/'.", other); if (SystemUtils.IS_OS_WINDOWS) { return resolveLocalPathWindowsOS(other, resolveOptions); } else { return resolveLocalPath(other, resolveOptions); } }
// Verifies (on non-Windows only) that resolving a directory name keeps its trailing slash.
@Test public void testResolveHandleBadInputsInUnix() { if (SystemUtils.IS_OS_WINDOWS) { // Skip tests return; } assertEquals( toResourceIdentifier("/root/tmp/"), toResourceIdentifier("/root/").resolve("tmp/", StandardResolveOptions.RESOLVE_DIRECTORY)); }
// Aggregates per-rate-limiter health (for limiters opted into health reporting) into one
// overall status via the configured StatusAggregator, attaching each limiter as a detail.
@Override public Health health() { Map<String, Health> healths = rateLimiterRegistry.getAllRateLimiters().stream() .filter(this::isRegisterHealthIndicator) .collect(Collectors.toMap(RateLimiter::getName, this::mapRateLimiterHealth)); Status status = statusAggregator.getAggregateStatus(healths.values().stream().map(Health::getStatus).collect(Collectors.toSet())); return Health.status(status).withDetails(healths).build(); }
// Verifies that when allowHealthIndicatorToFail is false, a rate-limited limiter yields an
// overall UNKNOWN status while the per-limiter detail reports RATE_LIMITED with its metrics.
@Test public void healthIndicatorMaxImpactCanBeOverridden() throws Exception { // given RateLimiterConfig config = mock(RateLimiterConfig.class); AtomicRateLimiter.AtomicRateLimiterMetrics metrics = mock(AtomicRateLimiter.AtomicRateLimiterMetrics.class); AtomicRateLimiter rateLimiter = mock(AtomicRateLimiter.class); RateLimiterRegistry rateLimiterRegistry = mock(RateLimiterRegistry.class); io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties instanceProperties = mock(io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties.class); RateLimiterConfigurationProperties rateLimiterProperties = mock(RateLimiterConfigurationProperties.class); //when when(rateLimiter.getRateLimiterConfig()).thenReturn(config); when(rateLimiter.getName()).thenReturn("test"); when(rateLimiterProperties.findRateLimiterProperties("test")).thenReturn(Optional.of(instanceProperties)); when(instanceProperties.getRegisterHealthIndicator()).thenReturn(true); boolean allowHealthIndicatorToFail = false; // do not allow health indicator to fail when(instanceProperties.getAllowHealthIndicatorToFail()).thenReturn(allowHealthIndicatorToFail); when(rateLimiter.getMetrics()).thenReturn(metrics); when(rateLimiter.getDetailedMetrics()).thenReturn(metrics); when(rateLimiterRegistry.getAllRateLimiters()).thenReturn(Set.of(rateLimiter)); when(config.getTimeoutDuration()).thenReturn(Duration.ofNanos(30L)); when(metrics.getAvailablePermissions()) .thenReturn(-2); when(metrics.getNumberOfWaitingThreads()) .thenReturn(2); when(metrics.getNanosToWait()) .thenReturn(40L); // then RateLimitersHealthIndicator healthIndicator = new RateLimitersHealthIndicator(rateLimiterRegistry, rateLimiterProperties, new SimpleStatusAggregator()); Health health = healthIndicator.health(); then(health.getStatus()).isEqualTo(Status.UNKNOWN); then(((Health) health.getDetails().get("test")).getStatus()).isEqualTo(new 
Status("RATE_LIMITED")); then(health.getDetails().get("test")).isInstanceOf(Health.class); then(((Health) health.getDetails().get("test")).getDetails()) .contains( entry("availablePermissions", -2), entry("numberOfWaitingThreads", 2) ); }
// Serializes the cookie to a Set-Cookie header string via WebContextHelper and adds it to the response.
@Override public void addResponseCookie(Cookie cookie) { this.response.addHeader("Set-Cookie", WebContextHelper.createCookieHeader(cookie)); }
// Verifies a max-age cookie renders a Set-Cookie header with Path, Max-Age, Expires and SameSite.
@Test public void testCookieExpires() { var mockResponse = new MockHttpServletResponse(); WebContext context = new JEEContext(request, mockResponse); Cookie c = new Cookie("thename","thevalue"); c.setMaxAge(1000); context.addResponseCookie(c); var header = mockResponse.getHeader("Set-Cookie"); assertNotNull(header); assertTrue(header.matches("thename=thevalue; Path=/; Max-Age=1000; Expires=.* GMT; SameSite=Lax")); }
// Completes the adapted CompletableFuture exceptionally with the callback's error.
@Override public void onError(Throwable e) { _future.completeExceptionally(e); }
// Verifies onError completes the future exceptionally (not cancelled) and preserves the cause.
@Test public void testError() { CompletableFuture<String> future = new CompletableFuture<>(); CompletableFutureCallbackAdapter<String> adapter = new CompletableFutureCallbackAdapter<>(future); Throwable error = new IllegalArgumentException("exception"); adapter.onError(error); assertTrue(future.isDone()); assertTrue(future.isCompletedExceptionally()); assertFalse(future.isCancelled()); try { future.get(); } catch (ExecutionException | InterruptedException e) { assertTrue(e instanceof ExecutionException); assertEquals(e.getCause(), error); } }
// Converts the table to the target type, defaulting the transposed flag to false.
@Override public <T> T convert(DataTable dataTable, Type type) { return convert(dataTable, type, false); }
// Verifies conversion of a data table into a map from string keys to lists of doubles.
@Test void convert_to_map_of_primitive_to_list_of_primitive() { DataTable table = parse("", "| KMSY | 29.993333 | -90.258056 |", "| KSFO | 37.618889 | -122.375 |", "| KSEA | 47.448889 | -122.309444 |", "| KJFK | 40.639722 | -73.778889 |"); Map<String, List<Double>> expected = new HashMap<String, List<Double>>() { { put("KMSY", asList(29.993333, -90.258056)); put("KSFO", asList(37.618889, -122.375)); put("KSEA", asList(47.448889, -122.309444)); put("KJFK", asList(40.639722, -73.778889)); } }; assertEquals(expected, converter.convert(table, MAP_OF_STRING_TO_LIST_OF_DOUBLE)); }
// Translates a restricted XPath subset into a Matcher tree.
// Supported forms: /text(), /node(), /descendant::node() (plus the legacy single-colon
// spelling kept for compatibility), /@* , /@name, /* (any child), // (subtree), /name.
// Unknown namespace prefixes or unsupported syntax yield Matcher.FAIL; the empty string
// matches the current element.
public Matcher parse(String xpath) { if (xpath.equals("/text()")) { return TextMatcher.INSTANCE; } else if (xpath.equals("/node()")) { return NodeMatcher.INSTANCE; } else if (xpath.equals("/descendant::node()") || xpath.equals("/descendant:node()")) { // for compatibility return new CompositeMatcher(TextMatcher.INSTANCE, new ChildMatcher(new SubtreeMatcher(NodeMatcher.INSTANCE))); } else if (xpath.equals("/@*")) { return AttributeMatcher.INSTANCE; } else if (xpath.length() == 0) { return ElementMatcher.INSTANCE; } else if (xpath.startsWith("/@")) { String name = xpath.substring(2); String prefix = null; int colon = name.indexOf(':'); if (colon != -1) { prefix = name.substring(0, colon); name = name.substring(colon + 1); } if (prefixes.containsKey(prefix)) { return new NamedAttributeMatcher(prefixes.get(prefix), name); } else { return Matcher.FAIL; } } else if (xpath.startsWith("/*")) { return new ChildMatcher(parse(xpath.substring(2))); } else if (xpath.startsWith("///")) { return Matcher.FAIL; } else if (xpath.startsWith("//")) { return new SubtreeMatcher(parse(xpath.substring(1))); } else if (xpath.startsWith("/")) { int slash = xpath.indexOf('/', 1); if (slash == -1) { slash = xpath.length(); } String name = xpath.substring(1, slash); String prefix = null; int colon = name.indexOf(':'); if (colon != -1) { prefix = name.substring(0, colon); name = name.substring(colon + 1); } if (prefixes.containsKey(prefix)) { return new NamedElementMatcher(prefixes.get(prefix), name, parse(xpath.substring(slash))); } else { return Matcher.FAIL; } } else { return Matcher.FAIL; } }
// Verifies "/text()" matches text content only, and descending from it yields FAIL.
@Test public void testText() { Matcher matcher = parser.parse("/text()"); assertTrue(matcher.matchesText()); assertFalse(matcher.matchesElement()); assertFalse(matcher.matchesAttribute(NS, "name")); assertEquals(Matcher.FAIL, matcher.descend(NS, "name")); }
// Looks up a single field by delegating to the batch lookup with a one-element set.
@Override public Optional<FieldTypes> get(final String fieldName) { return Optional.ofNullable(get(ImmutableSet.of(fieldName)).get(fieldName)); }
// Verifies single-field lookup aggregates the field's type information across all indices.
@Test public void getSingleField() { dbService.save(createDto("graylog_0", "abc", Collections.emptySet())); dbService.save(createDto("graylog_1", "xyz", Collections.emptySet())); dbService.save(createDto("graylog_2", "xyz", Collections.emptySet())); dbService.save(createDto("graylog_3", "xyz", of( FieldTypeDTO.create("yolo1", "text") ))); final FieldTypes result = lookup.get("message").orElse(null); assertThat(result).isNotNull(); assertThat(result.fieldName()).isEqualTo("message"); assertThat(result.types()).containsOnly(FieldTypes.Type.builder() .type("string") .properties(of("full-text-search")) .indexNames(of("graylog_0", "graylog_1", "graylog_2", "graylog_3")) .build()); }
/**
 * Polls for the next dead-letter-queue entry, waiting up to the given timeout.
 *
 * @param timeout maximum time to wait (units per the pollEntryBytes contract — presumably
 *                milliseconds; confirm against callers).
 * @return the deserialized entry, or {@code null} if none arrived within the timeout.
 * @throws IOException if reading or deserialization fails.
 * @throws InterruptedException if interrupted while waiting.
 */
public DLQEntry pollEntry(long timeout) throws IOException, InterruptedException {
    byte[] serialized = pollEntryBytes(timeout);
    return serialized == null ? null : DLQEntry.deserialize(serialized);
}
// Verifies entries written before the writer closes are flushed and readable back in order.
@Test public void testFlushAfterWriterClose() throws Exception { Event event = new Event(); event.setField("T", generateMessageContent(PAD_FOR_BLOCK_SIZE_EVENT/8)); Timestamp timestamp = new Timestamp(); try (DeadLetterQueueWriter writeManager = DeadLetterQueueWriter .newBuilder(dir, BLOCK_SIZE, defaultDlqSize, Duration.ofSeconds(1)) .build()) { for (int i = 0; i < 6; i++) { DLQEntry entry = new DLQEntry(event, "", "", Integer.toString(i), timestamp); writeManager.writeEntry(entry); } } try (DeadLetterQueueReader readManager = new DeadLetterQueueReader(dir)) { for (int i = 0; i < 6;i++) { DLQEntry entry = readManager.pollEntry(100); assertThat(entry.getReason(), is(String.valueOf(i))); } } }
/**
 * Returns the smallest of three int values.
 *
 * @param a the first value.
 * @param b the second value.
 * @param c the third value.
 * @return the minimum of {@code a}, {@code b} and {@code c}.
 */
public static int min(int a, int b, int c) {
    return Math.min(a, Math.min(b, c));
}
// Verifies MathEx.min over a 2-D double array returns the smallest element.
@Test public void testMin_doubleArrArr() { System.out.println("min"); double[][] A = { {0.7220180, 0.07121225, 0.6881997}, {-0.2648886, -0.89044952, 0.3700456}, {-0.6391588, 0.44947578, 0.6240573} }; assertEquals(-0.89044952, MathEx.min(A), 1E-7); }
// Returns the thread-local value only if the backing map and slot are already populated;
// unlike get(), this never triggers initialValue() and returns null when unset.
@SuppressWarnings("unchecked") public final V getIfExists() { InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.getIfSet(); if (threadLocalMap != null) { Object v = threadLocalMap.indexedVariable(index); if (v != InternalThreadLocalMap.UNSET) { return (V) v; } } return null; }
// Verifies getIfExists returns null before initialization, the value after get(), and null after removeAll().
@Test public void testGetIfExists() { FastThreadLocal<Boolean> threadLocal = new FastThreadLocal<Boolean>() { @Override protected Boolean initialValue() { return Boolean.TRUE; } }; assertNull(threadLocal.getIfExists()); assertTrue(threadLocal.get()); assertTrue(threadLocal.getIfExists()); FastThreadLocal.removeAll(); assertNull(threadLocal.getIfExists()); }
/**
 * Checks that the caller may perform the given operation on a Kafka topic by
 * delegating to the configured authorization provider.
 *
 * @param securityContext the caller's security context
 * @param topicName the topic being accessed
 * @param operation the ACL operation to authorize
 */
@Override
public void checkTopicAccess(
    final KsqlSecurityContext securityContext,
    final String topicName,
    final AclOperation operation
) {
    // The provider API takes a list of operations; wrap the single one.
    final ImmutableList<AclOperation> operations = ImmutableList.of(operation);
    authorizationProvider.checkPrivileges(
        securityContext,
        AuthObjectType.TOPIC,
        topicName,
        operations);
}
// The validator must forward topic access checks verbatim to the underlying
// authorization provider, wrapping the single operation in a list.
@Test
public void shouldCheckTopicPrivilegesOnProvidedAccessValidator() {
    // When
    accessValidator.checkTopicAccess(securityContext, "topic1", AclOperation.WRITE);

    // Then
    verify(authorizationProvider, times(1))
        .checkPrivileges(
            securityContext,
            AuthObjectType.TOPIC,
            "topic1",
            ImmutableList.of(AclOperation.WRITE));
}
/**
 * Returns this value reference as a Double.
 *
 * @param parameters parameter bindings used to resolve PARAMETER references
 * @return the double value
 * @throws IllegalStateException if the reference is neither a DOUBLE holding a
 *         Number nor a PARAMETER
 */
public Double asDouble(Map<String, ValueReference> parameters) {
    switch (valueType()) {
        case DOUBLE:
            final Object raw = value();
            if (raw instanceof Number) {
                return ((Number) raw).doubleValue();
            }
            // DOUBLE-typed reference whose payload is not numeric: report the payload class.
            throw new IllegalStateException("Expected value reference of type DOUBLE but got " + raw.getClass());
        case PARAMETER:
            // Resolve through the supplied parameter map.
            return asType(parameters, Double.class);
        default:
            throw new IllegalStateException("Expected value reference of type DOUBLE but got " + valueType());
    }
}
// asDouble() must return the wrapped double for DOUBLE references and throw
// IllegalStateException (naming the actual type) for non-double references.
@Test
public void asDouble() {
    assertThat(ValueReference.of(1.0d).asDouble(Collections.emptyMap())).isEqualTo(1.0d);
    assertThatThrownBy(() -> ValueReference.of("Test").asDouble(Collections.emptyMap()))
            .isInstanceOf(IllegalStateException.class)
            .hasMessage("Expected value reference of type DOUBLE but got STRING");
}
/**
 * Truncates the target table before rows are written, honoring the step's
 * copy/partition topology so the truncate happens exactly once. No-op when
 * partitioning is enabled or the table name comes from a field.
 *
 * @throws KettleDatabaseException if the underlying TRUNCATE fails
 */
void truncateTable() throws KettleDatabaseException {
    if ( !meta.isPartitioningEnabled() && !meta.isTableNameInField() ) {
        // Only the first one truncates in a non-partitioned step copy
        // (copy 0 on slave 0) — OR any copy running with an explicit partition id.
        if ( meta.truncateTable() && ( ( getCopy() == 0 && getUniqueStepNrAcrossSlaves() == 0 ) || !Utils.isEmpty( getPartitionID() ) ) ) {
            // Schema and table names may contain variables; resolve them first.
            data.db.truncateTable( environmentSubstitute( meta.getSchemaName() ), environmentSubstitute( meta
                .getTableName() ) );
        }
    }
}
// A non-zero step copy must still truncate when an explicit partition id is
// set (partitioned run), so db.truncateTable() is expected exactly once.
@Test
public void testTruncateTable_on_PartitionId() throws Exception {
    when( tableOutputMeta.truncateTable() ).thenReturn( true );
    when( tableOutputSpy.getCopy() ).thenReturn( 1 );
    when( tableOutputSpy.getUniqueStepNrAcrossSlaves() ).thenReturn( 0 );
    when( tableOutputSpy.getPartitionID() ).thenReturn( "partition id" );
    when( tableOutputMeta.getTableName() ).thenReturn( "fooTable" );
    when( tableOutputMeta.getSchemaName() ).thenReturn( "barSchema" );

    tableOutputSpy.truncateTable();

    verify( db ).truncateTable( any(), any() );
}
/**
 * Federates the query to all resolved target search chains and merges their
 * hits into a single Result. Targets come from the query's sources plus any
 * provided by the configured target selector; unresolved chains are surfaced
 * as warnings on the result.
 *
 * @param query the incoming query
 * @param execution the current execution context
 * @return the merged result across all federation targets
 */
@Override
public Result search(Query query, Execution execution) {
    Result mergedResults = execution.search(query);

    var targets = getTargets(query.getModel().getSources(), query.properties());
    warnIfUnresolvedSearchChains(extractErrors(targets), mergedResults.hits());

    // Drop targets that cannot serve any of the restricted document types.
    var prunedTargets = pruneTargetsWithoutDocumentTypes(query.getModel().getRestrict(), extractSpecs(targets));

    var regularTargetHandlers = resolveSearchChains(prunedTargets, execution.searchChainRegistry());
    query.errors().addAll(regularTargetHandlers.errors());

    // LinkedHashSet: preserve target order while de-duplicating.
    Set<Target> targetHandlers = new LinkedHashSet<>(regularTargetHandlers.data());
    targetHandlers.addAll(getAdditionalTargets(query, execution, targetSelector));

    traceTargets(query, targetHandlers);

    if (targetHandlers.isEmpty())
        return mergedResults;
    else if (targetHandlers.size() > 1)
        search(query, execution, targetHandlers, mergedResults);
    else if (shouldExecuteTargetLongerThanThread(query, targetHandlers.iterator().next()))
        search(query, execution, targetHandlers, mergedResults); // one target, but search in separate thread
    else
        search(query, execution, first(targetHandlers), mergedResults); // search in this thread
    return mergedResults;
}
// Exercises a custom TargetSelector registered by component id: the federated
// result must contain a "source:myChain" hit group wrapping the chain's hit.
@Test
void custom_federation_target() {
    ComponentId targetSelectorId = ComponentId.fromString("TargetSelector");
    ComponentRegistry<TargetSelector> targetSelectors = new ComponentRegistry<>();
    targetSelectors.register(targetSelectorId, new TestTargetSelector());

    FederationSearcher searcher = new FederationSearcher(
            new FederationConfig(new FederationConfig.Builder().targetSelector(targetSelectorId.toString())),
            SchemaInfo.empty(), targetSelectors);

    Query query = new Query();
    query.setTimeout(20000);
    Result result = new Execution(searcher, Context.createContextStub()).search(query);

    HitGroup myChainGroup = (HitGroup) result.hits().get(0);
    assertEquals(myChainGroup.getId(), new URI("source:myChain"));
    assertEquals(myChainGroup.get(0).getId(), new URI("myHit"));
}
/**
 * Writes one record as a Parquet message. On failure the offending message is
 * logged before the exception is rethrown.
 *
 * @param record a protobuf Message or Message.Builder to write
 */
@Override
public void write(T record) {
    recordConsumer.startMessage();
    try {
        messageWriter.writeTopLevelMessage(record);
    } catch (RuntimeException e) {
        // Builders are materialized so the log shows the actual message content.
        final Message failed = (record instanceof Message.Builder)
            ? ((Message.Builder) record).build()
            : (Message) record;
        LOG.error("Cannot write message {}: {}", e.getMessage(), failed);
        throw e;
    }
    recordConsumer.endMessage();
}
// Writing a message with a set optional inner message must emit the nested
// group (startGroup/endGroup) with its fields, in exact field order.
@Test
public void testOptionalInnerMessage() throws Exception {
    RecordConsumer readConsumerMock = Mockito.mock(RecordConsumer.class);
    ProtoWriteSupport<TestProtobuf.MessageA> instance =
        createReadConsumerInstance(TestProtobuf.MessageA.class, readConsumerMock);

    TestProtobuf.MessageA.Builder msg = TestProtobuf.MessageA.newBuilder();
    msg.getInnerBuilder().setOne("one");

    instance.write(msg.build());

    InOrder inOrder = Mockito.inOrder(readConsumerMock);

    inOrder.verify(readConsumerMock).startMessage();
    inOrder.verify(readConsumerMock).startField("inner", 0);
    inOrder.verify(readConsumerMock).startGroup();
    inOrder.verify(readConsumerMock).startField("one", 0);
    inOrder.verify(readConsumerMock).addBinary(Binary.fromConstantByteArray("one".getBytes()));
    inOrder.verify(readConsumerMock).endField("one", 0);
    inOrder.verify(readConsumerMock).endGroup();
    inOrder.verify(readConsumerMock).endField("inner", 0);
    inOrder.verify(readConsumerMock).endMessage();
    Mockito.verifyNoMoreInteractions(readConsumerMock);
}
/**
 * Returns whether the input matches any supported IPv6 textual form: standard,
 * hex-compressed, link-local with zone index, IPv4-mapped, or mixed notation.
 *
 * @param input the candidate address string
 * @return {@code true} if the input is a valid IPv6 address in any form
 */
public static boolean isIPv6Address(final String input) {
    // Try the two most common notations first.
    if (isIPv6StdAddress(input) || isIPv6HexCompressedAddress(input)) {
        return true;
    }
    // Fall back to the less common variants.
    return isLinkLocalIPv6WithZoneIndex(input)
        || isIPv6IPv4MappedAddress(input)
        || isIPv6MixedAddress(input);
}
// Accepts standard, compressed and "::" IPv6 forms; rejects a malformed
// address missing a group separator.
@Test
public void isIPv6Address() {
    assertThat(NetAddressValidatorUtil.isIPv6Address("2000:0000:0000:0000:0001:2345:6789:abcd")).isTrue();
    assertThat(NetAddressValidatorUtil.isIPv6Address("2001:DB8:0:0:8:800:200C:417A")).isTrue();
    assertThat(NetAddressValidatorUtil.isIPv6Address("2001:DB8::8:800:200C:417A")).isTrue();
    assertThat(NetAddressValidatorUtil.isIPv6Address("2001:DB8::8:800:200C141aA")).isFalse();
    assertThat(NetAddressValidatorUtil.isIPv6Address("::")).isTrue();
}
/**
 * Not supported by this cache adapter; always throws.
 *
 * @param replaceExistingValues ignored
 * @throws MethodNotAvailableException always
 */
@Override
@MethodNotAvailable
public void loadAll(boolean replaceExistingValues) {
    throw new MethodNotAvailableException();
}
// loadAll is marked @MethodNotAvailable on this adapter and must throw.
@Test(expected = MethodNotAvailableException.class)
public void testLoadAll() {
    adapterWithLoader.loadAll(true);
}
/**
 * Returns a view of this float array whose element comparisons use the exact
 * equality correspondence instead of a tolerance-based one.
 */
public FloatArrayAsIterable usingExactEquality() {
    return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
// Exact-equality contains() must fail for a value merely close to an element,
// and the failure must spell out the expected value and the actual elements.
@Test
public void usingExactEquality_contains_failure() {
    expectFailureWhenTestingThat(array(1.1f, JUST_OVER_2POINT2, 3.3f))
        .usingExactEquality()
        .contains(2.2f);
    assertFailureKeys("value of", "expected to contain", "testing whether", "but was");
    assertFailureValue("expected to contain", Float.toString(2.2f));
    assertFailureValue("testing whether", "actual element is exactly equal to expected element");
    assertFailureValue("but was", "[" + 1.1f + ", " + JUST_OVER_2POINT2 + ", " + 3.3f + "]");
}
/**
 * Runs the character-level state machine over the input, feeding each
 * transition to the handler, then finalizes and returns the handler's result.
 *
 * @param preResolved the input text to scan for parameter patterns
 * @param paramsHandler callback receiving pattern events and building the result
 * @return the resolved output produced by the handler
 */
public String process(String preResolved, ParamHandler paramsHandler) {
    ReaderState state = ReaderState.NOT_IN_PATTERN;
    for (final char current : preResolved.toCharArray()) {
        state = state.interpret(current, paramsHandler);
    }
    // Let the handler flush any trailing state (e.g. an unterminated pattern).
    paramsHandler.handleAfterResolution(state);
    return paramsHandler.getResult();
}
// Even when the handler throws while resolving a pattern, the shared
// IN_PATTERN buffer must be cleared so later runs start from a clean state.
@Test
public void shouldClearPatternWhenParameterCannotBeResolved() throws Exception {
    ParamStateMachine stateMachine = new ParamStateMachine();
    doThrow(new IllegalStateException()).when(handler).handlePatternFound(any(StringBuilder.class));

    try {
        stateMachine.process("#{pattern}", handler);
    } catch (Exception e) {
        //Ignore to assert on the pattern
    }

    assertThat(ParamStateMachine.ReaderState.IN_PATTERN.pattern.length(), is(0));
    verify(handler).handlePatternFound(any(StringBuilder.class));
}
/**
 * Resolves the local path of the Prism binary zip, downloading (http) or
 * copying (local path) it into the user's prism bin directory unless a cached
 * copy already exists there.
 *
 * @return the filesystem path of the resolved artifact
 * @throws IOException if the download or copy fails
 */
String resolve() throws IOException {

  // Default source: the official Github release download URL for this SDK version.
  String from =
      String.format("%s/v%s/%s.zip", GITHUB_DOWNLOAD_PREFIX, getSDKVersion(), buildFileName());

  if (!Strings.isNullOrEmpty(options.getPrismLocation())) {
    // Fix: the message previously had no %s placeholder and reported the default
    // URL ('from') instead of the user-supplied location actually being validated.
    checkArgument(
        !options.getPrismLocation().startsWith(GITHUB_TAG_PREFIX),
        "Provided --prismLocation URL is not an Apache Beam Github "
            + "Release page URL or download URL: %s",
        options.getPrismLocation());

    from = options.getPrismLocation();
  }

  String fromFileName = getNameWithoutExtension(from);
  Path to = Paths.get(userHome(), PRISM_BIN_PATH, fromFileName);

  // A cached artifact wins; skip any download or copy.
  if (Files.exists(to)) {
    return to.toString();
  }

  createDirectoryIfNeeded(to);

  if (from.startsWith("http")) {
    String result = resolve(new URL(from), to);
    checkState(Files.exists(to), "Resolved location does not exist: %s", result);
    return result;
  }

  String result = resolve(Paths.get(from), to);
  checkState(Files.exists(to), "Resolved location does not exist: %s", result);
  return result;
}
// A direct HTTP download URL passed via --prismLocation must be fetched and
// placed into the destination directory.
@Test
public void givenHttpPrismLocationOption_thenResolves() throws IOException {
    assertThat(Files.exists(DESTINATION_DIRECTORY)).isFalse();
    PrismPipelineOptions options = options();
    options.setPrismLocation(
        "https://github.com/apache/beam/releases/download/v2.57.0/apache_beam-v2.57.0-prism-darwin-arm64.zip");
    PrismLocator underTest = new PrismLocator(options);
    String got = underTest.resolve();
    assertThat(got).contains(DESTINATION_DIRECTORY.toString());
    Path gotPath = Paths.get(got);
    assertThat(Files.exists(gotPath)).isTrue();
}
/**
 * Returns whether Hadoop security is enabled AND the given user authenticated
 * via Kerberos.
 *
 * @param ugi the user to inspect
 * @return {@code true} only when cluster security is on and the user's
 *         authentication method is KERBEROS
 */
public static boolean isKerberosSecurityEnabled(UserGroupInformation ugi) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return false;
    }
    return UserGroupInformation.AuthenticationMethod.KERBEROS == ugi.getAuthenticationMethod();
}
// Security enabled but the user authenticated via PROXY (not KERBEROS) must
// not count as Kerberos security.
@Test
public void isKerberosSecurityEnabled_NoKerberos_ReturnsFalse() {
    UserGroupInformation.setConfiguration(
        getHadoopConfigWithAuthMethod(AuthenticationMethod.PROXY));
    UserGroupInformation userWithAuthMethodOtherThanKerberos =
        createTestUser(AuthenticationMethod.PROXY);

    boolean result = HadoopUtils.isKerberosSecurityEnabled(userWithAuthMethodOtherThanKerberos);

    assertFalse(result);
}
/**
 * Performs an HTTP GET and returns the response body decoded with the given
 * charset.
 *
 * @param urlString the URL to fetch
 * @param customCharset charset used to decode the response body
 * @return the response body as a string
 */
public static String get(String urlString, Charset customCharset) {
    final HttpRequest request = HttpRequest.get(urlString).charset(customCharset);
    return request.execute().body();
}
// Manual check against a live QQ OAuth endpoint; disabled because it needs
// network access.
@Test
@Disabled
public void getTest2() {
    // This URL is special: with the User-Agent removed it serves a JS redirect
    // page; if the header is set, 302 redirect following must be enabled.
    // The custom default headers have no effect here.
    final String result = HttpRequest
        .get("https://graph.qq.com/oauth2.0/authorize?response_type=code&client_id=101457313&redirect_uri=http%3A%2F%2Fwww.benmovip.com%2Fpay-cloud%2Fqqlogin%2FgetCode&state=ok")
        .removeHeader(Header.USER_AGENT).execute().body();
    Console.log(result);
}
/**
 * Matches the given specs against GCS, resolving glob and non-glob specs via
 * separate batch calls while preserving the caller's spec order in the output.
 *
 * @param specs GCS URIs, possibly containing wildcards
 * @return one MatchResult per spec, in the same order
 * @throws IOException if either batch lookup fails
 */
@Override
protected List<MatchResult> match(List<String> specs) throws IOException {
    List<GcsPath> gcsPaths = toGcsPaths(specs);
    List<GcsPath> globs = Lists.newArrayList();
    List<GcsPath> nonGlobs = Lists.newArrayList();
    List<Boolean> isGlobBooleans = Lists.newArrayList();

    // Partition the specs, remembering each spec's kind so results can be
    // re-interleaved in the original order afterwards.
    for (GcsPath path : gcsPaths) {
        if (GcsUtil.isWildcard(path)) {
            globs.add(path);
            isGlobBooleans.add(true);
        } else {
            nonGlobs.add(path);
            isGlobBooleans.add(false);
        }
    }

    Iterator<MatchResult> globsMatchResults = matchGlobs(globs).iterator();
    Iterator<MatchResult> nonGlobsMatchResults = matchNonGlobs(nonGlobs).iterator();

    ImmutableList.Builder<MatchResult> ret = ImmutableList.builder();
    for (Boolean isGlob : isGlobBooleans) {
        if (isGlob) {
            checkState(globsMatchResults.hasNext(), "Expect globsMatchResults has next: %s", globs);
            ret.add(globsMatchResults.next());
        } else {
            checkState(
                nonGlobsMatchResults.hasNext(), "Expect nonGlobsMatchResults has next: %s", nonGlobs);
            ret.add(nonGlobsMatchResults.next());
        }
    }
    checkState(
        !globsMatchResults.hasNext(),
        "Internal error encountered in GcsFilesystem: expected no more elements in globsMatchResults.");
    // Fix: this diagnostic previously named globsMatchResults (copy-paste error).
    checkState(
        !nonGlobsMatchResults.hasNext(),
        "Internal error encountered in GcsFilesystem: expected no more elements in nonGlobsMatchResults.");
    return ret.build();
}
// Mixed glob and non-glob specs: results must come back in spec order, with
// glob expansion, NOT_FOUND for the missing file, and the literal match.
@Test
public void testMatch() throws Exception {
    Objects modelObjects = new Objects();
    List<StorageObject> items = new ArrayList<>();
    // A directory
    items.add(new StorageObject().setBucket("testbucket").setName("testdirectory/"));

    // Files within the directory
    items.add(createStorageObject("gs://testbucket/testdirectory/file1name", 1L /* fileSize */));
    items.add(createStorageObject("gs://testbucket/testdirectory/file2name", 2L /* fileSize */));
    items.add(createStorageObject("gs://testbucket/testdirectory/file3name", 3L /* fileSize */));
    items.add(createStorageObject("gs://testbucket/testdirectory/file4name", 4L /* fileSize */));
    items.add(createStorageObject("gs://testbucket/testdirectory/otherfile", 5L /* fileSize */));
    items.add(createStorageObject("gs://testbucket/testdirectory/anotherfile", 6L /* fileSize */));
    modelObjects.setItems(items);
    when(mockGcsUtil.listObjects(eq("testbucket"), anyString(), isNull(String.class)))
        .thenReturn(modelObjects);

    // Non-glob lookups: one missing file, one present.
    List<GcsPath> gcsPaths = ImmutableList.of(
        GcsPath.fromUri("gs://testbucket/testdirectory/non-exist-file"),
        GcsPath.fromUri("gs://testbucket/testdirectory/otherfile"));
    when(mockGcsUtil.getObjects(eq(gcsPaths)))
        .thenReturn(
            ImmutableList.of(
                StorageObjectOrIOException.create(new FileNotFoundException()),
                StorageObjectOrIOException.create(
                    createStorageObject("gs://testbucket/testdirectory/otherfile", 4L))));

    List<String> specs = ImmutableList.of(
        "gs://testbucket/testdirectory/file[1-3]*",
        "gs://testbucket/testdirectory/non-exist-file",
        "gs://testbucket/testdirectory/otherfile");
    List<MatchResult> matchResults = gcsFileSystem.match(specs);
    assertEquals(3, matchResults.size());
    assertEquals(Status.OK, matchResults.get(0).status());
    assertThat(
        ImmutableList.of(
            "gs://testbucket/testdirectory/file1name",
            "gs://testbucket/testdirectory/file2name",
            "gs://testbucket/testdirectory/file3name"),
        contains(toFilenames(matchResults.get(0)).toArray()));
    assertEquals(Status.NOT_FOUND, matchResults.get(1).status());
    assertEquals(Status.OK, matchResults.get(2).status());
    assertThat(
        ImmutableList.of("gs://testbucket/testdirectory/otherfile"),
        contains(toFilenames(matchResults.get(2)).toArray()));
}
/**
 * Returns the effective SQL migrations: the common migrations overridden by
 * any database-specific migration sharing the same file name.
 *
 * @return a stream of the migrations to apply
 * @throws IllegalStateException with a classpath hint if duplicate migration
 *         files are found (JobRunr present twice on the classpath)
 */
public Stream<SqlMigration> getMigrations() {
    SqlMigrationProvider migrationProvider = getMigrationProvider();

    try {
        // Key by file name so database-specific scripts replace common ones.
        final Map<String, SqlMigration> commonMigrations =
            getCommonMigrations(migrationProvider).stream().collect(toMap(SqlMigration::getFileName, m -> m));
        final Map<String, SqlMigration> databaseSpecificMigrations =
            getDatabaseSpecificMigrations(migrationProvider).stream().collect(toMap(SqlMigration::getFileName, p -> p));

        final HashMap<String, SqlMigration> actualMigrations = new HashMap<>(commonMigrations);
        actualMigrations.putAll(databaseSpecificMigrations);

        return actualMigrations.values().stream();
    } catch (IllegalStateException e) {
        // Collectors.toMap throws "Duplicate key ..." when the same migration file
        // is discovered twice — i.e. JobRunr is on the classpath more than once.
        // Guard against a null message so we never mask the cause with an NPE.
        if (e.getMessage() != null && e.getMessage().startsWith("Duplicate key")) {
            throw new IllegalStateException("It seems you have JobRunr twice on your classpath. Please make sure to only have one JobRunr jar in your classpath.", e);
        }
        throw e;
    }
}
// The provider (with no database type) must at least expose the baseline
// migration that creates the migrations table.
@Test
void testGetMigrations() {
    final DatabaseMigrationsProvider databaseCreator = new DatabaseMigrationsProvider(null);

    final Stream<SqlMigration> databaseSpecificMigrations = databaseCreator.getMigrations();

    assertThat(databaseSpecificMigrations).anyMatch(migration -> migration.getFileName().equals("v000__create_migrations_table.sql"));
}
/**
 * Delegates to the shared MessageIdAdvUtils helper so hashing is consistent
 * across MessageId implementations.
 */
@Override
public int hashCode() {
    return MessageIdAdvUtils.hashCode(this);
}
// Batch ids with batchIndex -1 (unbatched) must hash the same as the
// equivalent non-batched MessageIdImpl.
@Test
public void hashCodeUnbatchedTest() {
    BatchMessageIdImpl batchMsgId1 = new BatchMessageIdImpl(0, 0, 0, -1);
    BatchMessageIdImpl batchMsgId2 = new BatchMessageIdImpl(1, 1, 1, -1);
    MessageIdImpl msgId1 = new MessageIdImpl(0, 0, 0);
    MessageIdImpl msgId2 = new MessageIdImpl(1, 1, 1);

    assertEquals(batchMsgId1.hashCode(), msgId1.hashCode());
    assertEquals(batchMsgId2.hashCode(), msgId2.hashCode());
}
/**
 * Forwards only ChangeStreamMutation records downstream; all other
 * ChangeStreamRecord kinds are silently dropped.
 *
 * @param changeStreamRecordKV keyed change stream record from upstream
 * @param receiver output receiver for mutation records
 */
@ProcessElement
public void processElement(
    @Element KV<ByteString, ChangeStreamRecord> changeStreamRecordKV,
    OutputReceiver<KV<ByteString, ChangeStreamMutation>> receiver) {
    final ChangeStreamRecord record = changeStreamRecordKV.getValue();
    if (!(record instanceof ChangeStreamMutation)) {
        // Non-mutation records must never reach users.
        return;
    }
    receiver.output(KV.of(changeStreamRecordKV.getKey(), (ChangeStreamMutation) record));
}
// CloseStream records should never reach users: the DoFn must drop them
// instead of forwarding them downstream.
@Test
public void shouldOutputCloseStreams() {
    // This shouldn't happen but if it were to we wouldn't want the CloseStreams to be returned to
    // users
    CloseStream closeStream = mock(CloseStream.class);
    doFn.processElement(KV.of(ByteString.copyFromUtf8("test"), closeStream), outputReceiver);

    verify(outputReceiver, never()).output(any());
}
/**
 * FEEL time(string) builtin: parses the literal and returns the most specific
 * temporal value it can — OffsetTime for offset-zoned input, LocalTime for
 * zone-less input, or a ZoneTime wrapper for zone-id input.
 *
 * @param val the FEEL time literal; must not be null
 * @return the parsed temporal, or an error result for null/unparseable input
 */
public FEELFnResult<TemporalAccessor> invoke(@ParameterName("from") String val) {
    if ( val == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
    }
    try {
        TemporalAccessor parsed = FEEL_TIME.parse(val);

        if (parsed.query(TemporalQueries.offset()) != null) {
            // it is an offset-zoned time, so I can know for certain an OffsetTime
            OffsetTime asOffSetTime = parsed.query(OffsetTime::from);
            return FEELFnResult.ofResult(asOffSetTime);
        } else if (parsed.query(TemporalQueries.zone()) == null) {
            // if it does not contain any zone information at all, then I know for certain is a local time.
            LocalTime asLocalTime = parsed.query(LocalTime::from);
            return FEELFnResult.ofResult(asLocalTime);
        } else if (parsed.query(TemporalQueries.zone()) != null) {
            // Zone-id (region) time: remember whether seconds were written so the
            // value can be re-serialized in the same form later.
            boolean hasSeconds = timeStringWithSeconds(val);
            LocalTime asLocalTime = parsed.query(LocalTime::from);
            ZoneId zoneId = parsed.query(TemporalQueries.zone());
            ZoneTime zoneTime = ZoneTime.of(asLocalTime, zoneId, hasSeconds);
            return FEELFnResult.ofResult(zoneTime);
        }

        // NOTE(review): appears unreachable — the branches above cover both
        // zone()==null and zone()!=null — kept as a defensive fallback.
        return FEELFnResult.ofResult(parsed);
    } catch (DateTimeException e) {
        return manageDateTimeException(e, val);
    }
}
// Passing a date where a time is expected yields midnight UTC — presumably the
// FEEL time(date) conversion rule; exercises the TemporalAccessor overload.
@Test
void invokeTemporalAccessorParamDate() {
    FunctionTestUtil.assertResult(timeFunction.invoke(LocalDate.of(2017, 6, 12)),
                                  OffsetTime.of(0, 0, 0, 0, ZoneOffset.UTC));
}
/**
 * Tags the span with Elasticsearch specifics before the exchange is sent:
 * db.system, the target index name (when present in the endpoint URI), and
 * the cluster name with scheme and options stripped.
 *
 * @param span the span being decorated
 * @param exchange the current Camel exchange
 * @param endpoint the Elasticsearch endpoint
 */
@Override
public void pre(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
    super.pre(span, exchange, endpoint);

    span.setTag(TagConstants.DB_SYSTEM, ELASTICSEARCH_DB_TYPE);

    final Map<String, String> parameters = toQueryParameters(endpoint.getEndpointUri());
    if (parameters.containsKey("indexName")) {
        span.setTag(TagConstants.DB_NAME, parameters.get("indexName"));
    }

    span.setTag(ELASTICSEARCH_CLUSTER_TAG, stripSchemeAndOptions(endpoint));
}
// pre() must tag db.system, the index name from the endpoint URI, and the
// cluster (scheme and query options stripped); no server address is set.
@Test
public void testPre() {
    String indexName = "twitter";
    String cluster = "local";
    Endpoint endpoint = Mockito.mock(Endpoint.class);
    Exchange exchange = Mockito.mock(Exchange.class);
    Message message = Mockito.mock(Message.class);

    Mockito.when(endpoint.getEndpointUri()).thenReturn("elasticsearch://" + cluster
        + "?operation=INDEX&indexName=" + indexName + "&indexType=tweet");
    Mockito.when(exchange.getIn()).thenReturn(message);

    SpanDecorator decorator = new ElasticsearchSpanDecorator();

    MockSpanAdapter span = new MockSpanAdapter();

    decorator.pre(span, exchange, endpoint);

    assertEquals(ElasticsearchSpanDecorator.ELASTICSEARCH_DB_TYPE, span.tags().get(TagConstants.DB_SYSTEM));
    assertEquals(indexName, span.tags().get(TagConstants.DB_NAME));
    assertEquals(cluster, span.tags().get(ElasticsearchSpanDecorator.ELASTICSEARCH_CLUSTER_TAG));
    assertNull(span.tags().get(TagConstants.SERVER_ADDRESS));
}
/**
 * Compares every field of the current document type against the next one and
 * returns a refeed action for each field whose data type changed.
 *
 * @return the refeed actions required by field type changes, possibly empty
 */
public List<VespaConfigChangeAction> validate() {
    return currentDocType.getAllFields().stream()
            .map(field -> createFieldChange(field, nextDocType))
            // Only valid comparisons where the type actually differs matter.
            .filter(change -> change.valid() && change.changedType())
            .map(change -> VespaRefeedAction.of(
                    id,
                    ValidationId.fieldTypeChange,
                    new ChangeMessageBuilder(change.fieldName())
                            .addChange("data type", change.currentTypeName(), change.nextTypeName())
                            .build()))
            .collect(Collectors.toList());
}
// Changing a reference field's target document type requires a refeed action
// describing the data-type change.
@Test
void requireThatChangingTargetTypeOfReferenceFieldIsNotOK() {
    var validator = new DocumentTypeChangeValidator(ClusterSpec.Id.from("test"),
            createDocumentTypeWithReferenceField("oldDoc"),
            createDocumentTypeWithReferenceField("newDoc"));
    List<VespaConfigChangeAction> result = validator.validate();
    assertEquals(1, result.size());
    VespaConfigChangeAction action = result.get(0);
    assertTrue(action instanceof VespaRefeedAction);
    assertEquals(
            "type='refeed', "
            + "message='Field 'ref' changed: data type: 'Reference<oldDoc>' -> 'Reference<newDoc>'', "
            + "services=[], documentType=''",
            action.toString());
}
public static byte[] parseHigh2Low6Bytes(byte b) { return new byte[] { (byte) ((b >> 6)), // 右移6位,只取前2bit的值 (byte) ((b & 0x3f)) // 只取后面6bit的值,前面两位补0 }; }
// 117 = 0b01110101: high 2 bits are 01 (=1), low 6 bits are 110101 (=53).
@Test
public void parseHigh2Low6Bytes() {
    byte b = 117; // = 1*64 + 53
    byte[] bs = CodecUtils.parseHigh2Low6Bytes(b);
    Assert.assertEquals(bs[0], 1);
    Assert.assertEquals(bs[1], 53);
}
/**
 * gRPC handler that reserves one of the caller-supplied username hashes.
 * Validates list size and per-hash length, rate-limits per account, then asks
 * the AccountsManager for a reservation; "no hashes available" is reported as
 * a typed error response rather than a gRPC status error.
 *
 * @param request the reservation request carrying candidate hashes
 * @return the reservation response, or a response carrying a typed error
 */
@Override
public Mono<ReserveUsernameHashResponse> reserveUsernameHash(final ReserveUsernameHashRequest request) {
    final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice();

    // Reject empty and oversized hash lists up front.
    if (request.getUsernameHashesCount() == 0) {
        throw Status.INVALID_ARGUMENT
            .withDescription("List of username hashes must not be empty")
            .asRuntimeException();
    }

    if (request.getUsernameHashesCount() > AccountController.MAXIMUM_USERNAME_HASHES_LIST_LENGTH) {
        throw Status.INVALID_ARGUMENT
            .withDescription(String.format("List of username hashes may have at most %d elements, but actually had %d",
                AccountController.MAXIMUM_USERNAME_HASHES_LIST_LENGTH, request.getUsernameHashesCount()))
            .asRuntimeException();
    }

    // Every candidate hash must have the exact expected length.
    final List<byte[]> usernameHashes = new ArrayList<>(request.getUsernameHashesCount());

    for (final ByteString usernameHash : request.getUsernameHashesList()) {
        if (usernameHash.size() != AccountController.USERNAME_HASH_LENGTH) {
            throw Status.INVALID_ARGUMENT
                .withDescription(String.format("Username hash length must be %d bytes, but was actually %d",
                    AccountController.USERNAME_HASH_LENGTH, usernameHash.size()))
                .asRuntimeException();
        }

        usernameHashes.add(usernameHash.toByteArray());
    }

    // Rate-limit, load the account, reserve, then map the outcome to a response.
    return rateLimiters.getUsernameReserveLimiter().validateReactive(authenticatedDevice.accountIdentifier())
        .then(Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier())))
        .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException))
        .flatMap(account -> Mono.fromFuture(() -> accountsManager.reserveUsernameHash(account, usernameHashes)))
        .map(reservation -> ReserveUsernameHashResponse.newBuilder()
            .setUsernameHash(ByteString.copyFrom(reservation.reservedUsernameHash()))
            .build())
        // None of the candidates were available: typed error, not a gRPC error.
        .onErrorReturn(UsernameHashNotAvailableException.class, ReserveUsernameHashResponse.newBuilder()
            .setError(ReserveUsernameHashError.newBuilder()
                .setErrorType(ReserveUsernameHashErrorType.RESERVE_USERNAME_HASH_ERROR_TYPE_NO_HASHES_AVAILABLE)
                .build())
            .build());
}
// Happy path: a valid hash is reserved and echoed back in the response.
@Test
void reserveUsernameHash() {
    final Account account = mock(Account.class);
    when(accountsManager.getByAccountIdentifierAsync(AUTHENTICATED_ACI))
        .thenReturn(CompletableFuture.completedFuture(Optional.of(account)));

    final byte[] usernameHash = TestRandomUtil.nextBytes(AccountController.USERNAME_HASH_LENGTH);

    // Stub the reservation to echo the first requested hash back.
    when(accountsManager.reserveUsernameHash(any(), any()))
        .thenAnswer(invocation -> {
            final List<byte[]> usernameHashes = invocation.getArgument(1);
            return CompletableFuture.completedFuture(
                new AccountsManager.UsernameReservation(invocation.getArgument(0), usernameHashes.get(0)));
        });

    final ReserveUsernameHashResponse expectedResponse = ReserveUsernameHashResponse.newBuilder()
        .setUsernameHash(ByteString.copyFrom(usernameHash))
        .build();

    assertEquals(expectedResponse, authenticatedServiceStub().reserveUsernameHash(ReserveUsernameHashRequest.newBuilder()
        .addUsernameHashes(ByteString.copyFrom(usernameHash))
        .build()));
}
protected void replaceFiles(List<MappedFile> mappedFileList, TopicPartitionLog current, TopicPartitionLog newLog) { MappedFileQueue dest = current.getLog(); MappedFileQueue src = newLog.getLog(); long beginTime = System.nanoTime(); // List<String> fileNameToReplace = mappedFileList.stream() // .map(m -> m.getFile().getName()) // .collect(Collectors.toList()); List<String> fileNameToReplace = dest.getMappedFiles().stream() .filter(mappedFileList::contains) .map(mf -> mf.getFile().getName()) .collect(Collectors.toList()); mappedFileList.forEach(MappedFile::renameToDelete); src.getMappedFiles().forEach(mappedFile -> { try { mappedFile.flush(0); mappedFile.moveToParent(); } catch (IOException e) { log.error("move file {} to parent directory exception: ", mappedFile.getFileName()); } }); dest.getMappedFiles().stream() .filter(m -> !mappedFileList.contains(m)) .forEach(m -> src.getMappedFiles().add(m)); readMessageLock.lock(); try { mappedFileList.forEach(mappedFile -> mappedFile.destroy(1000)); dest.getMappedFiles().clear(); dest.getMappedFiles().addAll(src.getMappedFiles()); src.getMappedFiles().clear(); replaceCqFiles(getCQ(), newLog.getCQ(), fileNameToReplace); log.info("replace file elapsed {} milliseconds", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - beginTime)); } finally { readMessageLock.unlock(); } }
// replaceFiles with an empty replace list: the compacted source file must be
// moved into the destination queue and lose its "compacting" sub-folder path.
@Test
public void testReplaceFiles() throws IOException, IllegalAccessException {
    // File rename/move semantics differ on Windows; skip there.
    Assume.assumeFalse(MixAll.isWindows());
    CompactionLog clog = mock(CompactionLog.class);
    doCallRealMethod().when(clog).replaceFiles(anyList(),
        any(CompactionLog.TopicPartitionLog.class), any(CompactionLog.TopicPartitionLog.class));
    doCallRealMethod().when(clog).replaceCqFiles(any(SparseConsumeQueue.class),
        any(SparseConsumeQueue.class), anyList());

    CompactionLog.TopicPartitionLog dest = mock(CompactionLog.TopicPartitionLog.class);
    MappedFileQueue destMFQ = mock(MappedFileQueue.class);
    when(dest.getLog()).thenReturn(destMFQ);
    List<MappedFile> destFiles = Lists.newArrayList();
    when(destMFQ.getMappedFiles()).thenReturn(destFiles);

    // One real mapped file living under the compacting sub-folder.
    List<MappedFile> srcFiles = Lists.newArrayList();
    String fileName = logPath + File.separator + COMPACTING_SUB_FOLDER + File.separator + String.format("%010d", 0);
    MappedFile mf = new DefaultMappedFile(fileName, 1024);
    srcFiles.add(mf);
    MappedFileQueue srcMFQ = mock(MappedFileQueue.class);
    when(srcMFQ.getMappedFiles()).thenReturn(srcFiles);
    CompactionLog.TopicPartitionLog src = mock(CompactionLog.TopicPartitionLog.class);
    when(src.getLog()).thenReturn(srcMFQ);

    // The real replaceFiles needs the lock field the mock does not initialize.
    FieldUtils.writeField(clog, "readMessageLock", new PutMessageSpinLock(), true);

    clog.replaceFiles(Lists.newArrayList(), dest, src);
    assertEquals(destFiles.size(), 1);
    destFiles.forEach(f -> {
        assertFalse(f.getFileName().contains(COMPACTING_SUB_FOLDER));
    });
}
/**
 * Computes the scheduler's increment-allocation Resource. Per-resource-type
 * "value [unit]" properties are parsed first; typed memory/vcores entries
 * override the legacy increment settings (with a warning), otherwise the
 * legacy properties (or their defaults) are used.
 *
 * @return the increment allocation as a Resource
 * @throws IllegalArgumentException if a property is not in "value [unit]" form
 */
public Resource getIncrementAllocation() {
    Long memory = null;
    Integer vCores = null;
    Map<String, Long> others = new HashMap<>();
    ResourceInformation[] resourceTypes = ResourceUtils.getResourceTypesArray();
    // Collect per-resource-type increments declared as "<value> [unit]".
    for (int i=0; i < resourceTypes.length; ++i) {
        String name = resourceTypes[i].getName();
        String propertyKey = getAllocationIncrementPropKey(name);
        String propValue = get(propertyKey);
        if (propValue != null) {
            Matcher matcher = RESOURCE_REQUEST_VALUE_PATTERN.matcher(propValue);
            if (matcher.matches()) {
                long value = Long.parseLong(matcher.group(1));
                String unit = matcher.group(2);
                long valueInDefaultUnits = getValueInDefaultUnits(value, unit, name);
                others.put(name, valueInDefaultUnits);
            } else {
                throw new IllegalArgumentException("Property " + propertyKey
                    + " is not in \"value [unit]\" format: " + propValue);
            }
        }
    }
    // The typed memory property overrides the legacy increment setting.
    if (others.containsKey(ResourceInformation.MEMORY_MB.getName())) {
        memory = others.get(ResourceInformation.MEMORY_MB.getName());
        if (get(RM_SCHEDULER_INCREMENT_ALLOCATION_MB) != null) {
            String overridingKey = getAllocationIncrementPropKey(
                ResourceInformation.MEMORY_MB.getName());
            LOG.warn("Configuration " + overridingKey + "=" + get(overridingKey)
                + " is overriding the " + RM_SCHEDULER_INCREMENT_ALLOCATION_MB
                + "=" + get(RM_SCHEDULER_INCREMENT_ALLOCATION_MB) + " property");
        }
        others.remove(ResourceInformation.MEMORY_MB.getName());
    } else {
        memory = getLong(
            RM_SCHEDULER_INCREMENT_ALLOCATION_MB,
            DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB);
    }
    // Same override rule for vcores.
    if (others.containsKey(ResourceInformation.VCORES.getName())) {
        vCores = others.get(ResourceInformation.VCORES.getName()).intValue();
        if (get(RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES) != null) {
            String overridingKey = getAllocationIncrementPropKey(
                ResourceInformation.VCORES.getName());
            LOG.warn("Configuration " + overridingKey + "=" + get(overridingKey)
                + " is overriding the " + RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES
                + "=" + get(RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES) + " property");
        }
        others.remove(ResourceInformation.VCORES.getName());
    } else {
        vCores = getInt(
            RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES,
            DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES);
    }
    return Resource.newInstance(memory, vCores, others);
}
// An increment-allocation value with an unknown unit ("Xi") must be rejected.
@Test(expected=IllegalArgumentException.class)
public void testAllocationIncrementInvalidUnit() throws Exception {
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.RESOURCE_TYPES + "."
        + ResourceInformation.MEMORY_MB.getName()
        + FairSchedulerConfiguration.INCREMENT_ALLOCATION, "1 Xi");
    new FairSchedulerConfiguration(conf).getIncrementAllocation();
}
/**
 * Dispatches single-argument method calls on the wrapped Joda formatter:
 * withZone, parseDateTime, parseMillis, forPattern and print. Any other
 * method, argument count, or unsupported argument type is rejected.
 *
 * @param methodName the method to invoke
 * @param args the argument list; exactly one element is supported
 * @return the wrapped result of the underlying formatter call
 * @throws UnsupportedOperationException for unsupported method/arg combinations
 */
@Override
public SelType call(String methodName, SelType[] args) {
    if (args.length == 1) {
        if ("withZone".equals(methodName)) {
            return new SelJodaDateTimeFormatter(
                val.withZone(((SelJodaDateTimeZone) args[0]).getInternalVal()));
        } else if ("parseDateTime".equals(methodName)) {
            // Only STRING/LONG args parse; other types fall through to the error below.
            switch (args[0].type()) {
                case STRING:
                case LONG:
                    return SelJodaDateTime.of(val.parseDateTime(args[0].toString()));
            }
        } else if ("parseMillis".equals(methodName)) {
            return SelLong.of(val.parseMillis(((SelString) args[0]).getInternalVal()));
        } else if ("forPattern".equals(methodName)) {
            return new SelJodaDateTimeFormatter(
                DateTimeFormat.forPattern(((SelString) args[0]).getInternalVal()));
        } else if ("print".equals(methodName)) {
            // print accepts either an epoch millis LONG or a wrapped DATETIME.
            switch (args[0].type()) {
                case LONG:
                    return SelString.of(val.print(((SelLong) args[0]).longVal()));
                case DATETIME:
                    return SelString.of(val.print(((SelJodaDateTime) args[0]).getInternalVal()));
            }
        }
    }
    throw new UnsupportedOperationException(
        type() + " DO NOT support calling method: " + methodName + " with args: " + Arrays.toString(args));
}
// withZone with a non-zone argument (NULL) must fail with ClassCastException
// from the unchecked cast inside call().
@Test(expected = ClassCastException.class)
public void testInvalidCallArg() {
    one.call("withZone", new SelType[] {SelType.NULL});
}
/**
 * Converts an array of Spark filters into a single Iceberg expression by
 * AND-ing the individual conversions together.
 *
 * @param filters the Spark filters to convert
 * @return the conjunction of all converted filters (alwaysTrue for empty input)
 * @throws IllegalArgumentException if any filter has no Iceberg equivalent
 */
public static Expression convert(Filter[] filters) {
    Expression combined = Expressions.alwaysTrue();
    for (Filter filter : filters) {
        Expression converted = convert(filter);
        // A null conversion means the filter has no Iceberg counterpart.
        Preconditions.checkArgument(converted != null, "Cannot convert filter to Iceberg: %s", filter);
        combined = Expressions.and(combined, converted);
    }
    return combined;
}
// Instant and Timestamp filter values must both convert to the same Iceberg
// expression as a raw epoch-microseconds literal.
@Test
public void testTimestampFilterConversion() {
    Instant instant = Instant.parse("2018-10-18T00:00:57.907Z");
    Timestamp timestamp = Timestamp.from(instant);
    long epochMicros = ChronoUnit.MICROS.between(Instant.EPOCH, instant);

    Expression instantExpression = SparkFilters.convert(GreaterThan.apply("x", instant));
    Expression timestampExpression = SparkFilters.convert(GreaterThan.apply("x", timestamp));
    Expression rawExpression = Expressions.greaterThan("x", epochMicros);

    Assert.assertEquals(
        "Generated Timestamp expression should be correct",
        rawExpression.toString(),
        timestampExpression.toString());
    Assert.assertEquals(
        "Generated Instant expression should be correct",
        rawExpression.toString(),
        instantExpression.toString());
}
/**
 * Fetches Hive partitions by name via the metastore.
 *
 * @param table the Hive metastore-backed table
 * @param partitionNames partition names (e.g. "col1=1") to fetch
 * @return a map from partition name to partition metadata
 */
public Map<String, Partition> getPartitionByNames(Table table, List<String> partitionNames) {
    final HiveMetaStoreTable hmsTable = (HiveMetaStoreTable) table;
    return metastore.getPartitionsByNames(hmsTable.getDbName(), hmsTable.getTableName(), partitionNames);
}
// Partition lookup by partition keys must map "col1=N" names to partitions
// carrying the expected format, size parameter and full path.
@Test
public void testGetPartitionByNames() throws AnalysisException {
    com.starrocks.catalog.Table table = hmsOps.getTable("db1", "table1");
    HiveTable hiveTable = (HiveTable) table;

    PartitionKey hivePartitionKey1 = PartitionUtil.createPartitionKey(
        Lists.newArrayList("1"), hiveTable.getPartitionColumns());
    PartitionKey hivePartitionKey2 = PartitionUtil.createPartitionKey(
        Lists.newArrayList("2"), hiveTable.getPartitionColumns());

    Map<String, Partition> partitions = hmsOps.getPartitionByPartitionKeys(hiveTable,
        Lists.newArrayList(hivePartitionKey1, hivePartitionKey2));

    Partition partition1 = partitions.get("col1=1");
    Assert.assertEquals(ORC, partition1.getFileFormat());
    Assert.assertEquals("100", partition1.getParameters().get(TOTAL_SIZE));
    Assert.assertEquals("hdfs://127.0.0.1:10000/hive.db/hive_tbl/col1=1", partition1.getFullPath());

    Partition partition2 = partitions.get("col1=2");
    Assert.assertEquals(ORC, partition2.getFileFormat());
    Assert.assertEquals("100", partition2.getParameters().get(TOTAL_SIZE));
    Assert.assertEquals("hdfs://127.0.0.1:10000/hive.db/hive_tbl/col1=2", partition2.getFullPath());
}