focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public void createRole( IRole newRole ) throws KettleException { ensureHasPermissions(); ProxyPentahoRole role = UserRoleHelper.convertToPentahoProxyRole( newRole ); try { ProxyPentahoRole[] existingRoles = userRoleWebService.getRoles(); if ( existsAmong( existingRoles, role ) ) { throw roleExistsException(); } } catch ( UserRoleException e ) { throw cannotCreateRoleException( newRole, e ); } try { userRoleWebService.createRole( role ); userRoleWebService.setUsers( role, UserRoleHelper.convertToPentahoProxyUsers( newRole.getUsers() ) ); lookupCache.insertRoleToLookupSet( newRole ); fireUserRoleListChange(); } catch ( UserRoleException e ) { throw cannotCreateRoleException( newRole, e ); } catch ( Exception e ) { // it is the only way to determine AlreadyExistsException if ( e.getCause().toString().contains( "org.pentaho.platform.api.engine.security.userroledao.AlreadyExistsException" ) ) { throw roleExistsException(); } } }
// Test: a role with a unique name is forwarded to the role web service for creation.
@Test public void createRole_CreatesSuccessfully_WhenNameIsUnique() throws Exception { final String name = "role"; delegate.createRole( new EERoleInfo( name ) ); verify( roleWebService ).createRole( any( ProxyPentahoRole.class ) ); }
/**
 * Walks the cause chain of {@code source} (including {@code source} itself) and returns the
 * first throwable assignable to any of the given exception classes, or {@code null} if none
 * matches or {@code source} is {@code null}.
 *
 * @param source  throwable whose cause chain is searched; may be {@code null}
 * @param clazzes candidate exception types, checked in the order given at each chain level
 * @return the first matching throwable, cast to {@link Exception}, or {@code null}
 */
@SafeVarargs // FIX: generic varargs is read-only here; suppresses heap-pollution warning at call sites
public static Exception lookupExceptionInCause(Throwable source, Class<? extends Exception>... clazzes) {
    while (source != null) {
        for (Class<? extends Exception> clazz : clazzes) {
            if (clazz.isAssignableFrom(source.getClass())) {
                return (Exception) source;
            }
        }
        source = source.getCause();
    }
    return null;
}
// Test: when the cause chain contains only an IOException, searching for RuntimeException yields null.
@Test void givenNotWantedCause_whenLookupExceptionInCause_thenReturnNull() { final Exception cause = new IOException(); assertThat(ExceptionUtil.lookupExceptionInCause(new Exception(cause), RuntimeException.class)).isNull(); }
// Handles the SHH_GET_MESSAGES operation: reads the filter id from the message header
// (falling back to the configured default), issues the web3j request, and on success
// replaces the message body with the returned whisper messages.
@InvokeOnHeader(Web3jConstants.SHH_GET_MESSAGES) void shhGetMessages(Message message) throws IOException { BigInteger filterId = message.getHeader(Web3jConstants.FILTER_ID, configuration::getFilterId, BigInteger.class); Request<?, ShhMessages> request = web3j.shhGetMessages(filterId); setRequestId(message, request); ShhMessages response = request.send(); boolean hasError = checkForError(message, response); if (!hasError) { message.setBody(response.getMessages()); } }
// Test: a successful shhGetMessages call copies the (empty) message list into the exchange body.
@Test public void shhGetMessagesTest() throws Exception { ShhMessages response = Mockito.mock(ShhMessages.class); Mockito.when(mockWeb3j.shhGetMessages(any())).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Mockito.when(response.getMessages()).thenReturn(Collections.EMPTY_LIST); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.SHH_GET_MESSAGES); template.send(exchange); List body = exchange.getIn().getBody(List.class); assertTrue(body.isEmpty()); }
/**
 * Initializes the sink task: when no file is configured the task writes to stdout,
 * otherwise it opens (creating/appending) the configured file as a UTF-8 PrintStream.
 */
@Override
public void start(Map<String, String> props) {
    AbstractConfig parsedConfig = new AbstractConfig(FileStreamSinkConnector.CONFIG_DEF, props);
    filename = parsedConfig.getString(FileStreamSinkConnector.FILE_CONFIG);
    boolean useStdout = filename == null || filename.isEmpty();
    if (useStdout) {
        outputStream = System.out;
        return;
    }
    try {
        OutputStream fileOut = Files.newOutputStream(
                Paths.get(filename), StandardOpenOption.CREATE, StandardOpenOption.APPEND);
        // autoFlush=false; encoding pinned to UTF-8 regardless of platform default
        outputStream = new PrintStream(fileOut, false, StandardCharsets.UTF_8.name());
    } catch (IOException e) {
        throw new ConnectException("Couldn't find or create file '" + filename + "' for FileStreamSinkTask", e);
    }
}
// Test: records put before and after flushes are appended to the output file in order,
// verified by reading the file back line by line while more records arrive.
@Test public void testStart() throws IOException { task = new FileStreamSinkTask(); Map<String, String> props = new HashMap<>(); props.put(FileStreamSinkConnector.FILE_CONFIG, outputFile); task.start(props); HashMap<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>(); task.put(Collections.singletonList( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line0", 1) )); offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L)); task.flush(offsets); int numLines = 3; String[] lines = new String[numLines]; int i = 0; try (BufferedReader reader = Files.newBufferedReader(Paths.get(outputFile))) { lines[i++] = reader.readLine(); task.put(Arrays.asList( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 2), new SinkRecord("topic2", 0, null, null, Schema.STRING_SCHEMA, "line2", 1) )); offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(2L)); offsets.put(new TopicPartition("topic2", 0), new OffsetAndMetadata(1L)); task.flush(offsets); lines[i++] = reader.readLine(); lines[i++] = reader.readLine(); } while (--i >= 0) { assertEquals("line" + i, lines[i]); } }
/**
 * Loads a private key from the given file, delegating to the three-arg overload with
 * BouncyCastle-style fallback parsing enabled (the trailing {@code true}).
 *
 * @param keyFile     key file to read
 * @param keyPassword password for encrypted keys, or {@code null} for unencrypted keys
 */
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeySpecException, InvalidAlgorithmParameterException, KeyException, IOException { return toPrivateKey(keyFile, keyPassword, true); }
// Test: a PKCS#1 AES-encrypted RSA key resource decrypts successfully with the known password.
@Test public void testPkcs1AesEncryptedRsa() throws Exception { PrivateKey key = SslContext.toPrivateKey(new File(getClass().getResource("rsa_pkcs1_aes_encrypted.key") .getFile()), "example"); assertNotNull(key); }
// Convenience overload: converts without a schema-update consumer (no schema evolution recorded).
Record convert(Object data) { return convert(data, null); }
// Test: converting a struct whose fields are wider (long/double) than the table schema
// (int/float) records type-widening updates for both fields.
@Test public void testEvolveTypeDetectionStruct() { org.apache.iceberg.Schema tableSchema = new org.apache.iceberg.Schema( NestedField.required(1, "ii", IntegerType.get()), NestedField.required(2, "ff", FloatType.get())); Table table = mock(Table.class); when(table.schema()).thenReturn(tableSchema); RecordConverter converter = new RecordConverter(table, config); Schema valueSchema = SchemaBuilder.struct().field("ii", Schema.INT64_SCHEMA).field("ff", Schema.FLOAT64_SCHEMA); Struct data = new Struct(valueSchema).put("ii", 11L).put("ff", 22d); SchemaUpdate.Consumer consumer = new SchemaUpdate.Consumer(); converter.convert(data, consumer); Collection<UpdateType> updates = consumer.updateTypes(); assertThat(updates).hasSize(2); Map<String, UpdateType> updateMap = Maps.newHashMap(); updates.forEach(update -> updateMap.put(update.name(), update)); assertThat(updateMap.get("ii").type()).isInstanceOf(LongType.class); assertThat(updateMap.get("ff").type()).isInstanceOf(DoubleType.class); }
/**
 * Maps each logical column to the index of its corresponding physical field and returns
 * the indices in the order of {@code logicalColumns}. Delegates the per-column lookup to
 * the stream-based overload, then flattens the resulting map into an int array.
 *
 * @param logicalColumns columns of the logical (table) schema, in output order
 * @param physicalType   physical row type to resolve field positions against
 * @param nameRemapping  translates logical field names to physical ones before lookup
 */
public static int[] computePhysicalIndices( List<TableColumn> logicalColumns, DataType physicalType, Function<String, String> nameRemapping) { Map<TableColumn, Integer> physicalIndexLookup = computePhysicalIndices(logicalColumns.stream(), physicalType, nameRemapping); return logicalColumns.stream().mapToInt(physicalIndexLookup::get).toArray(); }
// Test: logical columns (f1:BIGINT, f0:STRING) resolve by position against a legacy
// tuple (STRING, LONG), giving physical indices {1, 0}.
@Test void testFieldMappingLegacyCompositeType() { int[] indices = TypeMappingUtils.computePhysicalIndices( TableSchema.builder() .field("f1", DataTypes.BIGINT()) .field("f0", DataTypes.STRING()) .build() .getTableColumns(), TypeConversions.fromLegacyInfoToDataType( Types.TUPLE(Types.STRING, Types.LONG)), Function.identity()); assertThat(indices).isEqualTo(new int[] {1, 0}); }
/*
 * Verifies a DistCp copy: first compares source/target lengths (mismatch -> IOException),
 * then — unless the file is empty or skipCrc is set — compares checksums. On checksum
 * mismatch it builds a detailed error message, appending hints when the filesystems differ
 * or block sizes differ (both of which can legitimately change checksums), and suggests
 * COMPOSITE_CRC or -skipcrccheck as workarounds before throwing.
 * NOTE(review): the trailing string literal appears to be wrapped across lines in this
 * extract; the original source presumably has it on one line — confirm before reuse.
 */
public static void compareFileLengthsAndChecksums(long srcLen, FileSystem sourceFS, Path source, FileChecksum sourceChecksum, FileSystem targetFS, Path target, boolean skipCrc, long targetLen) throws IOException { if (srcLen != targetLen) { throw new IOException( DistCpConstants.LENGTH_MISMATCH_ERROR_MSG + source + " (" + srcLen + ") and target:" + target + " (" + targetLen + ")"); } //At this point, src & dest lengths are same. if length==0, we skip checksum if ((srcLen != 0) && (!skipCrc)) { CopyMapper.ChecksumComparison checksumComparison = checksumsAreEqual(sourceFS, source, sourceChecksum, targetFS, target, srcLen); // If Checksum comparison is false set it to false, else set to true. boolean checksumResult = !checksumComparison.equals(CopyMapper.ChecksumComparison.FALSE); if (!checksumResult) { StringBuilder errorMessage = new StringBuilder(DistCpConstants.CHECKSUM_MISMATCH_ERROR_MSG) .append(source).append(" and ").append(target).append("."); boolean addSkipHint = false; String srcScheme = sourceFS.getScheme(); String targetScheme = targetFS.getScheme(); if (!srcScheme.equals(targetScheme)) { // the filesystems are different and they aren't both hdfs connectors errorMessage.append("Source and destination filesystems are of" + " different types\n") .append("Their checksum algorithms may be incompatible"); addSkipHint = true; } else if (sourceFS.getFileStatus(source).getBlockSize() != targetFS.getFileStatus(target).getBlockSize()) { errorMessage.append(" Source and target differ in block-size.\n") .append(" Use -pb to preserve block-sizes during copy."); addSkipHint = true; } if (addSkipHint) { errorMessage .append(" You can choose file-level checksum validation via " + "-Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes" + " or filesystems are different.") .append(" Or you can skip checksum-checks altogether " + " with -skipcrccheck.\n") .append(" (NOTE: By skipping checksums, one runs the risk of " + "masking data-corruption during 
file-transfer.)\n"); } throw new IOException(errorMessage.toString()); } } }
// Test: covers empty files (pass), length mismatch (throws), matching checksums with and
// without an explicit source checksum (pass), mismatched checksums (throws), and
// mismatched checksums with skipCrc=true (pass).
@Test public void testCompareFileLengthsAndChecksums() throws Throwable { String base = "/tmp/verify-checksum/"; long srcSeed = System.currentTimeMillis(); long dstSeed = srcSeed + rand.nextLong(); short replFactor = 2; FileSystem fs = FileSystem.get(config); Path basePath = new Path(base); fs.mkdirs(basePath); // empty lengths comparison Path srcWithLen0 = new Path(base + "srcLen0"); Path dstWithLen0 = new Path(base + "dstLen0"); fs.create(srcWithLen0).close(); fs.create(dstWithLen0).close(); DistCpUtils.compareFileLengthsAndChecksums(0, fs, srcWithLen0, null, fs, dstWithLen0, false, 0); // different lengths comparison Path srcWithLen1 = new Path(base + "srcLen1"); Path dstWithLen2 = new Path(base + "dstLen2"); DFSTestUtil.createFile(fs, srcWithLen1, 1, replFactor, srcSeed); DFSTestUtil.createFile(fs, dstWithLen2, 2, replFactor, srcSeed); intercept(IOException.class, DistCpConstants.LENGTH_MISMATCH_ERROR_MSG, () -> DistCpUtils.compareFileLengthsAndChecksums(1, fs, srcWithLen1, null, fs, dstWithLen2, false, 2)); // checksums matched Path srcWithChecksum1 = new Path(base + "srcChecksum1"); Path dstWithChecksum1 = new Path(base + "dstChecksum1"); DFSTestUtil.createFile(fs, srcWithChecksum1, 1024, replFactor, srcSeed); DFSTestUtil.createFile(fs, dstWithChecksum1, 1024, replFactor, srcSeed); DistCpUtils.compareFileLengthsAndChecksums(1024, fs, srcWithChecksum1, null, fs, dstWithChecksum1, false, 1024); DistCpUtils.compareFileLengthsAndChecksums(1024, fs, srcWithChecksum1, fs.getFileChecksum(srcWithChecksum1), fs, dstWithChecksum1, false, 1024); // checksums mismatched Path dstWithChecksum2 = new Path(base + "dstChecksum2"); DFSTestUtil.createFile(fs, dstWithChecksum2, 1024, replFactor, dstSeed); intercept(IOException.class, DistCpConstants.CHECKSUM_MISMATCH_ERROR_MSG, () -> DistCpUtils.compareFileLengthsAndChecksums(1024, fs, srcWithChecksum1, null, fs, dstWithChecksum2, false, 1024)); // checksums mismatched but skipped DistCpUtils.compareFileLengthsAndChecksums(1024, 
fs, srcWithChecksum1, null, fs, dstWithChecksum2, true, 1024); }
/**
 * Asserts that the subject's {@code size()} equals {@code expectedSize}.
 * Rejects negative expectations up front with an IllegalArgumentException.
 */
public final void hasSize(int expectedSize) { checkArgument(expectedSize >= 0, "expectedSize(%s) must be >= 0", expectedSize); check("size()").that(checkNotNull(actual).size()).isEqualTo(expectedSize); }
// Test: a single-cell table has size 1.
@Test public void hasSize() { assertThat(ImmutableTable.of(1, 2, 3)).hasSize(1); }
/** GET /ConfigurationCenter — returns the config-center info; delegates to the service layer. */
@GetMapping("/ConfigurationCenter") public Result<ConfigServerInfo> getConfigurationCenter() { return configService.getConfigurationCenter(); }
// Test: the controller returns a successful result whose fields match the configured constants.
@Test public void getConfigurationCenter() { Result<ConfigServerInfo> result = configController.getConfigurationCenter(); Assert.assertTrue(result.isSuccess()); Assert.assertNotNull(result.getData()); ConfigServerInfo configServerInfo = result.getData(); Assert.assertEquals(configServerInfo.getServerAddress(), ADDRESS); Assert.assertEquals(configServerInfo.getUserName(), USERNAME); Assert.assertEquals(configServerInfo.getDynamicConfigType(), DYNAMIC_CONFIG_TYPE); }
/** Maps keys to hosts for the given service; delegates with no sticky-key (null) argument. */
@Override public <K> HostToKeyMapper<K> mapKeysV3(URI serviceUri, Collection<K> keys, int limitNumHostsPerPartition) throws ServiceUnavailableException { return getHostToKeyMapper(serviceUri, keys, limitNumHostsPerPartition, null); }
// Test: mapKeysV3 distributes a fixed key set over hosts for each ring factory variant.
@Test(dataProvider = "ringFactories") public void testMapKeysV3(RingFactory<URI> ringFactory) throws URISyntaxException, ServiceUnavailableException { URI serviceURI = new URI("d2://articles"); ConsistentHashKeyMapper mapper = getConsistentHashKeyMapper(ringFactory); List<Integer> keys = Arrays.asList(1, 2, 3, 4, 9, 10, 13, 15, 16); HostToKeyMapper<Integer> result = mapper.mapKeysV3(serviceURI, keys, 2); verifyHostToMapperWithKeys(result); }
/** Scans every method of the initialized bean for recurring-job annotations via the callback; returns the bean unchanged. */
@Override public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback); return bean; }
// Test: ${...} placeholders in @Recurring id/cron/zone attributes are resolved from
// Spring properties before the job is scheduled.
@Test void beansWithMethodsAnnotatedWithRecurringAnnotationContainingPropertyPlaceholdersWillBeResolved() { new ApplicationContextRunner() .withBean(RecurringJobPostProcessor.class) .withBean(JobScheduler.class, () -> jobScheduler) .withPropertyValues("my-job.id=my-recurring-job") .withPropertyValues("my-job.cron=0 0/15 * * *") .withPropertyValues("my-job.zone-id=Asia/Taipei") .run(context -> { context.getBean(RecurringJobPostProcessor.class) .postProcessAfterInitialization(new MyServiceWithRecurringAnnotationContainingPropertyPlaceholder(), "not important"); verify(jobScheduler).scheduleRecurrently(eq("my-recurring-job"), any(JobDetails.class), eq(CronExpression.create("0 0/15 * * *")), eq(ZoneId.of("Asia/Taipei"))); }); }
/** Adapts the key-less ValueMapper to a ValueMapperWithKey and delegates to the keyed overload. */
@Override public <VR> KStream<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> valueMapper) { return mapValues(withKey(valueMapper)); }
// Test: passing a null ValueMapperWithKey throws an NPE with the documented message.
@Test public void shouldNotAllowNullMapperOnMapValuesWithKey() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.mapValues((ValueMapperWithKey<Object, Object, Object>) null)); assertThat(exception.getMessage(), equalTo("valueMapperWithKey can't be null")); }
/**
 * Builds a TLS SSLContext that authenticates with the client certificate carried by the
 * given EC JWK. Requires the key to carry a parsed X.509 chain (first cert is used for the
 * key manager); trust is initialized from the JVM's default trust store.
 *
 * @throws IllegalArgumentException if the key has no certificate chain
 * @throws IllegalStateException    if key material or SSL initialization fails
 */
@NonNull public static SSLContext fromClientCertificate(@NonNull ECKey ecKey) { // see also: // https://connect2id.com/products/nimbus-oauth-openid-connect-sdk/examples/utils/custom-key-store if (ecKey.getParsedX509CertChain() == null || ecKey.getParsedX509CertChain().isEmpty()) { throw new IllegalArgumentException( "client key is missing certificate, kid: " + ecKey.getKeyID()); } try { var ctx = SSLContext.getInstance("TLS"); var tmf = TrustManagerFactory.getInstance("PKIX"); // Using null here initialises with the default trust store. tmf.init((KeyStore) null); ctx.init( keyManagerOf(ecKey.getParsedX509CertChain().get(0), ecKey.toPrivateKey()), tmf.getTrustManagers(), null); return ctx; } catch (JOSEException | GeneralSecurityException e) { throw new IllegalStateException("failed to initialize SSL context", e); } }
// Test: a freshly generated key without an X.509 chain is rejected with IllegalArgumentException.
@Test void fromClientCertificate_noX509() throws Exception { var key = new ECKeyGenerator(Curve.P_256) .keyUse(KeyUse.SIGNATURE) .keyIDFromThumbprint(true) .generate(); assertThrows(IllegalArgumentException.class, () -> TlsContext.fromClientCertificate(key)); }
/** Adapter count: one entry per popup keyboard. */
@Override public int getCount() { return mPopupKeyboards.length; }
// Test: the adapter count matches the number of enabled quick keys.
@Test public void testGetCount() throws Exception { Assert.assertEquals(mOrderedEnabledQuickKeys.size(), mUnderTest.getCount()); }
/**
 * Formats the entire input source and writes the result to {@code output}.
 * Currently reads the whole input into memory rather than streaming.
 */
public void formatSource(CharSource input, CharSink output) throws FormatterException, IOException { // TODO(cushon): proper support for streaming input/output. Input may // not be feasible (parsing) but output should be easier. output.write(formatSource(input.read())); }
// Test: an over-long trailing line comment is wrapped onto a continuation comment line.
@Test public void wrapLineComment() throws Exception { assertThat( new Formatter() .formatSource( "class T {\n" + " public static void main(String[] args) { // one long incredibly" + " unbroken sentence moving from topic to topic so that no-one had a" + " chance to interrupt;\n" + " }\n" + "}\n")) .isEqualTo( "class T {\n" + " public static void main(\n" + " String[]\n" + " args) { // one long incredibly unbroken sentence moving" + " from topic to topic so that no-one\n" + " // had a chance to interrupt;\n" + " }\n" + "}\n"); }
/** Pairwise distance matrix with the default (non-squared) metric; delegates to pdist(x, squared). */
public static Matrix pdist(int[][] x) { return pdist(x, false); }
// Test: every entry of the pdist matrix equals the pairwise Euclidean distance of the rows.
@Test public void testPdist() { double[][] data = { {-2.1968219, -0.9559913, -0.0431738, 1.0567679, 0.3853515}, {-1.7781325, -0.6659839, 0.9526148, -0.9460919, -0.3925300}, {-3.9749544, -1.6219752, 0.9094410, 0.1106760, -0.0071785} }; Matrix d = MathEx.pdist(data); for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { assertEquals(MathEx.distance(data[i], data[j]), d.get(i, j), 1E-10); } } }
/** Decodes an ABI-encoded function result into typed values; delegates to the shared decoder. */
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) { return decoder.decodeFunctionResult(rawInput, outputParameters); }
@Test public void testDecodeTupleOfStaticArrays() { List outputParameters = new ArrayList<TypeReference<Type>>(); outputParameters.addAll( Arrays.asList( new TypeReference<StaticArray4<Utf8String>>() {}, new TypeReference<StaticArray4<Uint256>>() {})); // tuple of (strings string[4]{"", "", "", ""}, ints int[4]{0, 0, 0, 0}) String rawInput = "0x" + "00000000000000000000000000000000000000000000000000000000000000a0" // strings array offset + "0000000000000000000000000000000000000000000000000000000000000000" // ints[0] + "0000000000000000000000000000000000000000000000000000000000000000" // ints[1] + "0000000000000000000000000000000000000000000000000000000000000000" // ints[2] + "0000000000000000000000000000000000000000000000000000000000000000" // ints[3] + "0000000000000000000000000000000000000000000000000000000000000080" // offset strings[0] + "00000000000000000000000000000000000000000000000000000000000000a0" // offset strings[1] + "00000000000000000000000000000000000000000000000000000000000000c0" // offset strings[2] + "00000000000000000000000000000000000000000000000000000000000000e0" // offset strings[3] + "0000000000000000000000000000000000000000000000000000000000000000" // strings[0] + "0000000000000000000000000000000000000000000000000000000000000000" // strings[1] + "0000000000000000000000000000000000000000000000000000000000000000" // strings[2] + "0000000000000000000000000000000000000000000000000000000000000000"; // strings[3] assertEquals( FunctionReturnDecoder.decode(rawInput, outputParameters), Arrays.asList( new StaticArray4( Utf8String.class, new Utf8String(""), new Utf8String(""), new Utf8String(""), new Utf8String("")), new StaticArray4( Uint256.class, new Uint256(0), new Uint256(0), new Uint256(0), new Uint256(0)))); }
/**
 * Skip filter: rejects downloads whose local target file already exists;
 * everything else is delegated to the parent filter.
 */
@Override public boolean accept(final Path file, final Local local, final TransferStatus parent) throws BackgroundException { if(local.isFile()) { if(local.exists()) { if(log.isInfoEnabled()) { log.info(String.format("Skip file %s", file)); } return false; } } return super.accept(file, local, parent); }
// Test: accept returns true when the local file is absent and false when it already exists.
@Test public void testAccept() throws Exception { SkipFilter f = new SkipFilter(new DisabledDownloadSymlinkResolver(), new NullSession(new Host(new TestProtocol()))); assertTrue(f.accept(new Path("a", EnumSet.of(Path.Type.file)) { }, new NullLocal("a", "b") { @Override public boolean exists() { return false; } }, new TransferStatus().exists(true) ) ); assertFalse(f.accept(new Path("a", EnumSet.of(Path.Type.file)) { }, new NullLocal("a", "b") { @Override public boolean exists() { return true; } }, new TransferStatus().exists(true) ) ); }
/**
 * Polls the WID-document status and drives the RDA session state machine:
 * NO_DOCUMENTS/PENDING/NOK short-circuit with their respective responses; any other
 * status means documents were received, so an RDA scan session is started and its
 * confirm-secret/url/session-id/expiration are persisted on the app session.
 * Remote log codes: 867=documents received, 873=RDA session start failed, 868=scanning started.
 */
@Override public AppResponse process(Flow flow, AppRequest params) { var result = digidClient.getWidstatus(appSession.getWidRequestId()); switch(result.get("status").toString()){ case "NO_DOCUMENTS": appSession.setRdaSessionStatus("NO_DOCUMENTS"); appSession.setBrpIdentifier(result.get("brp_identifier").toString()); appSessionService.save(appSession); return new StatusResponse("NO_DOCUMENTS"); case "PENDING": setValid(false); // Do not progress to next state return new StatusResponse("PENDING"); case "NOK": return new NokResponse(); } digidClient.remoteLog("867", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true)); appSession.setRdaSessionStatus("DOCUMENTS_RECEIVED"); Map<String, String> rdaSession = rdaClient.startSession(returnUrl + "/iapi/rda/confirm", appSession.getId(), params.getIpAddress(), result.get("travel_documents"), result.get("driving_licences")); if (rdaSession.isEmpty()) { digidClient.remoteLog("873", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true)); return new NokResponse(); } appSession.setConfirmSecret(rdaSession.get("confirmSecret")); appSession.setUrl(rdaSession.get("url")); appSession.setRdaSessionId(rdaSession.get("sessionId")); appSession.setRdaSessionTimeoutInSeconds(rdaSession.get("expiration")); appSession.setRdaSessionStatus("SCANNING"); digidClient.remoteLog("868", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true)); return new RdaResponse(appSession.getUrl(), appSession.getRdaSessionId()); }
// Test: an invalid WID-status response yields a NOK status response.
@Test void processWidstatusInvalid(){ when(digidClientMock.getWidstatus(mockedAppSession.getWidRequestId())).thenReturn(invalidDigidClientResponse); AppResponse appResponse = rdaPolling.process(mockedFlow, mockedAbstractAppRequest); assertEquals("NOK", ((StatusResponse)appResponse).getStatus()); }
/** Sets the parsing strictness; rejects {@code null}. */
public final void setStrictness(Strictness strictness) { Objects.requireNonNull(strictness); this.strictness = strictness; }
// Test: strict mode rejects unescaped NUL, TAB and U+001F inside string literals,
// each with the documented control-character error message.
@Test public void testStrictModeFailsToParseUnescapedControlCharacter() { String json = "\"\0\""; JsonReader reader = new JsonReader(reader(json)); reader.setStrictness(Strictness.STRICT); IOException expected = assertThrows(IOException.class, reader::nextString); assertThat(expected) .hasMessageThat() .startsWith( "Unescaped control characters (\\u0000-\\u001F) are not allowed in strict mode"); json = "\"\t\""; reader = new JsonReader(reader(json)); reader.setStrictness(Strictness.STRICT); expected = assertThrows(IOException.class, reader::nextString); assertThat(expected) .hasMessageThat() .startsWith( "Unescaped control characters (\\u0000-\\u001F) are not allowed in strict mode"); json = "\"\u001F\""; reader = new JsonReader(reader(json)); reader.setStrictness(Strictness.STRICT); expected = assertThrows(IOException.class, reader::nextString); assertThat(expected) .hasMessageThat() .startsWith( "Unescaped control characters (\\u0000-\\u001F) are not allowed in strict mode"); }
/**
 * Copies the remaining bytes of the buffer into a new array; a null buffer yields an
 * empty array. Note: consumes the buffer (advances its position to the limit).
 */
public static byte[] getSketchFromByteBuffer(@Nullable ByteBuffer bf) {
    if (bf == null) {
        return new byte[0];
    }
    byte[] sketch = new byte[bf.remaining()];
    bf.get(sketch);
    return sketch;
}
// Test: a null buffer maps to the canonical empty sketch.
@Test public void testGetSketchFromByteBufferForEmptySketch() { assertArrayEquals(HllCount.getSketchFromByteBuffer(null), EMPTY_SKETCH); }
/**
 * REST handler: resumes the host named in the path. Maps orchestrator failures to
 * REST errors — unknown host -> 404, timeout and state-change denial -> specific
 * RestApiExceptions carrying the denial reason.
 */
private UpdateHostResponse resume(RestApi.RequestContext context) { String hostNameString = context.pathParameters().getStringOrThrow("hostname"); HostName hostName = new HostName(hostNameString); try { orchestrator.resume(hostName); } catch (HostNameNotFoundException e) { log.log(Level.FINE, () -> "Host not found: " + hostName); throw new RestApiException.NotFound(e); } catch (UncheckedTimeoutException e) { log.log(Level.FINE, () -> "Failed to resume " + hostName + ": " + e.getMessage()); throw restApiExceptionFromTimeout("resume", hostName, e); } catch (HostStateChangeDeniedException e) { log.log(Level.FINE, () -> "Failed to resume " + hostName + ": " + e.getMessage()); throw restApiExceptionWithDenialReason("resume", hostName, e); } return new UpdateHostResponse(hostName.s(), null); }
// Test: an orchestrator timeout surfaces as HTTP 409 with a deadline-constraint JSON body.
@Test void throws_409_on_timeout() throws HostNameNotFoundException, HostStateChangeDeniedException, IOException { Orchestrator orchestrator = mock(Orchestrator.class); doThrow(new UncheckedTimeoutException("Timeout Message")).when(orchestrator).resume(any(HostName.class)); RestApiTestDriver testDriver = createTestDriver(orchestrator); HttpResponse httpResponse = executeRequest(testDriver, Method.DELETE, "/orchestrator/v1/hosts/hostname/suspended", null); assertEquals(409, httpResponse.getStatus()); ByteArrayOutputStream out = new ByteArrayOutputStream(); httpResponse.render(out); JsonTestHelper.assertJsonEquals(""" { "hostname" : "hostname", "reason" : { "constraint" : "deadline", "message" : "resume failed: Timeout Message" } }""", out.toString()); }
/** Returns the default step parameters after preprocessing (e.g. resolution/normalization by preprocessParams). */
public Map<String, ParamDefinition> getDefaultStepParams() { return preprocessParams(defaultStepParams); }
// Test: default step params are non-empty and contain a named workflow_instance_id entry.
@Test public void testValidDefaultStepParams() { assertFalse(defaultParamManager.getDefaultStepParams().isEmpty()); assertNotNull(defaultParamManager.getDefaultStepParams().get("workflow_instance_id").getName()); }
/** Resolves the scope from the lifecycle provider; delegates with checkEndBoundary=true. */
public static <@NonNull E> CompletableSource resolveScopeFromLifecycle( final LifecycleScopeProvider<E> provider) throws OutsideScopeException { return resolveScopeFromLifecycle(provider, true); }
// Test: an error on the lifecycle stream before any element propagates to the scope observer.
@Test public void resolveScopeFromLifecycle_error_noFirstElement() { PublishSubject<Integer> lifecycle = PublishSubject.create(); TestObserver<?> o = testSource(resolveScopeFromLifecycle(lifecycle, 3)); // Now we error RuntimeException expected = new RuntimeException("Expected"); lifecycle.onError(expected); o.assertError(expected); }
/** Delegates to the wrapped range set's string form. */
@Override public String toString() { return rangeSet.toString(); }
// Test: toString reflects open-closed range additions, including merging of adjacent ranges.
@Test public void testToString() { set = new RangeSetWrapper<>(consumer, reverseConvert, managedCursor); set.addOpenClosed(0, 97, 0, 99); assertEquals(set.toString(), "[(0:97..0:99]]"); set.addOpenClosed(0, 98, 0, 105); assertEquals(set.toString(), "[(0:97..0:105]]"); set.addOpenClosed(0, 5, 0, 75); assertEquals(set.toString(), "[(0:5..0:75],(0:97..0:105]]"); }
/**
 * Returns the field types for the requested streams, restricted to streams the
 * searching user may read (or all readable streams when the request set is empty).
 * Time range defaults to all-time when absent.
 */
@POST @ApiOperation(value = "Retrieve the field list of a given set of streams") @NoAuditEvent("This is not changing any data") public Set<MappedFieldTypeDTO> byStreams(@ApiParam(name = "JSON body", required = true) @Valid @NotNull FieldTypesForStreamsRequest request, @Context SearchUser searchUser) { final ImmutableSet<String> streams = searchUser.streams().readableOrAllIfEmpty(request.streams()); return mappedFieldTypesService.fieldTypesByStreamIds(streams, request.timerange().orElse(RelativeRange.allTime())); }
// Test: only the permitted stream ids reach the field-types service (verified by echoing
// each stream id back as a field name).
@Test public void byStreamChecksPermissionsForStream() { final SearchUser searchUser = TestSearchUser.builder() .allowStream("2323") .allowStream("4242") .build(); final FieldTypesForStreamsRequest req = FieldTypesForStreamsRequest.Builder.builder() .streams(ImmutableSet.of("2323", "4242")) .build(); final MappedFieldTypesService fieldTypesService = (streamIds, timeRange) -> { // for each streamID return a field that's called exactly like the streamID return streamIds.stream() .map(streamID -> MappedFieldTypeDTO.create(streamID, FieldTypes.Type.builder().type("text").build())) .collect(Collectors.toSet()); }; final FieldTypesResource resource = new FieldTypesResource(fieldTypesService, mock(IndexFieldTypePollerPeriodical.class)); final Set<MappedFieldTypeDTO> fields = resource.byStreams(req, searchUser); assertThat(fields) .hasSize(2) .extracting(MappedFieldTypeDTO::name) .containsOnly("2323", "4242"); }
/**
 * Logs a REPLAY_NEW_LEADERSHIP_TERM event into the ring buffer using try-claim/commit.
 * The claim may fail (index <= 0), in which case the event is dropped silently — this is
 * the standard best-effort behavior for agent event logging; commit happens in a finally
 * block so a claimed slot is always released even if encoding throws.
 */
public void logOnReplayNewLeadershipTermEvent( final int memberId, final boolean isInElection, final long leadershipTermId, final long logPosition, final long timestamp, final long termBaseLogPosition, final TimeUnit timeUnit, final int appVersion) { final int length = replayNewLeadershipTermEventLength(timeUnit); final int captureLength = captureLength(length); final int encodedLength = encodedLength(captureLength); final ManyToOneRingBuffer ringBuffer = this.ringBuffer; final int index = ringBuffer.tryClaim(REPLAY_NEW_LEADERSHIP_TERM.toEventCodeId(), encodedLength); if (index > 0) { try { encodeOnReplayNewLeadershipTermEvent( (UnsafeBuffer)ringBuffer.buffer(), index, captureLength, length, memberId, isInElection, leadershipTermId, logPosition, timestamp, termBaseLogPosition, timeUnit, appVersion); } finally { ringBuffer.commit(index); } } }
// Test: verifies the binary layout of the logged event field by field (longs, ints, the
// election flag byte, then the ASCII time-unit name) and the dissector's rendered message.
@Test void logReplayNewLeadershipTerm() { final int offset = ALIGNMENT * 4; logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset); final int memberId = 982374; final boolean isInElection = true; final long leadershipTermId = 1233L; final long logPosition = 988723465L; final long timestamp = 890723452345L; final long termBaseLogPosition = logPosition - 32; final TimeUnit timeUnit = NANOSECONDS; final int appVersion = 13; logger.logOnReplayNewLeadershipTermEvent( memberId, isInElection, leadershipTermId, logPosition, timestamp, termBaseLogPosition, TimeUnit.NANOSECONDS, appVersion); final int length = replayNewLeadershipTermEventLength(timeUnit); verifyLogHeader(logBuffer, offset, REPLAY_NEW_LEADERSHIP_TERM.toEventCodeId(), length, length); int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH; assertEquals(leadershipTermId, logBuffer.getLong(index, LITTLE_ENDIAN)); index += SIZE_OF_LONG; assertEquals(logPosition, logBuffer.getLong(index, LITTLE_ENDIAN)); index += SIZE_OF_LONG; assertEquals(timestamp, logBuffer.getLong(index, LITTLE_ENDIAN)); index += SIZE_OF_LONG; assertEquals(termBaseLogPosition, logBuffer.getLong(index, LITTLE_ENDIAN)); index += SIZE_OF_LONG; assertEquals(memberId, logBuffer.getInt(index, LITTLE_ENDIAN)); index += SIZE_OF_INT; assertEquals(appVersion, logBuffer.getInt(index, LITTLE_ENDIAN)); index += SIZE_OF_INT; assertEquals(isInElection, 0 != logBuffer.getByte(index)); index += SIZE_OF_BYTE; assertEquals(timeUnit.name(), logBuffer.getStringAscii(index, LITTLE_ENDIAN)); final StringBuilder sb = new StringBuilder(); ClusterEventDissector.dissectReplayNewLeadershipTerm( REPLAY_NEW_LEADERSHIP_TERM, logBuffer, encodedMsgOffset(offset), sb); final String expectedMessagePattern = "\\[[0-9]+\\.[0-9]+] CLUSTER: REPLAY_NEW_LEADERSHIP_TERM " + "\\[56/56]: memberId=982374 isInElection=true leadershipTermId=1233 logPosition=988723465 " + "termBaseLogPosition=988723433 appVersion=13 timestamp=890723452345 timeUnit=NANOSECONDS"; 
assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern)); }
/**
 * Idempotent migration: converts every legacy dashboard into a View+Search pair (sorted by
 * dashboard id for deterministic ordering), persists the new views, and finally records a
 * MigrationCompleted marker holding the dashboard-id and widget-id mappings so reruns are no-ops.
 */
@Override public void upgrade() { if (hasBeenRunSuccessfully()) { LOG.debug("Migration already completed."); return; } final Set<String> dashboardIdToViewId = new HashSet<>(); final Consumer<String> recordMigratedDashboardIds = dashboardIdToViewId::add; final Map<String, Set<String>> widgetIdMigrationMapping = new HashMap<>(); final Consumer<Map<String, Set<String>>> recordMigratedWidgetIds = widgetIdMigrationMapping::putAll; final Map<View, Search> newViews = this.dashboardsService.streamAll() .sorted(Comparator.comparing(Dashboard::id)) .map(dashboard -> migrateDashboard(dashboard, recordMigratedDashboardIds, recordMigratedWidgetIds)) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); writeViews(newViews); final MigrationCompleted migrationCompleted = MigrationCompleted.create(dashboardIdToViewId, widgetIdMigrationMapping); writeMigrationCompleted(migrationCompleted); }
@Test @MongoDBFixtures("dashboard_with_missing_quickvalues_attributes.json") public void migratesADashboardWithMissingQuickValuesAttributes() { this.migration.upgrade(); final MigrationCompleted migrationCompleted = captureMigrationCompleted(); assertThat(migrationCompleted.migratedDashboardIds()).containsExactly("5ddf8ed5b2d44b2e04472992"); assertThat(migrationCompleted.widgetMigrationIds()).hasSize(6); final ArgumentCaptor<View> viewCaptor = ArgumentCaptor.forClass(View.class); verify(viewService, times(1)).save(viewCaptor.capture()); verify(searchService, times(1)).save(any()); final Set<ViewWidget> widgets = viewCaptor.getValue().state().get("0000016e-b690-427d-0000-016eb690426f").widgets(); final Function<String, Set<ViewWidget>> findNewWidgets = (String widgetId) -> { final Set<String> newWidgetIds = migrationCompleted.widgetMigrationIds().get(widgetId); return widgets.stream().filter(widget -> newWidgetIds.contains(widget.id())).collect(Collectors.toSet()); }; final List<ViewWidget> widgetWithoutAttributes = new ArrayList<>(findNewWidgets.apply("4ce93e89-4771-4ce2-8b59-6dc058cbfd3b")); assertThat(widgetWithoutAttributes).hasSize(1); assertThat(((AggregationWidget) widgetWithoutAttributes.get(0)).config().visualization()).isEqualTo("table"); final List<ViewWidget> widgetWithOnlyShowPieChartIsFalse = new ArrayList<>(findNewWidgets.apply("5c12c588-be0c-436b-b999-ee18378efd45")); assertThat(widgetWithOnlyShowPieChartIsFalse).hasSize(1); assertThat(((AggregationWidget) widgetWithOnlyShowPieChartIsFalse.get(0)).config().visualization()).isEqualTo("table"); final List<ViewWidget> widgetWithOnlyShowDataTableIsFalse = new ArrayList<>(findNewWidgets.apply("e6a16d9a-23c0-4b7f-93b5-d790b5d64672")); assertThat(widgetWithOnlyShowDataTableIsFalse).hasSize(1); assertThat(((AggregationWidget) widgetWithOnlyShowDataTableIsFalse.get(0)).config().visualization()).isEqualTo("table"); final List<ViewWidget> widgetWithBothAttributesPresentButFalse = new 
ArrayList<>(findNewWidgets.apply("568c005a-11ec-4be9-acd7-b2aa509c07e0")); assertThat(widgetWithBothAttributesPresentButFalse).hasSize(1); assertThat(((AggregationWidget) widgetWithBothAttributesPresentButFalse.get(0)).config().visualization()).isEqualTo("table"); final List<ViewWidget> widgetWithPieChartPresentAndTrue = new ArrayList<>(findNewWidgets.apply("2e3c5e76-bbfd-4ac3-a27b-7491a5cbf59a")); assertThat(widgetWithPieChartPresentAndTrue).hasSize(1); assertThat(((AggregationWidget) widgetWithPieChartPresentAndTrue.get(0)).config().visualization()).isEqualTo("pie"); final List<ViewWidget> widgetWithBothPieChartAndDataTablePresentAndTrue = new ArrayList<>(findNewWidgets.apply("26a0a3e1-718f-4bfe-90a2-cb441390152d")); assertThat(widgetWithBothPieChartAndDataTablePresentAndTrue).hasSize(2); assertThat(widgetWithBothPieChartAndDataTablePresentAndTrue) .extracting(viewWidget -> ((AggregationWidget) viewWidget).config().visualization()) .containsExactlyInAnyOrder("table", "pie"); }
@Override public Mono<byte[]> readPublicKey() { return Mono.just(keyPair) .map(KeyPair::getPublic) .map(PublicKey::getEncoded); }
@Test void shouldDecryptMessageCorrectly() { final String message = "halo"; var mono = service.readPublicKey() .map(pubKeyBytes -> { var pubKeySpec = new X509EncodedKeySpec(pubKeyBytes); try { var keyFactory = KeyFactory.getInstance(RsaKeyService.ALGORITHM); var pubKey = keyFactory.generatePublic(pubKeySpec); var cipher = Cipher.getInstance(RsaKeyService.TRANSFORMATION); cipher.init(Cipher.ENCRYPT_MODE, pubKey); return cipher.doFinal(message.getBytes()); } catch (NoSuchAlgorithmException | InvalidKeySpecException | NoSuchPaddingException | InvalidKeyException | IllegalBlockSizeException | BadPaddingException e) { throw Exceptions.propagate(e); } }) .flatMap(service::decrypt) .map(String::new); StepVerifier.create(mono) .expectNext(message) .verifyComplete(); }
@Override public void delete(NotificationDto nativeEntity) { notificationResourceHandler.delete(nativeEntity.id()); }
@Test @MongoDBFixtures("NotificationFacadeTest.json") public void delete() { long countBefore = notificationService.streamAll().count(); assertThat(countBefore).isEqualTo(1); final Optional<NotificationDto> notificationDto = notificationService.get( "5d4d33753d27460ad18e0c4d"); assertThat(notificationDto).isPresent(); facade.delete(notificationDto.get()); long countAfter = notificationService.streamAll().count(); assertThat(countAfter).isEqualTo(0); }
@JsonIgnore void resetIterationDetail( long iterationId, WorkflowInstance.Status newStatus, WorkflowInstance.Status oldStatus) { Checks.checkTrue( info.get(oldStatus) != null, "Invalid: the restarted iteration [%s]'s status [%s] is missing in the foreach details", iterationId, oldStatus); Interval deleted = null; Iterator<Interval> iter = info.get(oldStatus).iterator(); while (iter.hasNext()) { Interval interval = iter.next(); if (iterationId >= interval.start && iterationId <= interval.end) { deleted = interval; iter.remove(); break; } } Checks.checkTrue( deleted != null, "Invalid: the restarted iteration [%s] is missing in the foreach details", iterationId); if (deleted.start < iterationId) { info.get(oldStatus).add(new Interval(deleted.start, iterationId - 1)); } if (iterationId < deleted.end) { info.get(oldStatus).add(new Interval(iterationId + 1, deleted.end)); } if (info.get(oldStatus).isEmpty()) { info.remove(oldStatus); } add(iterationId, newStatus); }
@Test public void testResetIterationDetail() throws Exception { TestDetails testDetails = loadObject("fixtures/instances/sample-foreach-details.json", TestDetails.class); testDetails.test1.resetIterationDetail( 8, WorkflowInstance.Status.CREATED, WorkflowInstance.Status.FAILED); assertEquals( Collections.singletonList(new ForeachDetails.Interval(8L, 8L)), testDetails.test1.getPendingInfo().get(WorkflowInstance.Status.CREATED)); testDetails.test1.resetIterationDetail( 9, WorkflowInstance.Status.CREATED, WorkflowInstance.Status.FAILED); assertEquals( Collections.singletonList(new ForeachDetails.Interval(11L, 15L)), testDetails.test1.getInfo().get(WorkflowInstance.Status.FAILED)); }
/**
 * Computes the next (intermediate) instance-state map for one segment while rebalancing from the
 * current assignment towards the target assignment, keeping at least {@code minAvailableReplicas}
 * replicas on instances that already host the segment.
 *
 * <p>Segments sharing the same (current, target) instance-set pair reuse the cached decision in
 * {@code assignmentMap} so that replica-group mirroring is preserved. {@code numSegmentsToOffloadMap}
 * biases which instances are kept/added (least segments to offload first). With {@code lowDiskMode},
 * new target instances are only added once no extra current instances remain to be dropped.
 */
@VisibleForTesting
static SingleSegmentAssignment getNextSingleSegmentAssignment(Map<String, String> currentInstanceStateMap,
    Map<String, String> targetInstanceStateMap, int minAvailableReplicas, boolean lowDiskMode,
    Map<String, Integer> numSegmentsToOffloadMap, Map<Pair<Set<String>, Set<String>>, Set<String>> assignmentMap) {
  Map<String, String> nextInstanceStateMap = new TreeMap<>();

  // Assign the segment the same way as other segments if the current and target instances are the same. We need this
  // to guarantee the mirror servers for replica-group based routing strategies.
  Set<String> currentInstances = currentInstanceStateMap.keySet();
  Set<String> targetInstances = targetInstanceStateMap.keySet();
  Pair<Set<String>, Set<String>> assignmentKey = Pair.of(currentInstances, targetInstances);
  Set<String> instancesToAssign = assignmentMap.get(assignmentKey);
  if (instancesToAssign != null) {
    Set<String> availableInstances = new TreeSet<>();
    for (String instanceName : instancesToAssign) {
      String currentInstanceState = currentInstanceStateMap.get(instanceName);
      String targetInstanceState = targetInstanceStateMap.get(instanceName);
      if (currentInstanceState != null) {
        // Instance already hosts the segment, so it counts as available.
        availableInstances.add(instanceName);
        // Use target instance state if available in case the state changes
        nextInstanceStateMap.put(instanceName, targetInstanceState != null ? targetInstanceState : currentInstanceState);
      } else {
        nextInstanceStateMap.put(instanceName, targetInstanceState);
      }
    }
    return new SingleSegmentAssignment(nextInstanceStateMap, availableInstances);
  }

  // Add all the common instances
  // Use target instance state in case the state changes
  for (Map.Entry<String, String> entry : targetInstanceStateMap.entrySet()) {
    String instanceName = entry.getKey();
    if (currentInstanceStateMap.containsKey(instanceName)) {
      nextInstanceStateMap.put(instanceName, entry.getValue());
    }
  }

  // Add current instances until the min available replicas achieved
  int numInstancesToKeep = minAvailableReplicas - nextInstanceStateMap.size();
  if (numInstancesToKeep > 0) {
    // Sort instances by number of segments to offload, and keep the ones with the least segments to offload
    List<Triple<String, String, Integer>> instancesInfo =
        getSortedInstancesOnNumSegmentsToOffload(currentInstanceStateMap, nextInstanceStateMap, numSegmentsToOffloadMap);
    numInstancesToKeep = Integer.min(numInstancesToKeep, instancesInfo.size());
    for (int i = 0; i < numInstancesToKeep; i++) {
      Triple<String, String, Integer> instanceInfo = instancesInfo.get(i);
      nextInstanceStateMap.put(instanceInfo.getLeft(), instanceInfo.getMiddle());
    }
  }
  // Available = everything assigned so far (common instances plus kept current instances).
  Set<String> availableInstances = new TreeSet<>(nextInstanceStateMap.keySet());

  // After achieving the min available replicas, when low disk mode is enabled, only add new instances when all
  // current instances exist in the next assignment.
  // We want to first drop the extra instances as one step, then add the target instances as another step to avoid the
  // case where segments are first added to the instance before other segments are dropped from the instance, which
  // might cause server running out of disk. Note that even if segment addition and drop happen in the same step,
  // there is no guarantee that server process the segment drop before the segment addition.
  if (!lowDiskMode || currentInstanceStateMap.size() == nextInstanceStateMap.size()) {
    int numInstancesToAdd = targetInstanceStateMap.size() - nextInstanceStateMap.size();
    if (numInstancesToAdd > 0) {
      // Sort instances by number of segments to offload, and add the ones with the least segments to offload
      List<Triple<String, String, Integer>> instancesInfo =
          getSortedInstancesOnNumSegmentsToOffload(targetInstanceStateMap, nextInstanceStateMap, numSegmentsToOffloadMap);
      for (int i = 0; i < numInstancesToAdd; i++) {
        Triple<String, String, Integer> instanceInfo = instancesInfo.get(i);
        nextInstanceStateMap.put(instanceInfo.getLeft(), instanceInfo.getMiddle());
      }
    }
  }
  // Cache the decision so segments with the same (current, target) pair get mirrored assignments.
  assignmentMap.put(assignmentKey, nextInstanceStateMap.keySet());
  return new SingleSegmentAssignment(nextInstanceStateMap, availableInstances);
}
// Downtime mode (minAvailableReplicas = 0): the very first assignment always jumps straight to the
// target assignment, regardless of common instances or replica-count changes.
@Test
public void testDowntimeMode() {
  // With common instance, first assignment should be the same as target assignment
  Map<String, String> currentInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2"), ONLINE);
  Map<String, String> targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host3"), ONLINE);
  TableRebalancer.SingleSegmentAssignment assignment =
      getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 0, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  // Only the shared host still serves the segment during the transition.
  assertEquals(assignment._availableInstances, Collections.singleton("host1"));

  // Without common instance, first assignment should be the same as target assignment
  targetInstanceStateMap = SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host3", "host4"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 0, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertTrue(assignment._availableInstances.isEmpty());

  // With increasing number of replicas, first assignment should be the same as target assignment
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host3", "host4", "host5"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 0, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertTrue(assignment._availableInstances.isEmpty());

  // With decreasing number of replicas, first assignment should be the same as target assignment
  currentInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3"), ONLINE);
  targetInstanceStateMap = SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host4", "host5"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 0, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertTrue(assignment._availableInstances.isEmpty());
}
public synchronized void write(final LocalCommand localCommand) throws IOException { if (writer == null) { throw new IOException("Write permission denied."); } final byte[] bytes = MAPPER.writeValueAsBytes(localCommand); writer.write(bytes); writer.write(NEW_LINE_SEPARATOR_BYTES); writer.flush(); }
@Test public void shouldWriteRecord() throws IOException { // When localCommandsFile.write(LOCAL_COMMAND1); localCommandsFile.write(LOCAL_COMMAND2); // Then final List<String> commands = Files.readAllLines(internalCommandsFile.toPath()); assertThat(commands.size(), is(2)); assertThat(commands.get(0), is("{\"@type\":\"transient_query\",\"queryApplicationId\":" + "\"_confluent-ksql-default_transient_932097300573686369_1606940079718\"}")); assertThat(commands.get(1), is("{\"@type\":\"transient_query\",\"queryApplicationId\":" + "\"_confluent-ksql-default_transient_123457300573686369_1606940012343\"}")); }
public static TxnOffsetCommitResponse parse(ByteBuffer buffer, short version) { return new TxnOffsetCommitResponse(new TxnOffsetCommitResponseData(new ByteBufferAccessor(buffer), version)); }
@Test @Override public void testParse() { TxnOffsetCommitResponseData data = new TxnOffsetCommitResponseData() .setThrottleTimeMs(throttleTimeMs) .setTopics(Arrays.asList( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic().setPartitions( Collections.singletonList(new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(partitionOne) .setErrorCode(errorOne.code()))), new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic().setPartitions( Collections.singletonList(new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(partitionTwo) .setErrorCode(errorTwo.code()))) )); for (short version : ApiKeys.TXN_OFFSET_COMMIT.allVersions()) { TxnOffsetCommitResponse response = TxnOffsetCommitResponse.parse( MessageUtil.toByteBuffer(data, version), version); assertEquals(expectedErrorCounts, response.errorCounts()); assertEquals(throttleTimeMs, response.throttleTimeMs()); assertEquals(version >= 1, response.shouldClientThrottle(version)); } }
@Override public void releaseAllReservations() { for (Long counter : reservedCapacityCountByTxId.values()) { nodeWideUsedCapacityCounter.add(-counter); } reservedCapacityCountByTxId.clear(); }
@Test public void releaseAllReservations() { UUID txnId = UuidUtil.newSecureUUID(); for (int i = 0; i < 11; i++) { counter.increment(txnId, false); } counter.releaseAllReservations(); Map<UUID, Long> countPerTxnId = counter.getReservedCapacityCountPerTxnId(); assertNull(countPerTxnId.get(txnId)); assertEquals(0L, nodeWideUsedCapacityCounter.currentValue()); }
@Override public PageResult<ApiAccessLogDO> getApiAccessLogPage(ApiAccessLogPageReqVO pageReqVO) { return apiAccessLogMapper.selectPage(pageReqVO); }
@Test public void testGetApiAccessLogPage() { ApiAccessLogDO apiAccessLogDO = randomPojo(ApiAccessLogDO.class, o -> { o.setUserId(2233L); o.setUserType(UserTypeEnum.ADMIN.getValue()); o.setApplicationName("yudao-test"); o.setRequestUrl("foo"); o.setBeginTime(buildTime(2021, 3, 13)); o.setDuration(1000); o.setResultCode(GlobalErrorCodeConstants.SUCCESS.getCode()); }); apiAccessLogMapper.insert(apiAccessLogDO); // 测试 userId 不匹配 apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setUserId(3344L))); // 测试 userType 不匹配 apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setUserType(UserTypeEnum.MEMBER.getValue()))); // 测试 applicationName 不匹配 apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setApplicationName("test"))); // 测试 requestUrl 不匹配 apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setRequestUrl("bar"))); // 测试 beginTime 不匹配:构造一个早期时间 2021-02-06 00:00:00 apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setBeginTime(buildTime(2021, 2, 6)))); // 测试 duration 不匹配 apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setDuration(100))); // 测试 resultCode 不匹配 apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setResultCode(2))); // 准备参数 ApiAccessLogPageReqVO reqVO = new ApiAccessLogPageReqVO(); reqVO.setUserId(2233L); reqVO.setUserType(UserTypeEnum.ADMIN.getValue()); reqVO.setApplicationName("yudao-test"); reqVO.setRequestUrl("foo"); reqVO.setBeginTime(buildBetweenTime(2021, 3, 13, 2021, 3, 13)); reqVO.setDuration(1000); reqVO.setResultCode(GlobalErrorCodeConstants.SUCCESS.getCode()); // 调用 PageResult<ApiAccessLogDO> pageResult = apiAccessLogService.getApiAccessLogPage(reqVO); // 断言,只查到了一条符合条件的 assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(apiAccessLogDO, pageResult.getList().get(0)); }
@CheckForNull @Override public Map<Path, Set<Integer>> branchChangedLines(String targetBranchName, Path projectBaseDir, Set<Path> changedFiles) { return branchChangedLinesWithFileMovementDetection(targetBranchName, projectBaseDir, toChangedFileByPathsMap(changedFiles)); }
@Test public void branchChangedLines_should_not_fail_if_there_is_no_merge_base() throws GitAPIException, IOException { createAndCommitFile("file-m1.xoo"); git.checkout().setOrphan(true).setName("b1").call(); createAndCommitFile("file-b1.xoo"); Map<Path, Set<Integer>> changedLines = newScmProvider().branchChangedLines("main", worktree, Collections.singleton(Paths.get(""))); assertThat(changedLines).isNull(); }
public static String encode(Object... elements) { final StringBuilder sb = new StringBuilder(); buildEnabled(sb, elements); return sb.toString(); }
@Test public void colorMappingTest(){ String text4 = "RGB:({},{},{})--4bit "; String text8 = "RGB:({},{},{})--8bit "; final AnsiColors ansiColors4Bit = new AnsiColors(AnsiColors.BitDepth.FOUR); final AnsiColors ansiColors8Bit = new AnsiColors(AnsiColors.BitDepth.EIGHT); int count = 0; int from = 100000; int until = 120000; for (int r = 0; r < 256; r++) { if (count>until)break; for (int g = 0; g < 256; g++) { if (count>until)break; for (int b = 0; b < 256; b++) { count++; if (count<from)continue; if (count>until)break; AnsiElement backElement4bit = ansiColors4Bit.findClosest(new Color(r,g,b)).toAnsiElement(ForeOrBack.BACK); AnsiElement backElement8bit = ansiColors8Bit.findClosest(new Color(r,g,b)).toAnsiElement(ForeOrBack.BACK); String encode4 = AnsiEncoder.encode( backElement4bit,text4); String encode8 = AnsiEncoder.encode( backElement8bit,text8); //Console.log(StrUtil.format(encode4,r,g,b)+StrUtil.format(encode8,r,g,b)); } } } }
public static String normalizeUri(String uri) throws URISyntaxException { // try to parse using the simpler and faster Camel URI parser String[] parts = CamelURIParser.fastParseUri(uri); if (parts != null) { // we optimized specially if an empty array is returned if (parts == URI_ALREADY_NORMALIZED) { return uri; } // use the faster and more simple normalizer return doFastNormalizeUri(parts); } else { // use the legacy normalizer as the uri is complex and may have unsafe URL characters return doComplexNormalizeUri(uri); } }
@Test public void testRawParameter() throws Exception { String out = URISupport.normalizeUri( "xmpp://camel-user@localhost:123/test-user@localhost?password=RAW(++?w0rd)&serviceName=some chat"); assertEquals("xmpp://camel-user@localhost:123/test-user@localhost?password=RAW(++?w0rd)&serviceName=some+chat", out); String out2 = URISupport.normalizeUri( "xmpp://camel-user@localhost:123/test-user@localhost?password=RAW(foo %% bar)&serviceName=some chat"); // Just make sure the RAW parameter can be resolved rightly, we need to replace the % into %25 assertEquals("xmpp://camel-user@localhost:123/test-user@localhost?password=RAW(foo %25%25 bar)&serviceName=some+chat", out2); }
public int validate( final ServiceContext serviceContext, final List<ParsedStatement> statements, final SessionProperties sessionProperties, final String sql ) { requireSandbox(serviceContext); final KsqlExecutionContext ctx = requireSandbox(snapshotSupplier.apply(serviceContext)); final Injector injector = injectorFactory.apply(ctx, serviceContext); final KsqlConfig ksqlConfig = ctx.getKsqlConfig(); int numPersistentQueries = 0; for (final ParsedStatement parsed : statements) { final PreparedStatement<?> prepared = ctx.prepare( parsed, (isVariableSubstitutionEnabled(sessionProperties, ksqlConfig) ? sessionProperties.getSessionVariables() : Collections.emptyMap()) ); final ConfiguredStatement<?> configured = ConfiguredStatement.of(prepared, SessionConfig.of(ksqlConfig, sessionProperties.getMutableScopedProperties()) ); final int currNumPersistentQueries = validate( serviceContext, configured, sessionProperties, ctx, injector ); numPersistentQueries += currNumPersistentQueries; if (currNumPersistentQueries > 0 && QueryCapacityUtil.exceedsPersistentQueryCapacity(ctx, ksqlConfig)) { QueryCapacityUtil.throwTooManyActivePersistentQueriesException(ctx, ksqlConfig, sql); } } return numPersistentQueries; }
@Test public void shouldCallStatementValidator() { // Given: givenRequestValidator( ImmutableMap.of(CreateStream.class, statementValidator) ); final List<ParsedStatement> statements = givenParsed(SOME_STREAM_SQL); // When: validator.validate(serviceContext, statements, sessionProperties, "sql"); // Then: verify(statementValidator, times(1)).validate( argThat(is(configured(preparedStatement(instanceOf(CreateStream.class))))), eq(sessionProperties), eq(executionContext), any() ); }
public static String padEnd(String str, int targetLength, char padString) { while (str.length() < targetLength) { str = str + padString; } return str; }
@Test public void padEnd_Test() { String binaryString = "010011"; String expected = "01001100"; Assertions.assertEquals(expected, TbUtils.padEnd(binaryString, 8, '0')); binaryString = "1001010011"; expected = "1001010011"; Assertions.assertEquals(expected, TbUtils.padEnd(binaryString, 8, '0')); binaryString = "1001010011"; expected = "1001010011******"; Assertions.assertEquals(expected, TbUtils.padEnd(binaryString, 16, '*')); String fullNumber = "203439900FFCD5581"; String last4Digits = fullNumber.substring(0, 11); expected = "203439900FF******"; Assertions.assertEquals(expected, TbUtils.padEnd(last4Digits, fullNumber.length(), '*')); }
@Override protected SchemaTransform from(SchemaTransformConfiguration configuration) { return new IcebergWriteSchemaTransform(configuration); }
@Test public void testSimpleAppend() { String identifier = "default.table_" + Long.toString(UUID.randomUUID().hashCode(), 16); TableIdentifier tableId = TableIdentifier.parse(identifier); // Create a table and add records to it. Table table = warehouse.createTable(tableId, TestFixtures.SCHEMA); Map<String, String> properties = new HashMap<>(); properties.put("type", CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP); properties.put("warehouse", warehouse.location); SchemaTransformConfiguration config = SchemaTransformConfiguration.builder() .setTable(identifier) .setCatalogName("name") .setCatalogProperties(properties) .build(); PCollectionRowTuple input = PCollectionRowTuple.of( INPUT_TAG, testPipeline .apply( "Records To Add", Create.of(TestFixtures.asRows(TestFixtures.FILE1SNAPSHOT1))) .setRowSchema(IcebergUtils.icebergSchemaToBeamSchema(TestFixtures.SCHEMA))); PCollection<Row> result = input .apply("Append To Table", new IcebergWriteSchemaTransformProvider().from(config)) .get(OUTPUT_TAG); PAssert.that(result).satisfies(new VerifyOutputs(identifier, "append")); testPipeline.run().waitUntilFinish(); List<Record> writtenRecords = ImmutableList.copyOf(IcebergGenerics.read(table).build()); assertThat(writtenRecords, Matchers.containsInAnyOrder(TestFixtures.FILE1SNAPSHOT1.toArray())); }
@Override public String toString() { StringBuilder sb = new StringBuilder(); int idx = 0; for (Flag flag : FLAG_SET) { if ((flags & flag.getFlagBit()) != 0) { if (idx != 0) { sb.append(" | "); } sb.append(flag.getComment()); idx++; } } return sb.toString(); }
@Test public void testToString() { MysqlCapability capability = new MysqlCapability(1); Assert.assertEquals("CLIENT_LONG_PASSWORD", capability.toString()); capability = new MysqlCapability(0x3); Assert.assertEquals("CLIENT_LONG_PASSWORD | CLIENT_FOUND_ROWS", capability.toString()); capability = new MysqlCapability(0xfffffff); Assert.assertEquals("CLIENT_LONG_PASSWORD | CLIENT_FOUND_ROWS | CLIENT_LONG_FLAG | CLIENT_CONNECT_WITH_DB" + " | CLIENT_NO_SCHEMA | CLIENT_COMPRESS | CLIENT_ODBC | CLIENT_LOCAL_FILES" + " | CLIENT_IGNORE_SPACE | CLIENT_PROTOCOL_41 | CLIENT_INTERACTIVE | CLIENT_SSL" + " | CLIENT_IGNORE_SIGPIPE | CLIENT_TRANSACTIONS | CLIENT_RESERVED | CLIENT_SECURE_CONNECTION" + " | CLIENT_MULTI_STATEMENTS | CLIENT_MULTI_RESULTS | CLIENT_PS_MULTI_RESULTS | CLIENT_PLUGIN_AUTH" + " | CLIENT_CONNECT_ATTRS | CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA" + " | CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS | CLIENT_SESSION_TRACK | CLIENT_DEPRECATE_EOF", capability.toString()); }
@Operation(summary = "verifyQueue", description = "VERIFY_QUEUE_NOTES") @Parameters({ @Parameter(name = "queue", description = "YARN_QUEUE_NAME", required = true, schema = @Schema(implementation = String.class)), @Parameter(name = "queueName", description = "QUEUE_NAME", required = true, schema = @Schema(implementation = String.class)) }) @PostMapping(value = "/verify") @ResponseStatus(HttpStatus.OK) @ApiException(VERIFY_QUEUE_ERROR) public Result<Boolean> verifyQueue(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "queue") String queue, @RequestParam(value = "queueName") String queueName) { queueService.verifyQueue(queue, queueName); return Result.success(true); }
@Test public void testVerifyQueue() throws Exception { // queue value exist MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("queue", QUEUE_MODIFY_NAME); paramsMap.add("queueName", NOT_EXISTS_NAME); MvcResult mvcResult = mockMvc.perform(post("/queues/verify") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assertions.assertNotNull(result); Assertions.assertEquals(Status.QUEUE_VALUE_EXIST.getCode(), result.getCode().intValue()); // queue name exist paramsMap.clear(); paramsMap.add("queue", NOT_EXISTS_NAME); paramsMap.add("queueName", QUEUE_NAME_CREATE_NAME); mvcResult = mockMvc.perform(post("/queues/verify") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assertions.assertNotNull(result); Assertions.assertEquals(Status.QUEUE_NAME_EXIST.getCode(), result.getCode().intValue()); // success paramsMap.clear(); paramsMap.add("queue", NOT_EXISTS_NAME); paramsMap.add("queueName", NOT_EXISTS_NAME); mvcResult = mockMvc.perform(post("/queues/verify") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assertions.assertNotNull(result); Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); logger.info("verify queue return result:{}", mvcResult.getResponse().getContentAsString()); }
@Override public boolean onOptionsItemSelected(MenuItem item) { switch (item.getItemId()) { case R.id.about_menu_option: Navigation.findNavController(requireView()) .navigate(MainFragmentDirections.actionMainFragmentToAboutAnySoftKeyboardFragment()); return true; case R.id.tweaks_menu_option: Navigation.findNavController(requireView()) .navigate(MainFragmentDirections.actionMainFragmentToMainTweaksFragment()); return true; case R.id.backup_prefs: mDialogController.showDialog(R.id.backup_prefs); return true; case R.id.restore_prefs: mDialogController.showDialog(R.id.restore_prefs); return true; default: return super.onOptionsItemSelected(item); } }
// Full backup-then-restore round trip through the options menu: each phase confirms the intro
// dialog, receives the system file-chooser result, waits for the async operation, checks the
// success dialog (and that a progress dialog was shown in between), then dismisses it.
@Test
public void testCompleteOperation() throws Exception {
    final var shadowApplication = Shadows.shadowOf((Application) getApplicationContext());
    final MainFragment fragment = startFragment();
    final FragmentActivity activity = fragment.getActivity();

    // --- Backup phase ---
    fragment.onOptionsItemSelected(
        Shadows.shadowOf(activity).getOptionsMenu().findItem(R.id.backup_prefs));
    TestRxSchedulers.foregroundFlushAllJobs();
    Assert.assertNotSame(
        GeneralDialogTestUtil.NO_DIALOG, GeneralDialogTestUtil.getLatestShownDialog());
    // Confirm the backup intro dialog.
    Assert.assertTrue(
        GeneralDialogTestUtil.getLatestShownDialog()
            .getButton(DialogInterface.BUTTON_POSITIVE)
            .callOnClick());
    TestRxSchedulers.foregroundAdvanceBy(1);
    // this will open the System's file chooser
    ShadowActivity.IntentForResult fileRequest = shadowApplication.getNextStartedActivityForResult();
    Assert.assertNotNull(fileRequest);
    Assert.assertEquals(Intent.ACTION_CREATE_DOCUMENT, fileRequest.intent.getAction());
    final var backupFile = Files.createTempFile("ask-backup", ".xml");
    Shadows.shadowOf(activity.getContentResolver())
        .registerOutputStream(
            Uri.fromFile(backupFile.toFile()), new FileOutputStream(backupFile.toFile()));
    Intent resultData = new Intent();
    resultData.setData(Uri.fromFile(backupFile.toFile()));
    Shadows.shadowOf(activity).receiveResult(fileRequest.intent, Activity.RESULT_OK, resultData);
    TestRxSchedulers.drainAllTasks();
    // back up was done
    Assert.assertEquals(
        getApplicationContext().getText(R.string.prefs_providers_operation_success),
        GeneralDialogTestUtil.getTitleFromDialog(GeneralDialogTestUtil.getLatestShownDialog()));
    // verifying that progress-dialog was shown
    Assert.assertNotNull(
        TestRxSchedulers.blockingGet(
            Observable.fromIterable(ShadowDialog.getShownDialogs())
                .filter(dialog -> !dialog.isShowing())
                .filter(
                    dialog -> dialog.findViewById(R.id.progress_dialog_message_text_view) != null)
                .lastOrError()));
    // closing dialog
    Assert.assertTrue(
        GeneralDialogTestUtil.getLatestShownDialog()
            .getButton(DialogInterface.BUTTON_POSITIVE)
            .callOnClick());
    Assert.assertSame(
        GeneralDialogTestUtil.NO_DIALOG, GeneralDialogTestUtil.getLatestShownDialog());

    // good!
    // Reset dialog bookkeeping before the restore phase.
    ShadowDialog.getShownDialogs().clear();
    Shadows.shadowOf(activity.getContentResolver())
        .registerInputStream(
            Uri.fromFile(backupFile.toFile()), new FileInputStream(backupFile.toFile()));

    // --- Restore phase (mirrors the backup flow, but with ACTION_OPEN_DOCUMENT) ---
    // now, restoring
    fragment.onOptionsItemSelected(
        Shadows.shadowOf(activity).getOptionsMenu().findItem(R.id.restore_prefs));
    TestRxSchedulers.foregroundFlushAllJobs();
    Assert.assertTrue(
        GeneralDialogTestUtil.getLatestShownDialog()
            .getButton(DialogInterface.BUTTON_POSITIVE)
            .callOnClick());
    TestRxSchedulers.foregroundAdvanceBy(1);
    // this will open the System's file chooser
    fileRequest = shadowApplication.getNextStartedActivityForResult();
    Assert.assertNotNull(fileRequest);
    Assert.assertEquals(Intent.ACTION_OPEN_DOCUMENT, fileRequest.intent.getAction());
    resultData = new Intent();
    resultData.setData(Uri.fromFile(backupFile.toFile()));
    Shadows.shadowOf(activity).receiveResult(fileRequest.intent, Activity.RESULT_OK, resultData);
    TestRxSchedulers.drainAllTasks();
    // back up was done
    Assert.assertEquals(
        getApplicationContext().getText(R.string.prefs_providers_operation_success),
        GeneralDialogTestUtil.getTitleFromDialog(GeneralDialogTestUtil.getLatestShownDialog()));
    // verifying that progress-dialog was shown
    Assert.assertNotNull(
        TestRxSchedulers.blockingGet(
            Observable.fromIterable(ShadowDialog.getShownDialogs())
                .filter(dialog -> !dialog.isShowing())
                .filter(
                    dialog -> dialog.findViewById(R.id.progress_dialog_message_text_view) != null)
                .lastOrError()));
    // closing dialog
    Assert.assertTrue(
        GeneralDialogTestUtil.getLatestShownDialog()
            .getButton(DialogInterface.BUTTON_POSITIVE)
            .callOnClick());
    Assert.assertSame(
        GeneralDialogTestUtil.NO_DIALOG, GeneralDialogTestUtil.getLatestShownDialog());
}
/**
 * SQL {@code >} operator for SMALLINT operands (carried as Java longs).
 *
 * @return {@code true} iff {@code left} is strictly greater than {@code right}
 */
@ScalarOperator(GREATER_THAN)
@SqlType(StandardTypes.BOOLEAN)
public static boolean greaterThan(@SqlType(StandardTypes.SMALLINT) long left, @SqlType(StandardTypes.SMALLINT) long right)
{
    // equivalent formulation: left is greater exactly when right is less
    return right < left;
}
@Test
public void testGreaterThan() {
    // exhaustive orderings for SMALLINT '>': equal, greater, less, equal again
    assertFunction("SMALLINT'37' > SMALLINT'37'", BOOLEAN, false);
    assertFunction("SMALLINT'37' > SMALLINT'17'", BOOLEAN, true);
    assertFunction("SMALLINT'17' > SMALLINT'37'", BOOLEAN, false);
    assertFunction("SMALLINT'17' > SMALLINT'17'", BOOLEAN, false);
}
/**
 * Returns the IP address of the client that issued this request.
 */
public String getClientIp() {
    return this.clientIp;
}
@Test
void testGetClientIp() {
    // requestMeta is the shared fixture; presumably seeded with 127.0.0.1 in setup — TODO confirm
    assertEquals("127.0.0.1", requestMeta.getClientIp());
}
/**
 * Applies the member-points deduction to the price calculation result:
 * records the member's point balance, and — when points are opted in, deduction
 * is enabled and the balance is positive — converts points into a discount and
 * spreads it across the selected order items.
 */
@Override
public void calculate(TradePriceCalculateReqBO param, TradePriceCalculateRespBO result) {
    // 0. Initialize points: record the member's total balance; none used yet
    MemberUserRespDTO user = memberUserApi.getUser(param.getUserId());
    result.setTotalPoint(user.getPoint()).setUsePoint(0);
    // 1.1 Bail out unless the request opted in to paying with points
    if (!BooleanUtil.isTrue(param.getPointStatus())) {
        return;
    }
    // 1.2 Bail out unless point deduction is enabled in the member config
    MemberConfigRespDTO config = memberConfigApi.getConfig();
    if (!isDeductPointEnable(config)) {
        return;
    }
    // 1.3 Bail out when the member has no positive point balance
    if (user.getPoint() == null || user.getPoint() <= 0) {
        return;
    }
    // 2.1 Compute the discount amount granted by the points
    int pointPrice = calculatePointPrice(config, user.getPoint(), result);
    // 2.2 Split the used points and the discount across the selected order items
    List<TradePriceCalculateRespBO.OrderItem> orderItems = filterList(result.getItems(), TradePriceCalculateRespBO.OrderItem::getSelected);
    List<Integer> dividePointPrices = TradePriceCalculatorHelper.dividePrice(orderItems, pointPrice);
    List<Integer> divideUsePoints = TradePriceCalculatorHelper.dividePrice(orderItems, result.getUsePoint());
    // 3.1 Record the promotion detail (name/description are user-facing strings, kept verbatim)
    TradePriceCalculatorHelper.addPromotion(result, orderItems, param.getUserId(), "积分抵扣", PromotionTypeEnum.POINT.getType(),
            StrUtil.format("积分抵扣:省 {} 元", TradePriceCalculatorHelper.formatPrice(pointPrice)), dividePointPrices);
    // 3.2 Apply each item's share of the discount and recompute its pay price
    for (int i = 0; i < orderItems.size(); i++) {
        TradePriceCalculateRespBO.OrderItem orderItem = orderItems.get(i);
        orderItem.setPointPrice(dividePointPrices.get(i));
        orderItem.setUsePoint(divideUsePoints.get(i));
        TradePriceCalculatorHelper.recountPayPrice(orderItem);
    }
    TradePriceCalculatorHelper.recountAllPrice(result);
}
/**
 * Verifies that the point deduction is capped by pointTradeDeductMaxPrice (50),
 * is split proportionally over the selected items only, and that the promotion
 * detail records the capped discount.
 */
@Test
public void testCalculate_TradeDeductMaxPrice() {
    // Prepare parameters
    TradePriceCalculateReqBO param = new TradePriceCalculateReqBO()
            .setUserId(233L).setPointStatus(true) // whether points are used
            .setItems(asList(
                    new TradePriceCalculateReqBO.Item().setSkuId(10L).setCount(2).setSelected(true), // uses points
                    new TradePriceCalculateReqBO.Item().setSkuId(20L).setCount(3).setSelected(true), // uses points
                    new TradePriceCalculateReqBO.Item().setSkuId(30L).setCount(5).setSelected(false) // not selected, so no points applied
            ));
    TradePriceCalculateRespBO result = new TradePriceCalculateRespBO()
            .setType(TradeOrderTypeEnum.NORMAL.getType())
            .setPrice(new TradePriceCalculateRespBO.Price())
            .setPromotions(new ArrayList<>())
            .setItems(asList(
                    new TradePriceCalculateRespBO.OrderItem().setSkuId(10L).setCount(2).setSelected(true)
                            .setPrice(100).setSpuId(1L),
                    new TradePriceCalculateRespBO.OrderItem().setSkuId(20L).setCount(3).setSelected(true)
                            .setPrice(50).setSpuId(2L),
                    new TradePriceCalculateRespBO.OrderItem().setSkuId(30L).setCount(5).setSelected(false)
                            .setPrice(30).setSpuId(3L)
            ));
    // Ensure the prices are initialized
    TradePriceCalculatorHelper.recountPayPrice(result.getItems());
    TradePriceCalculatorHelper.recountAllPrice(result);
    // Mock the point configuration
    MemberConfigRespDTO memberConfig = randomPojo(MemberConfigRespDTO.class,
            o -> o.setPointTradeDeductEnable(true) // enable point deduction
                    .setPointTradeDeductUnitPrice(1) // amount (in cents) deducted per point
                    .setPointTradeDeductMaxPrice(50)); // maximum point deduction
    when(memberConfigApi.getConfig()).thenReturn(memberConfig);
    // Mock the member info
    MemberUserRespDTO user = randomPojo(MemberUserRespDTO.class, o -> o.setId(param.getUserId()).setPoint(100));
    when(memberUserApi.getUser(user.getId())).thenReturn(user);
    // Invoke
    tradePointUsePriceCalculator.calculate(param, result);
    // Assert: how many points were used (capped at 50 by the max price)
    assertEquals(result.getUsePoint(), 50);
    // Assert: Price section
    TradePriceCalculateRespBO.Price price = result.getPrice();
    assertEquals(price.getTotalPrice(), 350);
    assertEquals(price.getPayPrice(), 300);
    assertEquals(price.getPointPrice(), 50);
    // Assert: SKU 1
    TradePriceCalculateRespBO.OrderItem orderItem01 = result.getItems().get(0);
    assertEquals(orderItem01.getSkuId(), 10L);
    assertEquals(orderItem01.getCount(), 2);
    assertEquals(orderItem01.getPrice(), 100);
    assertEquals(orderItem01.getPointPrice(), 28);
    assertEquals(orderItem01.getPayPrice(), 172);
    // Assert: SKU 2
    TradePriceCalculateRespBO.OrderItem orderItem02 = result.getItems().get(1);
    assertEquals(orderItem02.getSkuId(), 20L);
    assertEquals(orderItem02.getCount(), 3);
    assertEquals(orderItem02.getPrice(), 50);
    assertEquals(orderItem02.getPointPrice(), 22);
    assertEquals(orderItem02.getPayPrice(), 128);
    // Assert: SKU 3 (unselected — untouched by the deduction)
    TradePriceCalculateRespBO.OrderItem orderItem03 = result.getItems().get(2);
    assertEquals(orderItem03.getSkuId(), 30L);
    assertEquals(orderItem03.getCount(), 5);
    assertEquals(orderItem03.getPrice(), 30);
    assertEquals(orderItem03.getPointPrice(), 0);
    assertEquals(orderItem03.getPayPrice(), 150);
    // Assert: Promotion section
    assertEquals(result.getPromotions().size(), 1);
    TradePriceCalculateRespBO.Promotion promotion01 = result.getPromotions().get(0);
    assertEquals(promotion01.getId(), user.getId());
    assertEquals(promotion01.getName(), "积分抵扣");
    assertEquals(promotion01.getType(), PromotionTypeEnum.POINT.getType());
    assertEquals(promotion01.getTotalPrice(), 350);
    assertEquals(promotion01.getDiscountPrice(), 50);
    assertTrue(promotion01.getMatch());
    assertEquals(promotion01.getDescription(), "积分抵扣:省 0.50 元");
    assertEquals(promotion01.getItems().size(), 2);
    TradePriceCalculateRespBO.PromotionItem promotionItem011 = promotion01.getItems().get(0);
    assertEquals(promotionItem011.getSkuId(), 10L);
    assertEquals(promotionItem011.getTotalPrice(), 200);
    assertEquals(promotionItem011.getDiscountPrice(), 28);
    TradePriceCalculateRespBO.PromotionItem promotionItem012 = promotion01.getItems().get(1);
    assertEquals(promotionItem012.getSkuId(), 20L);
    assertEquals(promotionItem012.getTotalPrice(), 150);
    assertEquals(promotionItem012.getDiscountPrice(), 22);
}
@Override public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay readerWay, IntsRef relationFlags) { RoadEnvironment roadEnvironment = OTHER; if (FerrySpeedCalculator.isFerry(readerWay)) roadEnvironment = FERRY; else if (readerWay.hasTag("bridge") && !readerWay.hasTag("bridge", "no")) roadEnvironment = BRIDGE; else if (readerWay.hasTag("tunnel") && !readerWay.hasTag("tunnel", "no")) roadEnvironment = TUNNEL; else if (readerWay.hasTag("ford") || readerWay.hasTag("highway", "ford")) roadEnvironment = FORD; else { List<Map<String, Object>> nodeTags = readerWay.getTag("node_tags", Collections.emptyList()); // a barrier edge has the restriction in both nodes and the tags are the same if (readerWay.hasTag("gh:barrier_edge") && nodeTags.get(0).containsKey("ford")) roadEnvironment = FORD; else if (readerWay.hasTag("highway")) roadEnvironment = ROAD; } if (roadEnvironment != OTHER) roadEnvEnc.setEnum(false, edgeId, edgeIntAccess, roadEnvironment); }
@Test
void ferry() {
    // Both route=shuttle_train and route=ferry must classify as FERRY.
    EnumEncodedValue<RoadEnvironment> roadEnvironmentEnc = RoadEnvironment.create();
    roadEnvironmentEnc.init(new EncodedValue.InitializerConfig());
    OSMRoadEnvironmentParser parser = new OSMRoadEnvironmentParser(roadEnvironmentEnc);
    EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
    int edgeId = 0;
    ReaderWay way = new ReaderWay(0);
    way.setTag("route", "shuttle_train");
    parser.handleWayTags(edgeId, edgeIntAccess, way, new IntsRef(2));
    RoadEnvironment roadEnvironment = roadEnvironmentEnc.getEnum(false, edgeId, edgeIntAccess);
    assertEquals(RoadEnvironment.FERRY, roadEnvironment);
    // the ferry classification wins even when a highway tag is also present
    way = new ReaderWay(1);
    way.setTag("highway", "footway");
    way.setTag("route", "ferry");
    parser.handleWayTags(edgeId, edgeIntAccess = new ArrayEdgeIntAccess(1), way, new IntsRef(2));
    roadEnvironment = roadEnvironmentEnc.getEnum(false, edgeId, edgeIntAccess);
    assertEquals(RoadEnvironment.FERRY, roadEnvironment);
}
/**
 * Serializes {@code data} by unwrapping it into its column list and delegating
 * to the inner serializer. A {@code null} payload is passed straight through.
 */
@Override
public byte[] serialize(final String topic, final T data) {
    if (data == null) {
        // let the inner serializer decide how to represent null
        return inner.serialize(topic, null);
    }
    final List<?> columns = accessor.apply(data);
    // fail fast when the extracted column count does not match the schema
    SerdeUtils.throwOnColumnCountMismatch(numColumns, columns.size(), true, topic);
    return inner.serialize(topic, columns);
}
@Test
public void shouldConvertRowToListWhenSerializing() {
    // Given: a wrapper holding the row's values
    final TestListWrapper list = list("hello", 10);
    // When:
    serializer.serialize("topicName", list);
    // Then: the inner serializer receives the unwrapped column list
    verify(innerSerializer).serialize("topicName", list.getList());
}
/**
 * Builds a new {@link WorkflowInstance} for the given workflow definition and run
 * request, applying any per-run config overrides to produce the runtime workflow.
 * The instance id and (possibly) correlation id are finalized later by the DAO.
 */
public WorkflowInstance createWorkflowInstance(
    Workflow workflowDef,
    Long internalId,
    long workflowVersionId,
    RunProperties runProperties,
    RunRequest runRequest) {
  WorkflowInstance instance = new WorkflowInstance();
  instance.setWorkflowId(workflowDef.getId());
  instance.setInternalId(internalId);
  instance.setWorkflowVersionId(workflowVersionId);
  // latest workflow instance id is unknown, update it later.
  instance.setWorkflowInstanceId(Constants.LATEST_ONE);
  // set correlation id if request contains it, otherwise, update it later inside DAO
  instance.setCorrelationId(runRequest.getCorrelationId());
  instance.setRunProperties(runProperties);
  // it includes runtime params and tags. Its dag is versioned dag.
  Workflow workflow = overrideWorkflowConfig(workflowDef, runRequest);
  instance.setRuntimeWorkflow(workflow);
  // update newly created workflow instance
  updateWorkflowInstance(instance, runRequest);
  return instance;
}
/**
 * Foreach-initiated runs drop the workflow-level instanceStepConcurrency unless the
 * run request overrides it; manual runs inherit the workflow value unless overridden.
 */
@Test
public void testCreateWorkflowInstanceWithOverriddenWorkflowConfig() {
  ForeachInitiator initiator = new ForeachInitiator();
  initiator.setAncestors(Collections.singletonList(new UpstreamInitiator.Info()));
  // foreach run, no override: concurrency is cleared
  RunRequest request =
      RunRequest.builder()
          .initiator(initiator)
          .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
          .build();
  Workflow workflow = definition.getWorkflow().toBuilder().instanceStepConcurrency(20L).build();
  WorkflowInstance instance =
      workflowHelper.createWorkflowInstance(workflow, 12345L, 1, new RunProperties(), request);
  assertNull(instance.getRuntimeWorkflow().getInstanceStepConcurrency());
  assertEquals(12345L, instance.getInternalId().longValue());
  // foreach run with explicit override: override wins
  request =
      RunRequest.builder()
          .initiator(initiator)
          .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
          .instanceStepConcurrency(15L)
          .build();
  instance =
      workflowHelper.createWorkflowInstance(workflow, 12345L, 1, new RunProperties(), request);
  assertEquals(15, instance.getRuntimeWorkflow().getInstanceStepConcurrency().longValue());
  assertEquals(12345L, instance.getInternalId().longValue());
  // manual run, no override: workflow value inherited
  request =
      RunRequest.builder()
          .initiator(new ManualInitiator())
          .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
          .build();
  instance =
      workflowHelper.createWorkflowInstance(workflow, 12345L, 1, new RunProperties(), request);
  assertEquals(20, instance.getRuntimeWorkflow().getInstanceStepConcurrency().longValue());
  assertEquals(12345L, instance.getInternalId().longValue());
  // manual run with explicit override: override wins
  request =
      RunRequest.builder()
          .initiator(new ManualInitiator())
          .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
          .instanceStepConcurrency(15L)
          .build();
  instance =
      workflowHelper.createWorkflowInstance(workflow, 12345L, 1, new RunProperties(), request);
  assertEquals(15, instance.getRuntimeWorkflow().getInstanceStepConcurrency().longValue());
  assertEquals(12345L, instance.getInternalId().longValue());
}
/**
 * Returns the active session id of the given application.
 *
 * @throws IllegalArgumentException if the application has no active session
 */
public long requireActiveSessionOf(ApplicationId applicationId) {
    var activeSession = activeSessionOf(applicationId);
    if (activeSession.isEmpty()) {
        throw new IllegalArgumentException("Application '" + applicationId + "' has no active session.");
    }
    return activeSession.get();
}
// Requesting the active session of an unknown application must fail loudly.
@Test(expected = IllegalArgumentException.class)
public void require_that_requesting_session_for_unknown_application_throws_exception() {
    TenantApplications repo = createZKAppRepo();
    repo.requireActiveSessionOf(createApplicationId("nonexistent"));
}
/**
 * Persists the product as a new row in PRODUCTS.
 *
 * @throws SQLException on any database failure (e.g. duplicate key)
 */
@Override
public void save(Product product) throws SQLException {
    final var insertSql = "insert into PRODUCTS (name, price, expiration_date) values (?, ?, ?)";
    // both the connection and the statement are released by try-with-resources
    try (var connection = dataSource.getConnection();
         var statement = connection.prepareStatement(insertSql)) {
        statement.setString(1, product.getName());
        statement.setBigDecimal(2, product.getPrice().getAmount());
        statement.setDate(3, Date.valueOf(product.getExpirationDate()));
        statement.executeUpdate();
    }
}
@Test
void shouldSaveProduct() throws SQLException {
    productDao.save(product);
    // read the row back and compare every persisted column
    try (var connection = dataSource.getConnection();
         var statement = connection.createStatement();
         ResultSet rs = statement.executeQuery(SELECT_PRODUCTS_SQL)) {
        assertTrue(rs.next());
        assertEquals(product.getName(), rs.getString("name"));
        assertEquals(product.getPrice(), Money.of(USD, rs.getBigDecimal("price")));
        assertEquals(product.getExpirationDate(), rs.getDate("expiration_date").toLocalDate());
    }
    // saving the same product twice violates a constraint — presumably name is the PK
    assertThrows(SQLException.class, () -> productDao.save(product));
}
/**
 * Pings the given host with the default timeout.
 *
 * @param ip host to ping
 * @return {@code true} if the host responded within the default timeout
 */
public static boolean ping(String ip) {
    // default timeout: 200 ms
    final int defaultTimeoutMillis = 200;
    return ping(ip, defaultTimeoutMillis);
}
@Test
public void pingTest(){
    // loopback should always answer a ping on the local machine
    assertTrue(NetUtil.ping("127.0.0.1"));
}
/**
 * Renders the URI as {@code jmx:<server>} optionally followed by
 * {@code ?key=value&key=value...} for any configured query properties.
 */
@Override
public String toString() {
    final StringBuilder uri = new StringBuilder("jmx:").append(getServerName());
    boolean first = true;
    for (Entry<String, String> prop : mQueryProps.entrySet()) {
        // '?' introduces the query string; '&' separates subsequent pairs
        uri.append(first ? '?' : '&');
        uri.append(prop.getKey()).append('=').append(prop.getValue());
        first = false;
    }
    return uri.toString();
}
@Test
public void defaultsToPlatform() {
    // a fresh builder with no server name targets the platform MBean server
    assertEquals("jmx:platform", new JMXUriBuilder().toString());
}
/**
 * Builds a message {@link Builder} from a generic map, falling back to defaults
 * for any missing key (action, sequence, data).
 *
 * @throws IllegalArgumentException if the action name is not a valid {@code Action}
 */
@SuppressWarnings("unchecked")
public static Builder fromMap(final Map<String, Object> data) {
    // Locale.ROOT keeps the upper-casing locale-independent (e.g. Turkish dotless-i
    // would otherwise break Action.valueOf for names containing 'i')
    final Action action = Action.valueOf(data.getOrDefault(ACTION, DEFAULT_ACTION.name()).toString().toUpperCase(java.util.Locale.ROOT));
    final long sequence = ObjectHelper.cast(Long.class, data.getOrDefault(SEQUENCE, DEFAULT_SEQUENCE));
    final Map<String, Object> inputData = ObjectHelper.cast(Map.class, data.getOrDefault(DATA, Collections.emptyMap()));
    return new Builder()
            .withAction(action)
            .withData(inputData)
            .withSequence(sequence);
}
@Test
void testIfNotCreateFromMapFromInvalidData() {
    final LinkedHashMap<String, Object> data = new LinkedHashMap<>();
    data.put(StitchMessage.ACTION, "upsert");
    // DATA must be a Map; an Integer makes the cast fail
    data.put(StitchMessage.DATA, 1);
    data.put(StitchMessage.SEQUENCE, 1122544L);
    assertThrows(IllegalArgumentException.class, () -> StitchMessage
            .fromMap(data));
}
/**
 * Writes a single message to this output by wrapping it in a filtered message
 * targeted at this output's destination key and delegating to the bulk path.
 */
@Override
public void write(Message message) throws Exception {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Writing message id to [{}]: <{}>", NAME, message.getId());
    }
    writeMessageEntries(List.of(DefaultFilteredMessage.forDestinationKeys(message, Set.of(FILTER_KEY))));
}
@Test
public void writeList() throws Exception {
    final List<Message> messageList = buildMessages(3);
    output.write(messageList);
    // all three messages go out in a single bulk index call, in order
    verify(messages, times(1)).bulkIndex(eq(List.of(
            new MessageWithIndex(wrap(messageList.get(0)), defaultIndexSet),
            new MessageWithIndex(wrap(messageList.get(1)), defaultIndexSet),
            new MessageWithIndex(wrap(messageList.get(2)), defaultIndexSet)
    )));
    verifyNoMoreInteractions(messages);
}
/**
 * Returns a collector that hands elements to {@code batchProcessor} in batches of
 * {@code batchSize} as the stream is consumed.
 *
 * @param batchSize      maximum number of elements per batch
 * @param batchProcessor callback invoked with each full (and the final partial) batch
 */
public static <T> Collector<T, List<T>, List<T>> batchCollector(int batchSize, Consumer<List<T>> batchProcessor) {
    return new BatchCollector<>(batchSize, batchProcessor);
}
@Test
void canBatch() {
    List<Integer> input = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
    List<Integer> output = new ArrayList<>();
    int batchSize = 3;
    // the processor is expected to see batches of 3, 3, 3 and a final batch of 1
    Consumer<List<Integer>> batchProcessor = xs -> {
        System.out.println("Adding " + xs.size());
        output.addAll(xs);
    };
    input.stream()
            .collect(batchCollector(batchSize, batchProcessor));
    // every element arrives exactly once and in the original order
    assertThat(output).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
/**
 * Loads the stored keystore, returning it only when its key and certificate
 * pass validation; otherwise returns empty.
 */
public Optional<KeyStore> get() throws KeyStoreStorageException {
    final Optional<KeyStore> loaded = loadKeystore();
    return loaded.filter(this::isValidKeyAndCert);
}
/**
 * Round-trips a signed keystore through the legacy Mongo storage format and
 * verifies the provider can decrypt it and expose the key + certificate chain.
 */
@Test
void testReadLegacyKeystore() throws Exception {
    final MongoConnection mongoConnection = mongodb.mongoConnection();
    final String passwordSecret = "this_is_my_secret_password";
    final SimpleNodeId nodeId = new SimpleNodeId("5ca1ab1e-0000-4000-a000-000000000000");
    final EncryptedValueService encryptedValueService = new EncryptedValueService(passwordSecret);
    // store a base64-encoded keystore the way the legacy code did
    final KeyStore keystore = createSignedKeystore(passwordSecret);
    final String keystoreStringRepresentation = keystoreToBase64(keystore, passwordSecret.toCharArray());
    writeCertToMongo(mongoConnection.getMongoDatabase(), nodeId, keystoreStringRepresentation, encryptedValueService);
    final LegacyDatanodeKeystoreProvider legacyDatanodeKeystoreProvider = new LegacyDatanodeKeystoreProvider(nodeId, passwordSecret, Mockito.mock(DatanodeConfiguration.class), mongoConnection, encryptedValueService);
    final Optional<KeyStore> legacyKeystore = legacyDatanodeKeystoreProvider.get();
    Assertions.assertThat(legacyKeystore)
            .isPresent()
            .hasValueSatisfying(keyStore -> {
                try {
                    // private key must be recoverable and the chain intact (leaf + CA)
                    Assertions.assertThat(keyStore.getKey("datanode", passwordSecret.toCharArray())).isNotNull();
                    Assertions.assertThat(keyStore.getCertificateChain("datanode")).isNotNull().hasSize(2);
                } catch (KeyStoreException | NoSuchAlgorithmException | UnrecoverableKeyException e) {
                    throw new RuntimeException(e);
                }
            });
}
/**
 * Routes a precise (single-value) sharding value by treating it as a singleton
 * range and delegating to the range variant.
 *
 * @return the first matching target, or {@code null} when none matches
 */
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
    ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
    return doSharding(availableTargetNames, Range.singleton(shardingValue.getValue())).stream().findFirst().orElse(null);
}
@Test
void assertRangeDoShardingByDaysInLocalDate() {
    int stepAmount = 2;
    // build daily tables for June/July 2021 at a 2-day step (t_order_20210601, 03, ...)
    Collection<String> availableTargetNames = new LinkedList<>();
    for (int j = 6; j <= 7; j++) {
        for (int i = 1; j == 6 ? i <= 30 : i <= 31; i = i + stepAmount) {
            availableTargetNames.add(String.format("t_order_%04d%02d%02d", 2021, j, i));
        }
    }
    // range [2021-06-15, 2021-07-31] at 2-day steps covers 24 of those tables
    Collection<String> actualAsLocalDate = createAlgorithm("yyyy-MM-dd", "2021-06-01", "2021-07-31", "yyyyMMdd", stepAmount, null)
            .doSharding(availableTargetNames, createShardingValue(LocalDate.of(2021, 6, 15), LocalDate.of(2021, 7, 31)));
    assertThat(actualAsLocalDate.size(), is(24));
}
/**
 * Returns the registry path of the compute-node collection, e.g.
 * {@code /<root>/<compute_nodes>}.
 */
public static String getComputeNodePath() {
    // explicit leading slash replaces the empty first segment of String.join
    return "/" + ROOT_NODE + "/" + COMPUTE_NODE;
}
@Test
void assertGetComputeNodePath() {
    // path is rooted and joins the node constants with '/'
    assertThat(ComputeNode.getComputeNodePath(), is("/nodes/compute_nodes"));
}
/**
 * Returns the seed of the active deterministic key chain, holding the key chain
 * group lock for the duration of the read.
 *
 * @throws ECKey.MissingPrivateKeyException if the active chain has no seed
 *         (e.g. a watching-only chain)
 */
public DeterministicSeed getKeyChainSeed() {
    keyChainGroupLock.lock();
    try {
        DeterministicSeed seed = keyChainGroup.getActiveKeyChain().getSeed();
        if (seed == null)
            throw new ECKey.MissingPrivateKeyException();
        return seed;
    } finally {
        keyChainGroupLock.unlock();
    }
}
@Test
public void getSeedAsWords1() {
    // Can't verify much here as the wallet is random each time. We could fix the RNG for the unit tests and solve.
    assertEquals(12, wallet.getKeyChainSeed().getMnemonicCode().size());
}
/**
 * Computes an input-order priority for every boundary node: builds the topology
 * graph, links boundaries that are related outside the sub-graph, takes each
 * boundary's maximum distance, and renumbers those distances densely from 0
 * (smaller number = higher priority).
 */
public Map<ExecNode<?>, Integer> calculate() {
    createTopologyGraph();
    // some boundaries node may be connected from the outside of the sub-graph,
    // which we cannot deduce by the above process,
    // so we need to check each pair of boundaries and see if they're related
    dealWithPossiblyRelatedBoundaries();
    Map<ExecNode<?>, Integer> distances = graph.calculateMaximumDistance();
    // extract only the distances of the boundaries and renumbering the distances
    // so that the smallest value starts from 0
    // the smaller the distance, the higher the priority
    Set<Integer> boundaryDistanceSet = new HashSet<>();
    for (ExecNode<?> boundary : boundaries) {
        boundaryDistanceSet.add(distances.getOrDefault(boundary, 0));
    }
    List<Integer> boundaryDistanceList = new ArrayList<>(boundaryDistanceSet);
    Collections.sort(boundaryDistanceList);
    Map<ExecNode<?>, Integer> results = new HashMap<>();
    for (ExecNode<?> boundary : boundaries) {
        // rank of the boundary's distance among all distinct boundary distances
        results.put(boundary, boundaryDistanceList.indexOf(distances.get(boundary)));
    }
    return results;
}
@Test
void testCalculateInputOrderWithUnaffectedRelatedBoundaries() {
    // P = InputProperty.DamBehavior.PIPELINED, B = InputProperty.DamBehavior.BLOCKING
    // P1 = PIPELINED + priority 1
    //
    // 0 --(P0)-> 1 -------(B0)-----> 2 -(P0)-\
    //  \          \--(B0)-> 3 -(P1)-/         4
    //   \-(B0)-> 5 -------(P1)-----> 6 -(P0)-/
    //              7 --(B0)--/
    TestingBatchExecNode[] nodes = new TestingBatchExecNode[8];
    for (int i = 0; i < nodes.length; i++) {
        nodes[i] = new TestingBatchExecNode("TestingBatchExecNode" + i);
    }
    nodes[1].addInput(nodes[0]);
    nodes[2].addInput(
            nodes[1],
            InputProperty.builder().damBehavior(InputProperty.DamBehavior.BLOCKING).build());
    nodes[2].addInput(nodes[3], InputProperty.builder().priority(1).build());
    nodes[3].addInput(
            nodes[1],
            InputProperty.builder().damBehavior(InputProperty.DamBehavior.BLOCKING).build());
    nodes[4].addInput(nodes[2]);
    nodes[4].addInput(nodes[6]);
    nodes[5].addInput(
            nodes[0],
            InputProperty.builder().damBehavior(InputProperty.DamBehavior.BLOCKING).build());
    nodes[6].addInput(nodes[5], InputProperty.builder().priority(1).build());
    nodes[6].addInput(
            nodes[7],
            InputProperty.builder().damBehavior(InputProperty.DamBehavior.BLOCKING).build());
    // boundaries are 1, 3, 5, 7; their priorities must reflect the blocking order
    InputOrderCalculator calculator =
            new InputOrderCalculator(
                    nodes[4],
                    new HashSet<>(Arrays.asList(nodes[1], nodes[3], nodes[5], nodes[7])),
                    InputProperty.DamBehavior.BLOCKING);
    Map<ExecNode<?>, Integer> result = calculator.calculate();
    assertThat(result).hasSize(4);
    assertThat(result.get(nodes[1]).intValue()).isEqualTo(0);
    assertThat(result.get(nodes[3]).intValue()).isEqualTo(1);
    assertThat(result.get(nodes[5]).intValue()).isEqualTo(1);
    assertThat(result.get(nodes[7]).intValue()).isEqualTo(0);
}
/**
 * Returns every raw type referenced by this member's (possibly generic) type,
 * including type arguments, bounds and array component types.
 */
@Override
@PublicAPI(usage = ACCESS)
public Set<JavaClass> getAllInvolvedRawTypes() {
    return getType().getAllInvolvedRawTypes();
}
@Test
public void offers_all_involved_raw_types() {
    // deeply nested generic field: every raw type in the signature should surface
    class SomeClass {
        @SuppressWarnings("unused")
        List<? super Map<? extends Serializable, ? super Set<Number[][]>>> field;
    }
    JavaField field = new ClassFileImporter().importClass(SomeClass.class).getField("field");
    assertThatTypes(field.getAllInvolvedRawTypes()).matchInAnyOrder(List.class, Map.class, Serializable.class, Set.class, Number.class);
}
/**
 * Strips a trailing question mark from the URL, but only when that '?' is the
 * sole question mark in the whole URL (a genuinely empty query string).
 *
 * @return this instance, for chaining
 */
public URLNormalizer removeTrailingQuestionMark() {
    // the first '?' sitting at the last index means: exactly one '?', and trailing
    final int firstQuestionMark = url.indexOf('?');
    if (firstQuestionMark >= 0 && firstQuestionMark == url.length() - 1) {
        url = url.substring(0, firstQuestionMark);
    }
    return this;
}
@Test
public void testRemoveTrailingQuestionMark() {
    // sole trailing '?' is removed
    s = "http://www.example.com/remove?";
    t = "http://www.example.com/remove";
    assertEquals(t, n(s).removeTrailingQuestionMark().toString());
    // non-trailing '?' with a query string is untouched
    s = "http://www.example.com/keep?a=b";
    t = "http://www.example.com/keep?a=b";
    assertEquals(t, n(s).removeTrailingQuestionMark().toString());
    // trailing '?' is kept when it is not the only one
    s = "http://www.example.com/keep?a=b?";
    t = "http://www.example.com/keep?a=b?";
    assertEquals(t, n(s).removeTrailingQuestionMark().toString());
}
/**
 * Compiles a host-to-host intent into two link-collection intents (one per
 * direction). Symmetric intents reuse the inverse of the forward path; an
 * {@code AsymmetricPathConstraint} forces an independent reverse path.
 * A self-intent (same host on both ends) yields no installables.
 */
@Override
public List<Intent> compile(HostToHostIntent intent, List<Intent> installable) {
    // If source and destination are the same, there are never any installables.
    if (Objects.equals(intent.one(), intent.two())) {
        return ImmutableList.of();
    }
    boolean isAsymmetric = intent.constraints().contains(new AsymmetricPathConstraint());
    Path pathOne = getPathOrException(intent, intent.one(), intent.two());
    Path pathTwo = isAsymmetric ? getPathOrException(intent, intent.two(), intent.one()) : invertPath(pathOne);
    Host one = hostService.getHost(intent.one());
    Host two = hostService.getHost(intent.two());
    return Arrays.asList(createLinkCollectionIntent(pathOne, one, two, intent),
                         createLinkCollectionIntent(pathTwo, two, one, intent));
}
/**
 * Compiles a host-to-host intent over an 8-switch path and checks that the
 * forward and reverse link-collection intents cover the full path with the
 * expected ingress/egress points and inherit the original intent key.
 */
@Test
public void testSingleLongPathCompilation() {
    HostToHostIntent intent = makeIntent(HOST_ONE, HOST_TWO);
    assertThat(intent, is(notNullValue()));
    String[] hops = {HOST_ONE, S1, S2, S3, S4, S5, S6, S7, S8, HOST_TWO};
    HostToHostIntentCompiler compiler = makeCompiler(hops);
    assertThat(compiler, is(notNullValue()));
    // one intent per direction
    List<Intent> result = compiler.compile(intent, null);
    assertThat(result, is(Matchers.notNullValue()));
    assertThat(result, hasSize(2));
    Intent forwardIntent = result.get(0);
    assertThat(forwardIntent instanceof LinkCollectionIntent, is(true));
    Intent reverseIntent = result.get(1);
    assertThat(reverseIntent instanceof LinkCollectionIntent, is(true));
    // forward direction: S1 -> S8
    LinkCollectionIntent forwardLCIntent = (LinkCollectionIntent) forwardIntent;
    Set<Link> links = forwardLCIntent.links();
    assertThat(links, hasSize(7));
    Set<FilteredConnectPoint> ingressPoints = ImmutableSet.of(
            new FilteredConnectPoint(new ConnectPoint(DID_S1, PORT_1))
    );
    assertThat(forwardLCIntent.filteredIngressPoints(), is(ingressPoints));
    assertThat(links, linksHasPath(S1, S2));
    assertThat(links, linksHasPath(S2, S3));
    assertThat(links, linksHasPath(S3, S4));
    assertThat(links, linksHasPath(S4, S5));
    assertThat(links, linksHasPath(S5, S6));
    assertThat(links, linksHasPath(S6, S7));
    assertThat(links, linksHasPath(S7, S8));
    Set<FilteredConnectPoint> egressPoints = ImmutableSet.of(
            new FilteredConnectPoint(new ConnectPoint(DID_S8, PORT_2))
    );
    assertThat(forwardLCIntent.filteredEgressPoints(), is(egressPoints));
    // reverse direction: S8 -> S1
    LinkCollectionIntent reverseLCIntent = (LinkCollectionIntent) reverseIntent;
    links = reverseLCIntent.links();
    assertThat(reverseLCIntent.links(), hasSize(7));
    ingressPoints = ImmutableSet.of(new FilteredConnectPoint(new ConnectPoint(DID_S8, PORT_2)));
    assertThat(reverseLCIntent.filteredIngressPoints(), is(ingressPoints));
    assertThat(links, linksHasPath(S2, S1));
    assertThat(links, linksHasPath(S3, S2));
    assertThat(links, linksHasPath(S4, S3));
    assertThat(links, linksHasPath(S5, S4));
    assertThat(links, linksHasPath(S6, S5));
    assertThat(links, linksHasPath(S7, S6));
    assertThat(links, linksHasPath(S8, S7));
    egressPoints = ImmutableSet.of(new FilteredConnectPoint(new ConnectPoint(DID_S1, PORT_1)));
    assertThat(reverseLCIntent.filteredEgressPoints(), is(egressPoints));
    assertThat("key is inherited",
               result.stream().map(Intent::key).collect(Collectors.toList()),
               everyItem(is(intent.key())));
}
/**
 * Returns the OID of the SNMPv3 authentication protocol configured for this
 * target (e.g. SHA, MD5).
 */
@Override
public OID getAuthenticationProtocol() {
    return authProtocol;
}
@Test
public void testGetAuthenticationProtocol() {
    // fixture is configured with SHA authentication — see test setup
    assertEquals(AuthSHA.ID, v3SnmpConfiguration.getAuthenticationProtocol());
}
/**
 * Fails unless the subject has exactly {@code expectedSize} cells.
 *
 * @param expectedSize expected size; must be non-negative
 */
public final void hasSize(int expectedSize) {
    checkArgument(expectedSize >= 0, "expectedSize(%s) must be >= 0", expectedSize);
    check("size()").that(checkNotNull(actual).size()).isEqualTo(expectedSize);
}
@Test
public void hasSizeZero() {
    // the empty table trivially has size 0
    assertThat(ImmutableTable.of()).hasSize(0);
}
/**
 * Extracts the path of every file status.
 *
 * @param stats statuses to convert; may be {@code null}
 * @return the corresponding paths, or {@code null} when {@code stats} is {@code null}
 */
public static Path[] stat2Paths(FileStatus[] stats) {
    if (stats == null) {
        return null;
    }
    final Path[] paths = new Path[stats.length];
    int index = 0;
    for (FileStatus status : stats) {
        paths[index++] = status.getPath();
    }
    return paths;
}
@Test (timeout = 30000)
public void testStat2Paths1() {
    // null in, null out
    assertNull(FileUtil.stat2Paths(null));
    // empty in, empty out
    FileStatus[] fileStatuses = new FileStatus[0];
    Path[] paths = FileUtil.stat2Paths(fileStatuses);
    assertEquals(0, paths.length);
    // paths come back in the same order as the statuses
    Path path1 = new Path("file://foo");
    Path path2 = new Path("file://moo");
    fileStatuses = new FileStatus[] {
            new FileStatus(3, false, 0, 0, 0, path1),
            new FileStatus(3, false, 0, 0, 0, path2) };
    paths = FileUtil.stat2Paths(fileStatuses);
    assertEquals(2, paths.length);
    assertEquals(paths[0], path1);
    assertEquals(paths[1], path2);
}
/**
 * Tests whether every given topic may be contained in the bloom filter.
 *
 * @param bloomBytes raw bloom filter bytes
 * @param topics     topics to probe; must not be {@code null}
 * @return {@code true} iff all topics pass the bloom test (possible membership)
 * @throws IllegalArgumentException if {@code topics} is {@code null}
 */
public static boolean test(byte[] bloomBytes, byte[]... topics) {
    // validate arguments before allocating the Bloom instance
    if (topics == null) {
        throw new IllegalArgumentException("topics can not be null");
    }
    Bloom bloom = new Bloom(bloomBytes);
    for (byte[] topic : topics) {
        if (!bloom.test(topic)) {
            return false;
        }
    }
    return true;
}
@Test
public void testStaticMethodTestWhenAllTopicsIsInBloomForHexInput() {
    // both sampled topics are part of the bloom, so the conjunction holds
    boolean result =
            Bloom.test(
                    ethereumSampleLogsBloom, ethereumSampleLogs.get(0), ethereumSampleLogs.get(100));
    assertTrue(result, "must return true");
}
/**
 * Runs the given command and returns its standard output, with no timeout
 * (the {@code -1} sentinel disables the timeout in the overload).
 *
 * @throws IOException if the command cannot be started or fails
 */
public static String execCommand(String... cmd) throws IOException {
    return execCommand(cmd, -1);
}
@Test
public void testAttemptToRunNonExistentProgram() {
    // launching a missing binary must surface as an IOException mentioning the cause
    IOException e = assertThrows(IOException.class,
            () -> Shell.execCommand(NONEXISTENT_PATH),
            "Expected to get an exception when trying to run a program that does not exist");
    assertTrue(e.getMessage().contains("No such file"),
            "Unexpected error message '" + e.getMessage() + "'");
}
/**
 * Fetches partition metadata for every topic in the cluster, blocking until the
 * response arrives or the timer expires.
 */
public Map<String, List<PartitionInfo>> getAllTopicMetadata(Timer timer) {
    MetadataRequest.Builder request = MetadataRequest.Builder.allTopics();
    return getTopicMetadata(request, timer);
}
@Test
public void testGetAllTopicsDisconnect() {
    // first try gets a disconnect, next succeeds — the fetcher must retry transparently
    buildFetcher();
    assignFromUser(singleton(tp0));
    client.prepareResponse(null, true);
    client.prepareResponse(newMetadataResponse(Errors.NONE));
    Map<String, List<PartitionInfo>> allTopics = topicMetadataFetcher.getAllTopicMetadata(time.timer(5000L));
    assertEquals(initialUpdateResponse.topicMetadata().size(), allTopics.size());
}
/**
 * Returns a uniformly random element of the current item list.
 *
 * @return a random item, or {@code null} when the list is empty
 */
public T random() {
    // snapshot the list reference so concurrent swaps of ref.items stay consistent
    List<T> items = ref.items;
    if (items.isEmpty()) {
        return null;
    }
    if (items.size() == 1) {
        // fast path: no RNG call needed for a single candidate
        return items.get(0);
    }
    return items.get(ThreadLocalRandom.current().nextInt(items.size()));
}
@Test
void testChooserRandomForOneSizeList() {
    // a single-element list must always return that element (fast path)
    List<Pair<String>> list = new LinkedList<>();
    list.add(new Pair<>("test", 1));
    Chooser<String, String> chooser = new Chooser<>("test", list);
    String actual = chooser.random();
    assertNotNull(actual);
    assertEquals("test", actual);
}
/**
 * Builds a {@code CreateStreamCommand} from a fully-resolved output node,
 * copying its schema, timestamp column, topic and serde formats.
 */
public CreateStreamCommand createStreamCommand(final KsqlStructuredDataOutputNode outputNode) {
    return new CreateStreamCommand(
        outputNode.getSinkName().get(),
        outputNode.getSchema(),
        outputNode.getTimestampColumn(),
        outputNode.getKsqlTopic().getKafkaTopicName(),
        Formats.from(outputNode.getKsqlTopic()),
        outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(),
        Optional.of(outputNode.getOrReplace()),
        // sink streams are never source-connected
        Optional.of(false)
    );
}
@Test
public void shouldThrowIfTopicDoesNotExistForStream() {
    // Given: the backing Kafka topic is missing
    when(topicClient.isTopicExists(any())).thenReturn(false);
    final CreateStream statement =
        new CreateStream(SOME_NAME, ONE_KEY_ONE_VALUE, false, true, withProperties, false);
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> createSourceFactory.createStreamCommand(statement, ksqlConfig)
    );
    // Then:
    assertThat(e.getMessage(), containsString(
        "Kafka topic does not exist: " + TOPIC_NAME));
}
/**
 * Renders the networking-imbalance diagnostics: one nested section for the
 * input threads and one for the output threads. Section nesting order is the
 * output format contract, so the start/end calls must stay paired as written.
 */
@Override
public void run(DiagnosticsLogWriter writer) {
    writer.startSection("NetworkingImbalance");
    writer.startSection("InputThreads");
    render(writer, networking.getInputThreads());
    writer.endSection();
    writer.startSection("OutputThreads");
    render(writer, networking.getOutputThreads());
    writer.endSection();
    writer.endSection();
}
@Test
public void noNaNPercentagesForZeroAmounts() {
    // generate some traffic, then assert the plugin never renders a 0/0 => NaN percentage
    spawn((Runnable) () -> hz.getMap("foo").put("key", "value"));
    assertTrueEventually(() -> {
        plugin.run(logWriter);
        assertNotContains("NaN");
    });
}
/**
 * Returns an aggregate operation that tracks the maximum item according to the
 * given comparator. The result is {@code null} when no items were accumulated.
 *
 * @param comparator comparator defining the ordering; must be serializable
 */
@Nonnull
public static <T> AggregateOperation1<T, MutableReference<T>, T> maxBy(
        @Nonnull ComparatorEx<? super T> comparator
) {
    checkSerializable(comparator, "comparator");
    return AggregateOperation
            .withCreate(MutableReference<T>::new)
            .andAccumulate((MutableReference<T> a, T i) -> {
                // adopt the incoming item when it beats (or initializes) the current max
                if (a.isNull() || comparator.compare(i, a.get()) > 0) {
                    a.set(i);
                }
            })
            .andCombine((a1, a2) -> {
                // merge partial results: take a2's max when it is larger
                if (a1.isNull() || (!a2.isNull() && comparator.compare(a1.get(), a2.get()) < 0)) {
                    a1.set(a2.get());
                }
            })
            .andExportFinish(MutableReference::get);
}
@Test
public void when_maxBy_noInput_then_nullResult() {
    // Given: a fresh, never-accumulated accumulator
    AggregateOperation1<String, MutableReference<String>, String> aggrOp = maxBy(naturalOrder());
    MutableReference<String> acc = aggrOp.createFn().get();
    // When:
    String result = aggrOp.finishFn().apply(acc);
    // Then: with no items the maximum is null
    assertNull(result);
}
/**
 * Resolves an account by its service identifier, dispatching on the identity
 * type: ACI identifiers resolve via the account identifier, PNI identifiers via
 * the phone-number identifier.
 */
public Optional<Account> getByServiceIdentifier(final ServiceIdentifier serviceIdentifier) {
    final var identifier = serviceIdentifier.uuid();
    return switch (serviceIdentifier.identityType()) {
        case ACI -> getByAccountIdentifier(identifier);
        case PNI -> getByPhoneNumberIdentifier(identifier);
    };
}
@Test
void testGetByServiceIdentifier() {
    final UUID aci = UUID.randomUUID();
    final UUID pni = UUID.randomUUID();
    // PNI resolves to the ACI via the map entry; the account record lives under the ACI
    when(commands.get(eq("AccountMap::" + pni))).thenReturn(aci.toString());
    when(commands.get(eq("Account3::" + aci))).thenReturn(
        "{\"number\": \"+14152222222\", \"pni\": \"" + pni + "\"}");
    assertTrue(accountsManager.getByServiceIdentifier(new AciServiceIdentifier(aci)).isPresent());
    assertTrue(accountsManager.getByServiceIdentifier(new PniServiceIdentifier(pni)).isPresent());
    // swapping identity types must NOT resolve — the lookups are keyed per identity type
    assertFalse(accountsManager.getByServiceIdentifier(new AciServiceIdentifier(pni)).isPresent());
    assertFalse(accountsManager.getByServiceIdentifier(new PniServiceIdentifier(aci)).isPresent());
}
/**
 * Parses a config qualifier string into {@code out}; delegates to the
 * three-argument overload with its boolean flag fixed to {@code true}.
 */
public static boolean parse(final String str, ResTable_config out) {
    return parse(str, out, true);
}
/** "WxH" qualifiers must populate screenWidth and screenHeight. */
@Test
public void parse_screenSize() {
    ResTable_config config = new ResTable_config();
    ConfigDescription.parse("480x320", config);
    assertThat(config.screenWidth).isEqualTo(480);
    assertThat(config.screenHeight).isEqualTo(320);
}
/** Delegates the async-callback executor to the underlying producer implementation. */
public void setCallbackExecutor(final ExecutorService callbackExecutor) {
    this.defaultMQProducerImpl.setCallbackExecutor(callbackExecutor);
}
/** A custom callback executor must propagate down to the remoting client. */
@Test
public void testSetCallbackExecutor() throws MQClientException {
    String producerGroupTemp = "testSetCallbackExecutor_" + System.currentTimeMillis();
    producer = new DefaultMQProducer(producerGroupTemp);
    producer.setNamesrvAddr("127.0.0.1:9876");
    producer.start();
    ExecutorService customized = Executors.newCachedThreadPool();
    producer.setCallbackExecutor(customized);
    NettyRemotingClient remotingClient = (NettyRemotingClient) producer.getDefaultMQProducerImpl()
        .getMqClientFactory().getMQClientAPIImpl().getRemotingClient();
    assertThat(remotingClient.getCallbackExecutor()).isEqualTo(customized);
}
/** Creates a parser for duration-typed configuration values. */
public DurationConfParser durationConf() {
    return new DurationConfParser();
}
/** Duration lookups must honor the write-option / Flink-config / table-property precedence. */
@Test
public void testDurationConf() {
    Map<String, String> writeOptions = ImmutableMap.of("write-prop", "111s");
    ConfigOption<Duration> configOption = ConfigOptions.key("conf-prop").durationType().noDefaultValue();
    Configuration flinkConf = new Configuration();
    flinkConf.setString(configOption.key(), "222s");
    Table table = mock(Table.class);
    when(table.properties()).thenReturn(ImmutableMap.of("table-prop", "333s"));
    FlinkConfParser confParser = new FlinkConfParser(table, writeOptions, flinkConf);
    Duration defaultVal = Duration.ofMillis(999);
    Duration result = confParser.durationConf().option("write-prop").defaultValue(defaultVal).parse();
    assertThat(result).isEqualTo(Duration.ofSeconds(111));
    result = confParser.durationConf().flinkConfig(configOption).defaultValue(defaultVal).parse();
    assertThat(result).isEqualTo(Duration.ofSeconds(222));
    result = confParser.durationConf().tableProperty("table-prop").defaultValue(defaultVal).parse();
    assertThat(result).isEqualTo(Duration.ofSeconds(333));
}
/**
 * Queries Nacos for healthy instances of the configured service, converts them to
 * remote OAP instances (marking the entry matching this node's address as self),
 * and reports cluster health to the health checker.
 */
@Override
public List<RemoteInstance> queryRemoteNodes() {
    List<RemoteInstance> remoteInstances = new ArrayList<>();
    try {
        List<Instance> instances = namingService.selectInstances(config.getServiceName(), true);
        if (CollectionUtils.isNotEmpty(instances)) {
            instances.forEach(instance -> {
                Address address = new Address(instance.getIp(), instance.getPort(), false);
                if (address.equals(selfAddress)) {
                    address.setSelf(true);
                }
                remoteInstances.add(new RemoteInstance(address));
            });
        }
        ClusterHealthStatus healthStatus = OAPNodeChecker.isHealth(remoteInstances);
        if (healthStatus.isHealth()) {
            this.healthChecker.health();
        } else {
            this.healthChecker.unHealth(healthStatus.getReason());
        }
    } catch (Throwable e) {
        healthChecker.unHealth(e);
        // NOTE(review): only the message is propagated; the original cause is dropped here — confirm
        // whether ServiceQueryException supports a cause before changing.
        throw new ServiceQueryException(e.getMessage());
    }
    if (log.isDebugEnabled()) {
        log.debug("Nacos cluster instances:{}", remoteInstances);
    }
    return remoteInstances;
}
/** All Nacos instances must be returned even when none matches a self address. */
@Test
public void queryRemoteNodesWithNullSelf() throws NacosException {
    List<Instance> instances = mockInstance();
    when(namingService.selectInstances(anyString(), anyBoolean())).thenReturn(instances);
    List<RemoteInstance> remoteInstances = coordinator.queryRemoteNodes();
    assertEquals(remoteInstances.size(), instances.size());
}
/** Returns the configured initial load mode. */
public InitialLoadMode getInitialLoadMode() {
    return initialLoadMode;
}
/** A fresh MapStoreConfig must default to LAZY initial loading. */
@Test
public void getInitialLoadMode() {
    assertEquals(LAZY, new MapStoreConfig().getInitialLoadMode());
}
/** Removes the inbound half from the pipeline; the combined handler must already be added. */
public final void removeInboundHandler() {
    checkAdded();
    inboundCtx.remove();
}
/** Inbound events must stop reaching the inbound half after removeInboundHandler(). */
@Test
public void testInboundEvents() {
    InboundEventHandler inboundHandler = new InboundEventHandler();
    CombinedChannelDuplexHandler<ChannelInboundHandler, ChannelOutboundHandler> handler =
        new CombinedChannelDuplexHandler<ChannelInboundHandler, ChannelOutboundHandler>(
            inboundHandler, new ChannelOutboundHandlerAdapter());
    EmbeddedChannel channel = new EmbeddedChannel();
    channel.pipeline().addLast(handler);
    assertEquals(Event.HANDLER_ADDED, inboundHandler.pollEvent());
    doInboundOperations(channel);
    assertInboundOperations(inboundHandler);
    handler.removeInboundHandler();
    assertEquals(Event.HANDLER_REMOVED, inboundHandler.pollEvent());
    // These should not be handled by the inboundHandler anymore as it was removed before
    doInboundOperations(channel);
    // Should have not received any more events as it was removed before via removeInboundHandler()
    assertNull(inboundHandler.pollEvent());
    try {
        channel.checkException();
        fail();
    } catch (Throwable cause) {
        assertSame(CAUSE, cause);
    }
    assertTrue(channel.finish());
    assertNull(inboundHandler.pollEvent());
}
/**
 * Tests whether the S3 path exists: the root always does, buckets are probed for
 * accessibility, files/placeholders via attribute lookup, and directories by
 * listing for a common prefix. Access-denied objects are treated as existing.
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return true;
    }
    try {
        if(containerService.isContainer(file)) {
            try {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Test if bucket %s is accessible", file));
                }
                return session.getClient().isBucketAccessible(containerService.getContainer(file).getName());
            }
            catch(ServiceException e) {
                throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
            }
        }
        if(file.isFile() || file.isPlaceholder()) {
            attributes.find(file, listener);
            return true;
        }
        else {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Search for common prefix %s", file));
            }
            // Check for common prefix
            try {
                new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
                return true;
            }
            catch(ListCanceledException l) {
                // Found common prefix
                return true;
            }
            catch(NotfoundException e) {
                throw e;
            }
        }
    }
    catch(NotfoundException e) {
        return false;
    }
    catch(RetriableAccessDeniedException e) {
        // Must fail with server error
        throw e;
    }
    catch(AccessDeniedException e) {
        // Object is inaccessible to current user, but does exist.
        return true;
    }
}
/** An accessible bucket must be reported as found. */
@Test
public void testFindBucket() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(container));
}
/** Dispatches a SHOW statement to the global show-executor visitor and returns its result set. */
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
    return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
}
/** SHOW VARIABLES must return the dumped rows both with and without a pattern. */
@Test
public void testShowVariable() throws AnalysisException, DdlException {
    // Mock variable
    VariableMgr variableMgr = new VariableMgr();
    List<List<String>> rows = Lists.newArrayList();
    rows.add(Lists.newArrayList("var1", "abc"));
    rows.add(Lists.newArrayList("var2", "abc"));
    new Expectations(variableMgr) {
        {
            VariableMgr.dump((SetType) any, (SessionVariable) any, (PatternMatcher) any);
            minTimes = 0;
            result = rows;
            VariableMgr.dump((SetType) any, (SessionVariable) any, null);
            minTimes = 0;
            result = rows;
        }
    };
    ShowVariablesStmt stmt = new ShowVariablesStmt(SetType.SESSION, "var%");
    ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
    Assert.assertEquals(2, resultSet.getMetaData().getColumnCount());
    Assert.assertEquals(2, resultSet.getResultRows().get(0).size());
    Assert.assertTrue(resultSet.next());
    Assert.assertEquals("var1", resultSet.getString(0));
    Assert.assertTrue(resultSet.next());
    Assert.assertEquals("var2", resultSet.getString(0));
    Assert.assertFalse(resultSet.next());
    stmt = new ShowVariablesStmt(SetType.SESSION, null);
    resultSet = ShowExecutor.execute(stmt, ctx);
    Assert.assertTrue(resultSet.next());
    Assert.assertEquals("var1", resultSet.getString(0));
    Assert.assertTrue(resultSet.next());
    Assert.assertEquals("var2", resultSet.getString(0));
    Assert.assertFalse(resultSet.next());
}
public String getValue(String template) { StringBuilder builder = new StringBuilder(); // Just delegate parsing stuffs to Expression parser to retrieve all the expressions ordered. Expression[] expressions = ExpressionParser.parseExpressions(template, context, expressionPrefix, expressionSuffix); // Now just go through expressions and evaluate them. for (Expression expression : expressions) { builder.append(expression.getValue(context)); } return builder.toString(); }
/** A template using only built-in functions must render without an explicit context. */
@Test
void testContextlessTemplate() {
    String template = "{\"signedAt\": \"{{ now() }}\", \"fullName\": \"Laurent Broudoux\", \"email\": \"laurent@microcks.io\", \"age\": {{ randomInt(20, 99) }}} \n";
    TemplateEngine engine = TemplateEngineFactory.getTemplateEngine();
    String content = null;
    try {
        content = engine.getValue(template);
    } catch (Throwable t) {
        fail("Contextless template should not fail.");
    }
    assertTrue(content.startsWith("{\"signedAt\": \"1"));
}
/**
 * Executes the row mutation asynchronously and waits with a hard client-side
 * timeout (the configured mutate-row deadline plus a 10 s buffer). Timeouts
 * cancel the request; execution failures are unwrapped and rethrown.
 */
@VisibleForTesting
void mutateRowWithHardTimeout(RowMutation rowMutation) {
    ApiFuture<Void> mutateRowFuture = dataClient.mutateRowAsync(rowMutation);
    try {
        mutateRowFuture.get(
            BigtableChangeStreamAccessor.MUTATE_ROW_DEADLINE.getSeconds() + 10, TimeUnit.SECONDS);
    } catch (TimeoutException timeoutException) {
        mutateRowFuture.cancel(true);
        throw new RuntimeException(
            "Cancelled mutateRow request after exceeding deadline", timeoutException);
    } catch (ExecutionException executionException) {
        // unwrap so callers see the original runtime exception type
        if (executionException.getCause() instanceof RuntimeException) {
            throw (RuntimeException) executionException.getCause();
        }
        throw new RuntimeException(executionException);
    } catch (InterruptedException interruptedException) {
        // restore the interrupt flag before surfacing the failure
        Thread.currentThread().interrupt();
        throw new RuntimeException(interruptedException);
    }
}
/** Timeout, interruption, and execution failures must all surface as RuntimeException. */
@Test
public void mutateRowWithHardTimeoutErrorHandling()
    throws ExecutionException, InterruptedException, TimeoutException {
    BigtableDataClient mockClient = Mockito.mock(BigtableDataClient.class);
    MetadataTableDao daoWithMock =
        new MetadataTableDao(mockClient, "test-table", ByteString.copyFromUtf8("test"));
    ApiFuture<Void> mockFuture = mock(ApiFuture.class);
    when(mockClient.mutateRowAsync(any())).thenReturn(mockFuture);
    when(mockFuture.get(40, TimeUnit.SECONDS))
        .thenThrow(TimeoutException.class)
        .thenThrow(InterruptedException.class)
        .thenThrow(ExecutionException.class);
    assertThrows(
        RuntimeException.class,
        () -> daoWithMock.mutateRowWithHardTimeout(RowMutation.create("test", "test").deleteRow()));
    assertThrows(
        RuntimeException.class,
        () -> daoWithMock.mutateRowWithHardTimeout(RowMutation.create("test", "test").deleteRow()));
    assertThrows(
        RuntimeException.class,
        () -> daoWithMock.mutateRowWithHardTimeout(RowMutation.create("test", "test").deleteRow()));
}
/** Establishes trust in an IDP via the federation master, then fetches its full entity configuration. */
@Override
public EntityStatementJWS establishIdpTrust(URI issuer) {
    var trustedFederationStatement = fetchTrustedFederationStatement(issuer);
    // the federation statement from the master will establish trust in the JWKS and the issuer URL
    // of the idp,
    // we still need to fetch the entity configuration directly afterward to get the full
    // entity statement
    return fetchTrustedEntityConfiguration(issuer, trustedFederationStatement.body().jwks());
}
/** A federation statement signed with the wrong key must fail trust establishment. */
@Test
void establishTrust_badSignatureFederationStatement() {
    var client = new FederationMasterClientImpl(FEDERATION_MASTER, federationApiClient, clock);
    var issuer = URI.create("https://idp-tk.example.com");
    var federationFetchUrl = FEDERATION_MASTER.resolve("/fetch");
    var fedmasterKeypair = ECKeyGenerator.example();
    var fedmasterEntityConfigurationJws =
        federationFetchFedmasterConfiguration(federationFetchUrl, fedmasterKeypair);
    var trustedSectoralIdpKeypair = ECKeyGenerator.generate();
    var badKeypair = ECKeyGenerator.generate();
    var trustedFederationStatement =
        trustedFederationStatement(issuer, trustedSectoralIdpKeypair, badKeypair);
    when(federationApiClient.fetchEntityConfiguration(FEDERATION_MASTER))
        .thenReturn(fedmasterEntityConfigurationJws);
    when(federationApiClient.fetchFederationStatement(
            federationFetchUrl, FEDERATION_MASTER.toString(), issuer.toString()))
        .thenReturn(trustedFederationStatement);

    // when
    var e = assertThrows(FederationException.class, () -> client.establishIdpTrust(issuer));

    // then
    assertEquals(
        "federation statement of 'https://idp-tk.example.com' has a bad signature", e.getMessage());
}
public static void init(String[] args) throws UnknownHostException { localAddr = null; if (!"0.0.0.0".equals(Config.frontend_address)) { if (!InetAddressValidator.getInstance().isValid(Config.frontend_address)) { throw new UnknownHostException("invalid frontend_address: " + Config.frontend_address); } localAddr = InetAddress.getByName(Config.frontend_address); LOG.info("use configured address. {}", localAddr); return; } List<InetAddress> hosts = NetUtils.getHosts(); if (hosts.isEmpty()) { LOG.error("fail to get localhost"); System.exit(-1); } HostType specifiedHostType = HostType.NOT_SPECIFIED; for (int i = 0; i < args.length; i++) { if (args[i].equalsIgnoreCase("-host_type")) { if (i + 1 >= args.length) { System.out.println("-host_type need parameter FQDN or IP"); System.exit(-1); } String inputHostType = args[i + 1]; try { inputHostType = inputHostType.toUpperCase(); specifiedHostType = HostType.valueOf(inputHostType); } catch (Exception e) { System.out.println("-host_type need parameter FQDN or IP"); System.exit(-1); } } } if (specifiedHostType == HostType.FQDN) { initAddrUseFqdn(hosts); return; } if (specifiedHostType == HostType.IP) { initAddrUseIp(hosts); return; } // Check if it is a new cluster, new clusters start with IP by default String roleFilePath = Config.meta_dir + ROLE_FILE_PATH; File roleFile = new File(roleFilePath); if (!roleFile.exists()) { initAddrUseIp(hosts); return; } Properties prop = new Properties(); String fileStoredHostType; try (FileInputStream in = new FileInputStream(roleFile)) { prop.load(in); } catch (IOException e) { LOG.error("failed to read role file"); System.exit(-1); } fileStoredHostType = prop.getProperty(HOST_TYPE, null); // Check if the ROLE file has property 'hostType' // If it not has property 'hostType', start with IP // If it has property 'hostType' & hostType = IP, start with IP if (Strings.isNullOrEmpty(fileStoredHostType) || fileStoredHostType.equals(HostType.IP.toString())) { initAddrUseIp(hosts); return; } // If 
it has property 'hostType' & hostType = FQDN, start with FQDN initAddrUseFqdn(hosts); }
/** The ROLE file's recorded host type must drive FQDN vs. IP address selection. */
@Test
public void testChooseHostTypeByFile() throws UnknownHostException {
    mockNet();
    Config.meta_dir = "feOpTestDir1";
    String metaPath = Config.meta_dir + "/";
    // fqdn
    mkdir(true, metaPath);
    useFqdnFile = false;
    FrontendOptions.init(new String[] {});
    Assert.assertTrue(useFqdnFile);
    File dir = new File(metaPath);
    deleteDir(dir);
    // ip
    mkdir(false, metaPath);
    useFqdnFile = true;
    FrontendOptions.init(new String[] {});
    Assert.assertTrue(!useFqdnFile);
    dir = new File(metaPath);
    deleteDir(dir);
}
/** Validates the adapter configuration (path, allowed location, interval), then performs the first CSV parse. */
@Override
public void doStart() throws Exception {
    LOG.debug("Starting CSV data adapter for file: {}", config.path());
    if (isNullOrEmpty(config.path())) {
        throw new IllegalStateException("File path needs to be set");
    }
    if (!pathChecker.fileIsInAllowedPath(Paths.get(config.path()))) {
        throw new IllegalStateException(ALLOWED_PATH_ERROR);
    }
    if (config.checkInterval() < 1) {
        throw new IllegalStateException("Check interval setting cannot be smaller than 1");
    }
    // Set file info before parsing the data for the first time
    fileInfo = getNewFileInfo();
    lookupRef.set(parseCSVFile());
}
/** Starting the adapter with a disallowed file path must fail fast. */
@Test
public void doGet_failure_filePathInvalid() throws Exception {
    final Config config = baseConfig();
    when(pathChecker.fileIsInAllowedPath((isA(Path.class)))).thenReturn(false);
    csvFileDataAdapter = new CSVFileDataAdapter("id", "name", config, new MetricRegistry(), pathChecker);
    assertThatThrownBy(() -> csvFileDataAdapter.doStart())
        .isExactlyInstanceOf(IllegalStateException.class)
        .hasMessageStartingWith(CSVFileDataAdapter.ALLOWED_PATH_ERROR);
}
/**
 * Merges a hierarchy of descriptors into one. Descriptors are pushed onto a stack
 * in list order and then merged pairwise from the top down, so later list entries
 * are combined with earlier ones first, according to the given merge mode.
 *
 * @throws IllegalArgumentException if the hierarchy is null or empty
 */
public static DeploymentDescriptor merge(List<DeploymentDescriptor> descriptorHierarchy, MergeMode mode) {
    if (descriptorHierarchy == null || descriptorHierarchy.isEmpty()) {
        throw new IllegalArgumentException("Descriptor hierarchy list cannot be empty");
    }
    if (descriptorHierarchy.size() == 1) {
        return descriptorHierarchy.get(0);
    }
    Deque<DeploymentDescriptor> stack = new ArrayDeque<>();
    descriptorHierarchy.forEach(stack::push);
    while (stack.size() > 1) {
        stack.push(merge(stack.pop(), stack.pop(), mode));
    }
    // last element from the stack is the one that contains all merged descriptors
    return stack.pop();
}
/** OVERRIDE_ALL merging must take every value, including limitSerializationClasses, from the secondary descriptor. */
@Test
public void testDeploymentDesciptorMergeOverrideAll() {
    DeploymentDescriptor primary = new DeploymentDescriptorImpl("org.jbpm.domain");
    primary.getBuilder()
        .addMarshalingStrategy(new ObjectModel("org.jbpm.test.CustomStrategy", new Object[]{"param2"}))
        .setLimitSerializationClasses(true);
    assertThat(primary).isNotNull();
    assertThat(primary.getPersistenceUnit()).isEqualTo("org.jbpm.domain");
    assertThat(primary.getAuditPersistenceUnit()).isEqualTo("org.jbpm.domain");
    assertThat(primary.getAuditMode()).isEqualTo(AuditMode.JPA);
    assertThat(primary.getPersistenceMode()).isEqualTo(PersistenceMode.JPA);
    assertThat(primary.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON);
    assertThat(primary.getMarshallingStrategies().size()).isEqualTo(1);
    assertThat(primary.getConfiguration().size()).isEqualTo(0);
    assertThat(primary.getEnvironmentEntries().size()).isEqualTo(0);
    assertThat(primary.getEventListeners().size()).isEqualTo(0);
    assertThat(primary.getGlobals().size()).isEqualTo(0);
    assertThat(primary.getTaskEventListeners().size()).isEqualTo(0);
    assertThat(primary.getWorkItemHandlers().size()).isEqualTo(0);
    assertThat(primary.getLimitSerializationClasses()).isTrue();
    DeploymentDescriptor secondary = new DeploymentDescriptorImpl("org.jbpm.domain");
    secondary.getBuilder()
        .auditMode(AuditMode.JMS)
        .persistenceMode(PersistenceMode.JPA)
        .persistenceUnit("my.custom.unit")
        .auditPersistenceUnit("my.custom.unit2")
        .setLimitSerializationClasses(false);
    assertThat(secondary).isNotNull();
    assertThat(secondary.getPersistenceUnit()).isEqualTo("my.custom.unit");
    assertThat(secondary.getAuditPersistenceUnit()).isEqualTo("my.custom.unit2");
    assertThat(secondary.getAuditMode()).isEqualTo(AuditMode.JMS);
    assertThat(secondary.getPersistenceMode()).isEqualTo(PersistenceMode.JPA);
    assertThat(secondary.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON);
    assertThat(secondary.getMarshallingStrategies().size()).isEqualTo(0);
    assertThat(secondary.getConfiguration().size()).isEqualTo(0);
    assertThat(secondary.getEnvironmentEntries().size()).isEqualTo(0);
    assertThat(secondary.getEventListeners().size()).isEqualTo(0);
    assertThat(secondary.getGlobals().size()).isEqualTo(0);
    assertThat(secondary.getTaskEventListeners().size()).isEqualTo(0);
    assertThat(secondary.getWorkItemHandlers().size()).isEqualTo(0);
    assertThat(secondary.getLimitSerializationClasses()).isFalse();
    // and now let's merge them
    DeploymentDescriptor outcome = DeploymentDescriptorMerger.merge(primary, secondary, MergeMode.OVERRIDE_ALL);
    assertThat(outcome).isNotNull();
    assertThat(outcome.getPersistenceUnit()).isEqualTo("my.custom.unit");
    assertThat(outcome.getAuditPersistenceUnit()).isEqualTo("my.custom.unit2");
    assertThat(outcome.getAuditMode()).isEqualTo(AuditMode.JMS);
    assertThat(outcome.getPersistenceMode()).isEqualTo(PersistenceMode.JPA);
    assertThat(outcome.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON);
    assertThat(outcome.getMarshallingStrategies().size()).isEqualTo(0);
    assertThat(outcome.getConfiguration().size()).isEqualTo(0);
    assertThat(outcome.getEnvironmentEntries().size()).isEqualTo(0);
    assertThat(outcome.getEventListeners().size()).isEqualTo(0);
    assertThat(outcome.getGlobals().size()).isEqualTo(0);
    assertThat(outcome.getTaskEventListeners().size()).isEqualTo(0);
    assertThat(outcome.getWorkItemHandlers().size()).isEqualTo(0);
    assertThat(outcome.getLimitSerializationClasses()).isFalse();
}
/**
 * Sets the task type, enforcing the 40-character column limit.
 *
 * @param s the task type; must be non-null and at most 40 characters
 * @return this, for chaining
 * @throws NullPointerException if {@code s} is null (now with an explicit message
 *         instead of the bare NPE the former {@code s.length()} dereference produced)
 * @throws IllegalArgumentException if {@code s} is longer than 40 characters
 */
public CeQueueDto setTaskType(String s) {
    if (s == null) {
        throw new NullPointerException("Value of task type can't be null");
    }
    checkArgument(s.length() <= 40, "Value of task type is too long: %s", s);
    this.taskType = s;
    return this;
}
/** 41-character task types must be rejected with a descriptive IllegalArgumentException. */
@Test
void setTaskType_throws_IAE_if_value_is_41_chars() {
    String str_41_chars = STR_40_CHARS + "a";
    assertThatThrownBy(() -> underTest.setTaskType(str_41_chars))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Value of task type is too long: " + str_41_chars);
}
/** Returns the word currently being composed (test hook). */
protected WordComposer getCurrentComposedWord() {
    return mWord;
}
/** Backspacing into a completed word must restart suggestions for that word. */
@Test
public void testSuggestionsRestartWhenBackSpace() {
    simulateFinishInputFlow();
    SharedPrefsHelper.setPrefsValue(R.string.settings_key_allow_suggestions_restart, true);
    simulateOnStartInputFlow();
    mAnySoftKeyboardUnderTest.simulateTextTyping("hell face");
    verifySuggestions(true, "face");
    mAnySoftKeyboardUnderTest.simulateKeyPress(' ');
    Assert.assertEquals(
        "hell face ", getCurrentTestInputConnection().getCurrentTextInInputConnection());
    verifySuggestions(true);
    mAnySoftKeyboardUnderTest.resetMockCandidateView();
    for (int deleteKeyPress = 6; deleteKeyPress > 0; deleteKeyPress--) {
        // really quickly
        mAnySoftKeyboardUnderTest.simulateKeyPress(KeyCodes.DELETE, false);
        TestRxSchedulers.foregroundAdvanceBy(
            50 /*that's the key-repeat delay in AnyKeyboardViewBase*/);
    }
    TestRxSchedulers.drainAllTasksUntilEnd();
    // lots of events in the queue...
    TestRxSchedulers.foregroundAdvanceBy(100);
    verifySuggestions(true, "hell", "hello");
    Assert.assertEquals("hell", getCurrentTestInputConnection().getCurrentTextInInputConnection());
    Assert.assertEquals(4, getCurrentTestInputConnection().getCurrentStartPosition());
    Assert.assertEquals(4, mAnySoftKeyboardUnderTest.getCurrentComposedWord().cursorPosition());
    Assert.assertEquals(
        "hell", mAnySoftKeyboardUnderTest.getCurrentComposedWord().getTypedWord().toString());
    mAnySoftKeyboardUnderTest.simulateKeyPress(KeyCodes.DELETE);
    Assert.assertEquals("hel", getCurrentTestInputConnection().getCurrentTextInInputConnection());
    Assert.assertEquals(3, getCurrentTestInputConnection().getCurrentStartPosition());
    Assert.assertEquals(
        "hel", mAnySoftKeyboardUnderTest.getCurrentComposedWord().getTypedWord().toString());
    verifySuggestions(true, "hel", "he'll", "hello", "hell");
    mAnySoftKeyboardUnderTest.simulateKeyPress('l');
    Assert.assertEquals("hell", getCurrentTestInputConnection().getCurrentTextInInputConnection());
    verifySuggestions(true, "hell", "hello");
    Assert.assertEquals(4, getCurrentTestInputConnection().getCurrentStartPosition());
    Assert.assertEquals(
        "hell", mAnySoftKeyboardUnderTest.getCurrentComposedWord().getTypedWord().toString());
}
/**
 * Returns the floating IP with the given identifier, or null when unknown.
 *
 * @throws IllegalArgumentException if the identifier is null or empty
 */
@Override
public KubevirtFloatingIp floatingIp(String id) {
    if (Strings.isNullOrEmpty(id)) {
        throw new IllegalArgumentException(ERR_NULL_FLOATING_IP_ID);
    }
    return kubevirtRouterStore.floatingIp(id);
}
/** Lookup must return the stored floating IP for a known id and null for an unknown one. */
@Test
public void testGetFloatingIpById() {
    createBasicFloatingIpDisassociated();
    assertNotNull("Floating IP did not match", target.floatingIp(FLOATING_IP_ID));
    assertNull("Floating IP did not match", target.floatingIp(UNKNOWN_ID));
}
/** Index statistics are unsupported for replicated maps; always throws. */
@Override
public Map<String, LocalIndexStats> getIndexStats() {
    throw new UnsupportedOperationException("Queries on replicated maps are not supported.");
}
/** Replicated-map stats must reject index-stat queries. */
@Test(expected = UnsupportedOperationException.class)
public void testGetIndexStats() {
    localReplicatedMapStats.getIndexStats();
}
/**
 * Returns a version reported by at least {@code quorum} healthy replicas
 * (state NORMAL and not marked bad), or -1 when no version reaches the quorum.
 * Replica counting happens under the read lock; the scan of the counts does not.
 */
public long getQuorumVersion(int quorum) {
    Map<Long, Integer> versionCnt = new HashMap<>();
    try (CloseableLock ignored = CloseableLock.lock(this.rwLock.readLock())) {
        for (Replica replica : replicas) {
            if (replica.getState() == ReplicaState.NORMAL && !replica.isBad()) {
                versionCnt.put(replica.getVersion(), 1 + versionCnt.getOrDefault(replica.getVersion(), 0));
            }
        }
    }
    for (Map.Entry<Long, Integer> entry : versionCnt.entrySet()) {
        if (entry.getValue() >= quorum) {
            return entry.getKey();
        }
    }
    return -1L;
}
/** Bad or non-NORMAL replicas must be excluded from the quorum-version count. */
@Test
public void testGetQuorumVersion() {
    List<Replica> replicas = Lists.newArrayList(new Replica(10001, 20001, ReplicaState.NORMAL, 10, -1),
        new Replica(10002, 20002, ReplicaState.NORMAL, 10, -1),
        new Replica(10003, 20003, ReplicaState.NORMAL, 9, -1));
    LocalTablet tablet = new LocalTablet(10004, replicas);
    Assert.assertEquals(-1L, tablet.getQuorumVersion(3));
    Assert.assertEquals(10L, tablet.getQuorumVersion(2));
    Replica replica = tablet.getReplicaByBackendId(20001L);
    replica.setBad(true);
    Assert.assertEquals(-1L, tablet.getQuorumVersion(2));
    replica.setBad(false);
    replica.setState(ReplicaState.DECOMMISSION);
    Assert.assertEquals(-1L, tablet.getQuorumVersion(2));
    replica.setState(ReplicaState.NORMAL);
}
/**
 * Lists topics via a metadata request to the least-loaded node; internal topics
 * are filtered out unless the options ask to include them.
 */
@Override
public ListTopicsResult listTopics(final ListTopicsOptions options) {
    final KafkaFutureImpl<Map<String, TopicListing>> topicListingFuture = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    runnable.call(new Call("listTopics", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {

        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            return MetadataRequest.Builder.allTopics();
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            Map<String, TopicListing> topicListing = new HashMap<>();
            for (MetadataResponse.TopicMetadata topicMetadata : response.topicMetadata()) {
                String topicName = topicMetadata.topic();
                boolean isInternal = topicMetadata.isInternal();
                if (!topicMetadata.isInternal() || options.shouldListInternal())
                    topicListing.put(topicName, new TopicListing(topicName, topicMetadata.topicId(), isInternal));
            }
            topicListingFuture.complete(topicListing);
        }

        @Override
        void handleFailure(Throwable throwable) {
            topicListingFuture.completeExceptionally(throwable);
        }
    }, now);
    return new ListTopicsResult(topicListingFuture);
}
/** A request-timeout disconnect must trigger a retry that eventually succeeds. */
@Test
public void testClientSideTimeoutAfterFailureToReceiveResponse() throws Exception {
    Cluster cluster = mockCluster(3, 0);
    CompletableFuture<String> disconnectFuture = new CompletableFuture<>();
    MockTime time = new MockTime();
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster,
        newStrMap(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "1",
            AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, "100000",
            AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0"))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().setDisconnectFuture(disconnectFuture);
        final ListTopicsResult result = env.adminClient().listTopics();
        TestUtils.waitForCondition(() -> {
            time.sleep(1);
            return disconnectFuture.isDone();
        }, 5000, 1, () -> "Timed out waiting for expected disconnect");
        assertFalse(disconnectFuture.isCompletedExceptionally());
        assertFalse(result.future.isDone());
        TestUtils.waitForCondition(env.kafkaClient()::hasInFlightRequests,
            "Timed out waiting for retry");
        env.kafkaClient().respond(prepareMetadataResponse(cluster, Errors.NONE));
        assertEquals(0, result.listings().get().size());
    }
}
/**
 * Routes a listReservation call to the home sub-cluster owning the reservation.
 * Validates queue and reservationId, audits success/failure, and records
 * per-call router metrics.
 */
@Override
public Response listReservation(String queue, String reservationId,
    long startTime, long endTime, boolean includeResourceAllocations,
    HttpServletRequest hsr) throws Exception {
    if (queue == null || queue.isEmpty()) {
        routerMetrics.incrListReservationFailedRetrieved();
        RouterAuditLogger.logFailure(getUser().getShortUserName(), LIST_RESERVATIONS,
            UNKNOWN, TARGET_WEB_SERVICE, "Parameter error, the queue is empty or null.");
        throw new IllegalArgumentException("Parameter error, the queue is empty or null.");
    }
    if (reservationId == null || reservationId.isEmpty()) {
        routerMetrics.incrListReservationFailedRetrieved();
        RouterAuditLogger.logFailure(getUser().getShortUserName(), LIST_RESERVATIONS,
            UNKNOWN, TARGET_WEB_SERVICE, "Parameter error, the reservationId is empty or null.");
        throw new IllegalArgumentException("Parameter error, the reservationId is empty or null.");
    }
    // Check that the reservationId format is accurate
    try {
        RouterServerUtil.validateReservationId(reservationId);
    } catch (IllegalArgumentException e) {
        routerMetrics.incrListReservationFailedRetrieved();
        RouterAuditLogger.logFailure(getUser().getShortUserName(), LIST_RESERVATIONS,
            UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
        throw e;
    }
    try {
        long startTime1 = clock.getTime();
        SubClusterInfo subClusterInfo = getHomeSubClusterInfoByReservationId(reservationId);
        DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorForSubCluster(
            subClusterInfo.getSubClusterId(), subClusterInfo.getRMWebServiceAddress());
        HttpServletRequest hsrCopy = clone(hsr);
        Response response = interceptor.listReservation(queue, reservationId, startTime,
            endTime, includeResourceAllocations, hsrCopy);
        if (response != null) {
            long stopTime = clock.getTime();
            RouterAuditLogger.logSuccess(getUser().getShortUserName(), LIST_RESERVATIONS,
                TARGET_WEB_SERVICE);
            routerMetrics.succeededListReservationRetrieved(stopTime - startTime1);
            return response;
        }
    } catch (YarnException e) {
        routerMetrics.incrListReservationFailedRetrieved();
        RouterAuditLogger.logFailure(getUser().getShortUserName(), LIST_RESERVATIONS,
            UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
        RouterServerUtil.logAndThrowRunTimeException("listReservation error.", e);
    }
    routerMetrics.incrListReservationFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), LIST_RESERVATIONS,
        UNKNOWN, TARGET_WEB_SERVICE, "listReservation Failed.");
    throw new YarnException("listReservation Failed.");
}
/** A submitted reservation must be returned by listReservation with its full definition. */
@Test
public void testListReservation() throws Exception {
    // submitReservation
    ReservationId reservationId = ReservationId.newInstance(Time.now(), 1);
    submitReservation(reservationId);

    // Call the listReservation method
    String applyReservationId = reservationId.toString();
    Response listReservationResponse = interceptor.listReservation(
        QUEUE_DEDICATED_FULL, applyReservationId, -1, -1, false, null);
    Assert.assertNotNull(listReservationResponse);
    Assert.assertNotNull(listReservationResponse.getStatus());
    Status status = Status.fromStatusCode(listReservationResponse.getStatus());
    Assert.assertEquals(Status.OK, status);
    Object entity = listReservationResponse.getEntity();
    Assert.assertNotNull(entity);
    Assert.assertNotNull(entity instanceof ReservationListInfo);
    Assert.assertTrue(entity instanceof ReservationListInfo);
    ReservationListInfo listInfo = (ReservationListInfo) entity;
    Assert.assertNotNull(listInfo);
    List<ReservationInfo> reservationInfoList = listInfo.getReservations();
    Assert.assertNotNull(reservationInfoList);
    Assert.assertEquals(1, reservationInfoList.size());
    ReservationInfo reservationInfo = reservationInfoList.get(0);
    Assert.assertNotNull(reservationInfo);
    Assert.assertEquals(applyReservationId, reservationInfo.getReservationId());
    ReservationDefinitionInfo definitionInfo = reservationInfo.getReservationDefinition();
    Assert.assertNotNull(definitionInfo);
    ReservationRequestsInfo reservationRequestsInfo = definitionInfo.getReservationRequests();
    Assert.assertNotNull(reservationRequestsInfo);
    ArrayList<ReservationRequestInfo> reservationRequestInfoList =
        reservationRequestsInfo.getReservationRequest();
    Assert.assertNotNull(reservationRequestInfoList);
    Assert.assertEquals(1, reservationRequestInfoList.size());
    ReservationRequestInfo reservationRequestInfo = reservationRequestInfoList.get(0);
    Assert.assertNotNull(reservationRequestInfo);
    Assert.assertEquals(4, reservationRequestInfo.getNumContainers());
    ResourceInfo resourceInfo = reservationRequestInfo.getCapability();
    Assert.assertNotNull(resourceInfo);
    int vCore = resourceInfo.getvCores();
    long memory = resourceInfo.getMemorySize();
    Assert.assertEquals(1, vCore);
    Assert.assertEquals(1024, memory);
}
/** Bridges the Hadoop-Configuration-based init to the ParquetConfiguration-based overload. */
@Override
public org.apache.parquet.hadoop.api.ReadSupport.ReadContext init(
    Configuration configuration, Map<String, String> keyValueMetaData, MessageType fileSchema) {
    return init(new HadoopParquetConfiguration(configuration), keyValueMetaData, fileSchema);
}
/** Without a requested schema, init must fall back to the file schema. */
@Test
public void testInitWithoutSpecifyingRequestSchema() throws Exception {
    GroupReadSupport s = new GroupReadSupport();
    Configuration configuration = new Configuration();
    Map<String, String> keyValueMetaData = new HashMap<String, String>();
    MessageType fileSchema = MessageTypeParser.parseMessageType(fullSchemaStr);
    ReadSupport.ReadContext context = s.init(configuration, keyValueMetaData, fileSchema);
    assertEquals(context.getRequestedSchema(), fileSchema);
}