focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/** Builds an anti-entropy digest capturing this value's timestamp and its tombstone flag. */
public Digest digest() {
    return new Digest(timestamp, isTombstone());
}
// Verifies that MapValue.digest() reflects the value's timestamp and a live (non-tombstone) flag.
@Test
public void testDigest() {
    Timestamp ts = new LogicalTimestamp(10);
    MapValue<String> mv = new MapValue<>("foo", ts);
    Digest actual = mv.digest();
    Digest expected = new MapValue.Digest(ts, false);
    assertEquals(actual, expected);
}
/**
 * Picks the first of this member's supported protocols that also appears in the
 * candidate set, honouring the member's declared preference order.
 *
 * @param candidates protocol names proposed for the group
 * @return the name of the first supported candidate protocol
 * @throws IllegalArgumentException if none of the member's protocols is a candidate
 */
public String vote(Set<String> candidates) {
    for (JoinGroupRequestProtocol supported : supportedProtocols) {
        String protocolName = supported.name();
        if (candidates.contains(protocolName)) {
            return protocolName;
        }
    }
    throw new IllegalArgumentException("Member does not support any of the candidate protocols");
}
// Verifies vote() throws IllegalArgumentException when none of the member's protocols matches the candidates.
@Test public void testVoteRaisesOnNoSupportedProtocols() { JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestProtocolCollection(); protocols.add(new JoinGroupRequestProtocol() .setName("range") .setMetadata(new byte[]{0})); protocols.add(new JoinGroupRequestProtocol() .setName("roundrobin") .setMetadata(new byte[]{1})); ClassicGroupMember member = new ClassicGroupMember( "member", Optional.of("group-instance-id"), "client-id", "client-host", 10, 4500, "generic", protocols, EMPTY_ASSIGNMENT ); assertThrows(IllegalArgumentException.class, () -> member.vote(Collections.singleton("unknown")) ); }
/**
 * Decides whether an operation's asynchronous backups must be forced to run
 * synchronously this time, acting as a back-pressure valve: the countdown is
 * decremented per qualifying operation and the thread that exhausts it (and wins
 * the CAS reset) is the one forced to sync.
 */
boolean isSyncForced(BackupAwareOperation backupAwareOp) {
    // Back pressure disabled entirely.
    if (disabled) {
        return false;
    }
    // if there are no asynchronous backups, there is nothing to regulate.
    if (backupAwareOp.getAsyncBackupCount() == 0) {
        return false;
    }
    // Urgent/system operations are never throttled.
    if (backupAwareOp instanceof UrgentSystemOperation) {
        return false;
    }
    for (; ; ) {
        int current = syncCountdown.decrementAndGet();
        if (current > 0) {
            return false;
        }
        // Countdown exhausted: reset it via CAS. The winner is forced to sync;
        // losers loop and decrement the fresh countdown again.
        if (syncCountdown.compareAndSet(current, randomSyncDelay())) {
            return true;
        }
    }
}
// An urgent operation must never be forced to sync, even when back pressure is enabled.
@Test
public void isSyncForced_whenUrgentOperation_thenFalse() {
    BackpressureRegulator regulator = newEnabledBackPressureService();
    UrgentOperation operation = new UrgentOperation();
    operation.setPartitionId(1);
    boolean result = regulator.isSyncForced(operation);
    assertFalse(result);
}
// Optionally pre-acks the incoming message, publishes it asynchronously, and routes the
// outcome: success is forwarded as-is, failure is forwarded with the error recorded on
// the message by processException.
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    var tbMsg = ackIfNeeded(ctx, msg);
    withCallback(publishMessageAsync(ctx, tbMsg),
            m -> tellSuccess(ctx, m),
            t -> tellFailure(ctx, processException(tbMsg, t), t));
}
// When forceAck is false and the async publish fails, the node must call tellFailure with
// the original message (error recorded in metadata) and never enqueue a success route.
@Test void givenForceAckIsFalseAndErrorOccursDuringProcessingRequest_whenOnMsg_thenTellFailure() { ReflectionTestUtils.setField(node, "forceAck", false); ListeningExecutor listeningExecutor = mock(ListeningExecutor.class); given(ctxMock.getExternalCallExecutor()).willReturn(listeningExecutor); String errorMsg = "Something went wrong"; ListenableFuture<TbMsg> failedFuture = Futures.immediateFailedFuture(new RuntimeException(errorMsg)); given(listeningExecutor.executeAsync(any(Callable.class))).willReturn(failedFuture); TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, DEVICE_ID, TbMsgMetaData.EMPTY, TbMsg.EMPTY_JSON_OBJECT); node.onMsg(ctxMock, msg); then(ctxMock).should(never()).enqueueForTellNext(any(), any(String.class)); ArgumentCaptor<TbMsg> actualMsgCaptor = ArgumentCaptor.forClass(TbMsg.class); ArgumentCaptor<Throwable> throwableCaptor = ArgumentCaptor.forClass(Throwable.class); then(ctxMock).should().tellFailure(actualMsgCaptor.capture(), throwableCaptor.capture()); TbMsg actualMsg = actualMsgCaptor.getValue(); assertThat(actualMsg) .usingRecursiveComparison() .ignoringFields("metaData", "ctx") .isEqualTo(msg); assertThat(actualMsg.getMetaData().getData()) .hasFieldOrPropertyWithValue("error", RuntimeException.class + ": " + errorMsg); assertThat(throwableCaptor.getValue()).isInstanceOf(RuntimeException.class).hasMessage(errorMsg); verifyNoMoreInteractions(ctxMock, sqsClientMock); }
/**
 * Generates the current mOTP code for this entry.
 *
 * @return the generated one-time password as a string
 * @throws IllegalStateException if no PIN has been set yet
 * @throws RuntimeException      if the configured hash algorithm is unavailable
 */
@Override
public String getOtp() {
    if (_pin == null) {
        throw new IllegalStateException("PIN must be set before generating an OTP");
    }
    try {
        MOTP otp = MOTP.generateOTP(getSecret(), getAlgorithm(false), getDigits(), getPeriod(), getPin());
        return otp.toString();
    } catch (NoSuchAlgorithmException e) {
        throw new RuntimeException(e);
    }
}
// Checks getOtp() against the mOTP test vectors (secret + PIN at a given time -> expected code).
@Test
public void testMotpInfoOtp() throws OtpInfoException, EncodingException {
    for (MOTPTest.Vector vector : MOTPTest.VECTORS) {
        MotpInfo info = new MotpInfo(Hex.decode(vector.Secret), vector.Pin);
        assertEquals(vector.OTP, info.getOtp(vector.Time));
    }
}
// Reconciles a Backup custom resource through its lifecycle: on deletion, removes the
// house-keeper finalizer and runs migration cleanup; otherwise ensures the finalizer is
// present, runs the backup when PENDING (RUNNING -> SUCCEEDED/FAILED with timestamps),
// downgrades a still-RUNNING status to FAILED (only reachable after an interrupted status
// update), and for terminal phases re-queues until spec.expiresAt or deletes the resource.
// NOTE(review): the SystemError failure message below contains a raw line break inside the
// string literal as captured in this dataset row — confirm against the original source file.
@Override public Result reconcile(Request request) { return client.fetch(Backup.class, request.name()) .map(backup -> { var metadata = backup.getMetadata(); var status = backup.getStatus(); var spec = backup.getSpec(); if (isDeleted(backup)) { if (removeFinalizers(metadata, Set.of(HOUSE_KEEPER_FINALIZER))) { migrationService.cleanup(backup).block(); client.update(backup); } return doNotRetry(); } if (addFinalizers(metadata, Set.of(HOUSE_KEEPER_FINALIZER))) { client.update(backup); } if (Phase.PENDING.equals(status.getPhase())) { // Do backup try { status.setPhase(Phase.RUNNING); status.setStartTimestamp(Instant.now(clock)); updateStatus(request.name(), status); // Long period execution when backing up migrationService.backup(backup).block(); status.setPhase(Phase.SUCCEEDED); status.setCompletionTimestamp(Instant.now(clock)); updateStatus(request.name(), status); } catch (Throwable t) { var unwrapped = Exceptions.unwrap(t); log.error("Failed to backup", unwrapped); // Only happen when shutting down status.setPhase(Phase.FAILED); if (unwrapped instanceof InterruptedException) { status.setFailureReason("Interrupted"); status.setFailureMessage("The backup process was interrupted."); } else { status.setFailureReason("SystemError"); status.setFailureMessage( "Something went wrong! 
Error message: " + unwrapped.getMessage()); } updateStatus(request.name(), status); } } // Only happen when failing to update status when interrupted if (Phase.RUNNING.equals(status.getPhase())) { status.setPhase(Phase.FAILED); status.setFailureReason("UnexpectedExit"); status.setFailureMessage("The backup process may exit abnormally."); updateStatus(request.name(), status); } // Check the expires at and requeue if necessary if (isTerminal(status.getPhase())) { var expiresAt = spec.getExpiresAt(); if (expiresAt != null) { var now = Instant.now(clock); if (now.isBefore(expiresAt)) { return new Result(true, Duration.between(now, expiresAt)); } client.delete(backup); } } return doNotRetry(); }).orElseGet(Result::doNotRetry); }
// A deleted backup must trigger migration cleanup, finalizer removal, and a no-requeue result.
@Test void whenBackupDeleted() { var name = "fake-deleted-backup"; var backup = createPureBackup(name); backup.getMetadata().setDeletionTimestamp(Instant.now()); addFinalizers(backup.getMetadata(), Set.of(Constant.HOUSE_KEEPER_FINALIZER)); when(client.fetch(Backup.class, name)).thenReturn(Optional.of(backup)); when(migrationService.cleanup(backup)).thenReturn(Mono.empty()); doNothing().when(client).update(backup); var result = reconciler.reconcile(new Reconciler.Request(name)); assertNotNull(result); assertFalse(result.reEnqueue()); assertFalse(backup.getMetadata().getFinalizers().contains(Constant.HOUSE_KEEPER_FINALIZER)); verify(client).fetch(Backup.class, name); verify(migrationService).cleanup(backup); verify(client).update(backup); }
/**
 * Returns the metric definitions held by this instance.
 * NOTE(review): exposes the internal set directly, so callers could mutate it —
 * confirm whether an unmodifiable view is expected here.
 */
public Set<Metric> getMetrics() {
    return metrics;
}
// A plugin returning null from getMetrics() must not break metric aggregation; only the
// well-behaved plugin's metrics should remain.
@Test
public void should_not_crash_on_null_metrics_from_faulty_plugins() {
    Metrics faultyMetrics = () -> null;
    Metrics okMetrics = new FakeMetrics();
    List<Metric> metrics = metrics(okMetrics, faultyMetrics);
    metrics.removeAll(SENSOR_METRICS_WITHOUT_METRIC_PLUGIN);
    assertThat(metrics).isEqualTo(okMetrics.getMetrics());
}
/**
 * Routes an application state update (e.g. kill) to the sub-cluster that owns the
 * application. Returns 400 BAD_REQUEST when the application id is malformed or its
 * home sub-cluster cannot be resolved.
 * NOTE(review): the success path records succeededAppsRetrieved — looks like it should
 * be a kill/update-specific metric; confirm against RouterMetrics.
 */
@Override
public Response updateAppState(AppState targetState, HttpServletRequest hsr, String appId)
    throws AuthorizationException, YarnException, InterruptedException, IOException {
  long startTime = clock.getTime();
  ApplicationId applicationId;
  try {
    applicationId = ApplicationId.fromString(appId);
  } catch (IllegalArgumentException e) {
    routerMetrics.incrAppsFailedKilled();
    return Response
        .status(Status.BAD_REQUEST)
        .entity(e.getLocalizedMessage())
        .build();
  }
  SubClusterInfo subClusterInfo;
  SubClusterId subClusterId;
  try {
    // The federation facade knows which sub-cluster the application was submitted to.
    subClusterId = federationFacade.getApplicationHomeSubCluster(applicationId);
    subClusterInfo = federationFacade.getSubCluster(subClusterId);
  } catch (YarnException e) {
    routerMetrics.incrAppsFailedKilled();
    return Response
        .status(Status.BAD_REQUEST)
        .entity(e.getLocalizedMessage())
        .build();
  }
  // Delegate the actual state change to the owning sub-cluster's RM web service.
  Response response = getOrCreateInterceptorForSubCluster(subClusterId,
      subClusterInfo.getRMWebServiceAddress()).updateAppState(targetState, hsr, appId);
  long stopTime = clock.getTime();
  routerMetrics.succeededAppsRetrieved(stopTime - startTime);
  return response;
}
// Killing an application unknown to the federation facade must yield 400 BAD_REQUEST.
@Test
public void testForceKillApplicationNotExists() throws YarnException, IOException, InterruptedException {
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    AppState appState = new AppState("KILLED");
    Response response = interceptor.updateAppState(appState, null, appId.toString());
    Assert.assertEquals(BAD_REQUEST, response.getStatus());
}
/**
 * Checks that {@code object} is an instance of {@code type} and returns it as that type.
 *
 * @param type         the expected type (must not be null)
 * @param object       the object to check
 * @param errorMessage message for the exception when the check fails
 * @return the object, typed as {@code E}
 * @throws IllegalArgumentException if the object is not an instance of the type
 *                                  (including a null object)
 */
public static <E> E checkInstanceOf(Class<E> type, Object object, String errorMessage) {
    isNotNull(type, "type");
    if (!type.isInstance(object)) {
        throw new IllegalArgumentException(errorMessage);
    }
    // Class.cast avoids the unchecked (E) cast; it cannot fail here because
    // isInstance was just verified above.
    return type.cast(object);
}
// A null type argument must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void test_checkInstanceOf_withNullType() {
    checkInstanceOf(null, Integer.MAX_VALUE, "argumentName");
}
// Double-dispatch helper: sends the expression back into this visitor.
private <T> T accept(Expression<T> expr) {
    return expr.accept(this);
}
// Loads a combined condition from JSON and expects it to evaluate to true via the visitor.
@Test
public void testCombined() throws Exception {
    assertThat(loadCondition("condition-combined.json").accept(new BooleanNumberConditionsVisitor()))
        .isTrue();
}
// Settings key used to toggle the dependency-merging analyzer on or off.
@Override
protected String getAnalyzerEnabledSettingKey() {
    return Settings.KEYS.ANALYZER_DEPENDENCY_MERGING_ENABLED;
}
// The analyzer must report the dependency-merging enabled-setting key.
@Test
public void testGetAnalyzerEnabledSettingKey() {
    DependencyMergingAnalyzer instance = new DependencyMergingAnalyzer();
    String expResult = Settings.KEYS.ANALYZER_DEPENDENCY_MERGING_ENABLED;
    String result = instance.getAnalyzerEnabledSettingKey();
    assertEquals(expResult, result);
}
/**
 * Decodes an application/x-www-form-urlencoded string ('+' becomes a space,
 * %XX escapes become bytes), interpreted as UTF-8.
 *
 * @param toDecode the encoded string
 * @return the decoded string
 */
static String decodeString(String toDecode) {
    final Charset charset = StandardCharsets.UTF_8;
    return URLDecoder.decode(toDecode, charset);
}
// '+' characters must decode to spaces (application/x-www-form-urlencoded).
@Test
void decodeString() {
    String toDecode = "To+decode+string";
    String retrieved = ModelLocalUriIdSerializer.decodeString(toDecode);
    String expected = "To decode string";
    assertThat(retrieved).isEqualTo(expected);
}
// Splits one input line into field strings. For "CSV" files it walks the line character
// by character, honouring enclosures (quotes), escaped enclosures/escapes/separators, and
// doubled enclosures; for fixed-width files it slices by field position/length, either by
// characters (classic behavior) or by bytes when content.length == "Bytes" and an encoding
// is set. Any parse failure is wrapped in a KettleException.
// NOTE(review): several comment/string fragments below are broken across dataset rows
// (e.g. "later on! / if", "before. / If the content.length") — confirm against the
// original source file; the code tokens are preserved verbatim here.
public static final String[] convertLineToStrings( LogChannelInterface log, String line, TextFileInputMeta inf, String delimiter, String enclosure, String escapeCharacters ) throws KettleException { String[] strings = new String[inf.inputFields.length]; int fieldnr; String pol; // piece of line try { if ( line == null ) { return null; } if ( inf.content.fileType.equalsIgnoreCase( "CSV" ) ) { // Split string in pieces, only for CSV! fieldnr = 0; int pos = 0; int length = line.length(); boolean dencl = false; int len_encl = ( enclosure == null ? 0 : enclosure.length() ); int len_esc = ( escapeCharacters == null ? 0 : escapeCharacters.length() ); while ( pos < length ) { int from = pos; int next; boolean encl_found; boolean contains_escaped_enclosures = false; boolean contains_escaped_separators = false; boolean contains_escaped_escape = false; // Is the field beginning with an enclosure? // "aa;aa";123;"aaa-aaa";000;... if ( len_encl > 0 && line.substring( from, from + len_encl ).equalsIgnoreCase( enclosure ) ) { if ( log.isRowLevel() ) { log.logRowlevel( BaseMessages.getString( PKG, "TextFileInput.Log.ConvertLineToRowTitle" ), BaseMessages .getString( PKG, "TextFileInput.Log.Encloruse", line.substring( from, from + len_encl ) ) ); } encl_found = true; int p = from + len_encl; boolean is_enclosure = len_encl > 0 && p + len_encl < length && line.substring( p, p + len_encl ).equalsIgnoreCase( enclosure ); boolean is_escape = len_esc > 0 && p + len_esc < length && line.substring( p, p + len_esc ).equalsIgnoreCase( inf.content.escapeCharacter ); boolean enclosure_after = false; // Is it really an enclosure? See if it's not repeated twice or escaped! if ( ( is_enclosure || is_escape ) && p < length - 1 ) { String strnext = line.substring( p + len_encl, p + 2 * len_encl ); if ( strnext.equalsIgnoreCase( enclosure ) ) { p++; enclosure_after = true; dencl = true; // Remember to replace them later on! 
if ( is_escape ) { contains_escaped_enclosures = true; } } else if ( strnext.equals( inf.content.escapeCharacter ) ) { p++; // Remember to replace them later on! if ( is_escape ) { contains_escaped_escape = true; // remember } } } // Look for a closing enclosure! while ( ( !is_enclosure || enclosure_after ) && p < line.length() ) { p++; enclosure_after = false; is_enclosure = len_encl > 0 && p + len_encl < length && line.substring( p, p + len_encl ).equals( enclosure ); is_escape = len_esc > 0 && p + len_esc < length && line.substring( p, p + len_esc ).equals( inf.content.escapeCharacter ); // Is it really an enclosure? See if it's not repeated twice or escaped! if ( ( is_enclosure || is_escape ) && p < length - 1 ) { String strnext = line.substring( p + len_encl, p + 2 * len_encl ); if ( strnext.equals( enclosure ) ) { p++; enclosure_after = true; dencl = true; // Remember to replace them later on! if ( is_escape ) { contains_escaped_enclosures = true; // remember } } else if ( strnext.equals( inf.content.escapeCharacter ) ) { p++; // Remember to replace them later on! if ( is_escape ) { contains_escaped_escape = true; // remember } } } } if ( p >= length ) { next = p; } else { next = p + len_encl; } if ( log.isRowLevel() ) { log.logRowlevel( BaseMessages.getString( PKG, "TextFileInput.Log.ConvertLineToRowTitle" ), BaseMessages .getString( PKG, "TextFileInput.Log.EndOfEnclosure", "" + p ) ); } } else { encl_found = false; boolean found = false; int startpoint = from; // int tries = 1; do { next = line.indexOf( delimiter, startpoint ); // See if this position is preceded by an escape character. 
if ( len_esc > 0 && next > 0 ) { String before = line.substring( next - len_esc, next ); if ( inf.content.escapeCharacter.equals( before ) ) { int previous_escapes = 1; int start = next - len_esc - 1; int end = next - 1; while ( start >= 0 ) { if ( inf.content.escapeCharacter.equals( line.substring( start, end ) ) ) { previous_escapes++; start--; end--; } else { break; } } // If behind the seperator there are a odd number of escaped // The separator is escaped. if ( previous_escapes % 2 != 0 ) { // take the next separator, this one is escaped... startpoint = next + 1; // tries++; contains_escaped_separators = true; } else { found = true; } } else { found = true; } } else { found = true; } } while ( !found && next >= 0 ); } if ( next == -1 ) { next = length; } if ( encl_found && ( ( from + len_encl ) <= ( next - len_encl ) ) ) { pol = line.substring( from + len_encl, next - len_encl ); if ( log.isRowLevel() ) { log.logRowlevel( BaseMessages.getString( PKG, "TextFileInput.Log.ConvertLineToRowTitle" ), BaseMessages .getString( PKG, "TextFileInput.Log.EnclosureFieldFound", "" + pol ) ); } } else { pol = line.substring( from, next ); if ( log.isRowLevel() ) { log.logRowlevel( BaseMessages.getString( PKG, "TextFileInput.Log.ConvertLineToRowTitle" ), BaseMessages .getString( PKG, "TextFileInput.Log.NormalFieldFound", "" + pol ) ); } } if ( dencl && Utils.isEmpty( inf.content.escapeCharacter ) ) { StringBuilder sbpol = new StringBuilder( pol ); int idx = sbpol.indexOf( enclosure + enclosure ); while ( idx >= 0 ) { sbpol.delete( idx, idx + enclosure.length() ); idx = sbpol.indexOf( enclosure + enclosure ); } pol = sbpol.toString(); } if ( !Utils.isEmpty( inf.content.escapeCharacter ) && ( inf.content.escapeCharacter.equals( enclosure ) ) && ( contains_escaped_escape || contains_escaped_enclosures ) ) { // replace the escaped enclosures with enclosures... 
String replace = inf.content.escapeCharacter + enclosure; String replaceWith = enclosure; pol = Const.replace( pol, replace, replaceWith ); } else { if ( contains_escaped_enclosures ) { String replace = inf.content.escapeCharacter + enclosure; String replaceWith = enclosure; pol = Const.replace( pol, replace, replaceWith ); } contains_escaped_escape = !Utils.isEmpty( inf.content.escapeCharacter ) && pol.contains( inf.content.escapeCharacter + inf.content.escapeCharacter ); if ( contains_escaped_escape ) { String replace = inf.content.escapeCharacter + inf.content.escapeCharacter; String replaceWith = inf.content.escapeCharacter; pol = Const.replace( pol, replace, replaceWith ); } } // replace the escaped separators with separators... if ( contains_escaped_separators ) { String replace = inf.content.escapeCharacter + delimiter; String replaceWith = delimiter; pol = Const.replace( pol, replace, replaceWith ); } // Now add pol to the strings found! try { strings[fieldnr] = pol; } catch ( ArrayIndexOutOfBoundsException e ) { // In case we didn't allocate enough space. // This happens when you have less header values specified than there are actual values in the rows. // As this is "the exception" we catch and resize here. // String[] newStrings = new String[strings.length]; for ( int x = 0; x < strings.length; x++ ) { newStrings[x] = strings[x]; } strings = newStrings; } pos = next + delimiter.length(); fieldnr++; } if ( pos == length ) { if ( log.isRowLevel() ) { log.logRowlevel( BaseMessages.getString( PKG, "TextFileInput.Log.ConvertLineToRowTitle" ), BaseMessages .getString( PKG, "TextFileInput.Log.EndOfEmptyLineFound" ) ); } if ( fieldnr < strings.length ) { strings[fieldnr] = Const.EMPTY_STRING; } fieldnr++; } } else { // Fixed file format: Simply get the strings at the required positions... // Note - charBased is the old default behavior. If this is an old transformation, content.length will be null // and should be processed as before. 
If the content.length is equal to "Characters" or there is no specified encoding, // it will still use the old behavior. The *only* way to get the new behavior is if content.length = "Bytes" and // the encoding is specified. boolean charBased = ( inf.content.length == null || inf.content.length.equalsIgnoreCase( "Characters" ) || inf.getEncoding() == null ); // Default to classic behavior for ( int i = 0; i < inf.inputFields.length; i++ ) { BaseFileField field = inf.inputFields[i]; int length; int fPos = field.getPosition(); int fLength = field.getLength(); int fPl = fPos + fLength; if ( charBased ) { length = line.length(); if ( fPl <= length ) { strings[i] = line.substring( fPos, fPl ); } else { if ( fPos < length ) { strings[i] = line.substring( fPos ); } else { strings[i] = ""; } } } else { byte[] b = null; String enc = inf.getEncoding(); b = line.getBytes( enc ); length = b.length; if ( fPl <= length ) { strings[i] = new String( Arrays.copyOfRange( b, fPos, fPl ), enc ); } else { if ( fPos < length ) { strings[i] = new String( Arrays.copyOfRange( b, fPos, length - 1 ), enc ); } else { strings[i] = ""; } } } } } } catch ( Exception e ) { throw new KettleException( BaseMessages.getString( PKG, "TextFileInput.Log.Error.ErrorConvertingLine", e .toString() ), e ); } return strings; }
// Exercises enclosure + backslash-escape handling: escaped escapes ("A\\"), a field that is
// only an escaped escape ("\\"), and a lone escape inside a field ("A\B").
@Test public void convertCSVLinesToStringsWithEnclosure() throws Exception { TextFileInputMeta inputMeta = Mockito.mock( TextFileInputMeta.class ); inputMeta.content = new TextFileInputMeta.Content(); inputMeta.content.fileType = "CSV"; inputMeta.inputFields = new BaseFileField[ 2 ]; inputMeta.content.escapeCharacter = "\\"; inputMeta.content.enclosure = "\""; String line = "\"A\\\\\",\"B\""; // "A\\","B" String[] strings = TextFileInputUtils .convertLineToStrings( Mockito.mock( LogChannelInterface.class ), line, inputMeta, ",", "\"", "\\" ); Assert.assertNotNull( strings ); Assert.assertEquals( "A\\", strings[ 0 ] ); Assert.assertEquals( "B", strings[ 1 ] ); line = "\"\\\\\",\"AB\""; // "\\","AB" strings = TextFileInputUtils .convertLineToStrings( Mockito.mock( LogChannelInterface.class ), line, inputMeta, ",", "\"", "\\" ); Assert.assertNotNull( strings ); Assert.assertEquals( "\\", strings[ 0 ] ); Assert.assertEquals( "AB", strings[ 1 ] ); line = "\"A\\B\",\"C\""; // "A\B","C" strings = TextFileInputUtils .convertLineToStrings( Mockito.mock( LogChannelInterface.class ), line, inputMeta, ",", "\"", "\\" ); Assert.assertNotNull( strings ); Assert.assertEquals( "A\\B", strings[ 0 ] ); Assert.assertEquals( "C", strings[ 1 ] ); }
/**
 * Sanitizes a key format for a new key schema. When format changes are allowed, the
 * format is first adjusted for multiple key columns and for type compatibility; in all
 * cases the wrapping feature is then fixed up for the single-key-column case.
 */
public static KeyFormat sanitizeKeyFormat(
    final KeyFormat keyFormat,
    final List<SqlType> newKeyColumnSqlTypes,
    final boolean allowKeyFormatChangeToSupportNewKeySchema
) {
  return sanitizeKeyFormatWrapping(
      !allowKeyFormatChangeToSupportNewKeySchema
          ? keyFormat
          : sanitizeKeyFormatForTypeCompatibility(
              sanitizeKeyFormatForMultipleColumns(keyFormat, newKeyColumnSqlTypes.size()),
              newKeyColumnSqlTypes),
      // Wrapping only matters when there is exactly one key column.
      newKeyColumnSqlTypes.size() == 1
  );
}
// A single-key DELIMITED format with a non-primitive (struct) key must be converted to
// JSON with the UNWRAP_SINGLES feature.
@Test
public void shouldConvertDelimitedFormatForSingleKeyWithNonPrimitiveType() {
  // Given:
  final KeyFormat format = KeyFormat.nonWindowed(
      FormatInfo.of(DelimitedFormat.NAME), SerdeFeatures.of());
  // When:
  final KeyFormat sanitized = SerdeFeaturesFactory.sanitizeKeyFormat(
      format, ImmutableList.of(SqlTypes.struct().build()), true);
  // Then:
  assertThat(sanitized.getFormatInfo(), equalTo(FormatInfo.of(JsonFormat.NAME)));
  assertThat(sanitized.getFeatures(), equalTo(SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES)));
}
/**
 * Handles a PostgreSQL Describe message: 'S' describes a prepared statement,
 * 'P' describes a portal; any other type is rejected.
 *
 * @return the packets describing the statement or portal
 * @throws SQLException if describing the prepared statement fails
 */
@Override
public Collection<DatabasePacket> execute() throws SQLException {
    char describeType = packet.getType();
    if ('S' == describeType) {
        return describePreparedStatement();
    }
    if ('P' == describeType) {
        return Collections.singleton(portalContext.get(packet.getName()).describe());
    }
    throw new UnsupportedSQLOperationException("Unsupported describe type: " + describeType);
}
// Describing a prepared INSERT whose column names differ in case from the table metadata
// must still yield a correct parameter description (5 parameters) plus a no-data packet.
@Test void assertDescribePreparedStatementInsertWithCaseInsensitiveColumns() throws SQLException { when(packet.getType()).thenReturn('S'); final String statementId = "S_2"; when(packet.getName()).thenReturn(statementId); String sql = "insert into t_order (iD, k, c, PaD) values (1, ?, ?, ?), (?, 2, ?, '')"; SQLStatement sqlStatement = SQL_PARSER_ENGINE.parse(sql, false); List<PostgreSQLColumnType> parameterTypes = new ArrayList<>(sqlStatement.getParameterCount()); for (int i = 0; i < sqlStatement.getParameterCount(); i++) { parameterTypes.add(PostgreSQLColumnType.UNSPECIFIED); } SQLStatementContext sqlStatementContext = mock(InsertStatementContext.class); when(sqlStatementContext.getSqlStatement()).thenReturn(sqlStatement); ContextManager contextManager = mockContextManager(); when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager); List<Integer> parameterIndexes = IntStream.range(0, sqlStatement.getParameterCount()).boxed().collect(Collectors.toList()); connectionSession.getServerPreparedStatementRegistry().addPreparedStatement(statementId, new PostgreSQLServerPreparedStatement(sql, sqlStatementContext, new HintValueContext(), parameterTypes, parameterIndexes)); Collection<DatabasePacket> actualPackets = executor.execute(); assertThat(actualPackets.size(), is(2)); Iterator<DatabasePacket> actualPacketsIterator = actualPackets.iterator(); PostgreSQLParameterDescriptionPacket actualParameterDescription = (PostgreSQLParameterDescriptionPacket) actualPacketsIterator.next(); PostgreSQLPacketPayload mockPayload = mock(PostgreSQLPacketPayload.class); actualParameterDescription.write(mockPayload); verify(mockPayload).writeInt2(5); verify(mockPayload, times(2)).writeInt4(23); verify(mockPayload, times(3)).writeInt4(18); assertThat(actualPacketsIterator.next(), is(PostgreSQLNoDataPacket.getInstance())); }
// Fetches the raw value for the given key via the Redis GET command (byte-array codec).
@Override
public byte[] get(byte[] key) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
}
// Adds three geo members and checks radius/search queries around the first point only
// return the member inside the 80km circle.
@Test public void testGeo() { RedisTemplate<String, String> redisTemplate = new RedisTemplate<>(); redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson)); redisTemplate.afterPropertiesSet(); String key = "test_geo_key"; Point point = new Point(116.401001, 40.119499); redisTemplate.opsForGeo().add(key, point, "a"); point = new Point(111.545998, 36.133499); redisTemplate.opsForGeo().add(key, point, "b"); point = new Point(111.483002, 36.030998); redisTemplate.opsForGeo().add(key, point, "c"); Circle within = new Circle(116.401001, 40.119499, 80000); RedisGeoCommands.GeoRadiusCommandArgs args = RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs().includeCoordinates(); GeoResults<RedisGeoCommands.GeoLocation<String>> res = redisTemplate.opsForGeo().radius(key, within, args); assertThat(res.getContent().get(0).getContent().getName()).isEqualTo("a"); GeoResults<RedisGeoCommands.GeoLocation<String>> res2 = redisTemplate.opsForGeo().search(key, within); assertThat(res2.getContent().size()).isEqualTo(1); }
/**
 * Reads the persisted counter from disk, or returns null when storage is disabled
 * or when no file has been written yet.
 */
Counter readFromFile() throws IOException {
    if (storageDisabled) {
        return null;
    }
    final File file = getFile();
    if (file.exists()) {
        return readFromFile(file);
    }
    // or return null if the file does not exist
    return null;
}
// A gzip file containing a serialized object of the wrong type must read back as null,
// and the corrupted file must be deleted afterwards.
@Test
public void testCorruptedFile() throws IOException {
    final File tempFile = corruptedTempFile.newFile("test.ser.gz");
    try (ObjectOutputStream outputStream = new ObjectOutputStream(
            new GZIPOutputStream(new FileOutputStream(tempFile)))) {
        outputStream.writeObject("java melody");
    }
    assertNull("String object can not be cast to the Counter, but the result should be null",
            CounterStorage.readFromFile(tempFile));
    assertFalse("corrupted file should be deleted", tempFile.exists());
}
/**
 * Returns a transform that moves each element's timestamp into the value, producing
 * {@code KV<K, TimestampedValue<V>>}.
 *
 * @deprecated retained for backwards compatibility; prefer the non-deprecated reify
 *     transforms — NOTE(review): confirm the designated replacement API
 */
@Deprecated
public static <K, V>
    PTransform<PCollection<? extends KV<K, V>>, PCollection<KV<K, TimestampedValue<V>>>>
        inValues() {
  return new InValues<>();
}
// Each element's assigned timestamp must be reified into the value as a TimestampedValue.
@Test @Category(ValidatesRunner.class) public void inValuesSucceeds() { PCollection<KV<String, Integer>> timestamped = pipeline .apply(Create.of(KV.of("foo", 0), KV.of("foo", 1), KV.of("bar", 2), KV.of("baz", 3))) .apply(WithTimestamps.of(input -> new Instant(input.getValue().longValue()))); PCollection<KV<String, TimestampedValue<Integer>>> reified = timestamped.apply(ReifyTimestamps.inValues()); PAssert.that(reified) .containsInAnyOrder( KV.of("foo", TimestampedValue.of(0, new Instant(0))), KV.of("foo", TimestampedValue.of(1, new Instant(1))), KV.of("bar", TimestampedValue.of(2, new Instant(2))), KV.of("baz", TimestampedValue.of(3, new Instant(3)))); pipeline.run(); }
/**
 * Returns the substring after the last occurrence of {@code splitter}.
 * If the splitter does not occur, the whole string is returned (matching the
 * original behavior for the not-found case).
 *
 * @param s        the string to cut
 * @param splitter the separator to search for (may be longer than one character)
 * @return the text after the last occurrence of the splitter, or {@code s} if absent
 */
public static String substringAfterLast(String s, String splitter) {
    int idx = s.lastIndexOf(splitter);
    // Bug fix: advance by the full splitter length, not a hard-coded 1 — the old
    // `idx + 1` returned part of the splitter itself for multi-character splitters.
    return idx < 0 ? s : s.substring(idx + splitter.length());
}
// The tail after the last '!' separator of a nested-jar URL must be returned.
@Test
void testSubstringAfterLast() {
    String input = "jar:file:/home/ronald/Projects/Personal/JobRunr/bugs/jobrunr_issue/target/demo-0.0.1-SNAPSHOT.jar!/BOOT-INF/lib/jobrunr-1.0.0-SNAPSHOT.jar!/org/jobrunr/storage/sql/common/migrations";
    assertThat(substringAfterLast(input, "!")).isEqualTo("/org/jobrunr/storage/sql/common/migrations");
}
// Resolves the user's access token for the ALM settings, then builds the DevOps project
// creation context for the given descriptor.
@Override
public DevOpsProjectCreationContext create(AlmSettingDto almSettingDto, DevOpsProjectDescriptor devOpsProjectDescriptor) {
    AccessToken accessToken = getAccessToken(almSettingDto);
    return createDevOpsProject(almSettingDto, devOpsProjectDescriptor, accessToken);
}
// Without a stored personal access token, create() must fail with a clear error message.
@Test void create_whenNoPat_shouldThrow() { AlmSettingDto almSettingDto = mock(); when(userSession.getUuid()).thenReturn("user-uuid"); when(dbClient.almPatDao().selectByUserAndAlmSetting(dbClient.openSession(false), userSession.getUuid(), almSettingDto)).thenReturn(Optional.empty()); assertThatIllegalArgumentException() .isThrownBy(() -> githubDevOpsProjectService.create(almSettingDto, DEV_OPS_PROJECT_DESCRIPTOR)) .withMessage("No personal access token found"); }
// Returns a latency probe for the given service/data-structure/method, lazily creating
// the per-service probe holder on first use.
public LatencyProbe newProbe(String serviceName, String dataStructureName, String methodName) {
    ServiceProbes serviceProbes = getOrPutIfAbsent(
            metricsPerServiceMap, serviceName, metricsPerServiceConstructorFunction);
    return serviceProbes.newProbe(dataStructureName, methodName);
}
// Recording 10/20/30 microseconds must total 60 microseconds in the probe's distribution.
@Test
public void testTotalMicros() {
    LatencyProbeImpl probe = (LatencyProbeImpl) plugin.newProbe("foo", "queue", "somemethod");
    probe.recordValue(MICROSECONDS.toNanos(10));
    probe.recordValue(MICROSECONDS.toNanos(20));
    probe.recordValue(MICROSECONDS.toNanos(30));
    assertEquals(60, probe.distribution.totalMicros());
}
/**
 * Returns whether the given pattern contains the '*' wildcard.
 *
 * @param asterisk the pattern to inspect; may be null
 * @return true if the pattern is non-null and contains at least one '*'
 */
public static boolean isAsterisk(String asterisk) {
    // Null guard avoids an NPE on absent patterns; contains() expresses the
    // intent more directly than indexOf('*') > -1.
    return asterisk != null && asterisk.contains("*");
}
// "*" is recognized as a wildcard pattern; "," is not.
@Test
public void testIsAsterisk() {
    boolean isAsterisk = AclUtils.isAsterisk("*");
    Assert.assertTrue(isAsterisk);
    isAsterisk = AclUtils.isAsterisk(",");
    Assert.assertFalse(isAsterisk);
}
// Registers a datanode with the namesystem: resolves/validates its address, rejects
// excluded or unresolvable nodes, handles re-registration of the same storage, replaces a
// node that previously served a different storage, updates topology/host maps and version
// counts, and finally wires the node into the heartbeat manager. On an invalid topology,
// cached DNS-to-switch mappings for the node are cleared before rethrowing.
// NOTE(review): two comment fragments below are broken across dataset rows ("storage. /
// We do not need", "heartbeat / heartbeatManager") — confirm against the original file;
// the code tokens are preserved verbatim here.
public void registerDatanode(DatanodeRegistration nodeReg) throws DisallowedDatanodeException, UnresolvedTopologyException { InetAddress dnAddress = Server.getRemoteIp(); if (dnAddress != null) { // Mostly called inside an RPC, update ip and peer hostname String hostname = dnAddress.getHostName(); String ip = dnAddress.getHostAddress(); if (checkIpHostnameInRegistration && !isNameResolved(dnAddress)) { // Reject registration of unresolved datanode to prevent performance // impact of repetitive DNS lookups later. final String message = "hostname cannot be resolved (ip=" + ip + ", hostname=" + hostname + ")"; LOG.warn("Unresolved datanode registration: " + message); throw new DisallowedDatanodeException(nodeReg, message); } // update node registration with the ip and hostname from rpc request nodeReg.setIpAddr(ip); nodeReg.setPeerHostName(hostname); } try { nodeReg.setExportedKeys(blockManager.getBlockKeys()); // Checks if the node is not on the hosts list. If it is not, then // it will be disallowed from registering. if (!hostConfigManager.isIncluded(nodeReg)) { throw new DisallowedDatanodeException(nodeReg); } NameNode.stateChangeLog.info("BLOCK* registerDatanode: from " + nodeReg + " storage " + nodeReg.getDatanodeUuid()); DatanodeDescriptor nodeS = getDatanode(nodeReg.getDatanodeUuid()); DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr( nodeReg.getIpAddr(), nodeReg.getXferPort()); if (nodeN != null && nodeN != nodeS) { NameNode.LOG.info("BLOCK* registerDatanode: " + nodeN); // nodeN previously served a different data storage, // which is not served by anybody anymore. removeDatanode(nodeN); // physically remove node from datanodeMap wipeDatanode(nodeN); nodeN = null; } boolean updateHost2DatanodeMap = false; if (nodeS != null) { if (nodeN == nodeS) { // The same datanode has been just restarted to serve the same data // storage. 
We do not need to remove old data blocks, the delta will // be calculated on the next block report from the datanode NameNode.stateChangeLog.debug("BLOCK* registerDatanode: node restarted."); } else { // nodeS is found /* The registering datanode is a replacement node for the existing data storage, which from now on will be served by a new node. If this message repeats, both nodes might have same storageID by (insanely rare) random chance. User needs to restart one of the nodes with its data cleared (or user can just remove the StorageID value in "VERSION" file under the data directory of the datanode, but this is might not work if VERSION file format has changed */ // Check if nodeS's host information is same as nodeReg's, if not, // it needs to update host2DatanodeMap accordringly. updateHost2DatanodeMap = !nodeS.getXferAddr().equals(nodeReg.getXferAddr()); NameNode.stateChangeLog.info("BLOCK* registerDatanode: " + nodeS + " is replaced by " + nodeReg + " with the same storageID " + nodeReg.getDatanodeUuid()); } boolean success = false; try { // update cluster map getNetworkTopology().remove(nodeS); // Update Host2DatanodeMap if (updateHost2DatanodeMap) { getHost2DatanodeMap().remove(nodeS); } if(shouldCountVersion(nodeS)) { decrementVersionCount(nodeS.getSoftwareVersion()); } nodeS.updateRegInfo(nodeReg); nodeS.setSoftwareVersion(nodeReg.getSoftwareVersion()); nodeS.setDisallowed(false); // Node is in the include list // resolve network location if(this.rejectUnresolvedTopologyDN) { nodeS.setNetworkLocation(resolveNetworkLocation(nodeS)); nodeS.setDependentHostNames(getNetworkDependencies(nodeS)); } else { nodeS.setNetworkLocation( resolveNetworkLocationWithFallBackToDefaultLocation(nodeS)); nodeS.setDependentHostNames( getNetworkDependenciesWithDefault(nodeS)); } if (updateHost2DatanodeMap) { getHost2DatanodeMap().add(nodeS); } getNetworkTopology().add(nodeS); resolveUpgradeDomain(nodeS); // also treat the registration message as a heartbeat 
heartbeatManager.register(nodeS); incrementVersionCount(nodeS.getSoftwareVersion()); startAdminOperationIfNecessary(nodeS); success = true; } finally { if (!success) { removeDatanode(nodeS); wipeDatanode(nodeS); countSoftwareVersions(); } } return; } DatanodeDescriptor nodeDescr = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK); boolean success = false; try { // resolve network location if(this.rejectUnresolvedTopologyDN) { nodeDescr.setNetworkLocation(resolveNetworkLocation(nodeDescr)); nodeDescr.setDependentHostNames(getNetworkDependencies(nodeDescr)); } else { nodeDescr.setNetworkLocation( resolveNetworkLocationWithFallBackToDefaultLocation(nodeDescr)); nodeDescr.setDependentHostNames( getNetworkDependenciesWithDefault(nodeDescr)); } nodeDescr.setSoftwareVersion(nodeReg.getSoftwareVersion()); resolveUpgradeDomain(nodeDescr); // register new datanode addDatanode(nodeDescr); blockManager.getBlockReportLeaseManager().register(nodeDescr); // also treat the registration message as a heartbeat // no need to update its timestamp // because its is done when the descriptor is created heartbeatManager.addDatanode(nodeDescr); heartbeatManager.updateDnStat(nodeDescr); incrementVersionCount(nodeReg.getSoftwareVersion()); startAdminOperationIfNecessary(nodeDescr); success = true; } finally { if (!success) { removeDatanode(nodeDescr); wipeDatanode(nodeDescr); countSoftwareVersions(); } } } catch (InvalidTopologyException e) { // If the network location is invalid, clear the cached mappings // so that we have a chance to re-add this DataNode with the // correct network location later. List<String> invalidNodeNames = new ArrayList<>(3); // clear cache for nodes in IP or Hostname invalidNodeNames.add(nodeReg.getIpAddr()); invalidNodeNames.add(nodeReg.getHostName()); invalidNodeNames.add(nodeReg.getPeerHostName()); dnsToSwitchMapping.reloadCachedMappings(invalidNodeNames); throw e; } }
/**
 * Verifies that DatanodeManager rejects a DataNode registration with an
 * UnresolvedTopologyException when topology resolution fails and
 * DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY is enabled.
 */
@Test (timeout = 100000)
public void testRejectUnresolvedDatanodes() throws IOException {
    //Create the DatanodeManager which will be tested
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);

    Configuration conf = new Configuration();

    //Set configuration property for rejecting unresolved topology mapping
    conf.setBoolean(
        DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY, true);

    //set TestDatanodeManager.MyResolver to be used for topology resolving
    conf.setClass(
        CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        TestDatanodeManager.MyResolver.class, DNSToSwitchMapping.class);

    //create DatanodeManager
    DatanodeManager dm = mockDatanodeManager(fsn, conf);

    //storageID to register.
    String storageID = "someStorageID-123";
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);

    try {
      //Register this node
      dm.registerDatanode(dr);
      Assert.fail("Expected an UnresolvedTopologyException");
    } catch (UnresolvedTopologyException ute) {
      // Expected path: registration must fail when topology is unresolved.
      LOG.info("Expected - topology is not resolved and "
          + "registration is rejected.");
    } catch (Exception e) {
      // Any other exception type means the rejection contract is broken.
      Assert.fail("Expected an UnresolvedTopologyException");
    }
}
/**
 * Runs one fetch cycle: prepares fetch requests for the fetchable
 * partitions and submits them, wiring the shared success/failure handlers.
 *
 * @param currentTimeMs current time in milliseconds (unused here; part of
 *        the poller contract)
 * @return the result of submitting the prepared fetch requests
 */
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(
        prepareFetchRequests(),
        this::handleFetchSuccess,
        this::handleFetchFailure
    );
}
/**
 * Verifies that a buffered OFFSET_OUT_OF_RANGE error for a partition is
 * discarded once the consumer seeks that partition to a new position, so no
 * OffsetOutOfRangeException surfaces afterwards.
 */
@Test
public void testSeekBeforeException() {
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED);

    assignFromUser(mkSet(tp0));
    subscriptions.seek(tp0, 1);
    assertEquals(1, sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
    partitions.put(tidp0, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp0.partition())
        .setHighWatermark(100)
        .setRecords(records));
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    networkClientDelegate.poll(time.timer(0));

    assertEquals(2, fetchRecords().get(tp0).size());

    // Add a second partition; the broker will report its requested offset
    // as out of range.
    subscriptions.assignFromUser(mkSet(tp0, tp1));
    subscriptions.seekUnvalidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));

    assertEquals(1, sendFetches());
    partitions = new HashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp1.partition())
        .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
        .setHighWatermark(100));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    networkClientDelegate.poll(time.timer(0));
    assertEquals(1, fetchRecords().get(tp0).size());

    subscriptions.seek(tp1, 10);
    // Should not throw OffsetOutOfRangeException after the seek
    assertEmptyFetch("Should not return records or advance position after seeking to end of topic partitions");
}
/**
 * Decides whether the given topic name is reserved for system use.
 *
 * @param topic topic name to check
 * @return true when the topic is explicitly registered as a system topic or
 *         carries the reserved system-topic prefix
 */
public static boolean isSystemTopic(String topic) {
    if (SYSTEM_TOPIC_SET.contains(topic)) {
        return true;
    }
    return topic.startsWith(SYSTEM_TOPIC_PREFIX);
}
/**
 * Exercises the three system-topic cases: registered names, the reserved
 * prefix, and an ordinary topic.
 */
@Test
public void testIsSystemTopic() {
    // Every registered system topic must be recognized.
    for (String systemTopic : TopicValidator.getSystemTopicSet()) {
        assertThat(TopicValidator.isSystemTopic(systemTopic)).isTrue();
    }

    // Topics carrying the reserved prefix count as system topics as well.
    String prefixedTopic = TopicValidator.SYSTEM_TOPIC_PREFIX + "_test";
    assertThat(TopicValidator.isSystemTopic(prefixedTopic)).isTrue();

    // Ordinary topics are not system topics.
    assertThat(TopicValidator.isSystemTopic("test_not_system_topic")).isFalse();
}
/**
 * Writing float8 array values in binary format is not supported by this
 * protocol value implementation.
 *
 * @throws UnsupportedSQLOperationException always
 */
@Override
public void write(final PostgreSQLPacketPayload payload, final Object value) {
    throw new UnsupportedSQLOperationException("PostgreSQLFloat8ArrayBinaryProtocolValue.write()");
}
/** Writing is unsupported for this binary protocol value, so any attempt must fail. */
@Test
void assertWrite() {
    PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(null, StandardCharsets.UTF_8);
    assertThrows(UnsupportedSQLOperationException.class, () -> newInstance().write(payload, "val"));
}
/**
 * Parses the given character sequence into a WebSocket endpoint.
 * Note that {@link URI#create(String)} throws an IllegalArgumentException
 * (wrapping any URISyntaxException) on malformed input.
 *
 * @param uriCharSequence the URI to parse
 * @return the endpoint for the given URI
 * @throws URISyntaxException declared for API compatibility
 */
public static WebSocketRemoteConnectionEndpoint from(CharSequence uriCharSequence) throws URISyntaxException {
    return from(URI.create(uriCharSequence.toString()));
}
/** An endpoint with an unknown scheme ("wst") must be rejected. */
@Test
public void faultyEndpointTest() {
    final String faultyProtocolString = "wst://fooDomain.org:7070/ws/";
    assertThrows(IllegalArgumentException.class,
            () -> WebSocketRemoteConnectionEndpoint.from(faultyProtocolString));
}
/**
 * Hash is derived from the same fields that identify the task
 * (task id and topic partitions), keeping it consistent with equals().
 */
@Override
public int hashCode() {
    return Objects.hash(taskId, topicPartitions);
}
/**
 * Task metadata that differs only in (absent) idling-time information must
 * still be equal and produce the same hash code.
 */
@Test
public void shouldBeEqualsIfOnlyDifferInIdlingTime() {
    final TaskMetadataImpl copyWithoutIdlingTime = new TaskMetadataImpl(
        TASK_ID,
        TOPIC_PARTITIONS,
        COMMITTED_OFFSETS,
        END_OFFSETS,
        Optional.empty());
    assertThat(taskMetadata, equalTo(copyWithoutIdlingTime));
    assertThat(taskMetadata.hashCode(), equalTo(copyWithoutIdlingTime.hashCode()));
}
/**
 * Two connections target the same database instance when both host and port
 * match.
 *
 * @param connectionProps the other connection's properties
 * @return true if host and port are identical
 */
@Override
public boolean isInSameDatabaseInstance(final ConnectionProperties connectionProps) {
    if (!hostname.equals(connectionProps.getHostname())) {
        return false;
    }
    return port == connectionProps.getPort();
}
/** Identical ports but different hosts must not count as the same instance. */
@Test
void assertIsNotInSameDatabaseInstanceWithDifferentHostname() {
    StandardConnectionProperties actual = new StandardConnectionProperties("127.0.0.1", 9999, "foo", "foo");
    StandardConnectionProperties other = new StandardConnectionProperties("127.0.0.2", 9999, "foo", "foo");
    assertFalse(actual.isInSameDatabaseInstance(other));
}
/**
 * Parses a partition filter expression (e.g. {@code "p = 'x'"}) into an
 * {@link ExpressionTree}.
 *
 * @param filter the partition filter string
 * @return the parsed expression tree
 * @throws MetaException if the filter cannot be parsed
 */
public static ExpressionTree parseFilterTree(String filter) throws MetaException {
    return PartFilterParser.parseFilter(filter);
}
/**
 * An unquoted, untyped timestamp literal with an invalid hour (99) must be
 * rejected with a parse error.
 */
@Test
public void testParseFilterWithInvalidTimeStampWithoutTypeNorQuoted() {
    final String filter = "(j = 2023-06-02 99:35:00)";
    MetaException exception =
        assertThrows(MetaException.class, () -> PartFilterExprUtil.parseFilterTree(filter));
    assertTrue(exception.getMessage().contains("Error parsing partition filter"));
}
/**
 * Converts the given internet-address value to its numeric representation:
 * the raw address bytes (IPv4 or IPv6, network byte order) interpreted as a
 * single unsigned big-endian integer.
 *
 * @param object the value to convert
 * @return the numeric form of the address, or null when the value is null
 * @throws KettleValueException if the value cannot be converted to an address
 */
@Override
public BigDecimal getBigNumber( Object object ) throws KettleValueException {
    InetAddress address = getInternetAddress( object );
    if ( null == address ) {
      return null;
    }
    // BigInteger(1, bytes) builds exactly the unsigned big-endian value the
    // previous manual shift-and-add loop computed.
    return new BigDecimal( new BigInteger( 1, address.getAddress() ) );
}
/** A null input must map to a null big-number rather than throwing. */
@Test
public void testGetBigNumber_NullParameter() throws UnknownHostException, KettleValueException {
    assertNull( new ValueMetaInternetAddress().getBigNumber( null ) );
}
/**
 * NOTE(review): this implementation ignores both operands and always returns
 * 0 — it appears to be a stub (and its unit test pins the 0 result). Confirm
 * whether a real remainder computation is intended here.
 *
 * @param n1 dividend (currently unused)
 * @param n2 divisor (currently unused)
 * @return always 0
 */
@Override
public int residue(int n1, int n2) throws Exception {
    return 0;
}
/** The current residue implementation is a stub that always yields 0. */
@Test
public void testResidue() throws Exception {
    Controlador controlador = new Controlador();
    assertEquals(0, controlador.residue(7, 3));
}
/**
 * Builds an example IAM policy that denies every S3 action on the bucket
 * (and its objects) to all principals except the one user excluded via the
 * ArnNotEquals condition, and renders it as pretty-printed JSON.
 *
 * @return the policy as a JSON string
 */
public String specifyPrincipalsExample() {
    IamPolicy policy = IamPolicy.builder()
            .addStatement(statement -> statement
                    .effect(IamEffect.DENY)
                    .addAction("s3:*")
                    .addPrincipal(IamPrincipal.ALL)
                    .addResource("arn:aws:s3:::BUCKETNAME/*")
                    .addResource("arn:aws:s3:::BUCKETNAME")
                    .addCondition(condition -> condition
                            .operator(IamConditionOperator.ARN_NOT_EQUALS)
                            .key("aws:PrincipalArn")
                            .value("arn:aws:iam::444455556666:user/user-name")))
            .build();
    IamPolicyWriter prettyWriter = IamPolicyWriter.builder()
            .prettyPrint(true)
            .build();
    return policy.toJson(prettyWriter);
}
/**
 * Smoke-tests the principals example: generates the policy JSON and runs it
 * through policy analysis as a resource policy.
 */
@Test
@Tag("IntegrationTest")
void specifyPrincipalsExample() {
    String policyJson = examples.specifyPrincipalsExample();
    logger.info(policyJson);
    analyze(policyJson, PolicyType.RESOURCE_POLICY);
}
/**
 * Attempts to authenticate the request from credentials carried in its
 * header.
 *
 * @param request the incoming HTTP request
 * @return the authenticated user, or empty when no credentials are present
 *         or authentication produces no user
 */
public Optional<UserDto> authenticate(HttpRequest request) {
    // Optional.map already wraps a null mapper result into an empty Optional,
    // so the former flatMap + Optional.ofNullable combination was redundant.
    return extractCredentialsFromHeader(request)
        .map(credentials -> authenticate(credentials, request));
}
/**
 * Without an Authorization header, neither the credential authenticator nor
 * the authentication-event listener may be touched.
 */
@Test
public void does_not_authenticate_when_no_authorization_header() {
    underTest.authenticate(request);

    verifyNoInteractions(credentialsAuthentication, authenticationEvent);
}
/**
 * Creates a new, empty {@link Builder} for assembling an instance step by
 * step.
 *
 * @return a fresh builder
 */
public static Builder builder() {
    return new Builder();
}
/**
 * Appending the same data file multiple times (directly and via a copy) in a
 * single append must be de-duplicated: the snapshot summary reports exactly
 * one added file and one total file.
 */
@TestTemplate
public void mergeAppendWithDuplicates() {
    assertThat(listManifestFiles()).isEmpty();

    table
        .newAppend()
        .appendFile(FILE_A)
        .appendFile(DataFiles.builder(SPEC).copy(FILE_A).build())
        .appendFile(FILE_A)
        .commit();

    // All three appends collapse into a single added file in the summary.
    assertThat(table.currentSnapshot().summary())
        .hasSize(11)
        .containsEntry(SnapshotSummary.ADDED_FILES_PROP, "1")
        .containsEntry(SnapshotSummary.ADDED_FILE_SIZE_PROP, "10")
        .containsEntry(SnapshotSummary.ADDED_RECORDS_PROP, "1")
        .containsEntry(SnapshotSummary.CHANGED_PARTITION_COUNT_PROP, "1")
        .containsEntry(SnapshotSummary.TOTAL_DATA_FILES_PROP, "1")
        .containsEntry(SnapshotSummary.TOTAL_DELETE_FILES_PROP, "0")
        .containsEntry(SnapshotSummary.TOTAL_EQ_DELETES_PROP, "0")
        .containsEntry(SnapshotSummary.TOTAL_POS_DELETES_PROP, "0")
        .containsEntry(SnapshotSummary.TOTAL_FILE_SIZE_PROP, "10")
        .containsEntry(SnapshotSummary.TOTAL_RECORDS_PROP, "1");
}
/**
 * Executes the request against the currently-bound eureka client, retrying
 * against other candidate endpoints on connection errors or on responses the
 * status evaluator rejects. Failing endpoints are added to the quarantine
 * set; a successful client is remembered (sticky) for subsequent requests.
 *
 * @throws TransportException when no server list is known, all candidates
 *         are exhausted, or the retry limit is reached
 */
@Override
protected <R> EurekaHttpResponse<R> execute(RequestExecutor<R> requestExecutor) {
    List<EurekaEndpoint> candidateHosts = null;
    int endpointIdx = 0;
    for (int retry = 0; retry < numberOfRetries; retry++) {
        EurekaHttpClient currentHttpClient = delegate.get();
        EurekaEndpoint currentEndpoint = null;
        // No sticky client (or the previous one was invalidated): pick the
        // next candidate endpoint and build a client for it.
        if (currentHttpClient == null) {
            if (candidateHosts == null) {
                candidateHosts = getHostCandidates();
                if (candidateHosts.isEmpty()) {
                    throw new TransportException("There is no known eureka server; cluster server list is empty");
                }
            }
            if (endpointIdx >= candidateHosts.size()) {
                throw new TransportException("Cannot execute request on any known server");
            }

            currentEndpoint = candidateHosts.get(endpointIdx++);
            currentHttpClient = clientFactory.newClient(currentEndpoint);
        }

        try {
            EurekaHttpResponse<R> response = requestExecutor.execute(currentHttpClient);
            if (serverStatusEvaluator.accept(response.getStatusCode(), requestExecutor.getRequestType())) {
                // Accepted response: make this client sticky for next time.
                delegate.set(currentHttpClient);
                if (retry > 0) {
                    logger.info("Request execution succeeded on retry #{}", retry);
                }
                return response;
            }
            logger.warn("Request execution failure with status code {}; retrying on another server if available", response.getStatusCode());
        } catch (Exception e) {
            logger.warn("Request execution failed with message: {}", e.getMessage());  // just log message as the underlying client should log the stacktrace
        }

        // Connection error or 5xx from the server that must be retried on another server
        delegate.compareAndSet(currentHttpClient, null);
        if (currentEndpoint != null) {
            quarantineSet.add(currentEndpoint);
        }
    }
    throw new TransportException("Retry limit reached; giving up on completing the request");
}
/**
 * A 5xx response from the first server must trigger a retry on the next
 * server; the second server's 200 response is returned to the caller and
 * each server is contacted exactly once.
 */
@Test
public void test5xxStatusCodeResultsInRequestRetry() throws Exception {
    when(clientFactory.newClient(Matchers.<EurekaEndpoint>anyVararg())).thenReturn(clusterDelegates.get(0), clusterDelegates.get(1));
    when(requestExecutor.execute(clusterDelegates.get(0))).thenReturn(EurekaHttpResponse.status(500));
    when(requestExecutor.execute(clusterDelegates.get(1))).thenReturn(EurekaHttpResponse.status(200));

    EurekaHttpResponse<Void> httpResponse = retryableClient.execute(requestExecutor);
    assertThat(httpResponse.getStatusCode(), is(equalTo(200)));

    verify(requestExecutor, times(1)).execute(clusterDelegates.get(0));
    verify(requestExecutor, times(1)).execute(clusterDelegates.get(1));
}
/**
 * Classifies a user id: the configured admin id (matched case-insensitively)
 * is an admin, everyone else a general user.
 *
 * @param userId the user id to classify
 * @return ADMIN_USER for the configured admin id, GENERAL_USER otherwise
 */
public UserType getUserType(String userId) {
    if (adminUserId.equalsIgnoreCase(userId)) {
        return UserType.ADMIN_USER;
    }
    return UserType.GENERAL_USER;
}
/**
 * "read-only-admin" — presumably the configured LDAP admin id (verify against
 * the test fixture's configuration) — must map to ADMIN_USER.
 */
@Test
public void getUserType() {
    UserType userType = ldapService.getUserType("read-only-admin");
    Assertions.assertEquals(UserType.ADMIN_USER, userType);
}
/**
 * Matches a balanced bracket pair starting at the head of the queue: fails
 * fast unless the next token is the opening token, then consumes tokens while
 * tracking nesting depth until the matching closing token brings the depth
 * back to zero.
 *
 * Note: on an unbalanced input the queue is drained and all consumed tokens
 * remain in matchedTokenList even though false is returned.
 *
 * @param tokenQueue       the token stream to consume from
 * @param matchedTokenList receives every consumed token
 * @return true when a balanced pair was consumed, false otherwise
 */
@Override
public boolean matchToken(TokenQueue tokenQueue, List<Token> matchedTokenList) {
    if (!tokenQueue.isNextTokenValue(lToken)) {
        return false;
    }

    int stack = 0;
    while (tokenQueue.peek() != null) {
        Token token = tokenQueue.poll();
        // Track nesting depth: +1 on opening, -1 on closing tokens.
        if (lToken.equals(token.getValue())) {
            stack++;
        } else if (rToken.equals(token.getValue())) {
            stack--;
        }
        matchedTokenList.add(token);
        // Depth zero means the initial opening token has just been closed.
        if (stack == 0) {
            return true;
        }
    }
    return false;
}
/**
 * A nested, balanced token sequence "( a ( b ) c )" must be fully consumed:
 * all seven tokens are polled, added to the output in order, and the match
 * succeeds.
 */
@Test
public void shouldMatch() {
    Token t1 = new Token("(", 1, 1);
    Token t2 = new Token("a", 2, 1);
    Token t3 = new Token("(", 3, 1);
    Token t4 = new Token("b", 4, 1);
    Token t5 = new Token(")", 5, 1);
    Token t6 = new Token("c", 6, 1);
    Token t7 = new Token(")", 7, 1);
    TokenQueue tokenQueue = spy(new TokenQueue(Arrays.asList(t1, t2, t3, t4, t5, t6, t7)));

    List<Token> output = mock(List.class);

    BridgeTokenMatcher matcher = new BridgeTokenMatcher("(", ")");
    assertThat(matcher.matchToken(tokenQueue, output), is(true));

    // One token peeked+polled per element, nothing else touched on the queue.
    verify(tokenQueue, times(1)).isNextTokenValue("(");
    verify(tokenQueue, times(7)).poll();
    verify(tokenQueue, times(7)).peek();
    verifyNoMoreInteractions(tokenQueue);
    verify(output).add(t1);
    verify(output).add(t2);
    verify(output).add(t3);
    verify(output).add(t4);
    verify(output).add(t5);
    verify(output).add(t6);
    verify(output).add(t7);
    verifyNoMoreInteractions(output);
}
/**
 * A pipelined region is finished once no unfinished vertices remain.
 *
 * @return true if all vertices of this region have finished
 */
public boolean isFinished() {
    return unfinishedVertices.isEmpty();
}
/** A freshly created execution view still has unfinished vertices. */
@Test
public void regionIsUnfinishedIfNotAllVerticesAreFinished() {
    final PipelinedRegionExecutionView executionView =
        new PipelinedRegionExecutionView(TEST_PIPELINED_REGION);

    assertThat(executionView.isFinished()).isFalse();
}
/**
 * Encodes the GIF resource to the given file. When the frames were never
 * transformed (UnitTransformation) or transformed encoding is disabled via
 * options, the original GIF bytes are written out unchanged; otherwise the
 * transformed frames are re-encoded.
 *
 * @return true if the file was written successfully
 */
@Override
public boolean encode(
    @NonNull Resource<GifDrawable> resource, @NonNull File file, @NonNull Options options) {
  GifDrawable drawable = resource.get();
  Transformation<Bitmap> frameTransformation = drawable.getFrameTransformation();
  // UnitTransformation marks an untransformed drawable.
  boolean isTransformed = !(frameTransformation instanceof UnitTransformation);
  if (!isTransformed || !options.get(ENCODE_TRANSFORMATION)) {
    return writeDataDirect(drawable.getBuffer(), file);
  }
  return encodeTransformedToFile(drawable, file);
}
/**
 * When the GIF encoder starts successfully but fails to add a frame, the
 * overall encode must report failure.
 */
@Test
public void testReturnsFalseIfAddingFrameFails() {
    when(decoder.getFrameCount()).thenReturn(1);
    when(decoder.getNextFrame()).thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));

    when(gifEncoder.start(any(OutputStream.class))).thenReturn(true);
    // Frame writing fails -> encode must return false.
    when(gifEncoder.addFrame(anyBitmapOrNull())).thenReturn(false);

    assertFalse(encoder.encode(resource, file, options));
}
ControllerResult<Map<String, ApiError>> updateFeatures( Map<String, Short> updates, Map<String, FeatureUpdate.UpgradeType> upgradeTypes, boolean validateOnly ) { TreeMap<String, ApiError> results = new TreeMap<>(); List<ApiMessageAndVersion> records = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP); for (Entry<String, Short> entry : updates.entrySet()) { results.put(entry.getKey(), updateFeature(entry.getKey(), entry.getValue(), upgradeTypes.getOrDefault(entry.getKey(), FeatureUpdate.UpgradeType.UPGRADE), records)); } if (validateOnly) { return ControllerResult.of(Collections.emptyList(), results); } else { return ControllerResult.atomicOf(records, results); } }
/**
 * A validate-only upgrade of metadata.version to a higher feature level must
 * succeed (no error) while emitting no records.
 */
@Test
public void testCanUpgradeToHigherVersion() {
    FeatureControlManager manager = TEST_MANAGER_BUILDER1.build();
    assertEquals(ControllerResult.of(Collections.emptyList(),
        singletonMap(MetadataVersion.FEATURE_NAME, ApiError.NONE)),
        manager.updateFeatures(
            singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV3.featureLevel()),
            singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UPGRADE),
            true));
}
/**
 * Performs the SASL EXTERNAL exchange for a server-to-server connection:
 * validates the stream's 'from' identity, optionally challenges for an
 * authzid, checks a provided authzid against the stream identity, and
 * verifies the peer's TLS certificates for that identity.
 *
 * @param response the SASL response bytes from the peer (may be empty)
 * @return an empty byte array to challenge for an initial response, or null
 *         on successful authentication
 * @throws SaslException when authentication fails
 */
@Override
public byte[] evaluateResponse( @Nonnull final byte[] response ) throws SaslException
{
    if ( isComplete() )
    {
        throw new IllegalStateException( "Authentication exchange already completed." );
    }

    // The value as sent to us in the 'from' attribute of the stream element sent by the remote server.
    final String defaultIdentity = session.getDefaultIdentity();

    // RFC 6120 Section 4.7.1:
    // "Because a server is a "public entity" on the XMPP network, it MUST include the 'from' attribute after the
    // confidentiality and integrity of the stream are protected via TLS or an equivalent security layer."
    //
    // When doing SASL EXTERNAL, TLS must already have been negotiated, which means that the 'from' attribute must have been set.
    if (defaultIdentity == null || defaultIdentity.isEmpty())
    {
        throw new SaslFailureException(Failure.NOT_AUTHORIZED, "Peer does not provide 'from' attribute value on stream.");
    }

    final String requestedId;
    if (response.length == 0 && session.getSessionData(SASLAuthentication.SASL_LAST_RESPONSE_WAS_PROVIDED_BUT_EMPTY) == null)
    {
        if (PROPERTY_SASL_EXTERNAL_SERVER_REQUIRE_AUTHZID.getValue())
        {
            // No initial response. Send a challenge to get one, per RFC 4422 appendix-A.
            return new byte[0];
        }
        else
        {
            // No authzid required: fall back to the stream's 'from' identity.
            requestedId = defaultIdentity;
        }
    }
    else
    {
        requestedId = new String( response, StandardCharsets.UTF_8 );
    }

    complete = true;

    Log.trace("Completing handshake with '{}' using authzid value: '{}'", defaultIdentity, requestedId);

    // Added for backwards compatibility. Not required by XMPP, but versions of Openfire prior to 4.8.0 did require the authzid to be present.
    if (SASLAuthentication.EXTERNAL_S2S_REQUIRE_AUTHZID.getValue() && requestedId.isEmpty())
    {
        throw new SaslFailureException(Failure.INVALID_AUTHZID, "Peer does not provide authzid, which is required by configuration.");
    }

    // When an authorization identity is provided, make sure that it matches the 'from' value from the session stream.
    if (!requestedId.isEmpty() && !requestedId.equals(defaultIdentity))
    {
        throw new SaslFailureException(Failure.INVALID_AUTHZID, "Stream 'from' attribute value '" + defaultIdentity + "' does not equal SASL authzid '" + requestedId + "'");
    }

    if (!SASLAuthentication.verifyCertificates(session.getConnection().getPeerCertificates(), defaultIdentity, true))
    {
        throw new SaslFailureException(Failure.NOT_AUTHORIZED, "Server-to-Server certificate verification failed.");
    }

    authorizationID = defaultIdentity;

    Log.trace("Successfully authenticated '{}'", authorizationID);
    return null; // Success!
}
/**
 * With an empty initial response and authzid not required by configuration,
 * the server must complete authentication immediately (null result) instead
 * of sending a challenge.
 */
@Test
public void testNoInitialResponseWhileNotRequired() throws Exception
{
    // Setup test fixture.
    final String streamID = "example.org";
    when(session.getSessionData(SASLAuthentication.SASL_LAST_RESPONSE_WAS_PROVIDED_BUT_EMPTY)).thenReturn(null);
    // TODO explicitly set SASLAuthentication#PROPERTY_SASL_EXTERNAL_SERVER_REQUIRE_AUTHZID instead of depending on the default value.

    // The following stubs are only used when the implementation under test progresses beyond the check for the
    // emptiness of the initial response. It should not progress beyond that point. I'm leaving the stubs in, for
    // the test to fail gracefully (rather than throw an exception) when the system under test misbehaves.
    when(session.getDefaultIdentity()).thenReturn(streamID);
    when(session.getConnection()).thenReturn(connection);
    saslAuthentication.when(() -> SASLAuthentication.verifyCertificates(any(), eq(streamID), anyBoolean())).thenReturn(true);

    final ExternalServerSaslServer server = new ExternalServerSaslServer(session);
    final byte[] input = new byte[]{};

    // Execute system under test.
    final byte[] response = server.evaluateResponse(input);

    // Verify results.
    assertNull(response);
}
/**
 * Counts the total number of lines in the given file, using the default read
 * buffer size of 1024 bytes.
 *
 * @param file the file to count lines of
 * @return the number of lines in the file
 */
public static int getTotalLines(File file) {
    return getTotalLines(file, 1024);
}
/**
 * Counts the lines of the fixture CSV and checks the expected total.
 */
@Test
public void getTotalLinesTest() {
    // The last line of this file ends with a newline, so the trailing empty
    // line counts as one line.
    final int totalLines = FileUtil.getTotalLines(FileUtil.file("test_lines.csv"));
    assertEquals(8, totalLines);
}
/**
 * Returns the number of members (name/value pairs) in this object.
 *
 * @return the member count
 */
public int size() {
    return members.size();
}
/** size() must grow with add() and shrink with remove(). */
@Test
public void testSize() {
    JsonObject object = new JsonObject();
    assertThat(object.size()).isEqualTo(0);

    object.add("Hello", new JsonPrimitive(1));
    assertThat(object.size()).isEqualTo(1);

    object.add("Hi", new JsonPrimitive(1));
    assertThat(object.size()).isEqualTo(2);

    object.remove("Hello");
    assertThat(object.size()).isEqualTo(1);
}
/**
 * Tracks an app-push click event for an activity opened via a JPush
 * notification. The JPush payload is read from the intent's data URI (Huawei
 * channel) or the "JMessageExtra" extra (other channels), parsed as JSON,
 * and reported as a notification-opened event. Does nothing when the intent
 * is null, push tracking is disabled, or no payload is present.
 *
 * @param intent the intent that launched the activity
 */
public static void trackJPushOpenActivity(Intent intent) {
    if (intent == null) {
        return;
    }
    if (!isTrackPushEnabled()) return;
    String data = null;
    // Payload delivered via the Huawei channel is carried in the intent data URI.
    if (intent.getData() != null) {
        data = intent.getData().toString();
    }

    // Payload for non-Huawei channels is carried in the "JMessageExtra" extra.
    if (TextUtils.isEmpty(data) && intent.getExtras() != null) {
        data = intent.getExtras().getString("JMessageExtra");
    }

    SALog.i(TAG, "trackJPushOpenActivity is called, Intent data is " + data);

    if (TextUtils.isEmpty(data)) return;
    try {
        JSONObject jsonObject = null;
        try {
            jsonObject = new JSONObject(data);
        } catch (Exception e) {
            SALog.i(TAG, "Failed to construct JSON");
        }

        if (jsonObject != null) {
            String title = jsonObject.optString("n_title");
            String content = jsonObject.optString("n_content");
            String extras = jsonObject.optString("n_extras");
            byte whichPushSDK = (byte) jsonObject.optInt("rom_type");
            String appPushChannel = PushUtils.getJPushSDKName(whichPushSDK);

            SALog.i(TAG, String.format("trackJPushOpenActivity is called, title is %s, content is %s," +
                    " extras is %s, appPushChannel is %s", title, content, extras, appPushChannel));
            // All three fields are required to report a meaningful event.
            if (TextUtils.isEmpty(title) || TextUtils.isEmpty(content) || TextUtils.isEmpty(appPushChannel)) {
                return;
            }
            String sfData = getSFData(extras);
            trackNotificationOpenedEvent(sfData, title, content, "JPush", appPushChannel);
        }
    } catch (Exception e) {
        SALog.printStackTrace(e);
    }
}
/**
 * Feeds a mocked JPush intent through trackJPushOpenActivity and verifies
 * that an "$AppPushClick" event is emitted with the expected push title,
 * content and service name.
 */
@Test
public void trackJPushOpenActivity() throws InterruptedException {
    SensorsDataAPI sensorsDataAPI = SAHelper.initSensors(mApplication);
    final CountDownLatch countDownLatch = new CountDownLatch(1);
    sensorsDataAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            assertEquals("$AppPushClick", eventName);
            assertEquals("mock_title", eventProperties.optString("$app_push_msg_title"));
            assertEquals("mock_content", eventProperties.optString("$app_push_msg_content"));
            assertEquals("JPush", eventProperties.optString("$app_push_service_name"));
            countDownLatch.countDown();
            return true;
        }
    });
    try {
        // Let the SDK finish initialization before firing the intent.
        Thread.sleep(1000);
        Robolectric.getForegroundThreadScheduler().advanceTo(5000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    PushAutoTrackHelper.trackJPushOpenActivity(MockDataTest.mockJPushIntent());
    countDownLatch.await(1000, TimeUnit.MILLISECONDS);
}
/**
 * Logs every metric in the given registries through the logger proxy, in the
 * sorted-map key order. All formatting work is skipped when the target log
 * level is disabled.
 */
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters,
        SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters,
        SortedMap<String, Timer> timers) {
    if (!loggerProxy.isEnabled(marker)) {
        return;
    }
    StringBuilder b = new StringBuilder();
    gauges.forEach((name, gauge) -> logGauge(b, name, gauge));
    counters.forEach((name, counter) -> logCounter(b, name, counter));
    histograms.forEach((name, histogram) -> logHistogram(b, name, histogram));
    meters.forEach((name, meter) -> logMeter(b, name, meter));
    timers.forEach((name, timer) -> logTimer(b, name, timer));
}
/**
 * With the info level enabled, reporting a meter must log a single line
 * containing the prefixed name, count, and all rate statistics.
 */
@Test
public void reportsMeterValuesDefault() {
    final Meter meter = meter();
    when(logger.isInfoEnabled(marker)).thenReturn(true);
    infoReporter().report(map(), map(), map(), map("test.meter", meter), map());

    verify(logger).info(marker, "type=METER, name=prefix.test.meter, count=1, m1_rate=3.0, m5_rate=4.0, " +
        "m15_rate=5.0, mean_rate=2.0, rate_unit=events/second");
}
/**
 * Best-effort close of the connection's input stream. All failures —
 * including the case where no input stream exists yet — are deliberately
 * ignored, matching the "quietly" contract.
 *
 * @param connection the connection to close; may be null
 */
public static void closeQuietly(HttpURLConnection connection) {
    if (connection == null) {
        return;
    }
    try {
        closeQuietly(connection.getInputStream());
    } catch (Exception ignore) {
    }
}
/**
 * closeQuietly must actually close the underlying reader: reading first
 * succeeds, and a readLine() after closing must fail with an IOException.
 */
@Test
void testCloseQuietly() throws IOException {
    BufferedReader br = new BufferedReader(
            new InputStreamReader(new ByteArrayInputStream("111".getBytes(Charsets.toCharset("UTF-8")))));
    assertEquals("111", br.readLine());
    IoUtils.closeQuietly(br);
    try {
        br.readLine();
    } catch (IOException e) {
        // Expected: the stream was closed.
        assertNotNull(e);
        return;
    }
    fail();
}
/**
 * Translates an Avro schema (given as JSON) to a Pegasus DataSchema.
 *
 * Depending on the translation mode, either the Pegasus schema embedded in
 * the Avro schema's "data"/"schema" property is returned (optionally
 * verifying it round-trips back to the input Avro schema), or the Avro
 * schema is translated directly.
 *
 * @param avroSchemaInJson the Avro schema as a JSON string
 * @param options          translation options, including the translation mode
 * @return the resulting Pegasus data schema
 * @throws IllegalArgumentException if the Avro schema or the embedded schema
 *         cannot be parsed, or the embedded schema fails verification
 */
public static DataSchema avroToDataSchema(String avroSchemaInJson, AvroToDataSchemaTranslationOptions options)
    throws IllegalArgumentException {
    // Parse the Avro schema in union-compatibility mode.
    ValidationOptions validationOptions = SchemaParser.getDefaultSchemaParserValidationOptions();
    validationOptions.setAvroUnionMode(true);

    SchemaParserFactory parserFactory = SchemaParserFactory.instance(validationOptions);

    DataSchemaResolver resolver = getResolver(parserFactory, options);
    PegasusSchemaParser parser = parserFactory.create(resolver);
    parser.parse(avroSchemaInJson);
    if (parser.hasError()) {
        throw new IllegalArgumentException(parser.errorMessage());
    }
    assert(parser.topLevelDataSchemas().size() == 1);
    DataSchema dataSchema = parser.topLevelDataSchemas().get(0);
    DataSchema resultDataSchema = null;

    AvroToDataSchemaTranslationMode translationMode = options.getTranslationMode();
    if (translationMode == AvroToDataSchemaTranslationMode.RETURN_EMBEDDED_SCHEMA ||
        translationMode == AvroToDataSchemaTranslationMode.VERIFY_EMBEDDED_SCHEMA) {
        // check for embedded schema
        Object dataProperty = dataSchema.getProperties().get(SchemaTranslator.DATA_PROPERTY);
        if (dataProperty != null && dataProperty.getClass() == DataMap.class) {
            Object schemaProperty = ((DataMap) dataProperty).get(SchemaTranslator.SCHEMA_PROPERTY);
            if (schemaProperty.getClass() == DataMap.class) {
                SchemaParser embeddedSchemaParser = SchemaParserFactory.instance().create(null);
                embeddedSchemaParser.parse(Arrays.asList(schemaProperty));
                if (embeddedSchemaParser.hasError()) {
                    throw new IllegalArgumentException("Embedded schema is invalid\n" + embeddedSchemaParser.errorMessage());
                }
                assert(embeddedSchemaParser.topLevelDataSchemas().size() == 1);
                resultDataSchema = embeddedSchemaParser.topLevelDataSchemas().get(0);

                if (translationMode == AvroToDataSchemaTranslationMode.VERIFY_EMBEDDED_SCHEMA) {
                    // additional verification to make sure that embedded schema translates to Avro schema
                    DataToAvroSchemaTranslationOptions dataToAvroSchemaOptions = new DataToAvroSchemaTranslationOptions();
                    Object optionalDefaultModeProperty = ((DataMap) dataProperty).get(SchemaTranslator.OPTIONAL_DEFAULT_MODE_PROPERTY);
                    dataToAvroSchemaOptions.setOptionalDefaultMode(OptionalDefaultMode.valueOf(optionalDefaultModeProperty.toString()));
                    Schema avroSchemaFromEmbedded = dataToAvroSchema(resultDataSchema, dataToAvroSchemaOptions);
                    Schema avroSchemaFromJson = AvroCompatibilityHelper.parse(avroSchemaInJson, SchemaParseConfiguration.STRICT, null).getMainSchema();
                    // Carry the embedded-schema property over so the comparison
                    // is performed on otherwise-equal schemas.
                    Object embededSchemaPropertyVal = avroSchemaFromJson.getObjectProp(DATA_PROPERTY);
                    if (embededSchemaPropertyVal != null) {
                        avroSchemaFromEmbedded.addProp(DATA_PROPERTY, embededSchemaPropertyVal);
                    }
                    if (!avroSchemaFromEmbedded.equals(avroSchemaFromJson)) {
                        throw new IllegalArgumentException("Embedded schema does not translate to input Avro schema: " + avroSchemaInJson);
                    }
                }
            }
        }
    }
    if (resultDataSchema == null) {
        // translationMode == TRANSLATE or no embedded schema
        DataSchemaTraverse traverse = new DataSchemaTraverse();
        traverse.traverse(dataSchema, AvroToDataSchemaConvertCallback.INSTANCE);
        // convert default values
        traverse.traverse(dataSchema, DefaultAvroToDataConvertCallback.INSTANCE);
        // make sure it can round-trip
        String dataSchemaJson = dataSchema.toString();
        resultDataSchema = DataTemplateUtil.parseSchema(dataSchemaJson);
    }
    return resultDataSchema;
}
/**
 * Translation scenarios from the data provider that must be rejected: the
 * translator is expected to throw (a subtype of) the provided exception type.
 */
@Test(dataProvider = "avroToDataSchemaTranslationModeErrorData")
public void testAvroToDataSchemaTranslationModeError(AvroToDataSchemaTranslationMode translationMode,
    String avroSchemaText, Class<? extends Exception> expectedException) {
    AvroToDataSchemaTranslationOptions options = new AvroToDataSchemaTranslationOptions(translationMode);
    try {
        SchemaTranslator.avroToDataSchema(avroSchemaText, options);
        fail("Expect exception: " + expectedException);
    } catch (Exception e) {
        assertTrue(expectedException.isAssignableFrom(e.getClass()));
    }
}
/**
 * Attempts to unblock a log at the given blocked position, either by
 * rotating the log (when the blockage is a missed rotation at the start of
 * the next term) or by delegating to {@link TermUnblocker} for an in-term
 * blockage.
 *
 * @param termBuffers       the term buffers of the log
 * @param logMetaDataBuffer the log metadata buffer
 * @param blockedPosition   the position at which the log is blocked
 * @param termLength        length of each term buffer
 * @return true if progress was made (log rotated or term unblocked)
 */
public static boolean unblock(
    final UnsafeBuffer[] termBuffers,
    final UnsafeBuffer logMetaDataBuffer,
    final long blockedPosition,
    final int termLength)
{
    final int positionBitsToShift = LogBufferDescriptor.positionBitsToShift(termLength);
    final int blockedTermCount = (int)(blockedPosition >> positionBitsToShift);
    final int blockedOffset = (int)blockedPosition & (termLength - 1);

    final int activeTermCount = activeTermCount(logMetaDataBuffer);

    // Blocked at the very start of the term following the active one: the
    // rotation itself was missed, so rotating the log is sufficient.
    if (activeTermCount == (blockedTermCount - 1) && blockedOffset == 0)
    {
        final int currentTermId = termId(rawTailVolatile(logMetaDataBuffer, indexByTermCount(activeTermCount)));
        rotateLog(logMetaDataBuffer, activeTermCount, currentTermId);
        return true;
    }

    final int blockedIndex = indexByTermCount(blockedTermCount);
    final long rawTail = rawTailVolatile(logMetaDataBuffer, blockedIndex);
    final int termId = termId(rawTail);
    final int tailOffset = termOffset(rawTail, termLength);
    final UnsafeBuffer termBuffer = termBuffers[blockedIndex];

    switch (TermUnblocker.unblock(logMetaDataBuffer, termBuffer, blockedOffset, tailOffset, termId))
    {
        case NO_ACTION:
            break;

        case UNBLOCKED_TO_END:
            // The whole remainder of the term was padded out: rotate to the next term.
            rotateLog(logMetaDataBuffer, blockedTermCount, termId);
            return true;

        case UNBLOCKED:
            return true;
    }

    return false;
}
/**
 * A non-committed message (negative length prefix) at the blocked position,
 * with the tail still inside the term, must be unblocked and the position
 * advanced past the padded-out message.
 */
@Test
void shouldUnblockWhenPositionHasNonCommittedMessageAndTailWithinTerm() {
    final int blockedOffset = HEADER_LENGTH * 4;
    final int messageLength = HEADER_LENGTH * 4;
    final long blockedPosition = computePosition(TERM_ID_1, blockedOffset, positionBitsToShift, TERM_ID_1);
    final int activeIndex = indexByPosition(blockedPosition, positionBitsToShift);

    // Negative length marks a claimed-but-not-committed message.
    when(termBuffers[activeIndex].getIntVolatile(blockedOffset)).thenReturn(-messageLength);

    assertTrue(LogBufferUnblocker.unblock(termBuffers, logMetaDataBuffer, blockedPosition, TERM_LENGTH));

    final long rawTail = rawTailVolatile(logMetaDataBuffer);
    assertEquals(
        blockedPosition + messageLength,
        computePosition(termId(rawTail), blockedOffset + messageLength, positionBitsToShift, TERM_ID_1));
}
/**
 * Builds the SchemaTransform for the given configuration.
 *
 * @param configuration the transform configuration
 * @return a JavaMapToFieldsTransform backed by the configuration
 */
@Override
protected SchemaTransform from(Configuration configuration) {
    return new JavaMapToFieldsTransform(configuration);
}
/**
 * With error handling configured, rows whose UDF throws are routed to the
 * error output (with the failing row and the exception message) while
 * successful rows appear on the main output.
 */
@Test
@Category(NeedsRunner.class)
public void testErrorHandling() {
    Schema inputSchema = Schema.of(Schema.Field.of("x", Schema.FieldType.INT32));
    PCollection<Row> input =
        pipeline
            .apply(
                Create.of(
                    Row.withSchema(inputSchema).addValues(4).build(),
                    Row.withSchema(inputSchema).addValues(-1).build()))
            .setRowSchema(inputSchema);

    PCollectionRowTuple result =
        PCollectionRowTuple.of(JavaMapToFieldsTransformProvider.INPUT_ROWS_TAG, input)
            .apply(
                new JavaMapToFieldsTransformProvider()
                    .from(
                        JavaMapToFieldsTransformProvider.Configuration.builder()
                            .setLanguage("java")
                            .setFields(
                                ImmutableMap.of(
                                    "sqrt",
                                    JavaRowUdf.Configuration.builder()
                                        .setCallable(
                                            "import java.util.function.Function;"
                                                + "import org.apache.beam.sdk.values.Row;"
                                                + "public class Sqrt implements Function<Row, Double> {"
                                                + "  public Double apply(Row row) {"
                                                + "    int x = row.getInt32(\"x\");"
                                                + "    if (x < 0) {"
                                                + "      throw new ArithmeticException(\"negative value\");"
                                                + "    } else {"
                                                + "      return Math.sqrt(x);"
                                                + "    }"
                                                + "  }"
                                                + "}")
                                        .build()))
                            .setErrorHandling(ErrorHandling.builder().setOutput("errors").build())
                            .build()));

    // The non-negative row is mapped successfully.
    PCollection<Row> sqrts = result.get(JavaMapToFieldsTransformProvider.OUTPUT_ROWS_TAG);
    Schema outputSchema = sqrts.getSchema();
    PAssert.that(sqrts)
        .containsInAnyOrder(Row.withSchema(outputSchema).withFieldValue("sqrt", 2.0).build());

    // The negative row lands on the error output with its failure message.
    PCollection<Row> errors = result.get("errors");
    Schema errorSchema = errors.getSchema();
    PAssert.that(errors)
        .containsInAnyOrder(
            Row.withSchema(errorSchema)
                .withFieldValue("failed_row", Row.withSchema(inputSchema).addValues(-1).build())
                .withFieldValue("error_message", "negative value")
                .build());
    pipeline.run();
}
/**
 * Two-argument overload of FORMAT_TIMESTAMP: delegates to the three-argument
 * overload with GMT as the time zone.
 */
@Udf(description = "Converts a TIMESTAMP value into the"
    + " string representation of the timestamp in the given format. Single quotes in the"
    + " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'"
    + " GMT is used when no time zone is explicitly provided."
    + " The format pattern should be in the format expected"
    + " by java.time.format.DateTimeFormatter")
public String formatTimestamp(
    @UdfParameter(
        description = "TIMESTAMP value.") final Timestamp timestamp,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // The previous description claimed the system default time zone was used,
  // but the implementation (and its tests) always default to GMT.
  return formatTimestamp(timestamp, formatPattern, ZoneId.of("GMT").getId());
}
/**
 * Literal text embedded in the pattern via single quotes ('T', 'Fred') must
 * be preserved verbatim in the formatted output.
 */
@Test
public void shouldSupportEmbeddedChars() {
    // When:
    final Object result = udf.formatTimestamp(new Timestamp(1638360611123L), "yyyy-MM-dd'T'HH:mm:ss.SSS'Fred'");

    // Then:
    final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Fred'");
    sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
    final String expectedResult = sdf.format(new Date(1638360611123L));
    assertThat(result, is(expectedResult));
}
/**
 * Writes a boolean as a single byte: 1 for true, 0 for false.
 *
 * @param value the boolean to write
 * @return this buffer, for chaining
 */
@Override
public ByteBuf writeBoolean(boolean value) {
    if (value) {
        writeByte(1);
    } else {
        writeByte(0);
    }
    return this;
}
/**
 * Writing a boolean to a released buffer must fail with an
 * IllegalReferenceCountException.
 */
@Test
public void testWriteBooleanAfterRelease() {
    assertThrows(IllegalReferenceCountException.class, new Executable() {
        @Override
        public void execute() {
            releasedBuffer().writeBoolean(true);
        }
    });
}
/**
 * Recursively deletes the given file or directory tree.
 * <p>
 * The tree is walked depth-first (children before parents) so directories are
 * empty by the time they are deleted. Any entry that cannot be deleted is
 * scheduled for deletion on JVM exit and causes a {@code false} return.
 *
 * @param file the file or directory to delete; {@code null} is tolerated and
 *             reported as a failure
 * @return {@code true} only if every entry in the tree was deleted
 */
public static boolean delete(@Nullable File file) {
    if (file == null) {
        LOGGER.warn("cannot delete null File");
        return false;
    }
    // Fix: File::delete failures were previously ignored, so the method could
    // report success even though files were left behind.
    final boolean[] allDeleted = {true};
    try (Stream<Path> paths = Files.walk(file.toPath())) {
        paths.sorted(Comparator.reverseOrder())
                .map(Path::toFile)
                .forEach(f -> {
                    if (!f.delete()) {
                        allDeleted[0] = false;
                        f.deleteOnExit();
                    }
                });
    } catch (IOException ex) {
        LOGGER.trace(ex.getMessage(), ex);
        LOGGER.debug("Failed to delete file: {} (error message: {}); attempting to delete on exit.",
                file.getPath(), ex.getMessage());
        file.deleteOnExit();
        return false;
    }
    return allDeleted[0];
}
@Test public void testDeleteWithSubDirectories() throws Exception { File dir = new File(getSettings().getTempDirectory(), "delete-me"); dir.mkdirs(); File file = File.createTempFile("tmp", "deleteme", dir); assertTrue("Unable to create a temporary file " + file.getAbsolutePath(), file.exists()); // delete the file boolean status = FileUtils.delete(dir); assertTrue("delete returned a failed status", status); assertFalse("Temporary file exists after attempting deletion", file.exists()); }
/**
 * Sets the semicolon-separated list of Tomcat JDBC pool interceptor class names
 * (e.g. {@code "StatementFinalizer;ConnectionState"}).
 *
 * @param jdbcInterceptors the interceptor list, or {@link java.util.Optional#empty()} for none
 */
@JsonProperty
public void setJdbcInterceptors(Optional<String> jdbcInterceptors) {
    // NOTE(review): Optional as a setter parameter is generally discouraged, but it
    // mirrors the existing JSON-mapped property shape; changing it would break callers.
    this.jdbcInterceptors = jdbcInterceptors;
}
@Test
void testJdbcInterceptors() throws Exception {
    // Configure two Tomcat JDBC interceptors and verify both are materialized on the pool.
    final Optional<String> interceptors = Optional.of("StatementFinalizer;ConnectionState");
    factory.setJdbcInterceptors(interceptors);

    assertThat(dataSource())
            .isInstanceOfSatisfying(
                    ManagedPooledDataSource.class,
                    source -> assertThat(source.getPoolProperties().getJdbcInterceptorsAsArray())
                            .extracting("interceptorClass")
                            .contains(StatementFinalizer.class, ConnectionState.class));
}
/**
 * Asks the SCM plugin for the latest revision of the material checked out into
 * {@code flyweightFolder}.
 * <p>
 * Request and response payloads are (de)serialized by the message handler
 * registered for the plugin's resolved extension version.
 */
public MaterialPollResult getLatestRevision(String pluginId, final SCMPropertyConfiguration scmConfiguration, final Map<String, String> materialData, final String flyweightFolder) {
    return pluginRequestHelper.submitRequest(pluginId, REQUEST_LATEST_REVISION, new DefaultPluginInteractionCallback<>() {
        @Override
        public String requestBody(String resolvedExtensionVersion) {
            // Build the version-specific JSON request for the latest-revision call.
            return messageHandlerMap.get(resolvedExtensionVersion).requestMessageForLatestRevision(scmConfiguration, materialData, flyweightFolder);
        }

        @Override
        public MaterialPollResult onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
            // Parse the plugin's JSON response into a MaterialPollResult.
            return messageHandlerMap.get(resolvedExtensionVersion).responseMessageForLatestRevision(responseBody);
        }
    });
}
// Verifies that getLatestRevision round-trips through the registered JSON message
// handler: the handler builds the request body, the plugin is invoked with it, and
// the handler's deserialized response is returned unchanged.
@Test
public void shouldTalkToPluginToGetLatestModification() throws Exception {
    String flyweight = "flyweight";
    when(jsonMessageHandler.requestMessageForLatestRevision(scmPropertyConfiguration, materialData, flyweight)).thenReturn(requestBody);
    MaterialPollResult deserializedResponse = new MaterialPollResult();
    when(jsonMessageHandler.responseMessageForLatestRevision(responseBody)).thenReturn(deserializedResponse);

    MaterialPollResult response = scmExtension.getLatestRevision(PLUGIN_ID, scmPropertyConfiguration, materialData, flyweight);

    assertRequest(requestArgumentCaptor.getValue(), SCM_EXTENSION, "1.0", SCMExtension.REQUEST_LATEST_REVISION, requestBody);
    verify(jsonMessageHandler).requestMessageForLatestRevision(scmPropertyConfiguration, materialData, flyweight);
    verify(jsonMessageHandler).responseMessageForLatestRevision(responseBody);
    // The exact handler-produced instance must be returned, not a copy.
    assertSame(response, deserializedResponse);
}
/**
 * Builds one result row per compute node: in Standalone mode only the local
 * instance exists, otherwise every known cluster instance is listed.
 */
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowComputeNodesStatement sqlStatement, final ContextManager contextManager) {
    final String modeType = contextManager.getComputeNodeInstanceContext().getModeConfiguration().getType();
    if ("Standalone".equals(modeType)) {
        return Collections.singleton(buildRow(contextManager.getComputeNodeInstanceContext().getInstance(), modeType));
    }
    return contextManager.getComputeNodeInstanceContext().getAllClusterInstances().stream()
            .map(each -> buildRow(each, modeType))
            .collect(Collectors.toList());
}
// Verifies that in Standalone mode exactly one row is produced — for the local
// instance — and that each cell holds the expected attribute value.
@Test
void assertExecuteWithStandaloneMode() {
    ShowComputeNodesExecutor executor = new ShowComputeNodesExecutor();
    ContextManager contextManager = mock(ContextManager.class);
    ComputeNodeInstanceContext computeNodeInstanceContext = createStandaloneInstanceContext();
    when(contextManager.getComputeNodeInstanceContext()).thenReturn(computeNodeInstanceContext);
    Collection<LocalDataQueryResultRow> actual = executor.getRows(mock(ShowComputeNodesStatement.class), contextManager);
    assertThat(actual.size(), is(1));
    LocalDataQueryResultRow row = actual.iterator().next();
    // Cell order presumably: id, type, host, port, status, mode, worker id,
    // labels, version — matches buildRow's layout; confirm against the executor.
    assertThat(row.getCell(1), is("foo"));
    assertThat(row.getCell(2), is("PROXY"));
    assertThat(row.getCell(3), is("127.0.0.1"));
    assertThat(row.getCell(4), is("3308"));
    assertThat(row.getCell(5), is("OK"));
    assertThat(row.getCell(6), is("Standalone"));
    assertThat(row.getCell(7), is("0"));
    assertThat(row.getCell(8), is(""));
    assertThat(row.getCell(9), is("foo_version"));
}
/**
 * Dispatches on the encoded value's type tag and writes the smali textual form
 * of the value: integral values with their type suffix ('t' = byte, 's' = short,
 * 'L' = long, none = int), quoted strings, descriptors for fields/methods/types,
 * and recursive forms for annotations and arrays.
 *
 * @throws IOException if the underlying writer fails
 * @throws IllegalArgumentException if the value type tag is not recognized
 */
public void writeEncodedValue(EncodedValue encodedValue) throws IOException {
    switch (encodedValue.getValueType()) {
        case ValueType.BOOLEAN:
            writeBooleanEncodedValue((BooleanEncodedValue) encodedValue);
            break;
        case ValueType.BYTE:
            writeIntegralValue(((ByteEncodedValue) encodedValue).getValue(), 't');
            break;
        case ValueType.CHAR:
            writeCharEncodedValue((CharEncodedValue) encodedValue);
            break;
        case ValueType.SHORT:
            writeIntegralValue(((ShortEncodedValue) encodedValue).getValue(), 's');
            break;
        case ValueType.INT:
            // ints carry no type suffix in smali
            writeIntegralValue(((IntEncodedValue) encodedValue).getValue(), null);
            break;
        case ValueType.LONG:
            writeIntegralValue(((LongEncodedValue) encodedValue).getValue(), 'L');
            break;
        case ValueType.FLOAT:
            writeFloatEncodedValue((FloatEncodedValue) encodedValue);
            break;
        case ValueType.DOUBLE:
            writeDoubleEncodedValue((DoubleEncodedValue) encodedValue);
            break;
        case ValueType.ANNOTATION:
            writeAnnotation((AnnotationEncodedValue) encodedValue);
            break;
        case ValueType.ARRAY:
            writeArray((ArrayEncodedValue) encodedValue);
            break;
        case ValueType.STRING:
            writeQuotedString(((StringEncodedValue) encodedValue).getValue());
            break;
        case ValueType.FIELD:
            writeFieldDescriptor(((FieldEncodedValue) encodedValue).getValue());
            break;
        case ValueType.ENUM:
            writeEnum((EnumEncodedValue) encodedValue);
            break;
        case ValueType.METHOD:
            writeMethodDescriptor(((MethodEncodedValue) encodedValue).getValue());
            break;
        case ValueType.TYPE:
            writeType(((TypeEncodedValue) encodedValue).getValue());
            break;
        case ValueType.METHOD_TYPE:
            writeMethodProtoDescriptor(((MethodTypeEncodedValue) encodedValue).getValue());
            break;
        case ValueType.METHOD_HANDLE:
            writeMethodHandle(((MethodHandleEncodedValue) encodedValue).getValue());
            break;
        case ValueType.NULL:
            writer.write("null");
            break;
        default:
            // Fix: include the unexpected tag so failures are diagnosable.
            throw new IllegalArgumentException(
                    "Unknown encoded value type: " + encodedValue.getValueType());
    }
}
@Test
public void testWriteEncodedValue_type_withSpaces() throws IOException {
    // Type descriptors containing spaces must be emitted with backtick quoting.
    final BaksmaliWriter baksmaliWriter = new BaksmaliWriter(output);

    baksmaliWriter.writeEncodedValue(new ImmutableTypeEncodedValue("Ltest/type with spaces;"));

    Assert.assertEquals("Ltest/`type with spaces`;", output.toString());
}
/**
 * Validates user-supplied offsets for this connector before they are written.
 * Tombstone (null) offsets are always accepted so garbage can be cleaned up via
 * the REST API; non-null offsets must have a non-null partition containing both
 * cluster-alias keys and a well-formed offset map.
 *
 * @return {@code true} when all entries validate; validation failures throw
 * @throws ConnectException if a partition is null or malformed
 */
@Override
public boolean alterOffsets(Map<String, String> config, Map<Map<String, ?>, Map<String, ?>> offsets) {
    for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) {
        Map<String, ?> sourceOffset = offsetEntry.getValue();
        if (sourceOffset == null) {
            // We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
            // want to prevent users from being able to clean it up using the REST API
            continue;
        }
        Map<String, ?> sourcePartition = offsetEntry.getKey();
        if (sourcePartition == null) {
            throw new ConnectException("Source partitions may not be null");
        }
        // Partition must carry both alias keys as strings; the offset must match
        // the expected shape for this partition.
        MirrorUtils.validateSourcePartitionString(sourcePartition, SOURCE_CLUSTER_ALIAS_KEY);
        MirrorUtils.validateSourcePartitionString(sourcePartition, TARGET_CLUSTER_ALIAS_KEY);
        MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, true);
    }
    // We don't actually use these offsets in the task class, so no additional effort is required beyond just validating
    // the format of the user-supplied offsets
    return true;
}
// Verifies that alterOffsets rejects partitions missing either cluster-alias key
// while accepting a fully-populated partition.
@Test
public void testAlterOffsetsMissingPartitionKey() {
    MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector();
    // Helper: invoke alterOffsets with a single (partition -> SOURCE_OFFSET) entry.
    Function<Map<String, ?>, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Collections.singletonMap(
            partition, SOURCE_OFFSET
    ));

    Map<String, ?> validPartition = sourcePartition("primary", "backup");
    // Sanity check to make sure our valid partition is actually valid
    assertTrue(alterOffsets.apply(validPartition));

    // Dropping either alias key must trigger a validation failure.
    for (String key : Arrays.asList(SOURCE_CLUSTER_ALIAS_KEY, TARGET_CLUSTER_ALIAS_KEY)) {
        Map<String, ?> invalidPartition = new HashMap<>(validPartition);
        invalidPartition.remove(key);
        assertThrows(ConnectException.class, () -> alterOffsets.apply(invalidPartition));
    }
}
/**
 * Ensures {@code jetty.home}/{@code jetty.base} are set and recreates Jetty's
 * "work" directory empty before start-up.
 *
 * @param val the validation accumulator; receives an error if the stale work
 *            directory cannot be removed
 * @return {@code Validation.SUCCESS}, or {@code val} with an error added
 */
@Override
public Validation validate(Validation val) {
    // Default jetty.home to the process working directory when unset or blank.
    if (StringUtils.isBlank(systemEnvironment.getPropertyImpl("jetty.home"))) {
        systemEnvironment.setProperty("jetty.home", systemEnvironment.getPropertyImpl("user.dir"));
    }
    // jetty.base always mirrors jetty.home.
    systemEnvironment.setProperty("jetty.base", systemEnvironment.getPropertyImpl("jetty.home"));
    File home = new File(systemEnvironment.getPropertyImpl("jetty.home"));
    File work = new File(systemEnvironment.getPropertyImpl("jetty.home"), "work");
    if (home.exists()) {
        if (work.exists()) {
            try {
                // Wipe stale state from a previous run before recreating the directory.
                FileUtils.deleteDirectory(work);
            } catch (IOException e) {
                String message = format("Error trying to remove Jetty working directory {0}: {1}", work.getAbsolutePath(), e);
                return val.addError(new RuntimeException(message));
            }
        }
        // NOTE(review): mkdir()'s return value is ignored — a failure to recreate
        // "work" goes unnoticed here; confirm that is acceptable.
        work.mkdir();
    }
    return Validation.SUCCESS;
}
@Test
public void shouldSetJettyBaseToValueOfJettyHome() {
    when(systemEnvironment.getPropertyImpl("jetty.home")).thenReturn("foo");

    final Validation validation = new Validation();
    jettyWorkDirValidator.validate(validation);

    // Validation succeeds and jetty.base is mirrored from jetty.home.
    assertThat(validation.isSuccessful(), is(true));
    verify(systemEnvironment).setProperty("jetty.base", "foo");
}
/**
 * Builds a {@link JobGraph} from the given stream graph using the caller's
 * context class loader, no explicit job ID, and a direct (calling-thread)
 * executor.
 */
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
    final StreamingJobGraphGenerator generator =
            new StreamingJobGraphGenerator(
                    Thread.currentThread().getContextClassLoader(),
                    streamGraph,
                    null,
                    Runnable::run);
    return generator.createJobGraph();
}
// Verifies that a job vertex collects operator coordinators from every chained
// operator: the coordinated source and the coordinated transform contribute one each.
@Test
void testOperatorCoordinatorAddedToJobVertex() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Integer> stream =
            env.fromSource(
                    new MockSource(Boundedness.BOUNDED, 1),
                    WatermarkStrategy.noWatermarks(),
                    "TestingSource");
    // A one-input transformation whose operator factory supplies its own coordinator.
    OneInputTransformation<Integer, Integer> resultTransform =
            new OneInputTransformation<Integer, Integer>(
                    stream.getTransformation(),
                    "AnyName",
                    new CoordinatedTransformOperatorFactory(),
                    BasicTypeInfo.INT_TYPE_INFO,
                    env.getParallelism());
    new TestingSingleOutputStreamOperator<>(env, resultTransform).print();
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
    // Source coordinator + transform coordinator end up on the single vertex.
    assertThat(jobGraph.getVerticesAsArray()[0].getOperatorCoordinators()).hasSize(2);
}
public static String calculateTypeName(CompilationUnit compilationUnit, FullyQualifiedJavaType fqjt) { if (fqjt.isArray()) { // if array, then calculate the name of the base (non-array) type // then add the array indicators back in String fqn = fqjt.getFullyQualifiedName(); String typeName = calculateTypeName(compilationUnit, new FullyQualifiedJavaType(fqn.substring(0, fqn.indexOf('[')))); return typeName + fqn.substring(fqn.indexOf('[')); } if (!fqjt.getTypeArguments().isEmpty()) { return calculateParameterizedTypeName(compilationUnit, fqjt); } if (compilationUnit == null || typeDoesNotRequireImport(fqjt) || typeIsInSamePackage(compilationUnit, fqjt) || typeIsAlreadyImported(compilationUnit, fqjt)) { return fqjt.getShortName(); } else { return fqjt.getFullyQualifiedName(); } }
@Test
void testGenericTypeWithWildCard() {
    // With Map, List and BigDecimal all imported, the parameterized name should
    // use short names throughout while preserving the wildcard argument.
    final Interface mapperInterface = new Interface(new FullyQualifiedJavaType("com.foo.UserMapper"));
    mapperInterface.addImportedType(new FullyQualifiedJavaType("java.util.Map"));
    mapperInterface.addImportedType(new FullyQualifiedJavaType("java.util.List"));
    mapperInterface.addImportedType(new FullyQualifiedJavaType("java.math.BigDecimal"));

    final FullyQualifiedJavaType mapType =
            new FullyQualifiedJavaType("java.util.Map<java.math.BigDecimal, java.util.List<?>>");

    assertEquals("Map<BigDecimal, List<?>>", JavaDomUtils.calculateTypeName(mapperInterface, mapType));
}
/**
 * Looks up a tenant by its website address.
 *
 * @param website the tenant's website, e.g. {@code https://www.iocoder.cn}
 * @return the matching tenant, or presumably {@code null} when no tenant matches
 *         (standard MyBatis select semantics — confirm against the mapper)
 */
@Override
public TenantDO getTenantByWebsite(String website) {
    return tenantMapper.selectByWebsite(website);
}
// Verifies that a tenant inserted with a given website is returned by
// getTenantByWebsite and matches the stored record field-for-field.
@Test
public void testGetTenantByWebsite() {
    // mock data
    TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setWebsite("https://www.iocoder.cn"));
    tenantMapper.insert(dbTenant);// @Sql: insert an existing record first
    // invoke the service under test
    TenantDO result = tenantService.getTenantByWebsite("https://www.iocoder.cn");
    // verify the record exists and matches
    assertPojoEquals(result, dbTenant);
}
/**
 * Restarts measurement: clears the accumulated total and the end marker, and
 * records "now" as the new start time. Synchronized to keep the three fields
 * consistent for concurrent readers.
 */
public synchronized void reset() {
    total = 0;
    end = 0;
    start = System.currentTimeMillis();
}
@Test
public void reset() {
    final Counter counter = new Counter();
    counter.add(123L);
    assertEquals("incorrect number of bytes", 123L, counter.total());

    // Snapshot duration/throughput and check they stay self-consistent before reset.
    final double duration = counter.duration();
    final double throughput = counter.throughput();
    assertEquals("incorrect duration", duration, counter.duration(), 0.0001);
    assertEquals("incorrect throughput", throughput, counter.throughput(), 0.0001);
    assertEquals("incorrect number of bytes", 123L, counter.total());

    // After reset, both the byte count and the throughput drop to zero.
    counter.reset();
    assertEquals("incorrect throughput", 0.0, counter.throughput(), 0.0001);
    assertEquals("incorrect number of bytes", 0, counter.total());
}
/**
 * Copies the dynamic thread-pool settings from {@code parameter} into the wire
 * DTO and serializes it as JSON.
 */
public static String getPoolContent(ThreadPoolParameter parameter) {
    ThreadPoolParameterInfo info = new ThreadPoolParameterInfo();
    // The setters are fluent but are invoked here as plain statements.
    info.setTenantId(parameter.getTenantId());
    info.setItemId(parameter.getItemId());
    info.setTpId(parameter.getTpId());
    info.setCoreSize(parameter.getCoreSize());
    info.setMaxSize(parameter.getMaxSize());
    info.setQueueType(parameter.getQueueType());
    info.setCapacity(parameter.getCapacity());
    info.setKeepAliveTime(parameter.getKeepAliveTime());
    info.setExecuteTimeOut(parameter.getExecuteTimeOut());
    info.setIsAlarm(parameter.getIsAlarm());
    info.setCapacityAlarm(parameter.getCapacityAlarm());
    info.setLivenessAlarm(parameter.getLivenessAlarm());
    info.setAllowCoreThreadTimeOut(parameter.getAllowCoreThreadTimeOut());
    info.setRejectedType(parameter.getRejectedType());
    return JSONUtil.toJSONString(info);
}
// Verifies ThreadPoolParameterInfo serialization: null fields (executeTimeOut)
// are omitted from the JSON, and coreSize/maxSize are absent — presumably the
// builder's corePoolSize/maximumPoolSize do not populate them; confirm against
// ThreadPoolParameterInfo.
@Test
public void assertGetPoolContent() {
    String testText = "{\"tenantId\":\"prescription\",\"itemId\":\"dynamic-threadpool-example\",\"tpId\":"
            + "\"message-consume\",\"queueType\":1,\"capacity\":4,\"keepAliveTime\":513,\"rejectedType\":4,\"isAlarm\""
            + ":1,\"capacityAlarm\":80,\"livenessAlarm\":80,\"allowCoreThreadTimeOut\":1}";
    ThreadPoolParameterInfo threadPoolParameterInfo = ThreadPoolParameterInfo.builder().tenantId("prescription")
            .itemId("dynamic-threadpool-example").tpId("message-consume").content("描述信息").corePoolSize(1)
            .maximumPoolSize(2).queueType(1).capacity(4).keepAliveTime(513L).executeTimeOut(null).rejectedType(4)
            .isAlarm(1).capacityAlarm(80).livenessAlarm(80).allowCoreThreadTimeOut(1).build();
    // NOTE(review): Assert.isTrue gives no expected/actual diff on failure; an
    // equality assertion would yield better diagnostics.
    Assert.isTrue(testText.equals(ContentUtil.getPoolContent(threadPoolParameterInfo)));
}
/**
 * Fetches the raw account data and the unread-notification count concurrently,
 * converts the raw map into an {@link AccountDataResult}, and merges in the count.
 *
 * @param accountId the account to load
 * @return the combined account data
 * @throws AccountRuntimeException if either asynchronous call fails, the raw map
 *         cannot be converted, or the calling thread is interrupted
 */
public AccountDataResult getAccountData(long accountId) {
    // Kick off both remote calls before blocking on either result.
    Future<Map<String, Object>> accountDataFuture = accountClient.getAccountData(accountId);
    Future<Integer> unreadNotificationsFuture = notificationService.asyncUnreadNotificationCount(accountId);
    AccountDataResult result;
    try {
        result = objectMapper.convertValue(accountDataFuture.get(), AccountDataResult.class);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new AccountRuntimeException(
                "Interrupted while fetching account data for account " + accountId, e);
    } catch (IllegalArgumentException | ExecutionException e) {
        // Fix: previously threw a placeholder "TODO" message.
        throw new AccountRuntimeException(
                "Failed to fetch or convert account data for account " + accountId, e);
    }
    try {
        result.setUnreadNotifications(unreadNotificationsFuture.get());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AccountRuntimeException(
                "Interrupted while fetching unread notification count for account " + accountId, e);
    } catch (ExecutionException e) {
        throw new AccountRuntimeException(
                "Failed to fetch unread notification count for account " + accountId, e);
    }
    return result;
}
// Verifies that getAccountData converts the async raw map into AccountDataResult
// and merges in the async unread-notification count.
@Test
public void testGetAccountData() {
    // Raw map as returned by the (mocked) account client; string values are
    // coerced into typed fields by the ObjectMapper conversion.
    Map<String, Object> result = Map.of(
            "status", "OK",
            "error", "custom error",
            "email_status", "VERIFIED",
            "setting_2_factor", "true",
            "classified_deceased", "true");
    when(accountClient.getAccountData(anyLong())).thenReturn(new AsyncResult<Map<String, Object>>(result));
    when(notificationService.asyncUnreadNotificationCount(anyLong())).thenReturn(new AsyncResult<Integer>(1));
    AccountDataResult accountData = accountService.getAccountData(1);
    assertEquals(Status.OK, accountData.getStatus());
    assertEquals("custom error", accountData.getError());
    assertEquals(1, accountData.getUnreadNotifications());
    assertEquals(true, accountData.getClassifiedDeceased());
    assertEquals(EmailStatus.VERIFIED, accountData.getEmailStatus());
    assertEquals(true, accountData.getSetting2Factor());
}
public static void unTar(InputStream inputStream, File untarDir, boolean gzipped) throws IOException, InterruptedException, ExecutionException { if (!untarDir.mkdirs()) { if (!untarDir.isDirectory()) { throw new IOException("Mkdirs failed to create " + untarDir); } } if(Shell.WINDOWS) { // Tar is not native to Windows. Use simple Java based implementation for // tests and simple tar archives unTarUsingJava(inputStream, untarDir, gzipped); } else { // spawn tar utility to untar archive for full fledged unix behavior such // as resolving symlinks in tar archives unTarUsingTar(inputStream, untarDir, gzipped); } }
// Verifies unTar for both a gzipped (.tgz) archive and a plain .tar archive.
// NOTE(review): the "test.cache.data" fallback differs between the two look-ups
// ("target/test/cache" vs "build/test/cache") — confirm this is intentional.
@Test (timeout = 30000)
public void testUntar() throws IOException {
    String tarGzFileName = System.getProperty("test.cache.data",
            "target/test/cache") + "/test-untar.tgz";
    String tarFileName = System.getProperty("test.cache.data",
            "build/test/cache") + "/test-untar.tar";
    File dataDir = GenericTestUtils.getTestDir();
    File untarDir = new File(dataDir, "untarDir");

    doUntarAndVerify(new File(tarGzFileName), untarDir);
    doUntarAndVerify(new File(tarFileName), untarDir);
}
/**
 * Shuts this manager down: stops the server-list refresh executor (if one was
 * started) and releases the shared naming HTTP client. Begin/end are logged for
 * traceability.
 */
@Override
public void shutdown() throws NacosException {
    final String className = this.getClass().getName();
    NAMING_LOGGER.info("{} do shutdown begin", className);
    if (refreshServerListExecutor != null) {
        ThreadUtils.shutdownThreadPool(refreshServerListExecutor, NAMING_LOGGER);
    }
    NamingHttpClientManager.getInstance().shutdown();
    NAMING_LOGGER.info("{} do shutdown stop", className);
}
@Test
void testShutdown() {
    // A freshly constructed manager must shut down cleanly without throwing.
    final Properties properties = new Properties();
    properties.put(PropertyKeyConst.SERVER_ADDR, "127.0.0.1:8848");
    final ServerListManager serverListManager = new ServerListManager(properties);
    Assertions.assertDoesNotThrow(serverListManager::shutdown);
}
/**
 * Returns the elevation for the given coordinate, choosing the data source by
 * latitude: the SRTM-based provider inside the band (-56, 59.999), the global
 * provider everywhere else.
 */
@Override
public double getEle(double lat, double lon) {
    // Sometimes the cgiar data north of 59.999 equals 0
    // The band bounds presumably track SRTM's nominal ~60°N..56°S coverage,
    // trimmed slightly at the northern edge for the issue above — confirm.
    if (lat < 59.999 && lat > -56) {
        return srtmProvider.getEle(lat, lon);
    }
    return globalProvider.getEle(lat, lon);
}
// Verifies source selection with stubbed providers: the SRTM provider (returns 1)
// is used inside the latitude band (-56, 59.999); the global GMTED provider
// (returns 2) is used just outside either edge.
@Test
public void testGetEleMocked() {
    instance = new MultiSourceElevationProvider(
            new CGIARProvider() {
                @Override
                public double getEle(double lat, double lon) {
                    return 1;
                }
            },
            new GMTEDProvider() {
                @Override
                public double getEle(double lat, double lon) {
                    return 2;
                }
            }
    );
    assertEquals(1, instance.getEle(0, 0), .1);
    assertEquals(2, instance.getEle(60.0001, 0), .1);
    assertEquals(2, instance.getEle(-56.0001, 0), .1);
}
/**
 * Determines the default CPU resource for a Kubernetes task manager: delegates
 * to {@code TaskExecutorProcessUtils.getCpuCoresWithFallback}, supplying the
 * Kubernetes {@code TASK_MANAGER_CPU} option value as the fallback.
 */
@VisibleForTesting
static CPUResource getDefaultCpus(Configuration configuration) {
    double fallback = configuration.get(KubernetesConfigOptions.TASK_MANAGER_CPU);
    return TaskExecutorProcessUtils.getCpuCoresWithFallback(configuration, fallback);
}
@Test
void testGetCpuCoresNumSlots() {
    // With 3 task slots and no explicit CPU setting, the default CPUs equal the slot count.
    final Configuration config = new Configuration();
    config.set(TaskManagerOptions.NUM_TASK_SLOTS, 3);

    final CPUResource defaultCpus = KubernetesWorkerResourceSpecFactory.getDefaultCpus(config);

    assertThat(defaultCpus).isEqualTo(new CPUResource(3.0));
}
/**
 * Delegates contract parsing and then validates that every method returns a
 * reactive {@code org.reactivestreams.Publisher} with exactly one type argument
 * that is not a {@code java.util.stream.Stream}.
 *
 * @throws IllegalArgumentException if a method's return type is not reactive or
 *         wraps a Stream
 * @throws IllegalStateException if the Publisher carries more than one type argument
 */
@Override
public List<MethodMetadata> parseAndValidateMetadata(Class<?> targetType) {
    List<MethodMetadata> methodsMetadata = this.delegate.parseAndValidateMetadata(targetType);
    for (final MethodMetadata metadata : methodsMetadata) {
        final Type type = metadata.returnType();
        if (!isReactive(type)) {
            throw new IllegalArgumentException(String.format(
                "Method %s of contract %s doesn't returns a org.reactivestreams.Publisher",
                metadata.configKey(), targetType.getSimpleName()));
        }

        /*
         * we will need to change the return type of the method to match the return type contained
         * within the Publisher
         */
        Type[] actualTypes = ((ParameterizedType) type).getActualTypeArguments();
        if (actualTypes.length > 1) {
            throw new IllegalStateException("Expected only one contained type.");
        } else {
            Class<?> actual = Types.getRawType(actualTypes[0]);
            if (Stream.class.isAssignableFrom(actual)) {
                throw new IllegalArgumentException(
                    "Streams are not supported when using Reactive Wrappers");
            }
            // NOTE(review): this re-sets the unchanged wrapper type rather than the
            // contained type, despite the comment above — confirm intended behavior.
            metadata.returnType(type);
        }
    }
    return methodsMetadata;
}
@Test
void reactivexTypes() {
    // A service whose methods all return reactive Publisher types must validate
    // without throwing.
    final Contract contract = new ReactiveDelegatingContract(new Contract.Default());
    contract.parseAndValidateMetadata(TestReactiveXService.class);
}
/**
 * Renders {@code table} into the given {@link StringBuilder}.
 * <p>
 * Delegates to the {@link Appendable} overload, whose signature declares
 * {@link IOException}; since {@code StringBuilder} never actually throws it,
 * any such exception here is rethrown unchecked.
 */
public void formatTo(DataTable table, StringBuilder appendable) {
    try {
        formatTo(table, (Appendable) appendable);
    } catch (IOException e) {
        throw new CucumberDataTableException(e.getMessage(), e);
    }
}
@Test
void should_print_to_appendable() throws IOException {
    final DataTable table = tableOf("hello");
    // Declared as Appendable on purpose so the Appendable overload is exercised.
    final Appendable out = new StringBuilder();

    formatter.formatTo(table, out);

    assertEquals("| hello |\n", out.toString());
}
/**
 * Creates a table identifier from the given name parts: the final element is the
 * table name, everything before it forms the namespace.
 */
public static TableIdentifier of(String... names) {
    Preconditions.checkArgument(names != null, "Cannot create table identifier from null array");
    Preconditions.checkArgument(names.length > 0, "Cannot create table identifier without a table name");
    int lastIndex = names.length - 1;
    String[] namespaceLevels = Arrays.copyOf(names, lastIndex);
    return new TableIdentifier(Namespace.of(namespaceLevels), names[lastIndex]);
}
// Verifies that both empty and null table names are rejected with the same
// IllegalArgumentException message.
@Test
public void testInvalidTableName() {
    assertThatThrownBy(() -> TableIdentifier.of(Namespace.empty(), ""))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Invalid table name: null or empty");

    assertThatThrownBy(() -> TableIdentifier.of(Namespace.empty(), null))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Invalid table name: null or empty");
}
/**
 * Registers the given load balancer state with JMX and installs a listener that
 * keeps JMX registrations in sync as strategies, cluster info and service
 * properties are added or removed. Each registration also installs a watcher so
 * the bean is re-registered under a mode-specific name when the dual-read mode
 * changes (a {@code null} mode means the mode-agnostic name).
 */
public void setSimpleLoadBalancerState(SimpleLoadBalancerState state) {
  // Register the state itself, and watch for dual-read mode changes.
  _watcherManager.updateWatcher(state, this::doRegisterLoadBalancerState);
  doRegisterLoadBalancerState(state, null);

  state.register(new SimpleLoadBalancerStateListener() {
    @Override
    public void onStrategyAdded(String serviceName, String scheme, LoadBalancerStrategy strategy) {
      _watcherManager.updateWatcher(serviceName, scheme, strategy,
          (item, mode) -> doRegisterLoadBalancerStrategy(serviceName, scheme, item, mode));
      doRegisterLoadBalancerStrategy(serviceName, scheme, strategy, null);
    }

    @Override
    public void onStrategyRemoved(String serviceName, String scheme, LoadBalancerStrategy strategy) {
      // Drop the watcher first, then the JMX bean itself.
      _watcherManager.removeWatcherForLoadBalancerStrategy(serviceName, scheme);
      _jmxManager.unregister(getLoadBalancerStrategyJmxName(serviceName, scheme, null));
    }

    @Override
    public void onClientAdded(String clusterName, TrackerClient client) {
      // We currently think we can make this no-op as the info provided is not helpful
      // _jmxManager.checkReg(new DegraderControl((DegraderImpl) client.getDegrader(DefaultPartitionAccessor.DEFAULT_PARTITION_ID)),
      //     _prefix + "-" + clusterName + "-" + client.getUri().toString().replace("://", "-") + "-TrackerClient-Degrader");
    }

    @Override
    public void onClientRemoved(String clusterName, TrackerClient client) {
      // We currently think we can make this no-op as the info provided is not helpful
      // _jmxManager.unregister(_prefix + "-" + clusterName + "-" + client.getUri().toString().replace("://", "-") + "-TrackerClient-Degrader");
    }

    @Override
    public void onClusterInfoUpdate(ClusterInfoItem clusterInfoItem) {
      // Guard against partially-populated items; only named clusters are registered.
      if (clusterInfoItem != null && clusterInfoItem.getClusterPropertiesItem() != null
          && clusterInfoItem.getClusterPropertiesItem().getProperty() != null) {
        String clusterName = clusterInfoItem.getClusterPropertiesItem().getProperty().getClusterName();
        _watcherManager.updateWatcher(clusterName, clusterInfoItem,
            (item, mode) -> doRegisterClusterInfo(clusterName, item, mode));
        doRegisterClusterInfo(clusterName, clusterInfoItem, null);
      }
    }

    @Override
    public void onClusterInfoRemoval(ClusterInfoItem clusterInfoItem) {
      if (clusterInfoItem != null && clusterInfoItem.getClusterPropertiesItem() != null
          && clusterInfoItem.getClusterPropertiesItem().getProperty() != null) {
        String clusterName = clusterInfoItem.getClusterPropertiesItem().getProperty().getClusterName();
        _watcherManager.removeWatcherForClusterInfoItem(clusterName);
        _jmxManager.unregister(getClusterInfoJmxName(clusterName, null));
      }
    }

    @Override
    public void onServicePropertiesUpdate(LoadBalancerStateItem<ServiceProperties> serviceProperties) {
      if (serviceProperties != null && serviceProperties.getProperty() != null) {
        String serviceName = serviceProperties.getProperty().getServiceName();
        _watcherManager.updateWatcher(serviceName, serviceProperties,
            (item, mode) -> doRegisterServiceProperties(serviceName, item, mode));
        doRegisterServiceProperties(serviceName, serviceProperties, null);
      }
    }

    @Override
    public void onServicePropertiesRemoval(LoadBalancerStateItem<ServiceProperties> serviceProperties) {
      if (serviceProperties != null && serviceProperties.getProperty() != null) {
        String serviceName = serviceProperties.getProperty().getServiceName();
        _watcherManager.removeWatcherForServiceProperties(serviceName);
        _jmxManager.unregister(getServicePropertiesJmxName(serviceName, null));
      }
    }

    // Registers the strategy under a JMX name that may carry a dual-read-mode
    // prefix (null mode = mode-agnostic name).
    private void doRegisterLoadBalancerStrategy(String serviceName, String scheme,
        LoadBalancerStrategy strategy, @Nullable DualReadModeProvider.DualReadMode mode) {
      String jmxName = getLoadBalancerStrategyJmxName(serviceName, scheme, mode);
      _jmxManager.registerLoadBalancerStrategy(jmxName, strategy);
    }

    private void doRegisterClusterInfo(String clusterName, ClusterInfoItem clusterInfoItem,
        @Nullable DualReadModeProvider.DualReadMode mode) {
      String jmxName = getClusterInfoJmxName(clusterName, mode);
      _jmxManager.registerClusterInfo(jmxName, clusterInfoItem);
    }

    private void doRegisterServiceProperties(String serviceName,
        LoadBalancerStateItem<ServiceProperties> serviceProperties,
        @Nullable DualReadModeProvider.DualReadMode mode) {
      _jmxManager.registerServiceProperties(getServicePropertiesJmxName(serviceName, mode), serviceProperties);
    }

    private String getClusterInfoJmxName(String clusterName, @Nullable DualReadModeProvider.DualReadMode mode) {
      return String.format("%s%s-ClusterInfo", getClusterPrefixForLBPropertyJmxNames(clusterName, mode), clusterName);
    }

    private String getServicePropertiesJmxName(String serviceName, @Nullable DualReadModeProvider.DualReadMode mode) {
      return String.format("%s%s-ServiceProperties", getServicePrefixForLBPropertyJmxNames(serviceName, mode), serviceName);
    }

    private String getLoadBalancerStrategyJmxName(String serviceName, String scheme,
        @Nullable DualReadModeProvider.DualReadMode mode) {
      return String.format("%s%s-%s-LoadBalancerStrategy", getServicePrefixForLBPropertyJmxNames(serviceName, mode), serviceName, scheme);
    }
  });
}
// Verifies the SimpleLoadBalancerState listener's service-properties removal
// handling: null items and items without a property are ignored (no JMX
// unregistration), while a populated item unregisters the
// "<serviceName>-ServiceProperties" bean.
@Test(dataProvider = "nonDualReadD2ClientJmxManagers")
public void testSetSimpleLBStateListenerRemoveServiceProperties(String prefix, D2ClientJmxManager.DiscoverySourceType sourceType, Boolean isDualReadLB) {
    D2ClientJmxManagerFixture fixture = new D2ClientJmxManagerFixture();
    D2ClientJmxManager d2ClientJmxManager = fixture.getD2ClientJmxManager(prefix, sourceType, isDualReadLB);
    d2ClientJmxManager.setSimpleLoadBalancerState(fixture._simpleLoadBalancerState);
    Assert.assertEquals(fixture._simpleLoadBalancerStateNameCaptor.getValue(), "Foo-LoadBalancerState");
    Assert.assertEquals(fixture._simpleLoadBalancerStateCaptor.getValue(), fixture._simpleLoadBalancerState);
    // Null item: the listener must not attempt any JMX unregistration.
    fixture._simpleLoadBalancerStateListenerCaptor.getValue().onServicePropertiesRemoval(null);
    Mockito.verify(fixture._jmxManager, never()).unregister(anyString());
    // Item with no property: also ignored.
    fixture._simpleLoadBalancerStateListenerCaptor.getValue().onServicePropertiesRemoval(NO_PROPERTY_LB_STATE_ITEM);
    Mockito.verify(fixture._jmxManager, never()).unregister(anyString());
    // Populated item: the service's JMX bean is unregistered by name.
    fixture._simpleLoadBalancerStateListenerCaptor.getValue().onServicePropertiesRemoval(
        SERVICE_PROPERTIES_LOAD_BALANCER_STATE_ITEM);
    Assert.assertEquals(
        fixture._unregisteredObjectNameCaptor.getValue(),
        SERVICE_PROPERTIES_LOAD_BALANCER_STATE_ITEM.getProperty().getServiceName() + "-ServiceProperties");
}
/**
 * Initiates shutdown and blocks until the thread has fully stopped.
 *
 * @throws InterruptedException if the caller is interrupted while awaiting shutdown
 */
public void shutdown() throws InterruptedException {
    initiateShutdown();
    awaitShutdown();
}
// Verifies that a FatalExitError thrown from doWork after the thread starts
// invokes the registered exit procedure with status code 1, and that shutdown()
// still completes even though the (mocked) exit procedure never returns.
@Test
public void testShutdownWhenCalledAfterThreadStart() throws InterruptedException {
    AtomicReference<Optional<Integer>> statusCodeOption = new AtomicReference<>(Optional.empty());
    Exit.setExitProcedure((statusCode, ignored) -> {
        statusCodeOption.set(Optional.of(statusCode));
        // Sleep until interrupted to emulate the fact that `System.exit()` never returns
        Utils.sleep(Long.MAX_VALUE);
        throw new AssertionError();
    });
    CountDownLatch latch = new CountDownLatch(1);
    ShutdownableThread thread = new ShutdownableThread("shutdownable-thread-test") {
        @Override
        public void doWork() {
            latch.countDown();
            throw new FatalExitError();
        }
    };
    thread.start();
    assertTrue(latch.await(10, TimeUnit.SECONDS), "doWork was not invoked");

    thread.shutdown();
    // The exit procedure runs on the worker thread; wait for it to record the code.
    TestUtils.waitForCondition(() -> statusCodeOption.get().isPresent(),
        "Status code was not set by exit procedure");
    assertEquals(1, statusCodeOption.get().get());
}
/**
 * Builds the full argument array for invoking a Rest.li resource method.
 *
 * <p>Positional arguments (keys/entities already resolved by the caller) are
 * copied first; every remaining declared parameter is then filled in based on
 * its {@code ParamType}: path keys, headers, projections, paging context,
 * validators, attachments, unstructured-data readers/writers, action (POST)
 * fields and query parameters. Parameters left unset fall back to their
 * declared default (or {@code null} for optional non-primitives); a required
 * parameter with no value yields a 400 RoutingException.
 *
 * @param positionalArguments already-resolved leading arguments
 * @param resourceMethod      descriptor of the target resource method
 * @param context             per-request server resource context
 * @param template            action parameter record, or null for non-actions
 * @param resourceMethodConfig method-level config (e.g. query-param validation)
 * @return the complete argument array, index-aligned with the method's parameters
 */
@SuppressWarnings("deprecation")
static Object[] buildArgs(final Object[] positionalArguments,
                          final ResourceMethodDescriptor resourceMethod,
                          final ServerResourceContext context,
                          final DynamicRecordTemplate template,
                          final ResourceMethodConfig resourceMethodConfig)
{
  List<Parameter<?>> parameters = resourceMethod.getParameters();
  Object[] arguments = Arrays.copyOf(positionalArguments, parameters.size());

  fixUpComplexKeySingletonArraysInArguments(arguments);

  boolean attachmentsDesired = false;
  // Fill in every parameter the caller did not already supply positionally.
  for (int i = positionalArguments.length; i < parameters.size(); ++i)
  {
    Parameter<?> param = parameters.get(i);
    try
    {
      if (param.getParamType() == Parameter.ParamType.KEY || param.getParamType() == Parameter.ParamType.ASSOC_KEY_PARAM)
      {
        Object value = context.getPathKeys().get(param.getName());
        if (value != null)
        {
          arguments[i] = value;
          continue;
        }
        // No value: fall through to the default-value handling below.
      }
      else if (param.getParamType() == Parameter.ParamType.CALLBACK)
      {
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.PARSEQ_CONTEXT_PARAM || param.getParamType() == Parameter.ParamType.PARSEQ_CONTEXT)
      {
        continue; // don't know what to fill in yet
      }
      else if (param.getParamType() == Parameter.ParamType.HEADER)
      {
        HeaderParam headerParam = param.getAnnotations().get(HeaderParam.class);
        String value = context.getRequestHeaders().get(headerParam.value());
        arguments[i] = value;
        continue;
      }
      // Since we have multiple different types of MaskTrees that can be passed into resource methods,
      // we must evaluate based on the param type (annotation used)
      else if (param.getParamType() == Parameter.ParamType.PROJECTION || param.getParamType() == Parameter.ParamType.PROJECTION_PARAM)
      {
        arguments[i] = context.getProjectionMask();
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.METADATA_PROJECTION_PARAM)
      {
        arguments[i] = context.getMetadataProjectionMask();
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.PAGING_PROJECTION_PARAM)
      {
        arguments[i] = context.getPagingProjectionMask();
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.CONTEXT || param.getParamType() == Parameter.ParamType.PAGING_CONTEXT_PARAM)
      {
        PagingContext ctx = RestUtils.getPagingContext(context, (PagingContext) param.getDefaultValue());
        arguments[i] = ctx;
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.PATH_KEYS || param.getParamType() == Parameter.ParamType.PATH_KEYS_PARAM)
      {
        arguments[i] = context.getPathKeys();
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.PATH_KEY_PARAM)
      {
        Object value = context.getPathKeys().get(param.getName());
        if (value != null)
        {
          arguments[i] = value;
          continue;
        }
      }
      else if (param.getParamType() == Parameter.ParamType.RESOURCE_CONTEXT || param.getParamType() == Parameter.ParamType.RESOURCE_CONTEXT_PARAM)
      {
        arguments[i] = context;
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.VALIDATOR_PARAM)
      {
        RestLiDataValidator validator = new RestLiDataValidator(resourceMethod.getResourceModel().getResourceClass().getAnnotations(),
            resourceMethod.getResourceModel().getValueClass(), resourceMethod.getMethodType());
        arguments[i] = validator;
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.RESTLI_ATTACHMENTS_PARAM)
      {
        arguments[i] = context.getRequestAttachmentReader();
        attachmentsDesired = true;
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.UNSTRUCTURED_DATA_WRITER_PARAM)
      {
        // The OutputStream is passed to the resource implementation in a synchronous call. Upon return of the
        // resource method, all the bytes would haven't written to the OutputStream. The EntityStream would have
        // contained all the bytes by the time data is requested. The ownership of the OutputStream is passed to
        // the ByteArrayOutputStreamWriter, which is responsible of closing the OutputStream if necessary.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        context.setResponseEntityStream(EntityStreams.newEntityStream(new ByteArrayOutputStreamWriter(out)));
        arguments[i] = new UnstructuredDataWriter(out, context);
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.UNSTRUCTURED_DATA_REACTIVE_READER_PARAM)
      {
        arguments[i] = new UnstructuredDataReactiveReader(context.getRequestEntityStream(),
            context.getRawRequest().getHeader(RestConstants.HEADER_CONTENT_TYPE));
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.POST)
      {
        // handle action parameters
        if (template != null)
        {
          DataMap data = template.data();
          if (data.containsKey(param.getName()))
          {
            arguments[i] = template.getValue(param);
            continue;
          }
        }
      }
      else if (param.getParamType() == Parameter.ParamType.QUERY)
      {
        Object value;
        if (DataTemplate.class.isAssignableFrom(param.getType()))
        {
          value = buildDataTemplateArgument(context.getStructuredParameter(param.getName()), param,
              resourceMethodConfig.shouldValidateQueryParams());
        }
        else
        {
          value = buildRegularArgument(context, param, resourceMethodConfig.shouldValidateQueryParams());
        }

        if (value != null)
        {
          arguments[i] = value;
          continue;
        }
      }
      else if (param.getParamType() == Parameter.ParamType.BATCH || param.getParamType() == Parameter.ParamType.RESOURCE_KEY)
      {
        // should not come to this routine since it should be handled by passing in positionalArguments
        throw new RoutingException("Parameter '" + param.getName() + "' should be passed in as a positional argument",
            HttpStatus.S_400_BAD_REQUEST.getCode());
      }
      else
      {
        // unknown param type
        throw new RoutingException(
            "Parameter '" + param.getName() + "' has an unknown parameter type '" + param.getParamType().name() + "'",
            HttpStatus.S_400_BAD_REQUEST.getCode());
      }
    }
    catch (TemplateRuntimeException e)
    {
      throw new RoutingException("Parameter '" + param.getName() + "' is invalid", HttpStatus.S_400_BAD_REQUEST.getCode());
    }

    try
    {
      // Handling null-valued parameters not provided in resource context or entity body
      // check if it is optional parameter
      if (param.isOptional() && param.hasDefaultValue())
      {
        arguments[i] = param.getDefaultValue();
      }
      else if (param.isOptional() && !param.getType().isPrimitive())
      {
        // optional primitive parameter must have default value or provided
        arguments[i] = null;
      }
      else
      {
        throw new RoutingException("Parameter '" + param.getName() + "' is required", HttpStatus.S_400_BAD_REQUEST.getCode());
      }
    }
    catch (ResourceConfigException e)
    {
      // Parameter default value format exception should result in server error code 500.
      throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
          "Parameter '" + param.getName() + "' default value is invalid", e);
    }
  }

  // Verify that if the resource method did not expect attachments, and attachments were present, that we drain all
  // incoming attachments and send back a bad request. We must take precaution here since simply ignoring the request
  // attachments is not correct behavior here. Ignoring other request level constructs such as headers or query parameters
  // that were not needed is safe, but not for request attachments.
  if (!attachmentsDesired && context.getRequestAttachmentReader() != null)
  {
    throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST,
        "Resource method endpoint invoked does not accept any request attachments.");
  }
  return arguments;
}
/**
 * Verifies that a RESTLI_ATTACHMENTS_PARAM parameter is populated with the
 * request's attachment reader taken from the resource context.
 */
@Test
public void testRestLiAttachmentsParam()
{
  ServerResourceContext mockResourceContext = EasyMock.createMock(ServerResourceContext.class);
  final RestLiAttachmentReader restLiAttachmentReader = new RestLiAttachmentReader(null);
  EasyMock.expect(mockResourceContext.getRequestAttachmentReader()).andReturn(restLiAttachmentReader);
  EasyMock.replay(mockResourceContext);
  @SuppressWarnings({"unchecked","rawtypes"})
  final Parameter<RestLiAttachmentReader> param = new Parameter("RestLi Attachment Reader",
      RestLiAttachmentReader.class, null, false, null, Parameter.ParamType.RESTLI_ATTACHMENTS_PARAM, false, AnnotationSet.EMPTY);
  List<Parameter<?>> parameters = Collections.singletonList(param);
  Object[] results = ArgumentBuilder.buildArgs(new Object[0], getMockResourceMethod(parameters), mockResourceContext, null,
      getMockResourceMethodConfig(false));
  // The single built argument must be the attachment reader itself.
  Assert.assertEquals(results[0], restLiAttachmentReader);
}
/**
 * {@inheritDoc}
 *
 * <p>Delegates straight to the underlying {@code asyncExecutionMonitoring} component.
 */
@Override
public long getWorkerCount() {
    return asyncExecutionMonitoring.getWorkerCount();
}
/** getWorkerCount() must delegate to AsyncExecutionMonitoring and return its value unchanged. */
@Test
public void getWorkerCount_delegates_to_AsyncExecutionMonitoring() {
    when(asyncExecutionMonitoring.getWorkerCount()).thenReturn(12);

    assertThat(underTest.getWorkerCount()).isEqualTo(12);
    verify(asyncExecutionMonitoring).getWorkerCount();
}
/**
 * Builds the broker-id → broker-set mappings for the given cluster using the
 * supplied resolver.
 *
 * @param cluster            cluster whose brokers are grouped into broker sets
 * @param brokerSetResolver  resolver that maps broker ids to broker-set ids
 * @throws BrokerSetResolutionException if broker sets cannot be resolved
 */
public BrokerSetResolutionHelper(Cluster cluster, BrokerSetResolver brokerSetResolver) throws BrokerSetResolutionException {
    initializeBrokerSetMappings(getRackIdByBrokerIdMapping(cluster), brokerSetResolver);
}
/**
 * Verifies that the helper answers brokerSetId(brokerId) according to the
 * mapping produced by the (mocked) BrokerSetResolver.
 */
@Test
public void testBrokerSetResolutionHelper() throws BrokerSetResolutionException {
    ClusterModel clusterModel = DeterministicCluster.brokerSetSatisfiable1();
    // Expected broker-set → broker-ids mapping returned by the mocked resolver.
    Map<String, Set<Integer>> testBrokerSetMapping =
        Map.of("BS1", Set.of(0), "BS2", Set.of(1, 2), "BS3", Set.of(3, 4), "BS4", Set.of(5));
    BrokerSetResolver brokerSetResolver = EasyMock.createNiceMock(BrokerSetResolver.class);
    EasyMock.expect(brokerSetResolver.brokerIdsByBrokerSetId(BrokerSetResolutionHelper.getRackIdByBrokerIdMapping(clusterModel)))
        .andReturn(testBrokerSetMapping);
    EasyMock.replay(brokerSetResolver);
    BrokerSetResolutionHelper brokerSetResolutionHelper = new BrokerSetResolutionHelper(clusterModel, brokerSetResolver);
    assertEquals("BS1", brokerSetResolutionHelper.brokerSetId(0));
    assertEquals("BS2", brokerSetResolutionHelper.brokerSetId(1));
    assertEquals("BS2", brokerSetResolutionHelper.brokerSetId(2));
    assertEquals("BS3", brokerSetResolutionHelper.brokerSetId(3));
    assertEquals("BS3", brokerSetResolutionHelper.brokerSetId(4));
    assertEquals("BS4", brokerSetResolutionHelper.brokerSetId(5));
}
/**
 * Searches trigger contexts and joins each one with its trigger definition
 * from the owning flow.
 *
 * <p>Missing flows or missing trigger definitions are tolerated: the trigger
 * context is returned with a null {@code abstractTrigger} (or skipped entirely
 * when the flow no longer declares any triggers) so the UI is never blocked.
 */
@ExecuteOn(TaskExecutors.IO)
@Get(uri = "/search")
@Operation(tags = {"Triggers"}, summary = "Search for triggers")
public PagedResults<Triggers> search(
    @Parameter(description = "The current page") @QueryValue(defaultValue = "1") int page,
    @Parameter(description = "The current page size") @QueryValue(defaultValue = "10") int size,
    @Parameter(description = "The sort of current page") @Nullable @QueryValue List<String> sort,
    @Parameter(description = "A string filter") @Nullable @QueryValue(value = "q") String query,
    @Parameter(description = "A namespace filter prefix") @Nullable @QueryValue String namespace,
    @Parameter(description = "The identifier of the worker currently evaluating the trigger") @Nullable @QueryValue String workerId
) throws HttpStatusException {
    ArrayListTotal<Trigger> triggerContexts = triggerRepository.find(
        PageableUtils.from(page, size, sort, triggerRepository.sortMapping()),
        query,
        tenantService.resolveTenant(),
        namespace,
        null,
        workerId
    );

    List<Triggers> triggers = new ArrayList<>();
    triggerContexts.forEach(tc -> {
        Optional<Flow> flow = flowRepository.findById(tc.getTenantId(), tc.getNamespace(), tc.getFlowId());
        if (flow.isEmpty()) {
            // Warn instead of throwing to avoid blocking the trigger UI
            log.warn(String.format("Flow %s not found for trigger %s", tc.getFlowId(), tc.getTriggerId()));
            triggers.add(Triggers.builder()
                .abstractTrigger(null)
                .triggerContext(tc)
                .build()
            );
            return;
        }
        if (flow.get().getTriggers() == null) {
            // a trigger was removed from the flow but still in the trigger table
            return;
        }
        AbstractTrigger abstractTrigger = flow.get().getTriggers().stream()
            .filter(t -> t.getId().equals(tc.getTriggerId())).findFirst().orElse(null);
        if (abstractTrigger == null) {
            // Warn instead of throwing to avoid blocking the trigger UI
            log.warn(String.format("Flow %s has no trigger %s", tc.getFlowId(), tc.getTriggerId()));
        }
        triggers.add(Triggers.builder()
            .abstractTrigger(abstractTrigger)
            .triggerContext(tc)
            .build()
        );
    });

    // Keep the repository's total so pagination reflects the full result set.
    return PagedResults.of(new ArrayListTotal<>(triggers, triggerContexts.getTotal()));
}
/**
 * End-to-end search test: persists a flow plus two trigger rows and verifies
 * the /triggers/search endpoint returns both, matched by id/namespace/flow.
 */
@SuppressWarnings("unchecked")
@Test
void search() {
    String triggerFlowId = "schedule-trigger-search";
    String triggerNamespace = "io.kestra.tests.schedule";

    Flow flow = generateFlow(triggerFlowId);
    jdbcFlowRepository.create(flow, flow.generateSource(), flow);

    Trigger trigger = Trigger.builder()
        .flowId(triggerFlowId)
        .namespace(triggerNamespace)
        .triggerId("trigger-nextexec-schedule")
        .date(ZonedDateTime.now())
        .build();

    // Two distinct trigger rows for the same flow.
    jdbcTriggerRepository.save(trigger);
    jdbcTriggerRepository.save(trigger.toBuilder().triggerId("trigger-nextexec-polling").build());

    PagedResults<TriggerController.Triggers> triggers = client.toBlocking().retrieve(
        HttpRequest.GET("/api/v1/triggers/search?q=schedule-trigger-search&namespace=io.kestra.tests&sort=triggerId:asc"),
        Argument.of(PagedResults.class, TriggerController.Triggers.class));
    assertThat(triggers.getTotal(), greaterThanOrEqualTo(2L));

    assertThat(triggers.getResults().stream().map(TriggerController.Triggers::getTriggerContext).toList(), Matchers.hasItems(
        allOf(
            hasProperty("triggerId", is("trigger-nextexec-schedule")),
            hasProperty("namespace", is(triggerNamespace)),
            hasProperty("flowId", is(triggerFlowId))
        ),
        allOf(
            hasProperty("triggerId", is("trigger-nextexec-polling")),
            hasProperty("namespace", is(triggerNamespace)),
            hasProperty("flowId", is(triggerFlowId))
        )
    ));
}
/**
 * FEEL {@code upper case( string )} implementation.
 *
 * @param string the input string; {@code null} is an invalid parameter
 * @return the upper-cased string, or an error result describing the invalid
 *         null parameter
 */
public FEELFnResult<String> invoke(@ParameterName("string") String string) {
    if (string == null) {
        // Surface the invalid null argument as an error event rather than throwing.
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "string", "cannot be null"));
    }
    final String upperCased = string.toUpperCase();
    return FEELFnResult.ofResult(upperCased);
}
/** upper case("teststring") must yield "TESTSTRING". */
@Test
void invokeLowercaseString() {
    FunctionTestUtil.assertResult(stringUpperCaseFunction.invoke("teststring"), "TESTSTRING");
}
/**
 * Integer overload of asin: widens the value to a double (preserving null)
 * and delegates to the double implementation.
 *
 * @param value the value to take the inverse sine of; may be null
 * @return the inverse sine, or null when the input is null
 */
@Udf(description = "Returns the inverse (arc) sine of an INT value")
public Double asin(
    @UdfParameter(
        value = "value",
        description = "The value to get the inverse sine of."
    ) final Integer value
) {
    if (value == null) {
        return asin((Double) null);
    }
    return asin(value.doubleValue());
}
/** asin of any value above 1 must be NaN, across all numeric overloads. */
@Test
public void shouldHandleMoreThanPositiveOne() {
    assertThat(Double.isNaN(udf.asin(1.1)), is(true));
    assertThat(Double.isNaN(udf.asin(6.0)), is(true));
    assertThat(Double.isNaN(udf.asin(2)), is(true));
    assertThat(Double.isNaN(udf.asin(2L)), is(true));
}
/**
 * Wraps the given value in double quotes, escaping its content with the
 * Windows double-quote escaper so the result stays a single, safe
 * command-line token.
 *
 * @param value the raw value to quote and escape
 * @return the escaped value surrounded by double quotes
 */
public static String escapeWithDoubleQuote(String value) {
    return "\"" + WINDOWS_DOUBLE_QUOTE_ESCAPER.escape(value) + "\"";
}
@Test void testEscapeDynamicPropertyValueWithDoubleQuote() { final String value1 = "#a,b&c^d*e@f(g!h"; assertThat(BootstrapTools.escapeWithDoubleQuote(value1)) .isEqualTo("\"#a,b&c\"^^\"d*e@f(g!h\""); final String value2 = "foo\"bar'"; assertThat(BootstrapTools.escapeWithDoubleQuote(value2)).isEqualTo("\"foo\\\"bar'\""); final String value3 = "\"foo\" \"bar\""; assertThat(BootstrapTools.escapeWithDoubleQuote(value3)) .isEqualTo("\"\\\"foo\\\" \\\"bar\\\"\""); }
/**
 * Returns the query part of the given URI, i.e. everything after the first
 * {@code '?'}.
 *
 * @param uri the URI, may be {@code null}
 * @return the query string without the leading {@code '?'}, or {@code null}
 *         when the URI is {@code null} or has no query part
 */
public static String extractQuery(String uri) {
    if (uri == null) {
        return null;
    }
    // Self-contained: the query starts after the first '?'; no marker means no query.
    int pos = uri.indexOf('?');
    return pos < 0 ? null : uri.substring(pos + 1);
}
/** extractQuery: null for null/empty/no-query URIs; raw query string otherwise. */
@Test
public void testExtractQuery() {
    assertNull(URISupport.extractQuery(null));
    assertNull(URISupport.extractQuery(""));
    assertNull(URISupport.extractQuery("file:foo"));
    assertEquals("recursive=true", URISupport.extractQuery("file:foo?recursive=true"));
    assertEquals("recursive=true&delete=true", URISupport.extractQuery("file:foo?recursive=true&delete=true"));
}
/**
 * Searches Maven Central for artifacts matching the given SHA1 hash.
 *
 * <p>Results are cached (including the "not found" case, cached as an empty
 * list). The Central search API is queried over HTTP and its XML response is
 * parsed with XPath; for each hit, jar/pom download URLs are derived from the
 * configured Central content URL when the corresponding packaging is listed.
 *
 * @param sha1 the artifact's SHA1 hash (40 hex characters)
 * @return the matching artifacts (never empty on normal return)
 * @throws IllegalArgumentException if {@code sha1} is not a 40-char hex string
 * @throws FileNotFoundException    if Central reports no matching artifact
 * @throws TooManyRequestsException if Central responds with HTTP 429
 * @throws IOException              on connection or XML parsing failures
 */
public List<MavenArtifact> searchSha1(String sha1) throws IOException, TooManyRequestsException {
    if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
        throw new IllegalArgumentException("Invalid SHA1 format");
    }
    if (cache != null) {
        final List<MavenArtifact> cached = cache.get(sha1);
        if (cached != null) {
            LOGGER.debug("cache hit for Central: " + sha1);
            // An empty cached list encodes a previously-seen "not found".
            if (cached.isEmpty()) {
                throw new FileNotFoundException("Artifact not found in Central");
            }
            return cached;
        }
    }
    final List<MavenArtifact> result = new ArrayList<>();
    final URL url = new URL(String.format(query, rootURL, sha1));

    LOGGER.trace("Searching Central url {}", url);

    // Determine if we need to use a proxy. The rules:
    // 1) If the proxy is set, AND the setting is set to true, use the proxy
    // 2) Otherwise, don't use the proxy (either the proxy isn't configured,
    // or proxy is specifically set to false)
    final URLConnectionFactory factory = new URLConnectionFactory(settings);
    final HttpURLConnection conn = factory.createHttpURLConnection(url, useProxy);
    // NOTE(review): setDoOutput(true) on HttpURLConnection normally switches the
    // request method to POST — confirm this is intended for a search query.
    conn.setDoOutput(true);

    // JSON would be more elegant, but there's not currently a dependency
    // on JSON, so don't want to add one just for this
    conn.addRequestProperty("Accept", "application/xml");
    conn.connect();

    if (conn.getResponseCode() == 200) {
        boolean missing = false;
        try {
            final DocumentBuilder builder = XmlUtils.buildSecureDocumentBuilder();
            final Document doc = builder.parse(conn.getInputStream());
            final XPath xpath = XPathFactory.newInstance().newXPath();
            final String numFound = xpath.evaluate("/response/result/@numFound", doc);
            if ("0".equals(numFound)) {
                missing = true;
            } else {
                final NodeList docs = (NodeList) xpath.evaluate("/response/result/doc", doc, XPathConstants.NODESET);
                for (int i = 0; i < docs.getLength(); i++) {
                    final String g = xpath.evaluate("./str[@name='g']", docs.item(i));
                    LOGGER.trace("GroupId: {}", g);
                    final String a = xpath.evaluate("./str[@name='a']", docs.item(i));
                    LOGGER.trace("ArtifactId: {}", a);
                    final String v = xpath.evaluate("./str[@name='v']", docs.item(i));
                    // "ec" lists the available packaging extensions for this artifact.
                    final NodeList attributes = (NodeList) xpath.evaluate("./arr[@name='ec']/str", docs.item(i), XPathConstants.NODESET);
                    boolean pomAvailable = false;
                    boolean jarAvailable = false;
                    for (int x = 0; x < attributes.getLength(); x++) {
                        final String tmp = xpath.evaluate(".", attributes.item(x));
                        if (".pom".equals(tmp)) {
                            pomAvailable = true;
                        } else if (".jar".equals(tmp)) {
                            jarAvailable = true;
                        }
                    }
                    final String centralContentUrl = settings.getString(Settings.KEYS.CENTRAL_CONTENT_URL);
                    String artifactUrl = null;
                    String pomUrl = null;
                    if (jarAvailable) {
                        //org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.pom
                        artifactUrl = centralContentUrl + g.replace('.', '/') + '/' + a + '/'
                                + v + '/' + a + '-' + v + ".jar";
                    }
                    if (pomAvailable) {
                        //org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.pom
                        pomUrl = centralContentUrl + g.replace('.', '/') + '/' + a + '/'
                                + v + '/' + a + '-' + v + ".pom";
                    }
                    result.add(new MavenArtifact(g, a, v, artifactUrl, pomUrl));
                }
            }
        } catch (ParserConfigurationException | IOException | SAXException | XPathExpressionException e) {
            // Anything else is jacked up XML stuff that we really can't recover from well
            final String errorMessage = "Failed to parse MavenCentral XML Response: " + e.getMessage();
            throw new IOException(errorMessage, e);
        }
        if (missing) {
            if (cache != null) {
                // Cache the empty result so repeat misses don't hit the network.
                cache.put(sha1, result);
            }
            throw new FileNotFoundException("Artifact not found in Central");
        }
    } else if (conn.getResponseCode() == 429) {
        final String errorMessage = "Too many requests sent to MavenCentral; additional requests are being rejected.";
        throw new TooManyRequestsException(errorMessage);
    } else {
        final String errorMessage = "Could not connect to MavenCentral (" + conn.getResponseCode() + "): " + conn.getResponseMessage();
        throw new IOException(errorMessage);
    }
    if (cache != null) {
        cache.put(sha1, result);
    }
    return result;
}
/**
 * Queries Central for a known SHA1 and checks the resolved GAV coordinates.
 * Connection failures on CI are treated as an assumption failure, not a test
 * failure.
 */
@Test
public void testValidSha1() throws Exception {
    try {
        List<MavenArtifact> ma = searcher.searchSha1("9977a8d04e75609cf01badc4eb6a9c7198c4c5ea");
        assertEquals("Incorrect group", "org.apache.maven.plugins", ma.get(0).getGroupId());
        assertEquals("Incorrect artifact", "maven-compiler-plugin", ma.get(0).getArtifactId());
        assertEquals("Incorrect version", "3.1", ma.get(0).getVersion());
    } catch (IOException ex) {
        //we hit a failure state on the CI
        Assume.assumeFalse(StringUtils.contains(ex.getMessage(), "Could not connect to MavenCentral"));
        throw ex;
    }
}
/**
 * Creates a compression stream around {@code out}, obtaining the compressor
 * from the shared codec pool via {@code CompressionCodec.Util}.
 */
@Override
public CompressionOutputStream createOutputStream(OutputStream out) throws IOException {
    return CompressionCodec.Util.createOutputStreamWithCodecPool(this, conf, out);
}
/**
 * Round-trip check: data compressed through the codec's output stream must be
 * readable back via a plain GZIPInputStream.
 */
@Test
public void testSingleCompress() throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    CompressionOutputStream cmpOut = codec.createOutputStream(baos);
    cmpOut.write(DATA1.getBytes(StandardCharsets.UTF_8));
    cmpOut.finish();
    cmpOut.close();

    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    GZIPInputStream cmpIn = new GZIPInputStream(bais);
    byte[] buf = new byte[1024];
    int len = cmpIn.read(buf);
    String result = new String(buf, 0, len, StandardCharsets.UTF_8);
    assertEquals("Input must match output", DATA1, result);
}
/**
 * Matches the given log text against every known crash rule and collects a
 * Result for each rule whose pattern is found.
 *
 * <p>NOTE: the method name keeps its historical misspelling ("anaylze")
 * because external callers depend on it.
 *
 * @param log the full log / crash-report text to analyze
 * @return one Result per matching rule (possibly empty)
 */
public static Set<Result> anaylze(String log) {
    Set<Result> matches = new HashSet<>();
    for (Rule rule : Rule.values()) {
        Matcher matcher = rule.pattern.matcher(log);
        if (!matcher.find()) {
            continue;
        }
        matches.add(new Result(rule, log, matcher));
    }
    return matches;
}
/**
 * Verifies that the FORGE_ERROR rule extracts the full MissingModsException
 * stack trace as the "reason" group from a Forge crash report.
 *
 * <p>Fix: the test method was previously misspelled {@code forgeEroor};
 * renamed to {@code forgeError} (test names have no callers, so the rename is
 * safe).
 */
@Test
public void forgeError() throws IOException {
    CrashReportAnalyzer.Result result = findResultByRule(
            CrashReportAnalyzer.anaylze(loadLog("/crash-report/forge_error.txt")),
            CrashReportAnalyzer.Rule.FORGE_ERROR);
    // Whitespace is normalized on both sides so formatting differences don't matter.
    assertEquals(("\nnet.minecraftforge.fml.common.MissingModsException: Mod pixelmon (Pixelmon) requires [forge@[14.23.5.2860,)]\n" +
            "\tat net.minecraftforge.fml.common.Loader.sortModList(Loader.java:264) ~[Loader.class:?]\n" +
            "\tat net.minecraftforge.fml.common.Loader.loadMods(Loader.java:570) ~[Loader.class:?]\n" +
            "\tat net.minecraftforge.fml.client.FMLClientHandler.beginMinecraftLoading(FMLClientHandler.java:232) [FMLClientHandler.class:?]\n" +
            "\tat net.minecraft.client.Minecraft.func_71384_a(Minecraft.java:467) [bib.class:?]\n" +
            "\tat net.minecraft.client.Minecraft.func_99999_d(Minecraft.java:378) [bib.class:?]\n" +
            "\tat net.minecraft.client.main.Main.main(SourceFile:123) [Main.class:?]\n" +
            "\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0_131]\n" +
            "\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0_131]\n" +
            "\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_131]\n" +
            "\tat java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_131]\n" +
            "\tat net.minecraft.launchwrapper.Launch.launch(Launch.java:135) [launchwrapper-1.12.jar:?]\n" +
            "\tat net.minecraft.launchwrapper.Launch.main(Launch.java:28) [launchwrapper-1.12.jar:?]\n" +
            "\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0_131]\n" +
            "\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0_131]\n" +
            "\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_131]\n" +
            "\tat java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_131]\n" +
            "\tat oolloo.jlw.Wrapper.invokeMain(Wrapper.java:58) [JavaWrapper.jar:?]\n").replaceAll("\\s+", ""),
            result.getMatcher().group("reason").replaceAll("\\s+", ""));
}
/**
 * Flushes every partition buffer to the current snapshot map, then resets the
 * writer so the next snapshot starts on a fresh map.
 *
 * @return {@code true} on success; {@code false} if the current map could not
 *         be initialized or flushing any partition failed
 */
@Override
@CheckReturnValue
public boolean flushAndResetMap() {
    if (!initCurrentMap()) {
        return false;
    }
    // Flush every partition buffer; abort on the first failure.
    for (int i = 0; i < buffers.length; i++) {
        if (!flushPartition(i)) {
            return false;
        }
    }

    // we're done
    currentMap = null;
    if (logger.isFineEnabled()) {
        logger.fine("Stats for %s: keys=%,d, chunks=%,d, bytes=%,d",
                vertexName, totalKeys, totalChunks, totalPayloadBytes);
    }

    return true;
}
/** With no current map configured, flushAndResetMap() must fail fast and return false. */
@Test
public void when_noItemsAndNoCurrentMap_then_flushAndResetReturnsFalse() {
    snapshotContext.setCurrentMapName(null);
    assertFalse(writer.flushAndResetMap());
    // Restore a valid map name so subsequent tests are unaffected.
    snapshotContext.setCurrentMapName("map1");
}
/** Returns the configured name of this component. */
@Override
public String name() {
    return name;
}
/**
 * Exercises namespace ownership removal: removing owner+type together resets
 * ownership to the current user, partial/no-op removals leave ownership
 * unchanged, and removing only one of owner/owner-type must fail with an
 * IllegalArgumentException.
 */
@Test
public void testRemoveNamespaceOwnership() throws TException, IOException {
    // Removing an individual owner falls back to the current user.
    removeNamespaceOwnershipAndVerify(
        "remove_individual_ownership",
        ImmutableMap.of(HiveCatalog.HMS_DB_OWNER, "some_owner"),
        ImmutableSet.of(HiveCatalog.HMS_DB_OWNER, HiveCatalog.HMS_DB_OWNER_TYPE),
        "some_owner",
        PrincipalType.USER,
        UserGroupInformation.getCurrentUser().getShortUserName(),
        PrincipalType.USER);

    // Removing a group owner also falls back to the current user.
    removeNamespaceOwnershipAndVerify(
        "remove_group_ownership",
        ImmutableMap.of(
            HiveCatalog.HMS_DB_OWNER,
            "some_group_owner",
            HiveCatalog.HMS_DB_OWNER_TYPE,
            PrincipalType.GROUP.name()),
        ImmutableSet.of(HiveCatalog.HMS_DB_OWNER, HiveCatalog.HMS_DB_OWNER_TYPE),
        "some_group_owner",
        PrincipalType.GROUP,
        UserGroupInformation.getCurrentUser().getShortUserName(),
        PrincipalType.USER);

    // Removing the default ownership is a no-op.
    removeNamespaceOwnershipAndVerify(
        "remove_ownership_on_default_noop_1",
        ImmutableMap.of(),
        ImmutableSet.of(HiveCatalog.HMS_DB_OWNER, HiveCatalog.HMS_DB_OWNER_TYPE),
        UserGroupInformation.getCurrentUser().getShortUserName(),
        PrincipalType.USER,
        UserGroupInformation.getCurrentUser().getShortUserName(),
        PrincipalType.USER);

    removeNamespaceOwnershipAndVerify(
        "remove_ownership_on_default_noop_2",
        ImmutableMap.of(),
        ImmutableSet.of(),
        UserGroupInformation.getCurrentUser().getShortUserName(),
        PrincipalType.USER,
        UserGroupInformation.getCurrentUser().getShortUserName(),
        PrincipalType.USER);

    // An empty removal set leaves existing ownership untouched.
    removeNamespaceOwnershipAndVerify(
        "remove_ownership_noop_1",
        ImmutableMap.of(HiveCatalog.HMS_DB_OWNER, "some_owner"),
        ImmutableSet.of(),
        "some_owner",
        PrincipalType.USER,
        "some_owner",
        PrincipalType.USER);

    removeNamespaceOwnershipAndVerify(
        "remove_ownership_noop_2",
        ImmutableMap.of(
            HiveCatalog.HMS_DB_OWNER,
            "some_group_owner",
            HiveCatalog.HMS_DB_OWNER_TYPE,
            PrincipalType.GROUP.name()),
        ImmutableSet.of(),
        "some_group_owner",
        PrincipalType.GROUP,
        "some_group_owner",
        PrincipalType.GROUP);

    // Owner and owner-type must be removed together or not at all.
    assertThatThrownBy(
            () ->
                removeNamespaceOwnershipAndVerify(
                    "remove_owner_without_removing_owner_type",
                    ImmutableMap.of(
                        HiveCatalog.HMS_DB_OWNER,
                        "some_individual_owner",
                        HiveCatalog.HMS_DB_OWNER_TYPE,
                        PrincipalType.USER.name()),
                    ImmutableSet.of(HiveCatalog.HMS_DB_OWNER),
                    "some_individual_owner",
                    PrincipalType.USER,
                    "no_post_remove_expectation_due_to_exception_thrown",
                    null))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage(
            String.format(
                "Removing %s and %s has to be performed together or not at all",
                HiveCatalog.HMS_DB_OWNER_TYPE, HiveCatalog.HMS_DB_OWNER));

    assertThatThrownBy(
            () ->
                removeNamespaceOwnershipAndVerify(
                    "remove_owner_type_without_removing_owner",
                    ImmutableMap.of(
                        HiveCatalog.HMS_DB_OWNER,
                        "some_group_owner",
                        HiveCatalog.HMS_DB_OWNER_TYPE,
                        PrincipalType.GROUP.name()),
                    ImmutableSet.of(HiveCatalog.HMS_DB_OWNER_TYPE),
                    "some_group_owner",
                    PrincipalType.GROUP,
                    "no_post_remove_expectation_due_to_exception_thrown",
                    null))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage(
            String.format(
                "Removing %s and %s has to be performed together or not at all",
                HiveCatalog.HMS_DB_OWNER_TYPE, HiveCatalog.HMS_DB_OWNER));
}
/** Returns the Hive metastore URIs configured for this resource. */
public String getHiveMetastoreURIs() {
    return metastoreURIs;
}
/**
 * Builds a HiveResource from a CREATE RESOURCE statement and verifies its
 * name, type and metastore URIs.
 */
@Test
public void testFromStmt(@Mocked GlobalStateMgr globalStateMgr) throws UserException {
    String name = "hive0";
    String type = "hive";
    String metastoreURIs = "thrift://127.0.0.1:9380";
    Map<String, String> properties = Maps.newHashMap();
    properties.put("type", type);
    properties.put("hive.metastore.uris", metastoreURIs);
    CreateResourceStmt stmt = new CreateResourceStmt(true, name, properties);

    // Stub the global state manager so analysis uses our analyzer instance.
    Analyzer analyzer = new Analyzer(Analyzer.AnalyzerVisitor.getInstance());
    new Expectations() {
        {
            globalStateMgr.getAnalyzer();
            result = analyzer;
        }
    };
    Analyzer.analyze(stmt, connectContext);

    HiveResource resource = (HiveResource) Resource.fromStmt(stmt);
    Assert.assertEquals("hive0", resource.getName());
    Assert.assertEquals(type, resource.getType().name().toLowerCase());
    Assert.assertEquals(metastoreURIs, resource.getHiveMetastoreURIs());
}
/**
 * Deletes the resources (menus/buttons) with the given ids in one batch.
 *
 * @param ids non-empty list of non-blank resource ids
 * @return success result carrying the number of deleted rows
 */
@DeleteMapping("/batch")
@RequiresPermissions(value = {"system:resource:deleteMenu", "system:resource:deleteButton"}, logical = Logical.OR)
public ShenyuAdminResult deleteResource(@RequestBody @NotEmpty final List<@NotBlank String> ids) {
    return ShenyuAdminResult.success(ShenyuResultMessage.DELETE_SUCCESS, resourceService.delete(ids));
}
/** DELETE /resource/batch must report a successful deletion with the service's row count. */
@Test
public void testDeleteResource() throws Exception {
    final List<String> mockParameter = newArrayList("mock-id");
    given(resourceService.delete(mockParameter)).willReturn(1);
    this.mockMvc.perform(MockMvcRequestBuilders.delete("/resource/batch")
            .contentType(MediaType.APPLICATION_JSON)
            .content(GsonUtils.getInstance().toJson(mockParameter)))
        .andExpect(content().json(GsonUtils.getInstance().toJson(ShenyuAdminResult.success(ShenyuResultMessage.DELETE_SUCCESS, 1))))
        .andReturn();
}
/**
 * Narrows the generic servlet request/response to their HTTP counterparts and
 * delegates the actual filtering to {@code handle}.
 */
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
    HttpServletRequest httpRequest = (HttpServletRequest) request;
    HttpServletResponse httpResponse = (HttpServletResponse) response;
    handle(httpRequest, httpResponse, chain);
}
/** A disallowed HTTP method (TRACE) must be blocked: the chain is never continued. */
@Test
void blocksDisallowedMethod() throws Exception {
    when(request.getMethod()).thenReturn("TRACE");

    filter.doFilter(request, response, chain);

    verify(chain, never()).doFilter(request, response);
}
/**
 * Filters the given metadata in place according to the concrete
 * implementation's rules.
 *
 * @param metadata the metadata to filter; modified in place
 * @throws TikaException if filtering fails
 */
public abstract void filter(Metadata metadata) throws TikaException;
/**
 * Verifies the field-mapping filter config: mapped fields are renamed
 * (tika content -> "content", "a" -> "b") and unmapped fields are dropped.
 */
@Test
public void testFieldNameMapping() throws Exception {
    TikaConfig config = getConfig("TIKA-3137-field-mapping.xml");
    Metadata metadata = new Metadata();
    metadata.set(TikaCoreProperties.TIKA_CONTENT, "quick brown fox");
    metadata.set("author", "author");
    metadata.set("a", "a-value");
    MetadataFilter filter = config.getMetadataFilter();
    filter.filter(metadata);
    assertEquals("quick brown fox", metadata.get("content"));
    assertEquals("a-value", metadata.get("b"));
    // Fields not listed in the mapping must be removed.
    assertNull(metadata.get("author"));
    assertNull(metadata.get("a"));
}
/**
 * Starts an async archive service for the given write client when both auto
 * and async archiving are enabled.
 *
 * @param writeClient the write client whose config decides whether to start
 * @return the started service, or {@code null} when archiving is not
 *         configured as both auto and async
 */
public static AsyncArchiveService startAsyncArchiveIfEnabled(BaseHoodieWriteClient writeClient) {
    HoodieWriteConfig config = writeClient.getConfig();
    boolean shouldStart = config.isAutoArchive() && config.isAsyncArchive();
    if (!shouldStart) {
        LOG.info("The HoodieWriteClient is not configured to auto & async archive. Async archive service will not start.");
        return null;
    }
    AsyncArchiveService service = new AsyncArchiveService(writeClient);
    service.start(null);
    return service;
}
/** With async archiving disabled, startAsyncArchiveIfEnabled must return null. */
@Test
void startAsyncArchiveReturnsNullWhenAsyncArchiveDisabled() {
    when(config.isAutoArchive()).thenReturn(true);
    when(config.isAsyncArchive()).thenReturn(false);
    when(writeClient.getConfig()).thenReturn(config);
    assertNull(AsyncArchiveService.startAsyncArchiveIfEnabled(writeClient));
}
/**
 * Stops the store: releases all state handles and stops the watcher, even if
 * one of the steps fails. The first exception wins; later ones are suppressed,
 * and the combined failure is rethrown as a single FlinkException. Idempotent:
 * a second call is a no-op because {@code running} is flipped under the lock.
 */
@Override
public void stop() throws Exception {
    synchronized (lock) {
        if (running) {
            running = false;
            LOG.info("Stopping DefaultJobGraphStore.");
            Exception exception = null;

            try {
                jobGraphStateHandleStore.releaseAll();
            } catch (Exception e) {
                exception = e;
            }

            // Stop the watcher regardless of whether releasing handles failed.
            try {
                jobGraphStoreWatcher.stop();
            } catch (Exception e) {
                exception = ExceptionUtils.firstOrSuppressed(e, exception);
            }

            if (exception != null) {
                throw new FlinkException(
                        "Could not properly stop the DefaultJobGraphStore.", exception);
            }
        }
    }
}
/** stop() must release all state handles held by the underlying handle store. */
@Test
public void testStoppingJobGraphStoreShouldReleaseAllHandles() throws Exception {
    final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
    // Completing the future is the signal that releaseAll() was invoked.
    final TestingStateHandleStore<JobGraph> stateHandleStore =
            builder.setReleaseAllHandlesRunnable(() -> completableFuture.complete(null))
                    .build();
    final JobGraphStore jobGraphStore = createAndStartJobGraphStore(stateHandleStore);
    jobGraphStore.stop();

    assertThat(completableFuture.isDone(), is(true));
}
/**
 * Grants every permission except the explicitly blocked runtime and security
 * permission names.
 *
 * @param permission the permission being checked
 * @return {@code false} only for blocked RuntimePermission/SecurityPermission
 *         names; {@code true} otherwise
 */
@Override
public boolean implies(Permission permission) {
    if (permission instanceof RuntimePermission) {
        return !BLOCKED_RUNTIME_PERMISSIONS.contains(permission.getName());
    }
    if (permission instanceof SecurityPermission) {
        return !BLOCKED_SECURITY_PERMISSIONS.contains(permission.getName());
    }
    // Any other permission type is always allowed.
    return true;
}
/** Non-blocked security and runtime permissions must be implied (allowed) by the rule. */
@Test
public void rule_allows_permissions() {
    assertThat(rule.implies(allowedSecurity)).isTrue();
    assertThat(rule.implies(allowedRuntime)).isTrue();
}
/**
 * Visits a package description: looks up (or lazily creates and registers)
 * the matching RulePackage in the verifier data, then visits the package's
 * imports, type declarations and rules.
 *
 * @param descr the parsed package descriptor to visit
 */
public void visitPackageDescr(PackageDescr descr) throws UnknownDescriptionException, ClassNotFoundException, IOException {
    rulePackage = data.getPackageByName(descr.getName());

    // First time we see this package: register it in the verifier data.
    if (rulePackage == null) {
        rulePackage = new RulePackage(descr);
        rulePackage.setName(descr.getName());
        data.add(rulePackage);
    }

    visitImports(descr.getImports());
    TypeDeclarationDescrVisitor typeDeclarationDescrVisitor = new TypeDeclarationDescrVisitor(data);
    typeDeclarationDescrVisitor.visit(descr.getTypeDeclarations());
    visitRules(descr.getRules());
}
@Test void testSubPatterns() throws Exception { PackageDescr packageDescr = getPackageDescr(getClass().getResourceAsStream("SubPattern.drl")); assertThat(packageDescr).isNotNull(); packageDescrVisitor.visitPackageDescr(packageDescr); Collection<VerifierComponent> all = verifierData.getAll(); assertThat(all).isNotNull(); SubPattern test1SubPattern = null; SubPattern test2SubPattern = null; SubRule test1SubRule = null; SubRule test2SubRule = null; for (VerifierComponent verifierComponent : all) { // System.out.println( verifierComponent ); System.out.println("-" + verifierComponent); if (verifierComponent.getDescr() != null) { System.out.println(" \n\t\t => " + verifierComponent.getDescr().getLine() + ":" + +verifierComponent.getDescr().getEndLine() + " " + verifierComponent.getDescr().getText()); } else { System.out.println(" \n\t\t => null for " + verifierComponent.getClass().getSimpleName()); } if (verifierComponent.getVerifierComponentType().equals(VerifierComponentType.SUB_PATTERN)) { SubPattern subPattern = (SubPattern) verifierComponent; if ("Test 1".equals(subPattern.getRuleName())) { assertThat(test1SubPattern).isNull(); test1SubPattern = subPattern; } else if ("Test 2".equals(subPattern.getRuleName())) { assertThat(test2SubPattern).isNull(); test2SubPattern = subPattern; } } if (verifierComponent.getVerifierComponentType().equals(VerifierComponentType.SUB_RULE)) { SubRule subRule = (SubRule) verifierComponent; if ("Test 1".equals(subRule.getRuleName())) { assertThat(test1SubRule).isNull(); test1SubRule = subRule; } else if ("Test 2".equals(subRule.getRuleName())) { assertThat(test2SubRule).isNull(); test2SubRule = subRule; } } } assertThat(test1SubPattern).isNotNull(); assertThat(test1SubPattern.getItems().size()).isEqualTo(3); assertThat(test2SubPattern).isNotNull(); assertThat(test2SubPattern.getItems().size()).isEqualTo(3); assertThat(test1SubRule).isNotNull(); assertThat(test1SubRule.getItems().size()).isEqualTo(1); assertThat(test2SubRule).isNotNull(); 
assertThat(test2SubRule.getItems().size()).isEqualTo(1); }
@Override @CheckForNull public EmailMessage format(Notification notification) { if (!"alerts".equals(notification.getType())) { return null; } // Retrieve useful values String projectId = notification.getFieldValue("projectId"); String projectKey = notification.getFieldValue("projectKey"); String projectName = notification.getFieldValue("projectName"); String projectVersion = notification.getFieldValue("projectVersion"); String branchName = notification.getFieldValue("branch"); String alertName = notification.getFieldValue("alertName"); String alertText = notification.getFieldValue("alertText"); String alertLevel = notification.getFieldValue("alertLevel"); String ratingMetricsInOneString = notification.getFieldValue("ratingMetrics"); boolean isNewAlert = Boolean.parseBoolean(notification.getFieldValue("isNewAlert")); String fullProjectName = computeFullProjectName(projectName, branchName); // Generate text String subject = generateSubject(fullProjectName, alertLevel, isNewAlert); String messageBody = generateMessageBody(projectName, projectKey, projectVersion, branchName, alertName, alertText, isNewAlert, ratingMetricsInOneString); // And finally return the email that will be sent return new EmailMessage() .setMessageId("alerts/" + projectId) .setSubject(subject) .setPlainTextMessage(messageBody); }
/** A quality-gate failure with two threshold messages is rendered one per bullet line. */
@Test
public void shouldFormatAlertWithSeveralMessages() {
    Notification notification =
            createNotification("Failed", "violations > 4, coverage < 75%", "ERROR", "false");

    EmailMessage message = template.format(notification);

    String expectedBody = "Project: Foo\n"
            + "Version: V1-SNAP\n"
            + "Quality gate status: Failed\n"
            + "\n"
            + "Quality gate thresholds:\n"
            + " - violations > 4\n"
            + " - coverage < 75%\n"
            + "\n"
            + "More details at: http://nemo.sonarsource.org/dashboard?id=org.sonar.foo:foo";
    assertThat(message.getMessageId(), is("alerts/45"));
    assertThat(message.getSubject(), is("Quality gate status changed on \"Foo\""));
    assertThat(message.getMessage(), is(expectedBody));
}
/**
 * Parses a ZooKeeper {@code CreateMode} from its textual name.
 *
 * <p>Fix vs. the previous version: the broad, silently-empty
 * {@code catch (Exception e) {}} is narrowed to {@code IllegalArgumentException}
 * (the only exception {@code Enum.valueOf} throws for a non-null name) and the
 * fallback is made explicit. Behavior is unchanged for every input.
 *
 * @param modeHeader the raw mode name, e.g. {@code "EPHEMERAL"}; may be {@code null}
 * @param defaultMode the mode to fall back to when the header is missing or unknown
 * @return the parsed mode, or {@code defaultMode} when {@code modeHeader} is
 *         {@code null} or does not name a {@code CreateMode} constant
 */
public static CreateMode getCreateModeFromString(String modeHeader, CreateMode defaultMode) {
    if (modeHeader == null) {
        return defaultMode;
    }
    try {
        return CreateMode.valueOf(modeHeader);
    } catch (IllegalArgumentException ignored) {
        // Unknown mode name: deliberately fall back to the default rather than fail.
        return defaultMode;
    }
}
/** Every valid mode name round-trips; an unknown name yields the supplied default. */
@Test
public void testCreateModeExtraction() {
    CreateMode[] validModes = {
        CreateMode.EPHEMERAL,
        CreateMode.EPHEMERAL_SEQUENTIAL,
        CreateMode.PERSISTENT,
        CreateMode.PERSISTENT_SEQUENTIAL
    };
    for (CreateMode mode : validModes) {
        assertEquals(mode, getCreateModeFromString(mode.name(), CreateMode.EPHEMERAL));
    }
    assertEquals(CreateMode.EPHEMERAL, getCreateModeFromString("DOESNOTEXIST", CreateMode.EPHEMERAL));
}
/**
 * Returns a page of social users matching the request.
 * Straight delegation: all filtering and paging is implemented in the mapper query.
 */
@Override
public PageResult<SocialUserDO> getSocialUserPage(SocialUserPageReqVO pageReqVO) {
    PageResult<SocialUserDO> page = socialUserMapper.selectPage(pageReqVO);
    return page;
}
@Test
public void testGetSocialUserPage() {
    // Mock data: this record matches every filter in the request below.
    SocialUserDO dbSocialUser = randomPojo(SocialUserDO.class, o -> { // will be returned by the query
        o.setType(SocialTypeEnum.GITEE.getType());
        o.setNickname("芋艿");
        o.setOpenid("yudaoyuanma");
        o.setCreateTime(buildTime(2020, 1, 15));
    });
    socialUserMapper.insert(dbSocialUser);
    // type does not match
    socialUserMapper.insert(cloneIgnoreId(dbSocialUser, o -> o.setType(SocialTypeEnum.DINGTALK.getType())));
    // nickname does not match
    socialUserMapper.insert(cloneIgnoreId(dbSocialUser, o -> o.setNickname(randomString())));
    // openid does not match
    socialUserMapper.insert(cloneIgnoreId(dbSocialUser, o -> o.setOpenid("java")));
    // createTime does not match (outside the queried range)
    socialUserMapper.insert(cloneIgnoreId(dbSocialUser, o -> o.setCreateTime(buildTime(2020, 1, 21))));
    // Prepare request parameters (partial matches for nickname/openid).
    SocialUserPageReqVO reqVO = new SocialUserPageReqVO();
    reqVO.setType(SocialTypeEnum.GITEE.getType());
    reqVO.setNickname("芋");
    reqVO.setOpenid("yudao");
    reqVO.setCreateTime(buildBetweenTime(2020, 1, 10, 2020, 1, 20));
    // Call the service under test.
    PageResult<SocialUserDO> pageResult = socialUserService.getSocialUserPage(reqVO);
    // Assert: only the first record survives all filters.
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbSocialUser, pageResult.getList().get(0));
}
/**
 * Removes and returns the next element from the queue.
 *
 * @return the removed element; may be {@code null} — presumably when the
 *         queue is empty (confirm against concrete implementations)
 */
@Nullable
public abstract E pop();
/** Elements must come out in priority order, independent of insertion order. */
@Test
public void pushPop() {
    final PriorityQueue<Priority, String> queue = populateQueue();
    for (String expected : new String[] {"7", "2", "4", "1", "3", "5", "6", "8"}) {
        assertEquals(expected, queue.pop());
    }
}
/**
 * Walks the aggregated-log directories of the requested user and gathers container
 * log metadata from every node file that matches the request's filters (app id,
 * node id, container id). Per-file read failures are logged and skipped, so one
 * corrupt file does not abort the whole collection.
 *
 * @param fileController the log aggregation file controller used to list and read files
 * @return metadata for all matching containers (possibly empty, never null)
 * @throws IOException if listing the user's application directories fails
 */
public List<ContainerLogMeta> collect(
    LogAggregationFileController fileController) throws IOException {
  List<ContainerLogMeta> containersLogMeta = new ArrayList<>();
  RemoteIterator<FileStatus> appDirs = fileController.
      getApplicationDirectoriesOfUser(logsRequest.getUser());
  while (appDirs.hasNext()) {
    FileStatus currentAppDir = appDirs.next();
    // A null appId in the request means "all applications".
    if (logsRequest.getAppId() == null ||
        logsRequest.getAppId().equals(currentAppDir.getPath().getName())) {
      ApplicationId appId = ApplicationId.fromString(
          currentAppDir.getPath().getName());
      RemoteIterator<FileStatus> nodeFiles = fileController
          .getNodeFilesOfApplicationDirectory(currentAppDir);
      while (nodeFiles.hasNext()) {
        FileStatus currentNodeFile = nodeFiles.next();
        // Skip files from nodes the request does not target.
        if (!logsRequest.getNodeId().match(currentNodeFile.getPath()
            .getName())) {
          continue;
        }
        // A "<appId>.har" entry is a Hadoop archive: swap the iterator to
        // walk the archive's contents instead of the plain directory listing.
        if (currentNodeFile.getPath().getName().equals(
            logsRequest.getAppId() + ".har")) {
          Path p = new Path("har:///" +
              currentNodeFile.getPath().toUri().getRawPath());
          nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
          continue;
        }
        try {
          // Map of containerId -> log file infos for this node file.
          Map<String, List<ContainerLogFileInfo>> metaFiles = fileController
              .getLogMetaFilesOfNode(logsRequest, currentNodeFile, appId);
          if (metaFiles == null) {
            continue;
          }
          // Keep only the requested container (null containerId keeps all).
          metaFiles.entrySet().removeIf(entry ->
              !(logsRequest.getContainerId() == null ||
                  logsRequest.getContainerId().equals(entry.getKey())));
          containersLogMeta.addAll(createContainerLogMetas(
              currentNodeFile.getPath().getName(), metaFiles));
        } catch (IOException ioe) {
          // Best-effort: log and continue with the remaining node files.
          LOG.warn("Can not get log meta from the log file:"
              + currentNodeFile.getPath() + "\n" + ioe.getMessage());
        }
      }
    }
  }
  return containersLogMeta;
}
/** When every filter is set, only the single matching small log file is returned. */
@Test
void testAllSet() throws IOException {
    ExtendedLogMetaRequest.ExtendedLogMetaRequestBuilder builder =
            new ExtendedLogMetaRequest.ExtendedLogMetaRequestBuilder();

    Set<String> sizeFilters = new HashSet<>();
    sizeFilters.add("<51");
    Set<String> modTimeFilters = new HashSet<>();
    modTimeFilters.add("<1000");

    builder.setAppId(app.toString());
    builder.setContainerId(attemptContainer.toString());
    builder.setFileName(String.format("%s.*", SMALL_FILE_NAME));
    builder.setFileSize(sizeFilters);
    builder.setModificationTime(modTimeFilters);
    builder.setNodeId(TEST_NODE);
    builder.setUser("TEST");

    LogAggregationMetaCollector collector =
            new LogAggregationMetaCollector(builder.build(), new YarnConfiguration());
    List<ContainerLogMeta> res = collector.collect(fileController);

    List<ContainerLogFileInfo> allFiles = new ArrayList<>();
    for (ContainerLogMeta meta : res) {
        allFiles.addAll(meta.getContainerLogMeta());
    }
    assertEquals(1, allFiles.size());
}