focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Builds a {@link JSyntaxTextArea} with the dynamic theme applied, or a stubbed
 * variant when running with {@code java.awt.headless=true} (unit-test override).
 *
 * @param rows initial row count passed to the text area constructor
 * @param cols initial column count passed to the text area constructor
 * @param disableUndo whether undo support is disabled
 * @return a themed text area, or a no-op stub in headless test mode
 * @throws HeadlessException when headless and the test override property is not set
 */
public static JSyntaxTextArea getInstance(int rows, int cols, boolean disableUndo) {
    try {
        JSyntaxTextArea jSyntaxTextArea = new JSyntaxTextArea(rows, cols, disableUndo);
        JFactory.withDynamic(jSyntaxTextArea, JSyntaxTextArea::applyTheme);
        // Gutter styling is only applied if the text area is contained in a scroll pane.
        jSyntaxTextArea.addHierarchyListener(GUTTER_THEME_PATCHER);
        return jSyntaxTextArea;
    } catch (HeadlessException e) {
        // Allow override for unit testing only
        if ("true".equals(System.getProperty("java.awt.headless"))) { // $NON-NLS-1$ $NON-NLS-2$
            // Anonymous stub: overrides the Swing-dependent methods so tests can
            // exercise setText/getText without a display.
            return new JSyntaxTextArea(disableUndo) {
                private String savedText = "";
                private static final long serialVersionUID = 1L;
                @Override
                protected void init() {
                    try {
                        super.init();
                    } catch (HeadlessException|NullPointerException e) {
                        // ignored
                    }
                }
                // Override methods that would fail
                @Override
                public void setCodeFoldingEnabled(boolean b) { }
                @Override
                public void setCaretPosition(int b) { }
                @Override
                public void discardAllEdits() { }
                @Override
                public void setText(String t) { savedText = t; }
                @Override
                public String getText() { return savedText; }
                @Override
                public boolean isCodeFoldingEnabled(){ return true; }
            };
        } else {
            throw e;
        }
    }
}
/**
 * Verifies that getInstance() works under {@code java.awt.headless=true}: the
 * headless stub must store and return text instead of throwing HeadlessException.
 */
@Test public void testHeadlessGetText() {
    String key = "java.awt.headless";
    String initialValue = System.getProperty(key);
    try {
        System.setProperty(key, "true");
        // getInstance() returns anonymous class with some overridden methods
        // to avoid errors due to 'java.awt.headless=true'.
        // E.g. it should not throw a HeadlessException.
        JSyntaxTextArea textArea = JSyntaxTextArea.getInstance(10,20);
        String myText = "my text";
        textArea.setText(myText);
        assertEquals(myText, textArea.getText());
    } finally {
        // Restore the original property so other tests are unaffected.
        if (initialValue != null) {
            System.setProperty(key, initialValue);
        } else {
            System.clearProperty(key);
        }
    }
}
/**
 * Sends a request to the given API path; delegates to the overload taking an
 * explicit body map, using an empty body.
 *
 * @param api    API path to invoke
 * @param params query parameters
 * @param method HTTP method name
 * @return the response body returned by the delegate overload
 * @throws NacosException on request failure (propagated from the delegate)
 */
public String reqApi(String api, Map<String, String> params, String method) throws NacosException {
    // Collections.emptyMap() is type-safe; the raw Collections.EMPTY_MAP field
    // triggers an unchecked-assignment warning.
    return reqApi(api, params, Collections.emptyMap(), method);
}
/**
 * reqApi with an explicit body map should build the expected request URL; the
 * mocked NacosRestTemplate echoes its first argument (the URL) as the result.
 */
@Test
void testReqApi2() throws Exception {
    //given
    NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
    when(nacosRestTemplate.exchangeForm(any(), any(), any(), any(), any(), any())).thenAnswer(invocationOnMock -> {
        //return url
        HttpRestResult<Object> res = new HttpRestResult<Object>();
        res.setData(invocationOnMock.getArgument(0));
        res.setCode(200);
        return res;
    });
    // Inject the mock template into the proxy via reflection.
    final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
    nacosRestTemplateField.setAccessible(true);
    nacosRestTemplateField.set(clientProxy, nacosRestTemplate);
    String api = "/api";
    Map<String, String> params = new HashMap<>();
    Map<String, String> body = new HashMap<>();
    String method = HttpMethod.GET;
    //when
    String res = clientProxy.reqApi(api, params, body, method);
    //then
    assertEquals("http://localhost:8848/api", res);
}
/**
 * Reduces the grouped table with separate adder/subtractor reducers; delegates to
 * the named overload using an empty {@code NamedInternal}.
 */
@Override
public KTable<K, V> reduce(final Reducer<V> adder,
                           final Reducer<V> subtractor,
                           final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) {
    return reduce(adder, subtractor, NamedInternal.empty(), materialized);
}
/** reduce() must reject a null adder with NullPointerException. */
@Test
public void shouldNotAllowNullAdderOnReduce() {
    assertThrows(NullPointerException.class, () -> groupedTable.reduce(
            null,
            MockReducer.STRING_REMOVER,
            Materialized.as("store")));
}
/**
 * Creates a TrustManagerFactory for the configured algorithm, using the configured
 * security provider when one is set.
 *
 * @return a new TrustManagerFactory instance
 * @throws NoSuchProviderException  if the configured provider is unknown
 * @throws NoSuchAlgorithmException if the configured algorithm is unsupported
 */
public TrustManagerFactory createTrustManagerFactory() throws NoSuchProviderException, NoSuchAlgorithmException {
    if (getProvider() == null) {
        return TrustManagerFactory.getInstance(getAlgorithm());
    }
    return TrustManagerFactory.getInstance(getAlgorithm(), getProvider());
}
/** The factory bean must produce a non-null TrustManagerFactory with default settings. */
@Test
public void testDefaults() throws Exception {
    assertNotNull(factoryBean.createTrustManagerFactory());
}
/**
 * Logs the WebSocket connection failure and forwards the exception to the
 * registered listener, if one is present.
 *
 * @param e the failure reported by the WebSocket client
 */
@Override
public void onError(Exception e) {
    log.error("WebSocket connection to {} failed with error", uri, e);
    if (listenerOpt.isPresent()) {
        listenerOpt.get().onError(e);
    }
}
/** onError must forward the received exception to the registered listener. */
@Test
public void testNotifyListenerOnError() throws Exception {
    IOException e = new IOException("123");
    client.onError(e);
    verify(listener).onError(e);
}
/**
 * Serializes a single-column row: null passes straight through; otherwise the
 * lone column value is extracted and delegated to the inner serializer.
 *
 * @param topic  the topic being serialized for
 * @param values the row values; must contain exactly one column
 * @return the serialized bytes, or null for a null row
 */
@Override
public byte[] serialize(final String topic, final List<?> values) {
    if (values == null) {
        return null;
    }
    final T onlyColumn = extractOnlyColumn(values, topic);
    return inner.serialize(topic, onlyColumn);
}
/** Serializing more than one column must fail with a descriptive SerializationException. */
@Test
public void shouldThrowIfMoreThanOneValue() {
    // When:
    final Exception e = assertThrows(
        SerializationException.class,
        () -> serializer.serialize("t", ImmutableList.of("too", "many"))
    );
    // Then:
    assertThat(e.getMessage(), is("Column count mismatch on serialization. topic: t, expected: 1, got: 2"));
}
/**
 * Hash code consistent with equals: combines the enabled flag, the jmxConfig hash
 * and the collection frequency using the conventional 31 multiplier. Produces
 * exactly the same values as the previous hand-rolled chain.
 */
@Override
public final int hashCode() {
    int hash = enabled ? 1 : 0;
    hash = 31 * hash + (jmxConfig == null ? 0 : jmxConfig.hashCode());
    hash = 31 * hash + collectionFrequencySeconds;
    return hash;
}
/** A copy-constructed config must be equal to, and hash identically to, the original. */
@Test
public void testCloneEquals() {
    // create MetricsConfig with non-defaults
    ClientMetricsConfig original = new ClientMetricsConfig()
            .setEnabled(false)
            .setCollectionFrequencySeconds(1);
    original.getJmxConfig().setEnabled(false);
    ClientMetricsConfig clone = new ClientMetricsConfig(original);
    assertEquals(original.hashCode(), clone.hashCode());
    assertEquals(original, clone);
}
/**
 * Converts the given value to a Record by delegating to the two-argument overload
 * with a null second argument.
 * NOTE(review): the meaning of the null argument depends on the overload, which is
 * not visible here — confirm against the two-arg convert().
 */
Record convert(Object data) {
    return convert(data, null);
}
/** convert() must map a nested Struct onto the table's nested schema. */
@Test
public void testNestedStructConvert() {
    Table table = mock(Table.class);
    when(table.schema()).thenReturn(NESTED_SCHEMA);
    RecordConverter converter = new RecordConverter(table, config);
    Struct nestedData = createNestedStructData();
    Record record = converter.convert(nestedData);
    assertNestedRecordValues(record);
}
/**
 * Checks annotation compatibility between a previous and a current schema using the
 * given handlers. Handlers without a checkCompatibility implementation are skipped;
 * each schema is annotation-processed, its resolved properties are collected per
 * PathSpec, and the two maps are compared.
 *
 * @param prevSchema the earlier schema version
 * @param currSchema the current schema version
 * @param handlers   annotation handlers; only those implementing compatibility checks are used
 * @return one compatibility result per effective handler
 */
public static List<AnnotationCompatibilityResult> checkPegasusSchemaAnnotation(DataSchema prevSchema,
    DataSchema currSchema, List<SchemaAnnotationHandler> handlers) {
    // Update handler list to only contain handlers with implementation of checkCompatibility.
    handlers = handlers
        .stream()
        .filter( h -> h.implementsCheckCompatibility())
        .collect(Collectors.toList());
    SchemaAnnotationProcessor.SchemaAnnotationProcessResult prevSchemaResult = processSchemaAnnotation(prevSchema, handlers);
    SchemaAnnotationProcessor.SchemaAnnotationProcessResult currSchemaResult = processSchemaAnnotation(currSchema, handlers);
    // Node -> (context, resolved properties) maps for both schema versions.
    Map<PathSpec, Pair<CompatibilityCheckContext, Map<String, Object>>> prevResolvedPropertiesMap =
        getNodeToResolvedProperties(prevSchemaResult);
    Map<PathSpec, Pair<CompatibilityCheckContext, Map<String, Object>>> currResolvedPropertiesMap =
        getNodeToResolvedProperties(currSchemaResult);
    return getCompatibilityResult(prevResolvedPropertiesMap, currResolvedPropertiesMap, handlers);
}
/**
 * Data-driven check: compatibility results for each prev/curr schema pair must match
 * the expected messages, compared via their string form.
 */
@Test(dataProvider = "annotationCompatibilityCheckTestData")
public void testCheckCompatibility(String prevSchemaFile, String currSchemaFile,
    List<SchemaAnnotationHandler> handlers, List<AnnotationCompatibilityResult> expectedResults) throws IOException {
    DataSchema prevSchema = TestUtil.dataSchemaFromPdlInputStream(getClass().getResourceAsStream(prevSchemaFile));
    DataSchema currSchema = TestUtil.dataSchemaFromPdlInputStream(getClass().getResourceAsStream(currSchemaFile));
    List<AnnotationCompatibilityResult> results = AnnotationCompatibilityChecker
        .checkPegasusSchemaAnnotation(prevSchema, currSchema, handlers);
    Assert.assertEquals(results.size(), expectedResults.size());
    for (int i = 0; i < results.size(); i++) {
        Assert.assertEquals(results.get(i).getMessages().size(), expectedResults.get(i).getMessages().size());
        List<CompatibilityMessage> actualCompatibilityMessage = (List<CompatibilityMessage>) results.get(i).getMessages();
        List<CompatibilityMessage> expectCompatibilityMessage = (List<CompatibilityMessage>) expectedResults.get(i).getMessages();
        // Compare message by message using the string form.
        for (int j = 0; j < actualCompatibilityMessage.size(); j++) {
            Assert.assertEquals(actualCompatibilityMessage.get(j).toString(), expectCompatibilityMessage.get(j).toString());
        }
    }
}
/**
 * Schedules cleanup tasks for internal topics whose names start with one of the
 * given query application ids. Topic listing failures are logged and the method
 * returns without scheduling anything (best-effort).
 *
 * @param serviceContext      context providing the topic client
 * @param queryApplicationIds application ids whose topics should be cleaned up
 */
public void cleanupOrphanedInternalTopics(
    final ServiceContext serviceContext,
    final Set<String> queryApplicationIds
) {
    final KafkaTopicClient topicClient = serviceContext.getTopicClient();
    final Set<String> topicNames;
    try {
        topicNames = topicClient.listTopicNames();
    } catch (KafkaResponseGetFailedException e) {
        // Best-effort: skip cleanup this round rather than failing.
        LOG.error("Couldn't fetch topic names", e);
        return;
    }
    // Find any transient query topics
    final Set<String> orphanedQueryApplicationIds = topicNames.stream()
        .map(topicName -> queryApplicationIds.stream().filter(topicName::startsWith).findFirst())
        .filter(Optional::isPresent)
        .map(Optional::get)
        .collect(Collectors.toSet());
    for (final String queryApplicationId : orphanedQueryApplicationIds) {
        cleanupService.addCleanupTask(
            new QueryCleanupService.QueryCleanupTask(
                serviceContext,
                queryApplicationId,
                Optional.empty(),
                true,
                // State dir: configured value, else the Streams default.
                ksqlConfig.getKsqlStreamConfigProps()
                    .getOrDefault(
                        StreamsConfig.STATE_DIR_CONFIG,
                        StreamsConfig.configDef().defaultValues().get(StreamsConfig.STATE_DIR_CONFIG))
                    .toString(),
                ksqlConfig.getString(KsqlConfig.KSQL_SERVICE_ID_CONFIG),
                ksqlConfig.getString(KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG)));
    }
}
/**
 * Only application ids that prefix an existing topic name get a cleanup task
 * (here only APP_ID_1 — presumably only TOPIC1/TOPIC2 match it; depends on fixtures).
 */
@Test
public void shouldCleanup_someApplicationIds() {
    // Given
    when(topicClient.listTopicNames()).thenReturn(ImmutableSet.of(TOPIC1, TOPIC2));
    // When
    cleaner.cleanupOrphanedInternalTopics(serviceContext, ImmutableSet.of(APP_ID_1, APP_ID_2));
    // Then
    verify(queryCleanupService, times(1)).addCleanupTask(taskCaptor.capture());
    assertThat(taskCaptor.getAllValues().get(0).getAppId(), is(APP_ID_1));
}
/**
 * Looks up the tag names bound to a config identified by dataId/group/tenant.
 *
 * @return the tag names, or null when the query yields no rows (or an unexpected
 *         row count)
 * @throws CannotGetJdbcConnectionException when the database is unreachable (fatal-logged)
 */
@Override
public List<String> selectTagByConfig(String dataId, String group, String tenant) {
    ConfigTagsRelationMapper configTagsRelationMapper = mapperManager.findMapper(
            dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION);
    String sql = configTagsRelationMapper.select(Collections.singletonList("tag_name"),
            Arrays.asList("data_id", "group_id", "tenant_id"));
    try {
        return jt.queryForList(sql, new Object[] {dataId, group, tenant}, String.class);
    } catch (EmptyResultDataAccessException | IncorrectResultSizeDataAccessException e) {
        // Both cases mean "no usable tag rows"; the two identical catch blocks are
        // merged into one multi-catch.
        return null;
    } catch (CannotGetJdbcConnectionException e) {
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
/**
 * selectTagByConfig must return the query result, map empty/size mismatches to null,
 * and propagate connection failures.
 */
@Test
void testSelectTagByConfig() {
    String dataId = "dataId4567222";
    String group = "group3456789";
    String tenant = "tenant4567890";
    //mock page list
    List<String> tagStrings = Arrays.asList("", "", "");
    when(jdbcTemplate.queryForList(anyString(), eq(new Object[] {dataId, group, tenant}), eq(String.class))).thenReturn(tagStrings);
    List<String> configTags = externalConfigInfoPersistService.selectTagByConfig(dataId, group, tenant);
    assertEquals(tagStrings, configTags);
    //mock EmptyResultDataAccessException
    when(jdbcTemplate.queryForList(anyString(), eq(new Object[] {dataId, group, tenant}), eq(String.class))).thenThrow(
            new EmptyResultDataAccessException(3));
    List<String> nullResult = externalConfigInfoPersistService.selectTagByConfig(dataId, group, tenant);
    assertTrue(nullResult == null);
    //mock IncorrectResultSizeDataAccessException
    when(jdbcTemplate.queryForList(anyString(), eq(new Object[] {dataId, group, tenant}), eq(String.class))).thenThrow(
            new IncorrectResultSizeDataAccessException(3));
    List<String> nullResult2 = externalConfigInfoPersistService.selectTagByConfig(dataId, group, tenant);
    assertTrue(nullResult2 == null);
    //mock CannotGetJdbcConnectionException (should be rethrown, not swallowed)
    when(jdbcTemplate.queryForList(anyString(), eq(new Object[] {dataId, group, tenant}), eq(String.class))).thenThrow(
            new CannotGetJdbcConnectionException("mock exp"));
    try {
        externalConfigInfoPersistService.selectTagByConfig(dataId, group, tenant);
        assertFalse(true);
    } catch (Exception e) {
        assertTrue(e instanceof CannotGetJdbcConnectionException);
    }
}
/**
 * Deletes each file or folder in the batch via the Box API. Folders are deleted
 * recursively; API errors are mapped to backend exceptions per file.
 *
 * @param files    paths (with transfer status) to delete
 * @param prompt   password callback (unused here)
 * @param callback deletion callback (unused here)
 * @throws BackgroundException when the Box API reports a failure
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    for(Path f : files.keySet()) {
        try {
            if(f.isDirectory()) {
                // recursive=true so non-empty folders are removed as well
                new FoldersApi(new BoxApiClient(session.getClient())).deleteFoldersId(fileid.getFileId(f), null, true);
            }
            else {
                new FilesApi(new BoxApiClient(session.getClient())).deleteFilesId(fileid.getFileId(f), null);
            }
        }
        catch(ApiException e) {
            throw new BoxExceptionMappingService(fileid).map("Cannot delete {0}", e, f);
        }
    }
}
/** Deleting a created directory must make find() flip from true to false. */
@Test
public void testDeleteFolder() throws Exception {
    final BoxFileidProvider fileid = new BoxFileidProvider(session);
    final Path directory = new BoxDirectoryFeature(session, fileid).mkdir(new Path(
            new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
    assertTrue(new BoxFindFeature(session, fileid).find(directory, new DisabledListProgressListener()));
    new BoxDeleteFeature(session, fileid).delete(Collections.singletonList(directory),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse((new BoxFindFeature(session, fileid).find(directory, new DisabledListProgressListener())));
}
/**
 * Creation timestamp of this migration. Fixed to a constant instant so migration
 * ordering never changes.
 */
@Override
public ZonedDateTime createdAt() {
    final String creationInstant = "2019-04-03T10:27:00Z";
    return ZonedDateTime.parse(creationInstant);
}
/** Pins the migration creation date to detect accidental changes. */
@Test
public void createdAt() {
    // Test the date to detect accidental changes to it.
    assertThat(migration.createdAt()).isEqualTo(ZonedDateTime.parse("2019-04-03T10:27:00Z"));
}
/**
 * Returns the difference {@code n1 - n2}.
 * (Method name "substraction" is fixed by the interface and cannot be renamed here.)
 */
@Override
public int substraction(int n1, int n2) {
    return n1 - n2;
}
/** substraction(5, 3) must return 2. */
@Test
public void testSubstraction() {
    Controlador controlador = new Controlador();
    int result = controlador.substraction(5, 3);
    assertEquals(2, result);
}
/**
 * Returns true when the input is either a valid IP address or a valid CIDR block.
 *
 * @param ipOrCidr candidate IP address or CIDR notation string
 */
public static boolean isValidIPOrCidr(String ipOrCidr) {
    if (isValidIp(ipOrCidr)) {
        return true;
    }
    return isValidCidr(ipOrCidr);
}
/** Plain IPv4/IPv6 addresses and CIDR blocks must all be accepted. */
@Test
public void isValidIPOrCidr() {
    String ipv4 = "192.168.1.0";
    String ipv6 = "2001:0db8:85a3:0000:0000:8a2e:0370:7334";
    String ipv4Cidr = "192.168.1.0/24";
    String ipv6Cidr = "2001:0db8:1234:5678::/64";
    assert IPAddressUtils.isValidIPOrCidr(ipv4);
    assert IPAddressUtils.isValidIPOrCidr(ipv6);
    assert IPAddressUtils.isValidIPOrCidr(ipv4Cidr);
    assert IPAddressUtils.isValidIPOrCidr(ipv6Cidr);
}
/**
 * Parses and stores a property value. Null values are rejected.
 *
 * @param property the property name
 * @param value    the raw value to parse and store; must not be null
 * @return the previously stored (parsed) value for the property, or null if none
 */
public Object set(final String property, final Object value) {
    Objects.requireNonNull(value, "value");
    return props.put(property, parser.parse(property, value));
}
/** Setting an unknown property must be rejected with IllegalArgumentException. */
@Test(expected = IllegalArgumentException.class)
public void shouldNotAllowUnknownPropertyToBeSet() {
    realProps.set("some.unknown.prop", "some.value");
}
/**
 * Returns the result immediately if this future is complete, otherwise returns
 * {@code valueIfAbsent} without blocking.
 *
 * @param valueIfAbsent value returned when the future has not yet completed
 * @throws ExecutionException   if the future completed exceptionally
 * @throws InterruptedException if interrupted while retrieving the result
 */
public abstract T getNow(T valueIfAbsent) throws InterruptedException, ExecutionException;
/**
 * Before completion getNow() must return the fallback; after the completer thread
 * finishes, the future must hold the completed value.
 */
@Test
public void testCompletingFutures() throws Exception {
    final KafkaFutureImpl<String> future = new KafkaFutureImpl<>();
    CompleterThread<String> myThread = new CompleterThread<>(future, "You must construct additional pylons.");
    assertIsNotCompleted(future);
    // Incomplete future: fallback value comes back.
    assertEquals("I am ready", future.getNow("I am ready"));
    myThread.start();
    awaitAndAssertResult(future, "You must construct additional pylons.", "I am ready");
    assertIsSuccessful(future);
    myThread.join();
    assertNull(myThread.testException);
}
/**
 * Prepares fetch requests for fetchable partitions and dispatches them, routing
 * responses to the success/failure handlers.
 */
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(
            prepareFetchRequests(),
            this::handleFetchSuccess,
            this::handleFetchFailure
    );
}
/**
 * Under READ_COMMITTED isolation, records from a committed transaction must be
 * returned, and the fetch request must carry the READ_COMMITTED isolation level.
 */
@Test
public void testReturnCommittedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(),
            Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    // Build a buffer holding one committed transaction with two records.
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(body -> {
        FetchRequest request = (FetchRequest) body;
        assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
        return true;
    }, fullFetchResponseWithAbortedTransactions(records, Collections.emptyList(), Errors.NONE, 100L, 100L, 0));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(fetchedRecords.get(tp0).size(), 2);
}
/**
 * Widens two types to a decimal able to represent either: the whole-digit precision
 * and the scale are each taken as the maximum of the two operands'.
 *
 * @param t0 first operand type (converted to decimal)
 * @param t1 second operand type (converted to decimal)
 * @return the widened decimal type
 */
public static SqlDecimal widen(final SqlType t0, final SqlType t1) {
    final SqlDecimal left = DecimalUtil.toSqlDecimal(t0);
    final SqlDecimal right = DecimalUtil.toSqlDecimal(t1);
    final int leftWholeDigits = left.getPrecision() - left.getScale();
    final int rightWholeDigits = right.getPrecision() - right.getScale();
    final int wholePrecision = Math.max(leftWholeDigits, rightWholeDigits);
    final int scale = Math.max(left.getScale(), right.getScale());
    return SqlTypes.decimal(wholePrecision + scale, scale);
}
/** Widening BIGINT with decimals must yield the expected precision/scale combinations. */
@Test
public void shouldWidenBigIntAndDecimal() {
    // Given:
    final SqlDecimal smallerPrecision = SqlTypes.decimal(14, 3);
    final SqlDecimal largerPrecision = SqlTypes.decimal(20, 0);
    // Then:
    assertThat(
        DecimalUtil.widen(smallerPrecision, SqlTypes.BIGINT),
        is(SqlTypes.decimal(22, 3))
    );
    assertThat(
        DecimalUtil.widen(SqlTypes.BIGINT, largerPrecision),
        is(SqlTypes.decimal(20, 0))
    );
}
/**
 * Enriches the latest job stats. Shared state is updated only when the lock is
 * immediately available (best-effort); the last computed extended stats are
 * always returned.
 *
 * @param jobStats the incoming job stats
 * @return the (possibly stale) extended job stats
 */
public JobStatsExtended enrich(JobStats jobStats) {
    JobStats latestJobStats = getLatestJobStats(jobStats, previousJobStats);
    if (lock.tryLock()) {
        try {
            setFirstRelevantJobStats(latestJobStats);
            setJobStatsExtended(latestJobStats);
            setPreviousJobStats(latestJobStats);
        } finally {
            // Always release: without the finally, an exception in any setter
            // would leak the lock and permanently block future updates.
            lock.unlock();
        }
    }
    return jobStatsExtended;
}
/**
 * After two enrich() calls, the internal firstRelevantJobStats must reflect the
 * second stats snapshot (inspected via Whitebox).
 */
@Test
void firstRelevantJobStatsIsUpdated() {
    JobStats firstJobStats = getJobStats(0L, 0L, 0L, 100L);
    JobStats secondJobStats = getJobStats(10L, 0L, 0L, 100L);
    jobStatsEnricher.enrich(firstJobStats);
    jobStatsEnricher.enrich(secondJobStats);
    JobStats jobStats = Whitebox.getInternalState(jobStatsEnricher, "firstRelevantJobStats");
    assertThat(jobStats).isEqualToComparingFieldByField(secondJobStats);
}
/**
 * Determines the frontend database protocol type. Priority: explicitly configured
 * type; otherwise the storage type of the first database that has a data source;
 * otherwise the default frontend protocol.
 *
 * @return the resolved database type (never null)
 */
public static DatabaseType getDatabaseType() {
    Optional<DatabaseType> configuredDatabaseType = findConfiguredDatabaseType();
    if (configuredDatabaseType.isPresent()) {
        return configuredDatabaseType.get();
    }
    MetaDataContexts metaDataContexts = ProxyContext.getInstance().getContextManager().getMetaDataContexts();
    // No databases yet: fall back to the default protocol type.
    if (metaDataContexts.getMetaData().getDatabases().isEmpty()) {
        return TypedSPILoader.getService(DatabaseType.class, DEFAULT_FRONTEND_DATABASE_PROTOCOL_TYPE);
    }
    Optional<ShardingSphereDatabase> database = metaDataContexts.getMetaData().getDatabases().values().stream()
            .filter(ShardingSphereDatabase::containsDataSource).findFirst();
    // Use the storage type of the first storage unit of the first data-sourced database.
    return database.isPresent()
            ? database.get().getResourceMetaData().getStorageUnits().values().iterator().next().getStorageType()
            : TypedSPILoader.getService(DatabaseType.class, DEFAULT_FRONTEND_DATABASE_PROTOCOL_TYPE);
}
/** With mocked MySQL-backed databases, getDatabaseType() must resolve to MySQL. */
@Test
void assertGetDatabaseTypeInstanceOfMySQLDatabaseTypeFromMetaDataContextsSchemaName() {
    ContextManager contextManager = mockContextManager(mockDatabases(), new Properties());
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    DatabaseType databaseType = FrontDatabaseProtocolTypeFactory.getDatabaseType();
    assertThat(databaseType, instanceOf(DatabaseType.class));
    assertThat(databaseType.getType(), is("MySQL"));
}
/**
 * Appends a key/value pair to the history (most recent last), de-duplicating and
 * trimming the oldest entries to MAX_LIST_SIZE, then persists the encoded history.
 * Does nothing in incognito mode.
 *
 * @param name  the key name
 * @param value the key value
 */
public void store(@NonNull String name, @NonNull String value) {
    if (mIncognitoMode) return;
    final HistoryKey usedKey = new HistoryKey(name, value);
    // Move-to-end semantics: drop an existing equal key, then re-append at the end.
    mLoadedKeys.remove(usedKey);
    mLoadedKeys.add(usedKey);
    // Trim oldest entries from the front of the list.
    while (mLoadedKeys.size() > MAX_LIST_SIZE) {
        mLoadedKeys.remove(0);
    }
    mRxPref.set(encodeForOldDevices(mLoadedKeys));
}
/**
 * Stored keys must appear after the default emoji entry, in insertion order.
 */
@Test
public void testStore() {
    mUnderTest = new QuickKeyHistoryRecords(mSharedPreferences);
    mUnderTest.store("1", "2");
    mUnderTest.store("3", "4");
    mUnderTest.store("5", "6");
    final List<QuickKeyHistoryRecords.HistoryKey> currentHistory = mUnderTest.getCurrentHistory();
    Assert.assertEquals(3 + 1 /*first default emoji*/, currentHistory.size());
    Assert.assertEquals(QuickKeyHistoryRecords.DEFAULT_EMOJI, currentHistory.get(0).name);
    Assert.assertEquals(QuickKeyHistoryRecords.DEFAULT_EMOJI, currentHistory.get(0).value);
    Assert.assertEquals("1", currentHistory.get(1).name);
    Assert.assertEquals("2", currentHistory.get(1).value);
    Assert.assertEquals("3", currentHistory.get(2).name);
    Assert.assertEquals("4", currentHistory.get(2).value);
    Assert.assertEquals("5", currentHistory.get(3).name);
    Assert.assertEquals("6", currentHistory.get(3).value);
}
/**
 * Translates an AwsProxyRequest into an HttpServletRequest: validates the event,
 * strips the base path, normalizes the Content-Type charset, and populates the
 * servlet request attributes (gateway context, stage vars, raw event, ALB context,
 * Lambda context, security context).
 *
 * @throws InvalidRequestEventException when the event lacks an HTTP method or request context
 */
@Override
public HttpServletRequest readRequest(AwsProxyRequest request, SecurityContext securityContext,
    Context lambdaContext, ContainerConfig config) throws InvalidRequestEventException {
    // Expect the HTTP method and context to be populated. If they are not, we are handling an
    // unsupported event type.
    if (request.getHttpMethod() == null || request.getHttpMethod().equals("") || request.getRequestContext() == null) {
        throw new InvalidRequestEventException(INVALID_REQUEST_ERROR);
    }
    request.setPath(stripBasePath(request.getPath(), config));
    if (request.getMultiValueHeaders() != null && request.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE) != null) {
        String contentType = request.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE);
        // put single as we always expect to have one and only one content type in a request.
        request.getMultiValueHeaders().putSingle(HttpHeaders.CONTENT_TYPE, getContentTypeWithCharset(contentType, config));
    }
    AwsProxyHttpServletRequest servletRequest = new AwsProxyHttpServletRequest(request, lambdaContext, securityContext, config);
    servletRequest.setServletContext(servletContext);
    servletRequest.setAttribute(API_GATEWAY_CONTEXT_PROPERTY, request.getRequestContext());
    servletRequest.setAttribute(API_GATEWAY_STAGE_VARS_PROPERTY, request.getStageVariables());
    servletRequest.setAttribute(API_GATEWAY_EVENT_PROPERTY, request);
    servletRequest.setAttribute(ALB_CONTEXT_PROPERTY, request.getRequestContext().getElb());
    servletRequest.setAttribute(LAMBDA_CONTEXT_PROPERTY, lambdaContext);
    servletRequest.setAttribute(JAX_SECURITY_CONTEXT_PROPERTY, securityContext);
    return servletRequest;
}
/**
 * A multipart Content-Type without charset must get the container's default
 * charset appended, and the request's character encoding must match.
 */
@Test
void readRequest_contentCharset_appendsCharsetToComplextContentType() {
    String contentType = "multipart/form-data; boundary=something";
    AwsProxyRequest request = new AwsProxyRequestBuilder(ENCODED_REQUEST_PATH, "GET")
            .header(HttpHeaders.CONTENT_TYPE, contentType).build();
    try {
        HttpServletRequest servletRequest = reader.readRequest(request, null, null, ContainerConfig.defaultConfig());
        assertNotNull(servletRequest);
        assertNotNull(servletRequest.getHeader(HttpHeaders.CONTENT_TYPE));
        String contentAndCharset = contentType + "; charset=" + LambdaContainerHandler.getContainerConfig().getDefaultContentCharset();
        assertEquals(contentAndCharset, servletRequest.getHeader(HttpHeaders.CONTENT_TYPE));
        assertEquals(LambdaContainerHandler.getContainerConfig().getDefaultContentCharset(), servletRequest.getCharacterEncoding());
    } catch (InvalidRequestEventException e) {
        e.printStackTrace();
        fail("Could not read request");
    }
}
public static String findAddress(List<NodeAddress> addresses, NodeAddressType preferredAddressType) { if (addresses == null) { return null; } Map<String, String> addressMap = addresses.stream() .collect(Collectors.toMap(NodeAddress::getType, NodeAddress::getAddress, (address1, address2) -> { LOGGER.warnOp("Found multiple addresses with the same type. Only the first address '{}' will be used.", address1); return address1; })); // If user set preferred address type, we should check it first if (preferredAddressType != null && addressMap.containsKey(preferredAddressType.toValue())) { return addressMap.get(preferredAddressType.toValue()); } if (addressMap.containsKey("ExternalDNS")) { return addressMap.get("ExternalDNS"); } else if (addressMap.containsKey("ExternalIP")) { return addressMap.get("ExternalIP"); } else if (addressMap.containsKey("InternalDNS")) { return addressMap.get("InternalDNS"); } else if (addressMap.containsKey("InternalIP")) { return addressMap.get("InternalIP"); } else if (addressMap.containsKey("Hostname")) { return addressMap.get("Hostname"); } return null; }
/** Addresses with only unrecognized types must resolve to null. */
@Test
public void testFindAddressNullWithInvalidAddressTypes() {
    List<NodeAddress> addresses = new ArrayList<>(3);
    addresses.add(new NodeAddressBuilder().withType("SomeAddress").withAddress("my.external.address").build());
    addresses.add(new NodeAddressBuilder().withType("SomeOtherAddress").withAddress("my.internal.address").build());
    addresses.add(new NodeAddressBuilder().withType("YetAnotherAddress").withAddress("192.168.2.94").build());
    String address = NodeUtils.findAddress(addresses, null);
    assertThat(address, is(nullValue()));
}
/**
 * Resolves the heartbeat interval for an instance. Priority: instance metadata's
 * HEART_BEAT_INTERVAL extend-data; then the published instance's extend datum;
 * then the global switch-domain default.
 *
 * @return the heartbeat interval in the unit stored by the metadata (converted to long)
 */
@Override
public long getHeartBeatInterval(String namespaceId, String serviceName, String ip, int port, String cluster) {
    Service service = getService(namespaceId, serviceName, true);
    String metadataId = InstancePublishInfo.genMetadataId(ip, port, cluster);
    Optional<InstanceMetadata> metadata = metadataManager.getInstanceMetadata(service, metadataId);
    if (metadata.isPresent() && metadata.get().getExtendData()
            .containsKey(PreservedMetadataKeys.HEART_BEAT_INTERVAL)) {
        return ConvertUtils.toLong(metadata.get().getExtendData().get(PreservedMetadataKeys.HEART_BEAT_INTERVAL));
    }
    // Fall back to the value published by the ephemeral client, if any.
    String clientId = IpPortBasedClient.getClientId(ip + InternetAddressUtil.IP_PORT_SPLITER + port, true);
    Client client = clientManager.getClient(clientId);
    InstancePublishInfo instance = null != client ? client.getInstancePublishInfo(service) : null;
    if (null != instance && instance.getExtendDatum().containsKey(PreservedMetadataKeys.HEART_BEAT_INTERVAL)) {
        return ConvertUtils.toLong(instance.getExtendDatum().get(PreservedMetadataKeys.HEART_BEAT_INTERVAL));
    }
    // Global default.
    return switchDomain.getClientBeatInterval();
}
/**
 * With instance metadata lacking a heartbeat entry, the switch-domain default
 * interval must be returned.
 */
@Test
void testGetHeartBeatInterval() {
    InstanceMetadata instanceMetadata = new InstanceMetadata();
    Map<String, Object> map = new HashMap<>(2);
    instanceMetadata.setExtendData(map);
    when(metadataManager.getInstanceMetadata(Mockito.any(), Mockito.anyString())).thenReturn(Optional.of(instanceMetadata));
    when(switchDomain.getClientBeatInterval()).thenReturn(100L);
    long interval = instanceOperatorClient.getHeartBeatInterval("A", "C", "1.1.1.1", 8848, "D");
    assertEquals(100L, interval);
}
/**
 * Fluent setter for the order id.
 *
 * @param id the id to assign
 * @return this Order, for call chaining
 */
public Order id(Long id) {
    this.id = id;
    return this;
}
/** Placeholder for a test of the fluent id() setter — not yet implemented. */
@Test
public void idTest() {
    // TODO: test id
}
/**
 * Rethrows a reflection-related checked exception as an unchecked one:
 * NoSuchMethod/IllegalAccess become IllegalStateException, InvocationTargetException
 * is delegated to its own handler, RuntimeExceptions propagate as-is, and anything
 * else is wrapped in UndeclaredThrowableException.
 *
 * @param ex the reflection exception to translate (never returns normally)
 */
public static void handleReflectionException(Exception ex) {
    if (ex instanceof NoSuchMethodException) {
        throw new IllegalStateException("Method not found: " + ex.getMessage());
    }
    if (ex instanceof IllegalAccessException) {
        throw new IllegalStateException("Could not access method or field: " + ex.getMessage());
    }
    if (ex instanceof InvocationTargetException) {
        // Presumably always throws; if it ever returned, control would fall
        // through to the checks below — TODO confirm against the helper.
        handleInvocationTargetException((InvocationTargetException) ex);
    }
    if (ex instanceof RuntimeException) {
        throw (RuntimeException) ex;
    }
    throw new UndeclaredThrowableException(ex);
}
/**
 * Exercises each translation branch; the outer assertThrows expects the final
 * unhandled IOException to surface as UndeclaredThrowableException.
 */
@Test
void testHandleReflectionException() {
    assertThrows(UndeclaredThrowableException.class, () -> {
        // NoSuchMethodException -> IllegalStateException with "Method not found" prefix.
        try {
            NoSuchMethodException exception = new NoSuchMethodException("test");
            ReflectUtils.handleReflectionException(exception);
        } catch (Exception e) {
            assertEquals("Method not found: test", e.getMessage());
        }
        // IllegalAccessException -> IllegalStateException with access-denied prefix.
        try {
            IllegalAccessException exception = new IllegalAccessException("test");
            ReflectUtils.handleReflectionException(exception);
        } catch (Exception e) {
            assertEquals("Could not access method or field: test", e.getMessage());
        }
        // RuntimeException is rethrown unchanged.
        RuntimeException exception = new RuntimeException("test");
        try {
            ReflectUtils.handleReflectionException(exception);
        } catch (Exception e) {
            assertEquals(exception, e);
        }
        // InvocationTargetException is unwrapped to its target exception.
        try {
            InvocationTargetException invocationTargetException = new InvocationTargetException(exception);
            ReflectUtils.handleReflectionException(invocationTargetException);
        } catch (Exception e) {
            assertEquals(exception, e);
        }
        // Unrecognized checked exception -> UndeclaredThrowableException (caught by assertThrows).
        ReflectUtils.handleReflectionException(new IOException());
    });
}
/**
 * Enumerates one split per file reachable from each of the given paths (recursing
 * through directories via addSplitsForPath). The minDesiredSplits hint is ignored
 * since this enumerator never splits individual files.
 *
 * @throws IOException on file system access failure
 */
@Override
public Collection<FileSourceSplit> enumerateSplits(Path[] paths, int minDesiredSplits)
        throws IOException {
    final ArrayList<FileSourceSplit> collected = new ArrayList<>();
    for (final Path path : paths) {
        final FileSystem fileSystem = path.getFileSystem();
        addSplitsForPath(fileSystem.getFileStatus(path), fileSystem, collected);
    }
    return collected;
}
/** Enumerating a directory must recursively include files in nested directories. */
@Test
void testIncludeFilesFromNestedDirectories() throws Exception {
    final Path[] testPaths = new Path[] {
        new Path("testfs:///dir/file1"),
        new Path("testfs:///dir/nested/file.out"),
        new Path("testfs:///dir/nested/anotherfile.txt")
    };
    testFs = TestingFileSystem.createWithFiles("testfs", testPaths);
    testFs.register();
    final NonSplittingRecursiveEnumerator enumerator = createEnumerator();
    final Collection<FileSourceSplit> splits = enumerator.enumerateSplits(new Path[] {new Path("testfs:///dir")}, 1);
    assertThat(toPaths(splits)).containsExactlyInAnyOrder(testPaths);
}
/**
 * Returns the request body: multipart requests yield the parts collection; otherwise
 * the cached body bytes as a string, or — when the body is blank — the string
 * parameters re-encoded as a UTF-8 form string.
 *
 * @return the parts collection or the body/parameters string
 * @throws Exception propagated from part/encoding access
 */
@Override
public Object getBody() throws Exception {
    if (StringUtils.containsIgnoreCase(target.getContentType(), MediaType.MULTIPART_FORM_DATA)) {
        return target.getParts();
    } else {
        String s = ByteUtils.toString(body);
        if (StringUtils.isBlank(s)) {
            // No raw body captured: rebuild it from the parameter map.
            return HttpUtils
                    .encodingParams(HttpUtils.translateParameterMap(stringMap), StandardCharsets.UTF_8.name());
        }
        return s;
    }
}
/**
 * getBody() must return the encoded parameter string for a blank body and a
 * non-null parts result for multipart content.
 */
@Test
void testGetBody() throws Exception {
    Object body = reuseHttpServletRequest.getBody();
    assertNotNull(body);
    assertEquals("name=test&value=123&", body.toString());
    // Switch to multipart: parts collection should be returned instead.
    target.setContentType(MediaType.MULTIPART_FORM_DATA);
    body = reuseHttpServletRequest.getBody();
    assertNotNull(body);
}
/**
 * GET endpoint returning overall node information, as JSON or XML with UTF-8 charset.
 */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public NodeInfo get() {
    return getNodeInfo();
}
/**
 * Requesting the node resource with an unsupported Accept type must fail with an
 * internal-server-error response and no body content.
 */
@Test
public void testInvalidAccept() throws JSONException, Exception {
    WebResource r = resource();
    String responseStr = "";
    try {
        responseStr = r.path("ws").path("v1").path("node")
                .accept(MediaType.TEXT_PLAIN).get(String.class);
        fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
        ClientResponse response = ue.getResponse();
        assertResponseStatusCode(Status.INTERNAL_SERVER_ERROR, response.getStatusInfo());
        WebServicesTestUtils.checkStringMatch(
                "error string exists and shouldn't", "", responseStr);
    }
}
/**
 * Returns true when a live (non-null) mapping exists for the key.
 *
 * @param key the key to look up; must not be null
 * @throws IllegalStateException if this map has been destroyed
 */
@Override
public boolean containsKey(K key) {
    checkState(!destroyed, destroyedMessage);
    checkNotNull(key, ERROR_NULL_KEY);
    final V current = get(key);
    return current != null;
}
/** containsKey must track put/remove and be false for never-inserted keys. */
@Test
public void testContainsKey() throws Exception {
    expectPeerMessage(clusterCommunicator);
    assertFalse(ecMap.containsKey(KEY1));
    ecMap.put(KEY1, VALUE1);
    assertTrue(ecMap.containsKey(KEY1));
    assertFalse(ecMap.containsKey(KEY2));
    ecMap.remove(KEY1);
    assertFalse(ecMap.containsKey(KEY1));
}
/**
 * Parses a 6-byte CGM Feature packet: 24-bit feature flags, a type/sample-location
 * nibble pair, and a 16-bit CRC. When E2E-CRC is supported the CRC is verified;
 * otherwise the CRC field must be 0xFFFF. Invalid packets and CRC mismatches are
 * routed to the corresponding error callbacks.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);
    if (data.size() != 6) {
        onInvalidDataReceived(device, data);
        return;
    }
    final int featuresValue = data.getIntValue(Data.FORMAT_UINT24_LE, 0);
    final int typeAndSampleLocation = data.getIntValue(Data.FORMAT_UINT8, 3);
    final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 4);
    final CGMFeatures features = new CGMFeatures(featuresValue);
    if (features.e2eCrcSupported) {
        // CRC covers the first 4 bytes only.
        final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 4);
        if (actualCrc != expectedCrc) {
            onContinuousGlucoseMonitorFeaturesReceivedWithCrcError(device, data);
            return;
        }
    } else {
        // If the device doesn't support E2E-safety the value of the field shall be set to 0xFFFF.
        if (expectedCrc != 0xFFFF) {
            onInvalidDataReceived(device, data);
            return;
        }
    }
    @SuppressLint("WrongConstant")
    final int type = typeAndSampleLocation & 0x0F; // least significant nibble
    final int sampleLocation = typeAndSampleLocation >> 4; // most significant nibble
    onContinuousGlucoseMonitorFeaturesReceived(device, features, type, sampleLocation, features.e2eCrcSupported);
}
// Feeds a hand-built 6-byte CGM Feature packet (feature bits, type/location
// byte 0x16, CRC 0xC18A) into the callback and asserts that every individual
// feature flag, the type (arterial plasma), the sample location (finger) and
// the secured flag are decoded as expected. The error callbacks assert-fail
// if invoked, and the 'called' flag proves the success path actually ran.
@Test public void onContinuousGlucoseMeasurementFeaturesReceived_full() { final DataReceivedCallback callback = new CGMFeatureDataCallback() { @Override public void onContinuousGlucoseMonitorFeaturesReceived(@NonNull final BluetoothDevice device, @NonNull final CGMFeatures features, final int type, final int sampleLocation, final boolean secured) { called = true; assertNotNull(features); assertFalse(features.calibrationSupported); assertTrue(features.patientHighLowAlertsSupported); assertTrue(features.hypoAlertsSupported); assertTrue(features.hyperAlertsSupported); assertFalse(features.rateOfIncreaseDecreaseAlertsSupported); assertTrue(features.deviceSpecificAlertSupported); assertTrue(features.sensorMalfunctionDetectionSupported); assertFalse(features.sensorTempHighLowDetectionSupported); assertFalse(features.sensorResultHighLowSupported); assertTrue(features.lowBatteryDetectionSupported); assertTrue(features.sensorTypeErrorDetectionSupported); assertTrue(features.generalDeviceFaultSupported); assertTrue(features.e2eCrcSupported); assertFalse(features.multipleBondSupported); assertFalse(features.multipleSessionsSupported); assertTrue(features.cgmTrendInfoSupported); assertTrue(features.cgmQualityInfoSupported); assertEquals("Type", TYPE_ARTERIAL_PLASMA, type); assertEquals("Sample Location", SAMPLE_LOCATION_FINGER, sampleLocation); assertTrue(secured); } @Override public void onContinuousGlucoseMonitorFeaturesReceivedWithCrcError(@NonNull final BluetoothDevice device, @NonNull final Data data) { assertEquals("Correct packet but invalid CRC reported", 1, 2); } @Override public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { assertEquals("Correct packet but invalid data reported", 1, 2); } }; final MutableData data = new MutableData(new byte[6]); assertTrue(data.setValue(0b11001111001101110, Data.FORMAT_UINT24_LE, 0)); assertTrue(data.setValue(0x16, Data.FORMAT_UINT8, 3)); assertTrue(data.setValue(0xC18A, 
Data.FORMAT_UINT16_LE, 4)); called = false; //noinspection DataFlowIssue callback.onDataReceived(null, data); assertTrue(called); }
public void createPipe(CreatePipeStmt stmt) throws DdlException { try { lock.writeLock().lock(); Pair<Long, String> dbIdAndName = resolvePipeNameUnlock(stmt.getPipeName()); boolean existed = nameToId.containsKey(dbIdAndName); if (existed) { if (!stmt.isIfNotExists() && !stmt.isReplace()) { ErrorReport.reportSemanticException(ErrorCode.ERR_PIPE_EXISTS); } if (stmt.isIfNotExists()) { return; } else if (stmt.isReplace()) { LOG.info("Pipe {} already exist, replace it with a new one", stmt.getPipeName()); Pipe pipe = pipeMap.get(nameToId.get(dbIdAndName)); dropPipeImpl(pipe); } } // Add pipe long id = GlobalStateMgr.getCurrentState().getNextId(); Pipe pipe = Pipe.fromStatement(id, stmt); putPipe(pipe); repo.addPipe(pipe); } finally { lock.writeLock().unlock(); } }
// Creates a pipe in the test database, then runs "select inspect_all_pipes()"
// and asserts the resulting fragment plan mentions the "name" attribute —
// i.e. the inspection function surfaces pipe metadata.
@Test public void testInspectPipes() throws Exception { ConnectContext newCtx = UtFrameUtils.initCtxForNewPrivilege(UserIdentity.ROOT); newCtx.setDatabase(PIPE_TEST_DB); newCtx.setThreadLocalInfo(); createPipe("create pipe p_inspect as insert into tbl " + "select * from files('path'='fake://pipe', 'format'='parquet')"); String sql = "select inspect_all_pipes()"; String plan = UtFrameUtils.getFragmentPlan(newCtx, sql); Assert.assertTrue(plan.contains("name")); }
/**
 * Moves the current file pointer to the given absolute position.
 *
 * @param position target position; must lie within [0, length()]
 * @throws IllegalArgumentException if the position is outside the file bounds
 */
@Override
public void moveTo(long position) throws IllegalArgumentException {
    // A valid position may equal length() (i.e. end of file).
    final boolean withinBounds = position >= 0 && position <= length();
    if (!withinBounds) {
        throw new IllegalArgumentException("Position out of the bounds of the file!");
    }
    fp = position;
}
// After seeking to position 10, available bytes shrink accordingly; reset()
// must restore the stream so the full text length is available again.
@Test public void reset() throws IOException { ss.moveTo(10); assertEquals(text.length - 10, ss.availableExact()); ss.reset(); assertEquals(text.length, ss.availableExact()); }
/**
 * Returns the cached list of least-loaded nodes, refreshing the cache when it
 * has never been populated or is older than {@code cacheRefreshInterval}.
 *
 * <p>The cache timestamp is only advanced when the refresh actually yielded
 * nodes, so an empty result is retried on the next call instead of being
 * cached for a full interval.
 *
 * @return the (possibly refreshed) cached list of least-loaded remote nodes
 */
@VisibleForTesting
synchronized List<RemoteNode> getLeastLoadedNodes() {
    final long currTime = System.currentTimeMillis();
    // Check for the uninitialized cache first; the age test is meaningless
    // before the first successful refresh.
    if (cachedNodes == null || currTime - lastCacheUpdateTime > cacheRefreshInterval) {
        cachedNodes = convertToRemoteNodes(
                this.nodeMonitor.selectLeastLoadedNodes(this.numNodes));
        if (!cachedNodes.isEmpty()) {
            lastCacheUpdateTime = currTime;
        }
    }
    return cachedNodes;
}
// End-to-end container auto-update scenario: allocates two OPPORTUNISTIC
// containers, then walks one container through the full update lifecycle —
// promotion to GUARANTEED, resource increase to 2GB, decrease back to 1GB,
// and demotion back to OPPORTUNISTIC — verifying after each step that both
// the AM allocate response and the NM heartbeat response carry the update.
// Retries (extra allocate/heartbeat calls) absorb asynchronous propagation.
@Test(timeout = 600000) public void testContainerAutoUpdateContainer() throws Exception { rm.stop(); createAndStartRMWithAutoUpdateContainer(); MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService()); nm1.registerNode(); nm1.nodeHeartbeat(oppContainersStatus, true); OpportunisticContainerAllocatorAMService amservice = (OpportunisticContainerAllocatorAMService) rm .getApplicationMasterService(); MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm) .withAppName("app") .withUser("user") .withAcls(null) .withQueue("default") .withUnmanagedAM(false) .build(); RMApp app1 = MockRMAppSubmitter.submit(rm, data); MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); ResourceScheduler scheduler = rm.getResourceScheduler(); RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId()); nm1.nodeHeartbeat(oppContainersStatus, true); GenericTestUtils.waitFor(() -> amservice.getLeastLoadedNodes().size() == 1, 10, 10 * 100); AllocateResponse allocateResponse = am1.allocate(Arrays.asList( ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(1 * GB), 2, true, null, ExecutionTypeRequest .newInstance(ExecutionType.OPPORTUNISTIC, true))), null); List<Container> allocatedContainers = allocateResponse.getAllocatedContainers(); allocatedContainers.addAll( am1.allocate(null, null).getAllocatedContainers()); Assert.assertEquals(2, allocatedContainers.size()); Container container = allocatedContainers.get(0); // Start Container in NM nm1.nodeHeartbeat(Arrays.asList(ContainerStatus .newInstance(container.getId(), ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)), true); rm.drainEvents(); // Verify that container is actually running wrt the RM.. 
RMContainer rmContainer = ((CapacityScheduler) scheduler) .getApplicationAttempt(container.getId().getApplicationAttemptId()) .getRMContainer(container.getId()); Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState()); // Send Promotion req... this should result in update error // Since the container doesn't exist anymore.. allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList( UpdateContainerRequest.newInstance(0, container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null, ExecutionType.GUARANTEED))); nm1.nodeHeartbeat(Arrays.asList(ContainerStatus .newInstance(container.getId(), ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)), true); rm.drainEvents(); // Get the update response on next allocate allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>()); // Check the update response from YARNRM Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size()); UpdatedContainer uc = allocateResponse.getUpdatedContainers().get(0); Assert.assertEquals(container.getId(), uc.getContainer().getId()); Assert.assertEquals(ExecutionType.GUARANTEED, uc.getContainer().getExecutionType()); // Check that the container is updated in NM through NM heartbeat response NodeHeartbeatResponse response = nm1.nodeHeartbeat(true); Assert.assertEquals(1, response.getContainersToUpdate().size()); Container containersFromNM = response.getContainersToUpdate().get(0); Assert.assertEquals(container.getId(), containersFromNM.getId()); Assert.assertEquals(ExecutionType.GUARANTEED, containersFromNM.getExecutionType()); //Increase resources allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList( UpdateContainerRequest.newInstance(1, container.getId(), ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(2 * GB, 1), null))); response = nm1.nodeHeartbeat(Arrays.asList(ContainerStatus .newInstance(container.getId(), ExecutionType.GUARANTEED, ContainerState.RUNNING, "", 0)), true); rm.drainEvents(); if 
(allocateResponse.getUpdatedContainers().size() == 0) { allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>()); } Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size()); uc = allocateResponse.getUpdatedContainers().get(0); Assert.assertEquals(container.getId(), uc.getContainer().getId()); Assert.assertEquals(Resource.newInstance(2 * GB, 1), uc.getContainer().getResource()); rm.drainEvents(); // Check that the container resources are increased in // NM through NM heartbeat response if (response.getContainersToUpdate().size() == 0) { response = nm1.nodeHeartbeat(true); } Assert.assertEquals(1, response.getContainersToUpdate().size()); Assert.assertEquals(Resource.newInstance(2 * GB, 1), response.getContainersToUpdate().get(0).getResource()); //Decrease resources allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList( UpdateContainerRequest.newInstance(2, container.getId(), ContainerUpdateType.DECREASE_RESOURCE, Resources.createResource(1 * GB, 1), null))); Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size()); rm.drainEvents(); // Check that the container resources are decreased // in NM through NM heartbeat response response = nm1.nodeHeartbeat(true); Assert.assertEquals(1, response.getContainersToUpdate().size()); Assert.assertEquals(Resource.newInstance(1 * GB, 1), response.getContainersToUpdate().get(0).getResource()); nm1.nodeHeartbeat(oppContainersStatus, true); // DEMOTE the container allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList( UpdateContainerRequest.newInstance(3, container.getId(), ContainerUpdateType.DEMOTE_EXECUTION_TYPE, null, ExecutionType.OPPORTUNISTIC))); response = nm1.nodeHeartbeat(Arrays.asList(ContainerStatus .newInstance(container.getId(), ExecutionType.GUARANTEED, ContainerState.RUNNING, "", 0)), true); rm.drainEvents(); if (allocateResponse.getUpdatedContainers().size() == 0) { // Get the update response on next allocate allocateResponse = am1.allocate(new 
ArrayList<>(), new ArrayList<>()); } // Check the update response from YARNRM Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size()); uc = allocateResponse.getUpdatedContainers().get(0); Assert.assertEquals(ExecutionType.OPPORTUNISTIC, uc.getContainer().getExecutionType()); // Check that the container is updated in NM through NM heartbeat response if (response.getContainersToUpdate().size() == 0) { response = nm1.nodeHeartbeat(oppContainersStatus, true); } Assert.assertEquals(1, response.getContainersToUpdate().size()); Assert.assertEquals(ExecutionType.OPPORTUNISTIC, response.getContainersToUpdate().get(0).getExecutionType()); }
/**
 * Returns the human-readable name of this analyzer.
 *
 * @return the analyzer name
 */
@Override
public String getName() {
    final String analyzerName = "CPE Analyzer";
    return analyzerName;
}
// Sanity check: the analyzer reports its expected display name.
@Test public void testGetName() { CPEAnalyzer instance = new CPEAnalyzer(); String expResult = "CPE Analyzer"; String result = instance.getName(); assertEquals(expResult, result); }
String substituteParametersInSqlString(String sql, SqlParameterSource paramSource) { ParsedSql parsedSql = NamedParameterUtils.parseSqlStatement(sql); List<SqlParameter> declaredParams = NamedParameterUtils.buildSqlParameterList(parsedSql, paramSource); if (declaredParams.isEmpty()) { return sql; } for (SqlParameter parSQL: declaredParams) { String paramName = parSQL.getName(); if (!paramSource.hasValue(paramName)) { continue; } Object value = paramSource.getValue(paramName); if (value instanceof SqlParameterValue) { value = ((SqlParameterValue)value).getValue(); } if (!(value instanceof Iterable)) { String ValueForSQLQuery = getValueForSQLQuery(value); sql = sql.replace(":" + paramName, ValueForSQLQuery); continue; } //Iterable int count = 0; String valueArrayStr = ""; for (Object valueTemp: (Iterable)value) { if (count > 0) { valueArrayStr+=", "; } String valueForSQLQuery = getValueForSQLQuery(valueTemp); valueArrayStr += valueForSQLQuery; ++count; } sql = sql.replace(":" + paramName, valueArrayStr); } return sql; }
// A UUID parameter must be rendered as a single-quoted literal in the
// substituted SQL string.
@Test public void substituteParametersInSqlString_UuidType() { UUID guid = UUID.randomUUID(); String sql = "Select * from Table Where guid = :guid"; String sqlToUse = "Select * from Table Where guid = '" + guid + "'"; ctx.addUuidParameter("guid", guid); String sqlToUseResult = queryLog.substituteParametersInSqlString(sql, ctx); assertEquals(sqlToUse, sqlToUseResult); }
// Executes a configured statement after running it through the injector
// (which may create side effects such as topics/schemas); if execution
// throws, those injector side effects are rolled back before rethrowing.
// Refuses to run at all while the CommandRunner is degraded, i.e. when its
// warning string is non-empty.
public StatementExecutorResponse execute( final ConfiguredStatement<? extends Statement> statement, final KsqlExecutionContext executionContext, final KsqlSecurityContext securityContext ) { final String commandRunnerWarningString = commandRunnerWarning.get(); if (!commandRunnerWarningString.equals("")) { throw new KsqlServerException("Failed to handle Ksql Statement." + System.lineSeparator() + commandRunnerWarningString); } final InjectorWithSideEffects injector = InjectorWithSideEffects.wrap( injectorFactory.apply(executionContext, securityContext.getServiceContext())); final ConfiguredStatementWithSideEffects<?> injectedWithSideEffects = injector.injectWithSideEffects(statement); try { return executeInjected( injectedWithSideEffects.getStatement(), statement, executionContext, securityContext); } catch (Exception e) { injector.revertSideEffects(injectedWithSideEffects); throw e; } }
// INSERT INTO a source whose schema declares header columns must be rejected
// with a KsqlException naming the offending source.
@Test public void shouldThrowExceptionWhenInsertIntoSourceWithHeaders() { // Given final PreparedStatement<Statement> preparedStatement = PreparedStatement.of("", new InsertInto(SourceName.of("s1"), mock(Query.class))); final ConfiguredStatement<Statement> configured = ConfiguredStatement.of(preparedStatement, SessionConfig.of(KSQL_CONFIG, ImmutableMap.of()) ); final DataSource dataSource = mock(DataSource.class); final LogicalSchema schema = mock(LogicalSchema.class); doReturn(dataSource).when(metaStore).getSource(SourceName.of("s1")); doReturn(schema).when(dataSource).getSchema(); doReturn(ImmutableList.of(ColumnName.of("a"))).when(schema).headers(); when(dataSource.getKafkaTopicName()).thenReturn("topic"); // When: final Exception e = assertThrows( KsqlException.class, () -> distributor.execute(configured, executionContext, mock(KsqlSecurityContext.class)) ); // Then: assertThat(e.getMessage(), is("Cannot insert into s1 because it has header columns")); }
// Detects a single missing number in an (expected) arithmetic sequence.
// The common gap is inferred from the first four elements, tolerating one
// missing value among them. Returns null when: the input is null/too short,
// no consistent gap can be inferred, more than one value is missing, or no
// value is missing at all. NOTE(review): comparisons use BigDecimal.equals,
// which is scale-sensitive (1.0 != 1.00) — presumably all inputs share the
// same scale; confirm with callers before changing.
protected static Number findSumPattern(BigDecimal[] numbers) { if ( numbers == null || numbers.length < MIN_NUMBER_OF_RESTRICTIONS ) { return null; } BigDecimal gap; Number missingNumber = null; BigDecimal a = numbers[0]; BigDecimal b = numbers[1]; BigDecimal c = numbers[2]; BigDecimal d = numbers[3]; // Uses first four numbers to check if there is a pattern and to // calculate the gap between them. One missing value is allowed. if ( b.subtract( a ).equals( c.subtract( b ) ) ) { gap = b.subtract( a ); } else if ( c.subtract( b ).equals( d.subtract( c ) ) ) { gap = c.subtract( b ); } else if ( b.subtract( a ).equals( d.subtract( c ) ) ) { gap = b.subtract( a ); } else { // No pattern found. return null; } for ( int i = 0; i < (numbers.length - 1); i++ ) { BigDecimal first = numbers[i]; BigDecimal second = numbers[i + 1]; if (!second.subtract( first ).equals( gap )) { if (missingNumber == null) { missingNumber = second.subtract(gap); } else { // Happens if there is no pattern found, or more than 1 // missing number. return null; } } } return missingNumber; }
// Covers findSumPattern for increasing and decreasing arithmetic sequences
// with exactly one missing element, plus the null-returning cases: no
// consistent pattern, no missing value, and inputs shorter than 4 elements.
@Test void testfindSumPattern() { // Sum +2 missing number 4 assertThat(FindMissingNumber.findSumPattern( new BigDecimal[]{BigDecimal.valueOf(2), BigDecimal.valueOf(6), BigDecimal.valueOf(8), BigDecimal.valueOf(10)}).doubleValue() == 4).isTrue(); // +10 missing number 50 assertThat(FindMissingNumber.findSumPattern( new BigDecimal[]{BigDecimal.valueOf(10), BigDecimal.valueOf(20), BigDecimal.valueOf(30), BigDecimal.valueOf(40), BigDecimal.valueOf(60), BigDecimal.valueOf(70)}).doubleValue() == 50).isTrue(); // +66 missing number 308 assertThat(FindMissingNumber.findSumPattern( new BigDecimal[]{BigDecimal.valueOf(110), BigDecimal.valueOf(176), BigDecimal.valueOf(242), BigDecimal.valueOf(374)}).doubleValue() == 308).isTrue(); // Deduction -2 missing number 8 assertThat(FindMissingNumber.findSumPattern( new BigDecimal[]{BigDecimal.valueOf(10), BigDecimal.valueOf(6), BigDecimal.valueOf(4), BigDecimal.valueOf(2)}).doubleValue() == 8).isTrue(); // -337 missing number -11 assertThat(FindMissingNumber.findSumPattern( new BigDecimal[]{BigDecimal.valueOf(663), BigDecimal.valueOf(326), BigDecimal.valueOf(-348), BigDecimal.valueOf(-685)}).doubleValue() == -11).isTrue(); // -31 missing number 4350 assertThat(FindMissingNumber.findSumPattern( new BigDecimal[]{BigDecimal.valueOf(4443), BigDecimal.valueOf(4412), BigDecimal.valueOf(4381), BigDecimal.valueOf(4319)}).doubleValue() == 4350).isTrue(); // Not valid // Not in pattern. assertThat(FindMissingNumber.findSumPattern(new BigDecimal[]{ BigDecimal.valueOf(1), BigDecimal.valueOf(2), BigDecimal.valueOf(4), BigDecimal.valueOf(6), BigDecimal.valueOf(8), BigDecimal.valueOf(11)}) == null).isTrue(); assertThat(FindMissingNumber.findSumPattern(new BigDecimal[]{ BigDecimal.valueOf(5), BigDecimal.valueOf(3), BigDecimal.valueOf(54353), BigDecimal.valueOf(54554), BigDecimal.valueOf(232), BigDecimal.valueOf(123)}) == null).isTrue(); // No missing values. 
assertThat(FindMissingNumber.findSumPattern(new BigDecimal[]{ BigDecimal.valueOf(2), BigDecimal.valueOf(4), BigDecimal.valueOf(6), BigDecimal.valueOf(8), BigDecimal.valueOf(10), BigDecimal.valueOf(12), BigDecimal.valueOf(14)}) == null).isTrue(); assertThat(FindMissingNumber.findSumPattern(new BigDecimal[]{ BigDecimal.valueOf(10), BigDecimal.valueOf(20), BigDecimal.valueOf(30), BigDecimal.valueOf(40), BigDecimal.valueOf(50), BigDecimal.valueOf(60)}) == null).isTrue(); assertThat(FindMissingNumber.findSumPattern(new BigDecimal[]{ BigDecimal.valueOf(-15), BigDecimal.valueOf(-10), BigDecimal.valueOf(-5), BigDecimal.valueOf(0), BigDecimal.valueOf(5), BigDecimal.valueOf(10), BigDecimal.valueOf(15)}) == null).isTrue(); // Under 4 values always returns null. assertThat(FindMissingNumber.findSumPattern(new BigDecimal[]{ BigDecimal.valueOf(2), BigDecimal.valueOf(4), BigDecimal.valueOf(6)}) == null).isTrue(); assertThat(FindMissingNumber.findSumPattern(new BigDecimal[]{ BigDecimal.valueOf(2), BigDecimal.valueOf(4)}) == null).isTrue(); assertThat(FindMissingNumber.findSumPattern(new BigDecimal[]{BigDecimal .valueOf(2)}) == null).isTrue(); }
// Scans every job vertex eligible for slow-task detection and returns, per
// execution vertex, the execution attempts whose execution time (combined
// with input bytes into ExecutionTimeWithInputBytes) exceeds that vertex's
// baseline. Vertices already in a terminal state are skipped; vertices with
// no slow attempts are omitted from the result map.
@VisibleForTesting Map<ExecutionVertexID, Collection<ExecutionAttemptID>> findSlowTasks( final ExecutionGraph executionGraph) { final long currentTimeMillis = System.currentTimeMillis(); final Map<ExecutionVertexID, Collection<ExecutionAttemptID>> slowTasks = new HashMap<>(); final List<ExecutionJobVertex> jobVerticesToCheck = getJobVerticesToCheck(executionGraph); for (ExecutionJobVertex ejv : jobVerticesToCheck) { final ExecutionTimeWithInputBytes baseline = getBaseline(ejv, currentTimeMillis); for (ExecutionVertex ev : ejv.getTaskVertices()) { if (ev.getExecutionState().isTerminal()) { continue; } final List<ExecutionAttemptID> slowExecutions = findExecutionsExceedingBaseline( ev.getCurrentExecutions(), baseline, currentTimeMillis); if (!slowExecutions.isEmpty()) { slowTasks.put(ev.getID(), slowExecutions); } } } return slowTasks; }
// With equal input bytes on all three downstream subtasks and one subtask
// already finished, the detector (ratio 0.3) should flag the two still-running
// subtasks as slow relative to the finished one.
@Test void testBalancedInput() throws Exception { final int parallelism = 3; final JobVertex jobVertex1 = createNoOpVertex(parallelism); final JobVertex jobVertex2 = createNoOpVertex(parallelism); jobVertex2.connectNewDataSetAsInput( jobVertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED); final ExecutionGraph executionGraph = createExecutionGraph(jobVertex1, jobVertex2); final ExecutionTimeBasedSlowTaskDetector slowTaskDetector = createSlowTaskDetector(0.3, 1, 0); final ExecutionVertex ev21 = executionGraph.getJobVertex(jobVertex2.getID()).getTaskVertices()[0]; ev21.setInputBytes(1024); final ExecutionVertex ev22 = executionGraph.getJobVertex(jobVertex2.getID()).getTaskVertices()[1]; ev22.setInputBytes(1024); final ExecutionVertex ev23 = executionGraph.getJobVertex(jobVertex2.getID()).getTaskVertices()[2]; ev23.setInputBytes(1024); ev23.getCurrentExecutionAttempt().markFinished(); final Map<ExecutionVertexID, Collection<ExecutionAttemptID>> slowTasks = slowTaskDetector.findSlowTasks(executionGraph); assertThat(slowTasks).hasSize(2); }
/**
 * Merges the incoming timeline document into the latest stored version of the
 * same document, if one exists.
 *
 * <p>When no previous version is found, the incoming document is returned
 * unchanged; otherwise the previous document absorbs the update and is
 * returned as the new current state.
 *
 * @param collectionType the collection the document belongs to
 * @param timelineDoc the incoming document update
 * @param eTagStrBuilder collects the eTag of the fetched previous document
 * @return the merged document (or the incoming one when nothing was stored)
 */
@VisibleForTesting
@SuppressWarnings("unchecked")
TimelineDoc applyUpdatesOnPrevDoc(CollectionType collectionType,
    TimelineDoc timelineDoc, StringBuilder eTagStrBuilder) {
  TimelineDoc prevDocument =
      fetchLatestDoc(collectionType, timelineDoc.getId(), eTagStrBuilder);
  // No stored version: nothing to merge with.
  if (prevDocument == null) {
    return timelineDoc;
  }
  prevDocument.merge(timelineDoc);
  return prevDocument;
}
// Starts from an empty entity document and verifies that after merging with
// the mocked previously-stored document, every section (info, metrics,
// events, configs, relations) matches the expected baked document's sizes.
@SuppressWarnings("unchecked") @Test public void applyingUpdatesOnPrevDocTest() throws IOException { MockedCosmosDBDocumentStoreWriter documentStoreWriter = new MockedCosmosDBDocumentStoreWriter(null); TimelineEntityDocument actualEntityDoc = new TimelineEntityDocument(); TimelineEntityDocument expectedEntityDoc = DocumentStoreTestUtils.bakeTimelineEntityDoc(); Assert.assertEquals(1, actualEntityDoc.getInfo().size()); Assert.assertEquals(0, actualEntityDoc.getMetrics().size()); Assert.assertEquals(0, actualEntityDoc.getEvents().size()); Assert.assertEquals(0, actualEntityDoc.getConfigs().size()); Assert.assertEquals(0, actualEntityDoc.getIsRelatedToEntities().size()); Assert.assertEquals(0, actualEntityDoc. getRelatesToEntities().size()); actualEntityDoc = (TimelineEntityDocument) documentStoreWriter .applyUpdatesOnPrevDoc(CollectionType.ENTITY, actualEntityDoc, null); Assert.assertEquals(expectedEntityDoc.getInfo().size(), actualEntityDoc.getInfo().size()); Assert.assertEquals(expectedEntityDoc.getMetrics().size(), actualEntityDoc.getMetrics().size()); Assert.assertEquals(expectedEntityDoc.getEvents().size(), actualEntityDoc.getEvents().size()); Assert.assertEquals(expectedEntityDoc.getConfigs().size(), actualEntityDoc.getConfigs().size()); Assert.assertEquals(expectedEntityDoc.getRelatesToEntities().size(), actualEntityDoc.getIsRelatedToEntities().size()); Assert.assertEquals(expectedEntityDoc.getRelatesToEntities().size(), actualEntityDoc.getRelatesToEntities().size()); }
// Returns true when the given string fully matches the node-id range syntax
// defined by NODE_ID_RANGE_PATTERN (declared elsewhere in this class).
static boolean isValidNodeIdRange(String range) { return NODE_ID_RANGE_PATTERN.matcher(range).matches(); }
// Table-driven check: every entry in VALID_RANGES passes validation and every
// entry in INVALID_RANGES is rejected.
@Test public void testRangeValidation() { VALID_RANGES.forEach(range -> { assertThat(NodeIdRange.isValidNodeIdRange(range), is(true)); }); INVALID_RANGES.forEach(range -> { assertThat(NodeIdRange.isValidNodeIdRange(range), is(false)); }); }
// Accumulates another subscription's stats into this instance and returns
// this (fluent aggregation). Rates, counters and backlog figures are summed;
// boolean capabilities are OR-ed; 'type' is overwritten by the argument's.
// Consumer stats are merged index-wise when list sizes match, otherwise the
// argument's consumers are appended as fresh copies.
// earliestMsgPublishTimeInBacklog treats 0 as "unset": min() is only taken
// when both sides are set, otherwise max() picks the one non-zero value.
// Per-bucket delayed-index metrics are merged by key, summing values.
public SubscriptionStatsImpl add(SubscriptionStatsImpl stats) { Objects.requireNonNull(stats); this.msgRateOut += stats.msgRateOut; this.msgThroughputOut += stats.msgThroughputOut; this.bytesOutCounter += stats.bytesOutCounter; this.msgOutCounter += stats.msgOutCounter; this.msgRateRedeliver += stats.msgRateRedeliver; this.messageAckRate += stats.messageAckRate; this.chunkedMessageRate += stats.chunkedMessageRate; this.msgBacklog += stats.msgBacklog; this.backlogSize += stats.backlogSize; this.msgBacklogNoDelayed += stats.msgBacklogNoDelayed; this.msgDelayed += stats.msgDelayed; this.unackedMessages += stats.unackedMessages; this.type = stats.type; this.msgRateExpired += stats.msgRateExpired; this.totalMsgExpired += stats.totalMsgExpired; this.isReplicated |= stats.isReplicated; this.isDurable |= stats.isDurable; if (this.consumers.size() != stats.consumers.size()) { for (int i = 0; i < stats.consumers.size(); i++) { ConsumerStatsImpl consumerStats = new ConsumerStatsImpl(); this.consumers.add(consumerStats.add(stats.consumers.get(i))); } } else { for (int i = 0; i < stats.consumers.size(); i++) { this.consumers.get(i).add(stats.consumers.get(i)); } } this.allowOutOfOrderDelivery |= stats.allowOutOfOrderDelivery; this.consumersAfterMarkDeletePosition.putAll(stats.consumersAfterMarkDeletePosition); this.nonContiguousDeletedMessagesRanges += stats.nonContiguousDeletedMessagesRanges; this.nonContiguousDeletedMessagesRangesSerializedSize += stats.nonContiguousDeletedMessagesRangesSerializedSize; if (this.earliestMsgPublishTimeInBacklog != 0 && stats.earliestMsgPublishTimeInBacklog != 0) { this.earliestMsgPublishTimeInBacklog = Math.min( this.earliestMsgPublishTimeInBacklog, stats.earliestMsgPublishTimeInBacklog ); } else { this.earliestMsgPublishTimeInBacklog = Math.max( this.earliestMsgPublishTimeInBacklog, stats.earliestMsgPublishTimeInBacklog ); } this.delayedMessageIndexSizeInBytes += stats.delayedMessageIndexSizeInBytes; 
this.subscriptionProperties.putAll(stats.subscriptionProperties); this.filterProcessedMsgCount += stats.filterProcessedMsgCount; this.filterAcceptedMsgCount += stats.filterAcceptedMsgCount; this.filterRejectedMsgCount += stats.filterRejectedMsgCount; this.filterRescheduledMsgCount += stats.filterRescheduledMsgCount; stats.bucketDelayedIndexStats.forEach((k, v) -> { TopicMetricBean topicMetricBean = this.bucketDelayedIndexStats.computeIfAbsent(k, __ -> new TopicMetricBean()); topicMetricBean.name = v.name; topicMetricBean.labelsAndValues = v.labelsAndValues; topicMetricBean.value += v.value; }); return this; }
// When the second operand's earliestMsgPublishTimeInBacklog is 0 (unset),
// aggregation must keep the first operand's non-zero value instead of
// taking the minimum.
@Test public void testAdd_EarliestMsgPublishTimeInBacklogs_Second0() { SubscriptionStatsImpl stats1 = new SubscriptionStatsImpl(); stats1.earliestMsgPublishTimeInBacklog = 10L; SubscriptionStatsImpl stats2 = new SubscriptionStatsImpl(); stats2.earliestMsgPublishTimeInBacklog = 0L; SubscriptionStatsImpl aggregate = stats1.add(stats2); assertEquals(aggregate.earliestMsgPublishTimeInBacklog, 10L); }
/**
 * Returns the validation errors collected on this configuration object.
 *
 * @return the errors container (never replaced, shared with validators)
 */
@Override
public ConfigErrors errors() {
    return this.errors;
}
// A label template referencing an unknown material must attach a validation
// error to the LABEL_TEMPLATE field.
@Test public void shouldValidatePipelineLabelWithNonExistingMaterial() { String labelFormat = "pipeline-${COUNT}-${NoSuchMaterial}"; PipelineConfig pipelineConfig = createAndValidatePipelineLabel(labelFormat); assertThat(pipelineConfig.errors().on(PipelineConfig.LABEL_TEMPLATE), startsWith("You have defined a label template in pipeline")); }
/**
 * Parses runtime options from the given property map by delegating to the
 * lookup-based overload.
 *
 * @param properties property name to value mapping
 * @return a builder populated from the recognized properties
 */
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
    // Adapt the map to the Function-based overload.
    return parse(key -> properties.get(key));
}
// The FEATURES property must be parsed into a classpath feature URI.
@Test void should_parse_features() { properties.put(Constants.FEATURES_PROPERTY_NAME, "classpath:com/example.feature"); RuntimeOptions options = cucumberPropertiesParser.parse(properties).build(); assertThat(options.getFeaturePaths(), contains( URI.create("classpath:com/example.feature"))); }
/**
 * Returns up to {@code count} keys stored in the given cluster hash slot by
 * issuing CLUSTER GETKEYSINSLOT and waiting for the result synchronously.
 *
 * @param slot the cluster hash slot to inspect
 * @param count the maximum number of keys to return
 * @return the raw key bytes found in the slot
 */
@Override
public List<byte[]> clusterGetKeysInSlot(int slot, Integer count) {
    final RFuture<List<byte[]>> keysFuture = executorService.readAsync(
            (String) null, ByteArrayCodec.INSTANCE, CLUSTER_GETKEYSINSLOT, slot, count);
    return syncFuture(keysFuture);
}
// An unpopulated slot yields an empty (non-null) key list.
@Test public void testClusterGetKeysInSlot() { List<byte[]> keys = connection.clusterGetKeysInSlot(12, 10); assertThat(keys).isEmpty(); }
/**
 * Exposes the Shenyu register-center configuration as a Spring bean, bound
 * from properties under the {@code shenyu.register} prefix.
 *
 * @return the property-bound register center configuration
 */
@Bean
@ConfigurationProperties(prefix = "shenyu.register")
public ShenyuRegisterCenterConfig shenyuRegisterCenterConfig() {
    final ShenyuRegisterCenterConfig registerCenterConfig = new ShenyuRegisterCenterConfig();
    return registerCenterConfig;
}
// Boots the application context (with RegisterUtils.doLogin stubbed) and
// verifies the register-center bean exists with the HTTP register type.
@Test public void testShenyuRegisterCenterConfig() { MockedStatic<RegisterUtils> registerUtilsMockedStatic = mockStatic(RegisterUtils.class); registerUtilsMockedStatic.when(() -> RegisterUtils.doLogin(any(), any(), any())).thenReturn(Optional.ofNullable("token")); applicationContextRunner.run(context -> { ShenyuRegisterCenterConfig config = context.getBean("shenyuRegisterCenterConfig", ShenyuRegisterCenterConfig.class); assertNotNull(config); assertThat(config.getRegisterType()).isEqualTo(RegisterTypeEnum.HTTP.getName()); }); registerUtilsMockedStatic.close(); }
// This getter's result never changes for a given instance, so callers may
// cache it.
@Override boolean isCacheable() { return true; }
// An ExtractorGetter must report itself as cacheable.
@Test public void isCacheable() { // GIVEN ExtractorGetter getter = new ExtractorGetter(UNUSED, mock(ValueExtractor.class), "argument"); // THEN assertThat(getter.isCacheable()).isTrue(); }
/**
 * Returns a new RolesConfig snapshot containing every role currently held by
 * this collection.
 *
 * @return a fresh RolesConfig wrapping all roles
 */
public RolesConfig allRoles() {
    final Role[] roleArray = this.toArray(new Role[0]);
    return new RolesConfig(roleArray);
}
// allRoles() must return every configured role (both plain and plugin roles)
// preserving insertion order.
@Test public void allRolesShouldReturnAllRoles() { Role admin = new RoleConfig(new CaseInsensitiveString("admin")); Role view = new RoleConfig(new CaseInsensitiveString("view")); Role blackbird = new PluginRoleConfig("blackbird", "foo"); Role spacetiger = new PluginRoleConfig("spacetiger", "foo"); RolesConfig rolesConfig = new RolesConfig(admin, blackbird, view, spacetiger); List<Role> roles = rolesConfig.allRoles(); assertThat(roles, hasSize(4)); assertThat(roles, contains(admin, blackbird, view, spacetiger)); }
// Builds a sharding condition value from a binary comparison predicate
// (e.g. "col < 1"). The value operand is whichever side of the predicate is
// not the column segment. Handles, in order: unsupported operators (empty),
// NULL condition values, concrete literal/parameter values, and NOW()
// expressions (resolved through the timestamp service with no parameter
// marker, hence index -1).
@Override public Optional<ShardingConditionValue> generate(final BinaryOperationExpression predicate, final Column column, final List<Object> params, final TimestampServiceRule timestampServiceRule) { String operator = predicate.getOperator().toUpperCase(); if (!isSupportedOperator(operator)) { return Optional.empty(); } ExpressionSegment valueExpression = predicate.getLeft() instanceof ColumnSegment ? predicate.getRight() : predicate.getLeft(); ConditionValue conditionValue = new ConditionValue(valueExpression, params); if (conditionValue.isNull()) { return generate(null, column, operator, conditionValue.getParameterMarkerIndex().orElse(-1)); } Optional<Comparable<?>> value = conditionValue.getValue(); if (value.isPresent()) { return generate(value.get(), column, operator, conditionValue.getParameterMarkerIndex().orElse(-1)); } if (ExpressionConditionUtils.isNowExpression(valueExpression)) { return generate(timestampServiceRule.getTimestamp(), column, operator, -1); } return Optional.empty(); }
// "col < 1" must produce a RangeShardingConditionValue enclosing the open
// range (-inf, 1), with no parameter marker indexes.
@SuppressWarnings("unchecked") @Test void assertGenerateConditionValueWithLessThanOperator() { BinaryOperationExpression rightValue = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, 1), "<", null); Optional<ShardingConditionValue> shardingConditionValue = generator.generate(rightValue, column, new LinkedList<>(), mock(TimestampServiceRule.class)); assertTrue(shardingConditionValue.isPresent()); assertTrue(Range.lessThan(1).encloses(((RangeShardingConditionValue<Integer>) shardingConditionValue.get()).getValueRange())); assertTrue(shardingConditionValue.get().getParameterMarkerIndexes().isEmpty()); }
// Routes a message to a partition. Precedence: a message key always wins
// (hash of the key modulo partition count). Without a key and with batching
// enabled, the partition switches on partitionSwitchMs time boundaries so a
// whole batch lands on one partition; signSafeMod keeps the index
// non-negative even when the long-to-int computation would go negative.
// Otherwise plain round-robin via an atomic field updater.
@Override public int choosePartition(Message<?> msg, TopicMetadata topicMetadata) { // If the message has a key, it supersedes the round robin routing policy if (msg.hasKey()) { return signSafeMod(hash.makeHash(msg.getKey()), topicMetadata.numPartitions()); } if (isBatchingEnabled) { // if batching is enabled, choose partition on `partitionSwitchMs` boundary. long currentMs = clock.millis(); return signSafeMod(currentMs / partitionSwitchMs + startPtnIdx, topicMetadata.numPartitions()); } else { return signSafeMod(PARTITION_INDEX_UPDATER.getAndIncrement(this), topicMetadata.numPartitions()); } }
// Regression test: with a clock timestamp large enough to overflow into a
// negative int during partition math, the chosen partition must still fall
// within [0, numPartitions).
@Test public void testChoosePartitionWithNegativeTime() { Message<?> msg = mock(Message.class); when(msg.getKey()).thenReturn(null); // Fake clock, simulate timestamp that resolves into a negative Integer value Clock clock = mock(Clock.class); when(clock.millis()).thenReturn((long) Integer.MAX_VALUE); RoundRobinPartitionMessageRouterImpl router = new RoundRobinPartitionMessageRouterImpl( HashingScheme.JavaStringHash, 3, true, 5, clock); int idx = router.choosePartition(msg, new TopicMetadataImpl(5)); assertTrue(idx >= 0); assertTrue(idx < 5); }
// Describes broker feature metadata by sending an ApiVersionsRequest to the
// least-loaded broker (or the active KRaft controller). The response's
// finalized and supported feature keys are converted into version-range
// maps; a negative finalizedFeaturesEpoch is mapped to Optional.empty().
// A non-NONE error code completes the returned future exceptionally with
// the corresponding API exception.
@Override public DescribeFeaturesResult describeFeatures(final DescribeFeaturesOptions options) { final KafkaFutureImpl<FeatureMetadata> future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call( "describeFeatures", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedBrokerOrActiveKController()) { private FeatureMetadata createFeatureMetadata(final ApiVersionsResponse response) { final Map<String, FinalizedVersionRange> finalizedFeatures = new HashMap<>(); for (final FinalizedFeatureKey key : response.data().finalizedFeatures().valuesSet()) { finalizedFeatures.put(key.name(), new FinalizedVersionRange(key.minVersionLevel(), key.maxVersionLevel())); } Optional<Long> finalizedFeaturesEpoch; if (response.data().finalizedFeaturesEpoch() >= 0L) { finalizedFeaturesEpoch = Optional.of(response.data().finalizedFeaturesEpoch()); } else { finalizedFeaturesEpoch = Optional.empty(); } final Map<String, SupportedVersionRange> supportedFeatures = new HashMap<>(); for (final SupportedFeatureKey key : response.data().supportedFeatures().valuesSet()) { supportedFeatures.put(key.name(), new SupportedVersionRange(key.minVersion(), key.maxVersion())); } return new FeatureMetadata(finalizedFeatures, finalizedFeaturesEpoch, supportedFeatures); } @Override ApiVersionsRequest.Builder createRequest(int timeoutMs) { return new ApiVersionsRequest.Builder(); } @Override void handleResponse(AbstractResponse response) { final ApiVersionsResponse apiVersionsResponse = (ApiVersionsResponse) response; if (apiVersionsResponse.data().errorCode() == Errors.NONE.code()) { future.complete(createFeatureMetadata(apiVersionsResponse)); } else { future.completeExceptionally(Errors.forCode(apiVersionsResponse.data().errorCode()).exception()); } } @Override void handleFailure(Throwable throwable) { completeAllExceptionally(Collections.singletonList(future), throwable); } }; runnable.call(call, now); return new DescribeFeaturesResult(future); }
// Verifies describeFeatures surfaces a broker-side INVALID_REQUEST error as the
// cause of the returned future's ExecutionException.
@Test public void testDescribeFeaturesFailure() { try (final AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().prepareResponse( body -> body instanceof ApiVersionsRequest, prepareApiVersionsResponseForDescribeFeatures(Errors.INVALID_REQUEST)); final DescribeFeaturesOptions options = new DescribeFeaturesOptions(); options.timeoutMs(10000); final KafkaFuture<FeatureMetadata> future = env.adminClient().describeFeatures(options).featureMetadata(); final ExecutionException e = assertThrows(ExecutionException.class, future::get); assertEquals(e.getCause().getClass(), Errors.INVALID_REQUEST.exception().getClass()); } }
// Pre-invocation hook: delegates to checkState, which may mark the context as skipped
// (e.g. when dynamic configuration disables registration); the context itself is
// returned unchanged.
@Override protected ExecuteContext doBefore(ExecuteContext context) { checkState(context, null); return context; }
// Verifies the interceptor only skips the intercepted method when the dynamic-config
// registration shutdown flag is on; config flags are restored at the end.
@Test public void doBefore() throws Exception { REGISTER_CONFIG.setEnableSpringRegister(true); REGISTER_CONFIG.setOpenMigration(true); RegisterDynamicConfig.INSTANCE.setClose(false); final ExecuteContext context = interceptor.before(buildContext()); Assert.assertFalse(context.isSkip()); RegisterDynamicConfig.INSTANCE.setClose(true); final ExecuteContext openContext = interceptor.doBefore(buildContext()); Assert.assertTrue(openContext.isSkip()); RegisterDynamicConfig.INSTANCE.setClose(false); REGISTER_CONFIG.setEnableSpringRegister(false); REGISTER_CONFIG.setOpenMigration(false); }
/**
 * Replaces, in place, every character of {@code string} that is not a letter, digit,
 * or underscore with {@code replacement}. Works directly on the value's backing array.
 *
 * @param string the string value to sanitize in place
 * @param replacement the character written over each non-word character
 */
public static void replaceNonWordChars(StringValue string, char replacement) {
    final char[] backing = string.getCharArray();
    final int length = string.length();
    for (int pos = 0; pos < length; pos++) {
        final char current = backing[pos];
        final boolean isWordChar =
                Character.isLetter(current) || Character.isDigit(current) || current == '_';
        if (!isWordChar) {
            backing[pos] = replacement;
        }
    }
}
// Verifies '@' is replaced by '!' while letters, digits and '_' are preserved.
@Test void testReplaceNonWordChars() { StringValue testString = new StringValue("TEST123_@"); StringValueUtils.replaceNonWordChars(testString, '!'); assertThat((Object) testString).isEqualTo(new StringValue("TEST123_!")); }
/**
 * Returns the registered {@link PluginManager}.
 *
 * @return the plugin manager previously set on this reference
 * @throws IllegalStateException if no plugin manager has been set yet
 */
public PluginManager getPluginManager() {
    if (pluginManager == null) {
        throw new IllegalStateException("PluginManager reference is not set");
    }
    return pluginManager;
}
// Verifies the singleton reference throws IllegalStateException before a PluginManager
// is set, and returns the exact mock instance afterwards.
@Test public void testGetPluginManager() throws Exception { PluginManagerReference reference = PluginManagerReference.reference(); try { reference.getPluginManager(); fail("should throw exception"); } catch (IllegalStateException ignored) { } PluginManager mockManager = mock(PluginManager.class); reference.setPluginManager(mockManager); assertThat(reference.getPluginManager()).isEqualTo(mockManager); }
// Reads the template file using the platform default charset and delegates to the
// string-based render(String) overload.
public String render(File templateFile) throws IOException { String template = FileUtils.readFileToString(templateFile, Charset.defaultCharset()); return render(template); }
// Verifies simple mustache-style placeholder substitution ("{{name}}" -> "world").
@Test void testRender() { // given template variables K8sSpecTemplate template = new K8sSpecTemplate(); template.put("name", "world"); // when String spec = template.render("Hello {{name}}"); // then assertEquals("Hello world", spec); }
/**
 * Renders {@code length} bytes of {@code data}, starting at {@code offset}, as
 * upper-case hex pairs separated by single spaces (e.g. {@code "CA FE BA BE"}).
 *
 * @param data source bytes
 * @param offset index of the first byte to render
 * @param length number of bytes to render; 0 yields the empty string
 * @return the space-separated hex representation
 */
public static String prettyHex(byte[] data, int offset, int length) {
    if (length == 0) {
        return "";
    }
    // Capacity: two hex digits per byte plus one separator between consecutive bytes.
    final StringBuilder out = new StringBuilder(length * 3 - 1);
    for (int i = 0; i < length; i++) {
        if (i > 0) {
            out.append(' ');
        }
        out.append(String.format("%02X", data[offset + i]));
    }
    return out.toString();
}
// Verifies pretty-printing of a 4-byte array.
// NOTE(review): calls a single-argument prettyHex(byte[]) convenience overload that is
// not shown here -- presumably it delegates to prettyHex(data, 0, data.length); confirm.
@Test public void prettyHexString() { assertEquals("CA FE BA BE", ByteArrayUtils.prettyHex(new byte[] { -54, -2, -70, -66})); }
// Scans the acked-message set (ascending by offset) to find the highest offset that is
// safe to commit. Contiguous acked offsets advance the commit point one by one. A gap is
// tolerated only when the missing offset was never emitted (e.g. the record was compacted
// or deleted), in which case the scan jumps to the next emitted offset; a gap for an
// offset that WAS emitted means the record is still in flight, so the scan stops there.
// Returns null when nothing new is committable; otherwise an OffsetAndMetadata whose
// offset is one past the last committable record (Kafka commit semantics).
public OffsetAndMetadata findNextCommitOffset(final String commitMetadata) { boolean found = false; long currOffset; long nextCommitOffset = committedOffset; for (KafkaSpoutMessageId currAckedMsg : ackedMsgs) { // complexity is that of a linear scan on a TreeMap currOffset = currAckedMsg.offset(); if (currOffset == nextCommitOffset) { // found the next offset to commit found = true; nextCommitOffset = currOffset + 1; } else if (currOffset > nextCommitOffset) { if (emittedOffsets.contains(nextCommitOffset)) { LOG.debug("topic-partition [{}] has non-sequential offset [{}]." + " It will be processed in a subsequent batch.", tp, currOffset); break; } else { /* This case will arise in case of non-sequential offset being processed. So, if the topic doesn't contain offset = nextCommitOffset (possible if the topic is compacted or deleted), the consumer should jump to the next logical point in the topic. Next logical offset should be the first element after nextCommitOffset in the ascending ordered emitted set. */ LOG.debug("Processed non-sequential offset." + " The earliest uncommitted offset is no longer part of the topic." + " Missing offset: [{}], Processed: [{}]", nextCommitOffset, currOffset); final Long nextEmittedOffset = emittedOffsets.ceiling(nextCommitOffset); if (nextEmittedOffset != null && currOffset == nextEmittedOffset) { LOG.debug("Found committable offset: [{}] after missing offset: [{}], skipping to the committable offset", currOffset, nextCommitOffset); found = true; nextCommitOffset = currOffset + 1; } else { LOG.debug("Topic-partition [{}] has non-sequential offset [{}]." + " Next offset to commit should be [{}]", tp, currOffset, nextCommitOffset); break; } } } else { throw new IllegalStateException("The offset [" + currOffset + "] is below the current nextCommitOffset " + "[" + nextCommitOffset + "] for [" + tp + "]." 
+ " This should not be possible, and likely indicates a bug in the spout's acking or emit logic."); } } OffsetAndMetadata nextCommitOffsetAndMetadata = null; if (found) { nextCommitOffsetAndMetadata = new OffsetAndMetadata(nextCommitOffset, commitMetadata); LOG.debug("Topic-partition [{}] has offsets [{}-{}] ready to be committed." + " Processing will resume at offset [{}] upon spout restart", tp, committedOffset, nextCommitOffsetAndMetadata.offset() - 1, nextCommitOffsetAndMetadata.offset()); } else { LOG.debug("Topic-partition [{}] has no offsets ready to be committed", tp); } LOG.trace("{}", this); return nextCommitOffsetAndMetadata; }
// Without any acked messages there is nothing committable, so null is expected.
@Test public void testFindNextCommittedOffsetWithNoAcks() { OffsetAndMetadata nextCommitOffset = manager.findNextCommitOffset(COMMIT_METADATA); assertThat("There shouldn't be a next commit offset when nothing has been acked", nextCommitOffset, is(nullValue())); }
// One-off migration: converts stringly-typed numeric alert-condition parameters
// ("grace", "backlog", "time", "threshold") stored on stream documents into integers,
// then records a MigrationCompleted marker in cluster config so it never runs twice.
// Non-string, non-integer values are logged and skipped.
// NOTE(review): when a string value cannot be parsed, intValue is null but the
// updateOne call still runs, writing null into the parameter -- verify this is intended.
@Override @SuppressWarnings("unchecked") public void upgrade() { if (clusterConfigService.get(MigrationCompleted.class) != null) { LOG.debug("Migration already done."); return; } final ImmutableSet.Builder<String> modifiedStreams = ImmutableSet.builder(); final ImmutableSet.Builder<String> modifiedAlertConditions = ImmutableSet.builder(); for (Document document : collection.find().sort(ascending(FIELD_CREATED_AT))) { final String streamId = document.getObjectId(FIELD_ID).toHexString(); if (!document.containsKey(FIELD_ALERT_CONDITIONS)) { continue; } final List<Document> alertConditions = (List<Document>) document.get(FIELD_ALERT_CONDITIONS); // Need to check if the following fields are integers: // // FieldContentValue: grace, backlog // FieldValue: grace, backlog, time, threshold // MessageCount: grace, backlog, time, threshold final Set<String> intFields = ImmutableSet.of("grace", "backlog", "time", "threshold"); for (Document alertCondition : alertConditions) { final String alertConditionId = alertCondition.get("id", String.class); final String alertConditionTitle = alertCondition.get("title", String.class); final Document parameters = alertCondition.get("parameters", Document.class); for (String field : intFields) { final Object fieldValue = parameters.get(field); // No need to convert anything if the field does not exist or is already an integer if (fieldValue == null || fieldValue instanceof Integer) { continue; } if (!(fieldValue instanceof String)) { LOG.warn("Field <{}> in alert condition <{}> ({}) of stream <{}> is not a string but a <{}>, not trying to convert it!", field, alertConditionId, alertConditionTitle, streamId, fieldValue.getClass().getCanonicalName()); continue; } final String stringValue = parameters.get(field, String.class); final Integer intValue = Ints.tryParse(stringValue); LOG.info("Converting value for field <{}> from string to integer in alert condition <{}> ({}) of stream <{}>", field, alertConditionId, alertConditionTitle, streamId); 
if (intValue == null) { LOG.error("Unable to parse \"{}\" into integer!", fieldValue); } final UpdateResult result = collection.updateOne(eq(FIELD_ALERT_CONDITIONS_ID, alertConditionId), set(ALERT_CONDITIONS_PARAMETERS_PREFIX + field, intValue)); // Use UpdateResult#getMatchedCount() instead of #getModifiedCount() to make it work on MongoDB 2.4 if (result.getMatchedCount() > 0) { modifiedStreams.add(streamId); modifiedAlertConditions.add(alertConditionId); } else { LOG.warn("No document modified for alert condition <{}> ({})", alertConditionId, alertConditionTitle); } } } } clusterConfigService.write(MigrationCompleted.create(modifiedStreams.build(), modifiedAlertConditions.build())); }
// When the MigrationCompleted marker already exists, upgrade() must be a no-op:
// no collection updates and no re-write of the marker.
@Test public void upgradeWhenMigrationCompleted() throws Exception { clusterConfigService.write(MigrationCompleted.create(Collections.emptySet(), Collections.emptySet())); // Reset the spy to be able to verify that there wasn't a write reset(clusterConfigService); migration.upgrade(); verify(collection, never()).updateOne(any(), any(Bson.class)); verify(clusterConfigService, never()).write(any(MigrationCompleted.class)); }
// Inserts a handle into this 1-based binary heap backed by 'array' (index 0 is unused)
// and restores the heap property via lift(). NO_VALUE is reserved as the internal
// "empty" sentinel and therefore rejected.
// NOTE(review): the growth formula 1 + (array.length - 1) * 2 assumes the initial
// capacity is at least 2 -- confirm the constructor guarantees this.
public void offer(int handle) { if (handle == NO_VALUE) { throw new IllegalArgumentException("The NO_VALUE (" + NO_VALUE + ") cannot be added to the queue."); } size++; if (size == array.length) { // Grow queue capacity. array = Arrays.copyOf(array, 1 + (array.length - 1) * 2); } array[size] = handle; lift(size); }
// offer(NO_VALUE) must be rejected, since NO_VALUE is the queue's internal sentinel.
@Test public void mustThrowWhenAddingNoValue() { final IntPriorityQueue pq = new IntPriorityQueue(); assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() { pq.offer(IntPriorityQueue.NO_VALUE); } }); }
/**
 * Collects the distinct, non-null co-location groups of all vertices in this graph.
 *
 * @return an unmodifiable set of all co-location groups
 */
public Set<CoLocationGroup> getCoLocationGroups() {
    final Set<CoLocationGroup> groups =
            IterableUtils.toStream(getVertices())
                    .map(vertex -> vertex.getCoLocationGroup())
                    .filter(group -> group != null)
                    .collect(Collectors.toSet());
    return Collections.unmodifiableSet(groups);
}
// Two strictly co-located vertices sharing a slot-sharing group must yield exactly one
// co-location group containing both vertex ids; unrelated vertices contribute none.
@Test public void testGetCoLocationGroups() { final JobVertex v1 = new JobVertex("1"); final JobVertex v2 = new JobVertex("2"); final JobVertex v3 = new JobVertex("3"); final JobVertex v4 = new JobVertex("4"); final SlotSharingGroup slotSharingGroup = new SlotSharingGroup(); v1.setSlotSharingGroup(slotSharingGroup); v2.setSlotSharingGroup(slotSharingGroup); v1.setStrictlyCoLocatedWith(v2); final JobGraph jobGraph = JobGraphBuilder.newStreamingJobGraphBuilder() .addJobVertices(Arrays.asList(v1, v2, v3, v4)) .build(); assertThat(jobGraph.getCoLocationGroups(), hasSize(1)); final CoLocationGroup onlyCoLocationGroup = jobGraph.getCoLocationGroups().iterator().next(); assertThat(onlyCoLocationGroup.getVertexIds(), containsInAnyOrder(v1.getID(), v2.getID())); }
/**
 * Computes the ENS namehash of the given name and returns it as a 0x-prefixed hex
 * string. The name is normalised first, then hashed label by label by the array
 * overload of {@code nameHash}.
 */
public static String nameHash(String ensName) {
    final String normalised = normalise(ensName);
    final String[] labels = normalised.split("\\.");
    return Numeric.toHexString(nameHash(labels));
}
// Checks EIP-137 namehash reference vectors, including an emoji label that exercises
// name normalisation.
@Test public void testNameHash() { assertEquals( nameHash(""), ("0x0000000000000000000000000000000000000000000000000000000000000000")); assertEquals( nameHash("eth"), ("0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae")); assertEquals( nameHash("foo.eth"), ("0xde9b09fd7c5f901e23a3f19fecc54828e9c848539801e86591bd9801b019f84f")); assertEquals( nameHash("\uD83D\uDC8E.gmcafe.art"), ("0xf7de5954cda078ee481b14cff677e8066fe805a89b5c87b4a9b866338049b04a")); }
// REST endpoint: returns the AD session identified by the path id, serialized as JSON.
// Delegates straight to the AD service; AdException propagates to the error handler.
@Operation(summary = "Get single session") @GetMapping(value = "/iapi/saml/ad_sessions/{id}", produces = "application/json") @ResponseBody public AdSession getById(@PathVariable("id") String id) throws AdException { return adService.getAdSession(id); }
// Verifies the controller delegates to AdService.getAdSession with the path id and
// returns the service's session.
@Test public void getSessionById() throws AdException { when(adServiceMock.getAdSession(anyString())).thenReturn(adSession); AdSession result = sessionController.getById("httpSessionId"); verify(adServiceMock, times(1)).getAdSession("httpSessionId"); assertEquals(result.getSessionId(), adSession.getSessionId()); }
/**
 * Creates a cached thread pool backed by a {@link SynchronousQueue}: tasks are handed
 * directly to worker threads, growing the pool up to {@code maximumPoolSize}; threads
 * above the core size are reclaimed after being idle for one minute.
 *
 * @param corePoolSize number of threads kept alive even when idle
 * @param maximumPoolSize upper bound on the number of pooled threads
 * @return the configured executor
 */
public static ThreadPoolExecutor newCachedThreadPool(int corePoolSize, int maximumPoolSize) {
    // Express the one-minute keep-alive directly via TimeUnit.MINUTES instead of a
    // milliseconds constant; the diamond operator drops the redundant type argument.
    return new ThreadPoolExecutor(corePoolSize, maximumPoolSize,
        1, TimeUnit.MINUTES,
        new SynchronousQueue<>());
}
// Verifies the 4-argument factory overload wires queue and thread factory through to
// the executor. NOTE(review): exercises a different overload than the 2-argument
// factory shown in this file.
@Test public void newCachedThreadPool2() throws Exception { BlockingQueue<Runnable> queue = new SynchronousQueue<Runnable>(); ThreadFactory factory = new NamedThreadFactory("xxx"); ThreadPoolExecutor executor = ThreadPoolUtils.newCachedThreadPool(10, 20, queue, factory); Assert.assertEquals(executor.getCorePoolSize(), 10); Assert.assertEquals(executor.getMaximumPoolSize(), 20); Assert.assertEquals(executor.getQueue(), queue); Assert.assertEquals(executor.getThreadFactory(), factory); }
// Convenience overload without a state delegator: only valid for descriptors that do
// not declare a state API service descriptor, since the NoOpStateDelegator used here
// cannot serve state requests.
public BundleProcessor getProcessor( BeamFnApi.ProcessBundleDescriptor descriptor, List<RemoteInputDestination> remoteInputDesinations) { checkState( !descriptor.hasStateApiServiceDescriptor(), "The %s cannot support a %s containing a state %s.", BundleProcessor.class.getSimpleName(), BeamFnApi.ProcessBundleDescriptor.class.getSimpleName(), Endpoints.ApiServiceDescriptor.class.getSimpleName()); return getProcessor(descriptor, remoteInputDesinations, NoOpStateDelegator.INSTANCE); }
// Drives a bundle end-to-end through a mock SDK harness: three elements sent over the
// data plane input receiver, outputs collected via the registered remote output
// receiver, then checked after the bundle closes.
@Test public void testNewBundleAndProcessElements() throws Exception { SdkHarnessClient client = harness.client(); BundleProcessor processor = client.getProcessor( descriptor, Collections.singletonList( RemoteInputDestination.of( (FullWindowedValueCoder) FullWindowedValueCoder.of(StringUtf8Coder.of(), Coder.INSTANCE), SDK_GRPC_READ_TRANSFORM))); Collection<WindowedValue<String>> outputs = new ArrayList<>(); try (RemoteBundle activeBundle = processor.newBundle( Collections.singletonMap( SDK_GRPC_WRITE_TRANSFORM, RemoteOutputReceiver.of( FullWindowedValueCoder.of( LengthPrefixCoder.of(StringUtf8Coder.of()), Coder.INSTANCE), outputs::add)), BundleProgressHandler.ignored())) { FnDataReceiver<WindowedValue<?>> bundleInputReceiver = Iterables.getOnlyElement(activeBundle.getInputReceivers().values()); bundleInputReceiver.accept(WindowedValue.valueInGlobalWindow("foo")); bundleInputReceiver.accept(WindowedValue.valueInGlobalWindow("bar")); bundleInputReceiver.accept(WindowedValue.valueInGlobalWindow("baz")); } // The bundle can be a simple function of some sort, but needs to be complete. assertThat( outputs, containsInAnyOrder( WindowedValue.valueInGlobalWindow("spam"), WindowedValue.valueInGlobalWindow("ham"), WindowedValue.valueInGlobalWindow("eggs"))); }
/**
 * Instantiates and initializes a {@link MessageOutput} for the given output/stream
 * pair via the message output factory.
 *
 * @throws IllegalArgumentException if the factory cannot produce an output for this type
 */
protected MessageOutput launchOutput(Output output, Stream stream) throws Exception {
    final org.graylog2.plugin.configuration.Configuration configuration =
            new org.graylog2.plugin.configuration.Configuration(output.getConfiguration());
    final MessageOutput messageOutput = messageOutputFactory.fromStreamOutput(output, stream, configuration);
    if (messageOutput == null) {
        throw new IllegalArgumentException("Failed to instantiate MessageOutput from Output: " + output);
    }
    messageOutput.initialize();
    return messageOutput;
}
// launchOutput must throw IllegalArgumentException when the factory cannot
// instantiate the configured output type.
@Test(expected = IllegalArgumentException.class) public void testThrowExceptionForUnknownOutputType() throws Exception { registry.launchOutput(output, null); }
// Lists child namespaces one level below the given namespace: databases at ROOT scope,
// schemas at DATABASE scope. Any deeper scope is invalid because Snowflake has no
// namespace levels beyond database.schema.
@Override public List<Namespace> listNamespaces(Namespace namespace) { SnowflakeIdentifier scope = NamespaceHelpers.toSnowflakeIdentifier(namespace); List<SnowflakeIdentifier> results; switch (scope.type()) { case ROOT: results = snowflakeClient.listDatabases(); break; case DATABASE: results = snowflakeClient.listSchemas(scope); break; default: throw new IllegalArgumentException( String.format( "listNamespaces must be at either ROOT or DATABASE level; got %s from namespace %s", scope, namespace)); } return results.stream().map(NamespaceHelpers::toIcebergNamespace).collect(Collectors.toList()); }
// Listing namespaces below database.schema depth is invalid and must fail with a
// message naming the offending level and namespace.
@Test public void testListNamespaceWithinSchema() { // No "sub-namespaces" beyond database.schema; invalid to try to list namespaces given // a database.schema. String dbName = "DB_3"; String schemaName = "SCHEMA_4"; assertThatExceptionOfType(IllegalArgumentException.class) .isThrownBy(() -> catalog.listNamespaces(Namespace.of(dbName, schemaName))) .withMessageContaining("level") .withMessageContaining("DB_3.SCHEMA_4"); }
/**
 * Re-points the callback client at a new Python-side endpoint and records the new
 * address and port. The gateway is updated first, so the stored fields only change
 * once the reset call has completed.
 */
public void resetCallbackClient(InetAddress pythonAddress, int pythonPort) {
    gateway.resetCallbackClient(pythonAddress, pythonPort);
    this.pythonAddress = pythonAddress;
    this.pythonPort = pythonPort;
}
// Verifies resetCallbackClient updates the recorded python port/address on a live
// GatewayServer. The sleeps allow the server threads to settle before/after the reset.
@Test public void testResetCallbackClient() { GatewayServer server = new GatewayServer(null, 0); server.start(true); try { Thread.sleep(250); } catch (Exception e) { } server.resetCallbackClient(server.getAddress(), GatewayServer.DEFAULT_PYTHON_PORT + 1); try { Thread.sleep(250); } catch (Exception e) { } int pythonPort = server.getPythonPort(); InetAddress pythonAddress = server.getPythonAddress(); assertEquals(pythonPort, GatewayServer.DEFAULT_PYTHON_PORT + 1); assertEquals(pythonAddress, server.getAddress()); server.shutdown(true); }
/**
 * Sets a single attribute on the file. The attribute string is expected in
 * {@code "view:name"} form; the view and attribute name are split out and the
 * actual work is delegated to {@code setAttributeInternal}.
 */
public void setAttribute(File file, String attribute, Object value, boolean create) {
    final String viewName = getViewName(attribute);
    final String attributeName = getSingleAttribute(attribute);
    setAttributeInternal(file, viewName, attributeName, value, create);
}
// Verifies "view:attribute" parsing and value storage for two attributes of the
// same view.
@Test public void testSetAttribute() { File file = createFile(); service.setAttribute(file, "test:bar", 10L, false); assertThat(file.getAttribute("test", "bar")).isEqualTo(10L); service.setAttribute(file, "test:baz", 100, false); assertThat(file.getAttribute("test", "baz")).isEqualTo(100); }
/**
 * Checks whether the given string is {@code null} or has zero length.
 *
 * @param s the string to check, may be {@code null}
 * @return {@code true} if {@code s} is {@code null} or empty, {@code false} otherwise
 */
public static boolean isNullOrEmpty(String s) {
    if (s == null) {
        return true;
    }
    return s.isEmpty();
}
// Covers the three cases: null, empty string, and non-empty string.
@Test void testIsNullOrEmpty() { assertThat(isNullOrEmpty(null)).isTrue(); assertThat(isNullOrEmpty("")).isTrue(); assertThat(isNullOrEmpty("bla")).isFalse(); }
// This id type is itself a LocalId, so the conversion is the identity.
@Override public LocalId toLocalId() { return this; }
// toLocalId must return a value equal to the instance itself (identity conversion).
@Test void toLocalId() { LocalComponentIdDrlSession LocalComponentIdDrlSession = new LocalComponentIdDrlSession(basePath, identifier); LocalId retrieved = LocalComponentIdDrlSession.toLocalId(); assertThat(retrieved).isEqualTo(LocalComponentIdDrlSession); }
/**
 * A type is writeable when it matches the supported (generic) type and the requested
 * media type is compatible with {@code text/csv}. The type check runs first, so the
 * media-type compatibility check is skipped for unsupported types.
 */
@Override
public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
    if (!typesMatch(type, genericType)) {
        return false;
    }
    return MoreMediaTypes.TEXT_CSV_TYPE.isCompatible(mediaType);
}
// A non-matching entity class must not be writeable even when the media type is text/csv.
@Test void isNotWritableForOtherClasses() { boolean isWritable = sut.isWriteable(Search.class, null, null, MoreMediaTypes.TEXT_CSV_TYPE); assertThat(isWritable).isFalse(); }
// Fetches all config entries belonging to the given namespace (tenant). A null tenant
// is a programming error; a blank one is normalized to the empty-string default tenant.
// The SELECT statement is generated by the dialect-specific ConfigInfoMapper.
@Override public List<ConfigInfoWrapper> queryConfigInfoByNamespace(String tenantId) { if (Objects.isNull(tenantId)) { throw new IllegalArgumentException("tenantId can not be null"); } String tenantTmp = StringUtils.isBlank(tenantId) ? StringUtils.EMPTY : tenantId; ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO); final String sql = configInfoMapper.select( Arrays.asList("data_id", "group_id", "tenant_id", "app_name", "type", "gmt_modified"), Collections.singletonList("tenant_id")); return databaseOperate.queryMany(sql, new Object[] {tenantTmp}, CONFIG_INFO_WRAPPER_ROW_MAPPER); }
// Mocks the database query and checks the tenant filter reaches queryMany unchanged
// and the mocked rows are returned as-is.
@Test void testQueryConfigInfoByNamespace() { //mock select config state List<ConfigInfoWrapper> mockConfigs = new ArrayList<>(); mockConfigs.add(createMockConfigInfoWrapper(0)); mockConfigs.add(createMockConfigInfoWrapper(1)); mockConfigs.add(createMockConfigInfoWrapper(2)); String tenant = "tenant13245"; when(databaseOperate.queryMany(anyString(), eq(new Object[] {tenant}), eq(CONFIG_INFO_WRAPPER_ROW_MAPPER))).thenReturn(mockConfigs); //execute return mock obj List<ConfigInfoWrapper> configInfoWrappers = embeddedConfigInfoPersistService.queryConfigInfoByNamespace(tenant); //expect check assertEquals(mockConfigs, configInfoWrappers); }
/**
 * Creates and persists a new OAuth2 authorization code for the given user/client,
 * valid for {@code TIMEOUT} seconds from now.
 *
 * @return the persisted code entity, including the generated code value
 */
@Override
public OAuth2CodeDO createAuthorizationCode(Long userId, Integer userType, String clientId,
                                            List<String> scopes, String redirectUri, String state) {
    OAuth2CodeDO codeDO = new OAuth2CodeDO();
    codeDO.setCode(generateCode());
    codeDO.setUserId(userId);
    codeDO.setUserType(userType);
    codeDO.setClientId(clientId);
    codeDO.setScopes(scopes);
    codeDO.setExpiresTime(LocalDateTime.now().plusSeconds(TIMEOUT));
    codeDO.setRedirectUri(redirectUri);
    codeDO.setState(state);
    oauth2CodeMapper.insert(codeDO);
    return codeDO;
}
// Round-trips a created authorization code through the mapper and checks every field
// plus a not-yet-expired expiry timestamp.
@Test public void testCreateAuthorizationCode() { // 准备参数 Long userId = randomLongId(); Integer userType = RandomUtil.randomEle(UserTypeEnum.values()).getValue(); String clientId = randomString(); List<String> scopes = Lists.newArrayList("read", "write"); String redirectUri = randomString(); String state = randomString(); // 调用 OAuth2CodeDO codeDO = oauth2CodeService.createAuthorizationCode(userId, userType, clientId, scopes, redirectUri, state); // 断言 OAuth2CodeDO dbCodeDO = oauth2CodeMapper.selectByCode(codeDO.getCode()); assertPojoEquals(codeDO, dbCodeDO, "createTime", "updateTime", "deleted"); assertEquals(userId, codeDO.getUserId()); assertEquals(userType, codeDO.getUserType()); assertEquals(clientId, codeDO.getClientId()); assertEquals(scopes, codeDO.getScopes()); assertEquals(redirectUri, codeDO.getRedirectUri()); assertEquals(state, codeDO.getState()); assertFalse(DateUtils.isExpired(codeDO.getExpiresTime())); }
// Reconciles a Backup resource through its lifecycle:
//  - deletion: run cleanup once, drop the housekeeping finalizer, stop retrying;
//  - PENDING: mark RUNNING with a start timestamp, perform the (long-running,
//    blocking) backup, then record SUCCEEDED or FAILED with a reason/message --
//    interruption (shutdown) is reported separately from generic system errors;
//  - a phase still RUNNING at this point means the previous run exited abnormally
//    (e.g. status update failed mid-interrupt), so it is forced to FAILED;
//  - terminal phases: if an expiry is configured, re-enqueue until it passes, then
//    delete the resource.
// NOTE(review): the failure message string appears split across a line break here;
// presumably an artifact of extraction from a text block -- verify against the
// original source.
@Override public Result reconcile(Request request) { return client.fetch(Backup.class, request.name()) .map(backup -> { var metadata = backup.getMetadata(); var status = backup.getStatus(); var spec = backup.getSpec(); if (isDeleted(backup)) { if (removeFinalizers(metadata, Set.of(HOUSE_KEEPER_FINALIZER))) { migrationService.cleanup(backup).block(); client.update(backup); } return doNotRetry(); } if (addFinalizers(metadata, Set.of(HOUSE_KEEPER_FINALIZER))) { client.update(backup); } if (Phase.PENDING.equals(status.getPhase())) { // Do backup try { status.setPhase(Phase.RUNNING); status.setStartTimestamp(Instant.now(clock)); updateStatus(request.name(), status); // Long period execution when backing up migrationService.backup(backup).block(); status.setPhase(Phase.SUCCEEDED); status.setCompletionTimestamp(Instant.now(clock)); updateStatus(request.name(), status); } catch (Throwable t) { var unwrapped = Exceptions.unwrap(t); log.error("Failed to backup", unwrapped); // Only happen when shutting down status.setPhase(Phase.FAILED); if (unwrapped instanceof InterruptedException) { status.setFailureReason("Interrupted"); status.setFailureMessage("The backup process was interrupted."); } else { status.setFailureReason("SystemError"); status.setFailureMessage( "Something went wrong! 
Error message: " + unwrapped.getMessage()); } updateStatus(request.name(), status); } } // Only happen when failing to update status when interrupted if (Phase.RUNNING.equals(status.getPhase())) { status.setPhase(Phase.FAILED); status.setFailureReason("UnexpectedExit"); status.setFailureMessage("The backup process may exit abnormally."); updateStatus(request.name(), status); } // Check the expires at and requeue if necessary if (isTerminal(status.getPhase())) { var expiresAt = spec.getExpiresAt(); if (expiresAt != null) { var now = Instant.now(clock); if (now.isBefore(expiresAt)) { return new Result(true, Duration.between(now, expiresAt)); } client.delete(backup); } } return doNotRetry(); }).orElseGet(Result::doNotRetry); }
// A backup already in FAILED phase is terminal with no expiry: no re-enqueue and no
// new backup run.
@Test void whenBackupWasFailed() { var name = "fake-backup"; var backup = createPureBackup(name); backup.getStatus().setPhase(Backup.Phase.FAILED); when(client.fetch(Backup.class, name)).thenReturn(Optional.of(backup)); var result = reconciler.reconcile(new Reconciler.Request(name)); assertNotNull(result); assertFalse(result.reEnqueue()); Mockito.verify(migrationService, never()).backup(any(Backup.class)); }
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
// The exact 7-argument signature must win over the vararg candidate regardless of
// the order the functions were registered in.
@Test public void shouldChooseSpecificOverVarArgsInMiddleReversedInsertionOrder() { // Given: givenFunctions( function(OTHER, 2, INT, INT, STRING_VARARGS, INT), function(EXPECTED, -1, INT, INT, STRING, STRING, STRING, STRING, INT) ); // When: final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of( SqlArgument.of(SqlTypes.INTEGER), SqlArgument.of(SqlTypes.INTEGER), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.INTEGER) )); // Then: assertThat(fun.name(), equalTo(EXPECTED)); }
// Appends the elements to the tail of the deque by blocking on the async variant's
// result; returns the value reported by addLastAsync.
@Override public int addLast(V... elements) { return get(addLastAsync(elements)); }
// addLast must append elements at the tail, preserving insertion order.
@Test public void testAddLast() { RDeque<Integer> queue = redisson.getDeque("deque"); queue.addLast(1); queue.addLast(2); queue.addLast(3); assertThat(queue).containsExactly(1, 2, 3); }
// Plain delegation to the wrapped data source; any SQLException propagates unchanged.
@Override public void setLogWriter(final PrintWriter out) throws SQLException { dataSource.setLogWriter(out); }
// setLogWriter must propagate the SQLException thrown by the delegate data source.
@Test void assertSetLogWriterFailure() throws SQLException { doThrow(new SQLException("")).when(dataSource).setLogWriter(printWriter); assertThrows(SQLException.class, () -> new PipelineDataSourceWrapper(dataSource, TypedSPILoader.getService(DatabaseType.class, "FIXTURE")).setLogWriter(printWriter)); }
// The per-index variant of this cost model ignores currentIndex: transaction costs
// depend only on the position's entry/exit trades, so it delegates to calculate(position).
@Override public Num calculate(Position position, int currentIndex) { return this.calculate(position); }
// Position cost must equal the sum of the entry and exit trade costs; the
// currentIndex argument must have no effect on the result.
@Test public void calculateBuyPosition() { // Calculate the transaction costs of a closed long position int holdingPeriod = 2; Trade entry = Trade.buyAt(0, DoubleNum.valueOf(100), DoubleNum.valueOf(1), transactionModel); Trade exit = Trade.sellAt(holdingPeriod, DoubleNum.valueOf(110), DoubleNum.valueOf(1), transactionModel); Position position = new Position(entry, exit, transactionModel, new ZeroCostModel()); Num costFromBuy = entry.getCost(); Num costFromSell = exit.getCost(); Num costsFromModel = transactionModel.calculate(position, holdingPeriod); assertNumEquals(costsFromModel, costFromBuy.plus(costFromSell)); assertNumEquals(costsFromModel, DoubleNum.valueOf(2.1)); assertNumEquals(costFromBuy, DoubleNum.valueOf(1)); }
// Validates the ALTER readwrite-splitting statement against the current rule
// configuration before any metadata is changed; the checker throws on violations.
@Override public void checkBeforeUpdate(final AlterReadwriteSplittingRuleStatement sqlStatement) { ReadwriteSplittingRuleStatementChecker.checkAlteration(database, sqlStatement.getRules(), rule.getConfiguration()); }
// Duplicate write data sources across rules in the statement must be rejected during
// alteration checking.
@Test void assertCheckSQLStatementWithDuplicateWriteResourceNamesInStatement() { ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); when(database.getResourceMetaData()).thenReturn(resourceMetaData); ReadwriteSplittingRule rule = mock(ReadwriteSplittingRule.class); when(rule.getConfiguration()).thenReturn(createCurrentRuleConfigurationWithMultipleRules()); executor.setRule(rule); assertThrows(DuplicateReadwriteSplittingActualDataSourceException.class, () -> executor.checkBeforeUpdate(createSQLStatementWithDuplicateWriteResourceNames("readwrite_ds_0", "readwrite_ds_1", "TEST"))); }
// Validates a URL string. The value must first parse as a java.net.URI; additional
// checks then run on the raw (still-escaped) components: scheme whitelist, authority
// (with a special case allowing an empty authority for file: URLs while rejecting
// file: authorities containing ':'), then path, query and fragment validation.
public boolean isValid(String value) { if (value == null) { return false; } URI uri; // ensure value is a valid URI try { uri = new URI(value); } catch (URISyntaxException e) { return false; } // OK, perfom additional validation String scheme = uri.getScheme(); if (!isValidScheme(scheme)) { return false; } String authority = uri.getRawAuthority(); if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority return true; // this is a local file - nothing more to do here } else if ("file".equals(scheme) && authority != null && authority.contains(":")) { return false; } else { // Validate the authority if (!isValidAuthority(authority)) { return false; } } if (!isValidPath(uri.getRawPath())) { return false; } if (!isValidQuery(uri.getRawQuery())) { return false; } if (!isValidFragment(uri.getRawFragment())) { return false; } return true; }
// VALIDATOR-283: an invalid %-escape ("%fr") in the query must fail validation while
// a valid one ("%22") must pass.
@Test public void testValidator283() { UrlValidator validator = new UrlValidator(); assertFalse(validator.isValid("http://finance.yahoo.com/news/Owners-54B-NY-housing-apf-2493139299.html?x=0&ap=%fr")); assertTrue(validator.isValid("http://finance.yahoo.com/news/Owners-54B-NY-housing-apf-2493139299.html?x=0&ap=%22")); }
// Stream-stream left join overload taking a plain ValueJoiner: adapts it to the
// key-aware ValueJoinerWithKey form and delegates to the primary implementation.
@Override public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream, final ValueJoiner<? super V, ? super VO, ? extends VR> joiner, final JoinWindows windows) { return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows); }
// A null Joined argument must trigger an NPE with a descriptive message.
// NOTE(review): exercises the stream-TABLE leftJoin overload, not the stream-stream
// windowed overload shown elsewhere in this dump.
@Test public void shouldNotAllowNullJoinedOnTableLeftJoin() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.leftJoin(testTable, MockValueJoiner.TOSTRING_JOINER, null)); assertThat(exception.getMessage(), equalTo("joined can't be null")); }
// Convenience factory for a byte[]/byte[] Kafka read with ByteArrayDeserializer
// preconfigured for both key and value.
public static Read<byte[], byte[]> readBytes() { return KafkaIO.<byte[], byte[]>read() .withKeyDeserializer(ByteArrayDeserializer.class) .withValueDeserializer(ByteArrayDeserializer.class); }
// Display data must expose the topic pattern plus the sanitized consumer config
// defaults (auto-commit off, latest offset reset, receive buffer size).
@Test public void testSourceWithPatternDisplayData() { KafkaIO.Read<byte[], byte[]> read = KafkaIO.readBytes() .withBootstrapServers("myServer1:9092,myServer2:9092") .withTopicPattern("[a-z]est") .withConsumerFactoryFn( new ConsumerFactoryFn( Lists.newArrayList("test"), 10, 10, OffsetResetStrategy.EARLIEST)); DisplayData displayData = DisplayData.from(read); assertThat(displayData, hasDisplayItem("topicPattern", "[a-z]est")); assertThat(displayData, hasDisplayItem("enable.auto.commit", false)); assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092")); assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest")); assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288)); }
// Always reports false -- presumably because this ring-buffer variant has no other
// producers to unblock (the accompanying test name suggests a single-producer setup);
// NOTE(review): confirm against the base class's unblock() contract.
public boolean unblock() { return false; }
// With no competing producers, unblock() must report false.
@Test void shouldNotUnblockBecauseNotOtherProducersToRaceWith() { assertFalse(ringBuffer.unblock()); }
// Stores a BigDecimal, reusing the existing ValueBigNumber holder when the current
// value is already of BIGNUMBER type and otherwise replacing it; always clears the
// null flag afterwards.
public void setValue( BigDecimal num ) { if ( value == null || value.getType() != VALUE_TYPE_BIGNUMBER ) { value = new ValueBigNumber( num ); } else { value.setBigNumber( num ); } setNull( false ); }
// Exercises several setValue overloads: Value (copies value but also name/origin),
// StringBuffer (forces STRING type), byte, and null handling.
@Test public void testSetValue() { Value vs = new Value( "Name", Value.VALUE_TYPE_INTEGER ); vs.setValue( 100L ); vs.setOrigin( "origin" ); Value vs1 = new Value( (Value) null ); assertTrue( vs1.isNull() ); assertTrue( vs1.isEmpty() ); assertNull( vs1.getName() ); assertNull( vs1.getOrigin() ); assertEquals( Value.VALUE_TYPE_NONE, vs1.getType() ); Value vs2 = new Value( "newName", Value.VALUE_TYPE_INTEGER ); vs2.setOrigin( "origin1" ); vs2.setValue( vs ); assertEquals( "origin", vs2.getOrigin() ); assertEquals( vs.getInteger(), vs2.getInteger() ); Value vs3 = new Value( "newName", Value.VALUE_TYPE_INTEGER ); vs3.setValue( new StringBuffer( "Sven" ) ); assertEquals( Value.VALUE_TYPE_STRING, vs3.getType() ); assertEquals( "Sven", vs3.getString() ); Value vs4 = new Value( "newName", Value.VALUE_TYPE_STRING ); vs4.setValue( new StringBuffer( "Test" ) ); vs4.setValue( new StringBuffer( "Sven" ) ); assertEquals( Value.VALUE_TYPE_STRING, vs4.getType() ); assertEquals( "Sven", vs4.getString() ); Value vs5 = new Value( "Name", Value.VALUE_TYPE_INTEGER ); vs5.setValue( (byte) 4 ); assertEquals( 4L, vs5.getInteger() ); Value vs6 = new Value( "Name", Value.VALUE_TYPE_INTEGER ); vs6.setValue( (Value) null ); assertFalse( vs6.isNull() ); assertNull( vs6.getName() ); assertNull( vs6.getOrigin() ); assertEquals( Value.VALUE_TYPE_NONE, vs6.getType() ); }
// Resolves the name-server stream for a hostname by walking up its domain suffixes:
// look the current name up in the per-domain map, and on a miss strip the leftmost
// label and retry. When no dot remains (or only a trailing dot), fall back to the
// default name servers. Starting indexOf at position 1 skips a leading dot.
@Override public DnsServerAddressStream nameServerAddressStream(String hostname) { for (;;) { int i = hostname.indexOf('.', 1); if (i < 0 || i == hostname.length() - 1) { return defaultNameServerAddresses.stream(); } DnsServerAddresses addresses = domainToNameServerStreamMap.get(hostname); if (addresses != null) { return addresses.stream(); } hostname = hostname.substring(i + 1); } }
// The most specific domain suffix match (dc1.linecorp.local) must win over the
// broader one (linecorp.local).
@Test public void moreRefinedSelectionReturnedWhenMatch(@TempDir Path tempDir) throws Exception { File f = buildFile(tempDir, "domain linecorp.local\n" + "nameserver 127.0.0.2\n" + "nameserver 127.0.0.3\n"); File f2 = buildFile(tempDir, "domain dc1.linecorp.local\n" + "nameserver 127.0.0.4\n" + "nameserver 127.0.0.5\n"); UnixResolverDnsServerAddressStreamProvider p = new UnixResolverDnsServerAddressStreamProvider(f, f2); DnsServerAddressStream stream = p.nameServerAddressStream("myhost.dc1.linecorp.local"); assertHostNameEquals("127.0.0.4", stream.next()); assertHostNameEquals("127.0.0.5", stream.next()); }
/**
 * Returns a Snowflake ID generator for the given worker and datacenter ids.
 *
 * Singleton.get caches one instance per (class, workerId, datacenterId)
 * argument combination, so repeated calls with the same ids return the
 * same generator and therefore share its sequence state.
 */
public static Snowflake getSnowflake(long workerId, long datacenterId) {
    return Singleton.get(Snowflake.class, workerId, datacenterId);
}
// Benchmark-style uniqueness check: 100 threads each generate 10k Snowflake
// ids concurrently; every id must be unique. Disabled by default because of
// its runtime cost.
@Test
@Disabled
public void snowflakeBenchTest() {
    final Set<Long> set = new ConcurrentHashSet<>();
    final Snowflake snowflake = IdUtil.getSnowflake(1, 1);
    // Number of generator threads.
    int threadCount = 100;
    // Ids generated per thread.
    final int idCountPerThread = 10000;
    final CountDownLatch latch = new CountDownLatch(threadCount);
    for(int i =0; i < threadCount; i++) {
        ThreadUtil.execute(() -> {
            for(int i1 = 0; i1 < idCountPerThread; i1++) {
                long id = snowflake.nextId();
                set.add(id);
                // Console.log("Add new id: {}", id);
            }
            latch.countDown();
        });
    }
    // Wait for all worker threads to finish.
    try {
        latch.await();
    } catch (InterruptedException e) {
        throw new UtilException(e);
    }
    // The set size equals the total count only if no id collided.
    assertEquals(threadCount * idCountPerThread, set.size());
}
@Override public boolean init( StepMetaInterface smi, StepDataInterface sdi ) { meta = (GetRepositoryNamesMeta) smi; data = (GetRepositoryNamesData) sdi; if ( super.init( smi, sdi ) ) { try { // Get the repository objects from the repository... // data.list = getRepositoryObjects(); } catch ( Exception e ) { logError( "Error initializing step: ", e ); return false; } data.rownr = 1L; data.filenr = 0; return true; } return false; }
@Test //PDI-16258 public void testShowHidden() throws KettleException { IUser user = Mockito.mock( IUser.class ); Mockito.when( user.isAdmin() ).thenReturn( true ); Mockito.when( repoExtended.getUserInfo() ).thenReturn( user ); init( repoExtended, "/", false, ".*", "", All, 0 ); Mockito.verify( repoExtended, Mockito.never() ) .loadRepositoryDirectoryTree( Mockito.anyString(), Mockito.anyString(), Mockito.anyInt(), Mockito.eq( false ), Mockito.anyBoolean(), anyBoolean() ); Mockito.when( user.isAdmin() ).thenReturn( false ); init( repoExtended, "/", false, ".*", "", All, 0 ); Mockito.verify( repoExtended ) .loadRepositoryDirectoryTree( Mockito.anyString(), Mockito.anyString(), Mockito.anyInt(), Mockito.eq( false ), Mockito.anyBoolean(), Mockito.anyBoolean() ); }
/**
 * Creates a root element with the given name and options.
 *
 * NOTE(review): the second constructor argument is null here — presumably
 * the parent element, absent for a root; confirm against Generic's
 * constructor signature.
 */
public <T extends __> Generic<T> root(String name, EnumSet<EOpt> opts) {
    return new Generic<T>(name, null, opts);
}
// Drives HamletImpl's fluent builder through nested elements, no-end-tag
// elements, and pre/inline formatting, then verifies the emitted markup plus
// the tracked nesting level and indent count.
@Test
void testGeneric() {
    PrintWriter out = spy(new PrintWriter(System.out));
    HamletImpl hi = new HamletImpl(out, 0, false);
    hi.
        root("start")._attr("name", "value").
        __("start text").
        elem("sub")._attr("name", "value").
        __("sub text").__().
        elem("sub1")._noEndTag()._attr("boolean", null).
        __("sub1text").__().
        __("start text2").
        elem("pre")._pre().
        __("pre text").
        elem("i")._inline().__("inline").__().__().
        elem("i")._inline().__("inline after pre").__().
        __("start text3").
        elem("sub2").
        __("sub2text").__().
        elem("sub3")._noEndTag().
        __("sub3text").__().
        elem("sub4")._noEndTag().
        elem("i")._inline().__("inline").__().
        __("sub4text").__().__();
    out.flush();
    // All elements were closed, and indentation was applied 20 times.
    assertEquals(0, hi.nestLevel);
    assertEquals(20, hi.indents);
    verify(out).print("<start");
    // Both the root and "sub" carry name="value".
    verify(out, times(2)).print(" name=\"value\"");
    // A null-valued attribute renders as a bare boolean attribute.
    verify(out).print(" boolean");
    verify(out).print("</start>");
    // _noEndTag elements must not emit closing tags.
    verify(out, never()).print("</sub1>");
    verify(out, never()).print("</sub3>");
    verify(out, never()).print("</sub4>");
}
/**
 * Validates the analyzed query, throwing if it violates either rule below.
 */
@Override
public void validate(final Analysis analysis) {
    // Persistent queries may not read directly from a windowed table source.
    failPersistentQueryOnWindowedTable(analysis);
    // Reject user columns whose names collide with pseudo columns.
    QueryValidatorUtil.validateNoUserColumnsWithSameNameAsPseudoColumns(analysis);
}
// A transient (non-persistent) push query over a windowed table is allowed:
// validate(...) must complete without throwing.
@Test
public void shouldNotThrowOnTransientPushQueryOnWindowedTable() {
    // Given:
    givenTransientQuery();
    givenSourceTable();
    givenWindowedSource();

    // When/Then:
    validator.validate(analysis);
}
/**
 * SQL rpad(text, targetLength, padString): pads {@code text} on the right
 * with {@code padString} up to {@code targetLength}.
 *
 * Delegates to the shared pad() helper; the final argument text.length()
 * presumably selects where the padding is inserted (after the original
 * bytes) — confirm against pad()'s implementation.
 */
@Description("pads a string on the right")
@ScalarFunction("rpad")
@LiteralParameters({"x", "y"})
@SqlType(StandardTypes.VARCHAR)
public static Slice rightPad(@SqlType("varchar(x)") Slice text, @SqlType(StandardTypes.BIGINT) long targetLength, @SqlType("varchar(y)") Slice padString) {
    return pad(text, targetLength, padString, text.length());
}
// Covers RPAD: exact-length no-op, multi-character pad strings, multi-byte
// (CJK) text, truncation when target < input length, empty pad rejection,
// and out-of-range target lengths.
@Test
public void testRightPad() {
    assertFunction("RPAD('text', 5, 'x')", VARCHAR, "textx");
    assertFunction("RPAD('text', 4, 'x')", VARCHAR, "text");
    assertFunction("RPAD('text', 6, 'xy')", VARCHAR, "textxy");
    assertFunction("RPAD('text', 7, 'xy')", VARCHAR, "textxyx");
    assertFunction("RPAD('text', 9, 'xyz')", VARCHAR, "textxyzxy");

    // Multi-byte code points: the expected results show lengths are counted
    // in characters, not bytes.
    assertFunction("RPAD('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ', 10, '\u671B')", VARCHAR, "\u4FE1\u5FF5 \u7231 \u5E0C\u671B \u671B");
    assertFunction("RPAD('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ', 11, '\u671B')", VARCHAR, "\u4FE1\u5FF5 \u7231 \u5E0C\u671B \u671B\u671B");
    assertFunction("RPAD('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ', 12, '\u5E0C\u671B')", VARCHAR, "\u4FE1\u5FF5 \u7231 \u5E0C\u671B \u5E0C\u671B\u5E0C");
    assertFunction("RPAD('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ', 13, '\u5E0C\u671B')", VARCHAR, "\u4FE1\u5FF5 \u7231 \u5E0C\u671B \u5E0C\u671B\u5E0C\u671B");
    assertFunction("RPAD('', 3, 'a')", VARCHAR, "aaa");
    assertFunction("RPAD('abc', 0, 'e')", VARCHAR, "");

    // truncation
    assertFunction("RPAD('text', 3, 'xy')", VARCHAR, "tex");
    assertFunction("RPAD('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ', 5, '\u671B')", VARCHAR, "\u4FE1\u5FF5 \u7231 ");

    // failure modes
    assertInvalidFunction("RPAD('abc', 3, '')", "Padding string must not be empty");

    // invalid target lengths
    long maxSize = Integer.MAX_VALUE;
    assertInvalidFunction("RPAD('abc', -1, 'foo')", "Target length must be in the range [0.." + maxSize + "]");
    assertInvalidFunction("RPAD('abc', " + (maxSize + 1) + ", '')", "Target length must be in the range [0.." + maxSize + "]");
}
/**
 * Converts a named codahale metric into a map with "full_name", "name"
 * (the last dot-separated segment), "type", and "metric" (the serialized
 * payload for timers/meters/histograms, the metric itself for
 * counters/gauges).
 *
 * @throws IllegalArgumentException if the metric is of an unknown type
 */
public static Map<String, Object> map(String metricName, Metric metric) {
    // Resolve the type label and payload first; an unknown type aborts
    // before any map is built.
    final String type;
    final Object payload;
    if (metric instanceof Timer) {
        type = "timer";
        payload = buildTimerMap((Timer) metric);
    } else if (metric instanceof Meter) {
        type = "meter";
        payload = buildMeterMap((Meter) metric);
    } else if (metric instanceof Histogram) {
        type = "histogram";
        payload = buildHistogramMap((Histogram) metric);
    } else if (metric instanceof Counter) {
        type = "counter";
        payload = metric;
    } else if (metric instanceof Gauge) {
        type = "gauge";
        payload = metric;
    } else {
        throw new IllegalArgumentException("Unknown metric type " + metric.getClass());
    }

    final Map<String, Object> metricMap = Maps.newHashMap();
    metricMap.put("full_name", metricName);
    // Short name: everything after the final dot (the whole name if none).
    metricMap.put("name", metricName.substring(metricName.lastIndexOf(".") + 1));
    metricMap.put("metric", payload);
    metricMap.put("type", type);
    return metricMap;
}
// An anonymous Metric subtype matches none of the known metric classes, so
// MetricUtils.map(...) must reject it with an IllegalArgumentException that
// names the offending class.
@Test
public void mapThrowsIllegalArgumentExceptionForUnknownMetricType() {
    final Metric metric = new Metric() {};
    assertThatIllegalArgumentException()
        .isThrownBy(() -> MetricUtils.map("metric", metric))
        .withMessageStartingWith("Unknown metric type class org.graylog2.shared.metrics.MetricUtilsTest");
}
Bytes toBytes(final KO foreignKey, final K primaryKey) { //The serialization format - note that primaryKeySerialized may be null, such as when a prefixScan //key is being created. //{Integer.BYTES foreignKeyLength}{foreignKeySerialized}{Optional-primaryKeySerialized} final byte[] foreignKeySerializedData = foreignKeySerializer.serialize(foreignKeySerdeTopic, foreignKey); //? bytes final byte[] primaryKeySerializedData = primaryKeySerializer.serialize(primaryKeySerdeTopic, primaryKey); final ByteBuffer buf = ByteBuffer.allocate(Integer.BYTES + foreignKeySerializedData.length + primaryKeySerializedData.length); buf.putInt(foreignKeySerializedData.length); buf.put(foreignKeySerializedData); buf.put(primaryKeySerializedData); return Bytes.wrap(buf.array()); }
// A null foreign key is unsupported: toBytes dereferences the serialized
// foreign-key byte array, so serializing null must surface as an NPE.
@Test
public void nullForeignKeySerdeTest() {
    final CombinedKeySchema<String, Integer> cks = new CombinedKeySchema<>(
        () -> "fkTopic", Serdes.String(),
        () -> "pkTopic", Serdes.Integer()
    );
    assertThrows(NullPointerException.class, () -> cks.toBytes(null, 10));
}
/**
 * Parses "name=PERM" entries and registers each on the access resource.
 * Topic entries are stored under the plain name; group entries are stored
 * under their retry-topic name.
 *
 * @throws AclException if any entry does not split into exactly two parts
 */
public static void parseResourcePerms(PlainAccessResource plainAccessResource, Boolean isTopic, List<String> resources) {
    // Nothing to register when no entries were supplied.
    if (resources == null || resources.isEmpty()) {
        return;
    }
    for (String entry : resources) {
        String[] parts = StringUtils.split(entry, "=");
        if (parts.length != 2) {
            throw new AclException(String.format("Parse resource permission failed for %s:%s", isTopic ? "topic" : "group", entry));
        }
        String name = parts[0].trim();
        String resourceKey = isTopic ? name : PlainAccessResource.getRetryTopic(name);
        plainAccessResource.addResourceAndPerm(resourceKey, parsePermFromString(parts[1].trim()));
    }
}
// Exercises Permission.parseResourcePerms for group and topic entries, then
// ends by feeding a malformed (empty) entry, which must raise AclException.
@Test(expected = AclException.class)
public void setTopicPermTest() {
    PlainAccessResource plainAccessResource = new PlainAccessResource();
    Map<String, Byte> resourcePermMap = plainAccessResource.getResourcePermMap();

    // null / empty resource lists are no-ops and leave the map untouched.
    Permission.parseResourcePerms(plainAccessResource, false, null);
    Assert.assertNull(resourcePermMap);
    List<String> groups = new ArrayList<>();
    Permission.parseResourcePerms(plainAccessResource, false, groups);
    Assert.assertNull(resourcePermMap);

    // Group entries are stored under their retry-topic name.
    groups.add("groupA=DENY");
    groups.add("groupB=PUB|SUB");
    groups.add("groupC=PUB");
    Permission.parseResourcePerms(plainAccessResource, false, groups);
    resourcePermMap = plainAccessResource.getResourcePermMap();
    byte perm = resourcePermMap.get(PlainAccessResource.getRetryTopic("groupA"));
    Assert.assertEquals(perm, Permission.DENY);
    perm = resourcePermMap.get(PlainAccessResource.getRetryTopic("groupB"));
    Assert.assertEquals(perm, Permission.PUB | Permission.SUB);
    perm = resourcePermMap.get(PlainAccessResource.getRetryTopic("groupC"));
    Assert.assertEquals(perm, Permission.PUB);

    // Topic entries are stored under the plain topic name.
    List<String> topics = new ArrayList<>();
    topics.add("topicA=DENY");
    topics.add("topicB=PUB|SUB");
    topics.add("topicC=PUB");
    Permission.parseResourcePerms(plainAccessResource, true, topics);
    perm = resourcePermMap.get("topicA");
    Assert.assertEquals(perm, Permission.DENY);
    perm = resourcePermMap.get("topicB");
    Assert.assertEquals(perm, Permission.PUB | Permission.SUB);
    perm = resourcePermMap.get("topicC");
    Assert.assertEquals(perm, Permission.PUB);

    // Entry without a "name=perm" shape triggers the expected AclException.
    List<String> erron = new ArrayList<>();
    erron.add("");
    Permission.parseResourcePerms(plainAccessResource, false, erron);
}
/**
 * Starts the reader: creates and starts the shard readers pool, then
 * immediately attempts to advance to the first record.
 *
 * @return the result of advance() — whether a record is already available
 * @throws IOException wrapping any TransientKinesisException from pool setup
 */
@Override
public boolean start() throws IOException {
    LOG.info("Starting reader using {}", initialCheckpointGenerator);
    try {
        shardReadersPool = createShardReadersPool();
        shardReadersPool.start();
    } catch (TransientKinesisException e) {
        // Surface transient Kinesis failures as IOException per the reader contract.
        throw new IOException(e);
    }
    return advance();
}
// With no records available, start() (which delegates to advance()) must
// report "no data yet" by returning false.
@Test
public void startReturnsFalseIfNoDataAtTheBeginning() throws IOException {
    assertThat(reader.start()).isFalse();
}
/**
 * Two HistogramCells are equal when their dirty state, histogram value,
 * and metric name all match. Any non-HistogramCell (including null) is
 * unequal.
 */
@Override
public boolean equals(@Nullable Object object) {
    // Guard clause: reject null and foreign types up front.
    if (!(object instanceof HistogramCell)) {
        return false;
    }
    HistogramCell other = (HistogramCell) object;
    return Objects.equals(dirty, other.dirty)
        && Objects.equals(value, other.value)
        && Objects.equals(name, other.name);
}
// Two cells created from identical metric names and bucket types must be
// equal and share a hash code (equals compares dirty state, value, and name).
@Test
public void testEquals() {
    HistogramCell cell = new HistogramCell(KV.of(MetricName.named("hello", "world"), bucketType));
    HistogramCell equalCell = new HistogramCell(KV.of(MetricName.named("hello", "world"), bucketType));
    Assert.assertEquals(equalCell, cell);
    Assert.assertEquals(equalCell.hashCode(), cell.hashCode());
}
/**
 * Releases the PubSub client, if one was created: requests shutdown and
 * waits up to one second for termination. Shutdown failures are logged,
 * never propagated — destroy() must not throw.
 */
@Override
public void destroy() {
    // Nothing to release if the client was never initialized.
    if (this.pubSubClient == null) {
        return;
    }
    try {
        this.pubSubClient.shutdown();
        this.pubSubClient.awaitTermination(1, TimeUnit.SECONDS);
    } catch (Exception e) {
        log.error("Failed to shutdown PubSub client during destroy()", e);
    }
}
// destroy() with a non-null client must call shutdown() and then wait up to
// one second for termination.
@Test
public void givenPubSubClientIsNotNull_whenDestroy_thenShutDownAndAwaitTermination() throws InterruptedException {
    // Inject the mock client into the node's private field.
    ReflectionTestUtils.setField(node, "pubSubClient", pubSubClientMock);

    node.destroy();

    then(pubSubClientMock).should().shutdown();
    then(pubSubClientMock).should().awaitTermination(1, TimeUnit.SECONDS);
}
/**
 * Creates a dict data record and returns its generated id.
 */
@Override
public Long createDictData(DictDataSaveReqVO createReqVO) {
    // Validate that the referenced dict type exists.
    validateDictTypeExists(createReqVO.getDictType());
    // Validate that the value is unique within the dict type.
    validateDictDataValueUnique(null, createReqVO.getDictType(), createReqVO.getValue());
    // Persist the new dict data record.
    DictDataDO dictData = BeanUtils.toBean(createReqVO, DictDataDO.class);
    dictDataMapper.insert(dictData);
    return dictData.getId();
}
// Happy-path creation: a valid request yields a generated id, and the
// persisted row matches the request.
@Test
public void testCreateDictData_success() {
    // Prepare the request; id stays null so it is assigned on insert.
    DictDataSaveReqVO reqVO = randomPojo(DictDataSaveReqVO.class, o -> o.setStatus(randomCommonStatus()))
        .setId(null); // prevent id from being pre-assigned
    // Mock the dict-type lookup so type validation passes.
    when(dictTypeService.getDictType(eq(reqVO.getDictType()))).thenReturn(randomDictTypeDO(reqVO.getDictType()));
    // Invoke.
    Long dictDataId = dictDataService.createDictData(reqVO);
    // Assert an id was generated.
    assertNotNull(dictDataId);
    // Verify the persisted record matches the request (id excluded).
    DictDataDO dictData = dictDataMapper.selectById(dictDataId);
    assertPojoEquals(reqVO, dictData, "id");
}
public static ObjectNode convertFromGHResponse(GHResponse ghResponse, TranslationMap translationMap, Locale locale, DistanceConfig distanceConfig) { ObjectNode json = JsonNodeFactory.instance.objectNode(); if (ghResponse.hasErrors()) throw new IllegalStateException( "If the response has errors, you should use the method NavigateResponseConverter#convertFromGHResponseError"); PointList waypoints = ghResponse.getBest().getWaypoints(); final ArrayNode routesJson = json.putArray("routes"); List<ResponsePath> paths = ghResponse.getAll(); for (int i = 0; i < paths.size(); i++) { ResponsePath path = paths.get(i); ObjectNode pathJson = routesJson.addObject(); putRouteInformation(pathJson, path, i, translationMap, locale, distanceConfig); } final ArrayNode waypointsJson = json.putArray("waypoints"); for (int i = 0; i < waypoints.size(); i++) { ObjectNode waypointJson = waypointsJson.addObject(); // TODO get names waypointJson.put("name", ""); putLocation(waypoints.getLat(i), waypoints.getLon(i), waypointJson); } json.put("code", "Ok"); // TODO: Maybe we need a different format... uuid: "cji4ja4f8004o6xrsta8w4p4h" json.put("uuid", UUID.randomUUID().toString().replaceAll("-", "")); return json; }
// Voice instructions must be localized: the same route rendered for English
// and German locales yields translated announcements.
@Test
public void voiceInstructionTranslationTest() {
    GHResponse rsp = hopper.route(new GHRequest(42.554851, 1.536198, 42.510071, 1.548128).setProfile(profile));
    ObjectNode json = NavigateResponseConverter.convertFromGHResponse(rsp, trMap, Locale.ENGLISH, distanceConfig);
    JsonNode steps = json.get("routes").get(0).get("legs").get(0).get("steps");
    JsonNode voiceInstruction = steps.get(14).get("voiceInstructions").get(0);
    assertEquals("In 2 kilometers keep right", voiceInstruction.get("announcement").asText());

    // Same route, German locale and German distance config.
    rsp = hopper.route(
        new GHRequest(42.554851, 1.536198, 42.510071, 1.548128).setProfile(profile).setLocale(Locale.GERMAN));
    DistanceConfig distanceConfigGerman = new DistanceConfig(DistanceUtils.Unit.METRIC, trMap, Locale.GERMAN);
    json = NavigateResponseConverter.convertFromGHResponse(rsp, trMap, Locale.GERMAN, distanceConfigGerman);
    steps = json.get("routes").get(0).get("legs").get(0).get("steps");
    voiceInstruction = steps.get(14).get("voiceInstructions").get(0);
    assertEquals("In 2 Kilometern rechts halten", voiceInstruction.get("announcement").asText());
}
/**
 * Converts raw Kafka record bytes (JSON) into a Connect schema/value pair.
 * Null bytes (tombstones) map to SchemaAndValue.NULL. With schemas enabled,
 * the JSON must be the two-field {"schema": ..., "payload": ...} envelope;
 * with schemas disabled, the raw JSON is wrapped in a synthetic envelope
 * with a null schema.
 *
 * @throws DataException on deserialization failure or an invalid envelope
 */
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
    JsonNode jsonValue;

    // This handles a tombstone message
    if (value == null) {
        return SchemaAndValue.NULL;
    }

    try {
        jsonValue = deserializer.deserialize(topic, value);
    } catch (SerializationException e) {
        throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
    }

    // With schemas enabled, the payload must be exactly the two-field
    // envelope object — nothing more, nothing less.
    if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
        throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
            " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");

    // The deserialized data should either be an envelope object containing the schema and the payload or the schema
    // was stripped during serialization and we need to fill in an all-encompassing schema.
    if (!config.schemasEnabled()) {
        ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
        envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null); // null schema => schemaless payload
        envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
        jsonValue = envelope;
    }

    Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    return new SchemaAndValue(
        schema,
        convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
    );
}
// Maps with non-string keys are encoded in the JSON payload as an array of
// [key, value] pairs; converting back must rebuild the Integer-keyed map.
@Test
public void mapToConnectNonStringKeys() {
    byte[] mapJson = "{ \"schema\": { \"type\": \"map\", \"keys\": { \"type\" : \"int32\" }, \"values\": { \"type\" : \"int32\" } }, \"payload\": [ [1, 12], [2, 15] ] }".getBytes();
    Map<Integer, Integer> expected = new HashMap<>();
    expected.put(1, 12);
    expected.put(2, 15);
    assertEquals(new SchemaAndValue(SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).build(), expected),
        converter.toConnectData(TOPIC, mapJson));
}