focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public TokenResponse exchangePkceCode( URI tokenEndpoint, String code, String redirectUri, String clientId, String codeVerifier) { var body = UrlFormBodyBuilder.create() .param("grant_type", "authorization_code") .param("redirect_uri", redirectUri) .param("client_id", clientId) .param("code", code) .param("code_verifier", codeVerifier) .build(); var headers = List.of( new Header(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON), new Header(HttpHeaders.CONTENT_TYPE, UrlFormBodyBuilder.MEDIA_TYPE)); var req = new Request(tokenEndpoint, "POST", headers, body); var res = httpClient.call(req); if (res.status() != 200) { throw HttpExceptions.httpFailBadStatus(req.method(), tokenEndpoint, res.status()); } return JsonCodec.readValue(res.body(), TokenResponse.class); }
@Test void exchangePkceCode_badStatus(WireMockRuntimeInfo wm) { var path = "/auth/token"; stubFor(post(path).willReturn(badRequest())); var base = URI.create(wm.getHttpBaseUrl()); var tokenEndpoint = base.resolve(path); var e = assertThrows( HttpException.class, () -> client.exchangePkceCode(tokenEndpoint, null, null, null, null)); assertEquals( "http request failed: bad status 'POST %s' status=400".formatted(tokenEndpoint), e.getMessage()); }
@Override public void validate(final Analysis analysis) { try { RULES.forEach(rule -> rule.check(analysis)); } catch (final KsqlException e) { throw new KsqlException(e.getMessage() + PULL_QUERY_SYNTAX_HELP, e); } QueryValidatorUtil.validateNoUserColumnsWithSameNameAsPseudoColumns(analysis); }
@Test public void shouldThrowOnPullQueryThatHasRefinement() { // Given: when(analysis.getRefinementInfo()).thenReturn(Optional.of(refinementInfo)); // When: final Exception e = assertThrows( KsqlException.class, () -> validator.validate(analysis) ); // Then: assertThat(e.getMessage(), containsString("Pull queries don't support EMIT clauses.")); }
@Override public void createOrUpdate(final String path, final Object data) { zkClient.createOrUpdate(path, data, CreateMode.PERSISTENT); }
@Test public void testOnRuleChangedUpdate() { RuleData ruleData = RuleData.builder() .id(MOCK_ID) .name(MOCK_NAME) .pluginName(MOCK_PLUGIN_NAME) .selectorId(MOCK_SELECTOR_ID) .build(); String ruleRealPath = DefaultPathConstants.buildRulePath(ruleData.getPluginName(), ruleData.getSelectorId(), ruleData.getId()); zookeeperDataChangedListener.onRuleChanged(ImmutableList.of(ruleData), DataEventTypeEnum.UPDATE); verify(zkClient, times(1)).createOrUpdate(ruleRealPath, ruleData, CreateMode.PERSISTENT); }
@Override public String toString() { return "SymmetricEncryptionConfig{" + "enabled=" + enabled + ", algorithm='" + algorithm + '\'' + ", password='***'" + ", salt='***'" + ", iterationCount=***" + ", key=***" + '}'; }
@Test public void testToString() { assertContains(config.toString(), "SymmetricEncryptionConfig"); }
public Future<Void> deletePersistentClaims(List<String> maybeDeletePvcs, List<String> desiredPvcs) { List<Future<Void>> futures = new ArrayList<>(); maybeDeletePvcs.removeAll(desiredPvcs); for (String pvcName : maybeDeletePvcs) { LOGGER.debugCr(reconciliation, "Considering PVC {} for deletion", pvcName); futures.add(considerPersistentClaimDeletion(pvcName)); } return Future.all(futures) .map((Void) null); }
@Test public void testVolumesDeletion(VertxTestContext context) { PersistentVolumeClaim pvcWithDeleteClaim = new PersistentVolumeClaimBuilder(createPvc("data-pod-3")) .editMetadata() .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM, "true")) .endMetadata() .build(); List<String> desiredPvcs = List.of( "data-pod-0", "data-pod-1", "data-pod-2" ); List<String> maybeDeletePvcs = List.of( "data-pod-0", "data-pod-1", "data-pod-2", "data-pod-3", "data-pod-4" ); List<PersistentVolumeClaim> pvcs = List.of( createPvc("data-pod-0"), createPvc("data-pod-1"), createPvc("data-pod-2"), pvcWithDeleteClaim, createPvc("data-pod-4") ); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock the PVC Operator PvcOperator mockPvcOps = supplier.pvcOperations; when(mockPvcOps.getAsync(eq(NAMESPACE), ArgumentMatchers.startsWith("data-"))) .thenAnswer(invocation -> { String pvcName = invocation.getArgument(1); return Future.succeededFuture(pvcs.stream().filter(pvc -> pvcName.equals(pvc.getMetadata().getName())).findFirst().orElse(null)); }); ArgumentCaptor<String> pvcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); when(mockPvcOps.reconcile(any(), anyString(), pvcNameCaptor.capture(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); // Reconcile the PVCs PvcReconciler reconciler = new PvcReconciler( new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), mockPvcOps, supplier.storageClassOperations ); Checkpoint async = context.checkpoint(); reconciler.deletePersistentClaims(new ArrayList<>(maybeDeletePvcs), new ArrayList<>(desiredPvcs)) .onComplete(res -> { assertThat(res.succeeded(), is(true)); assertThat(pvcNameCaptor.getAllValues().size(), is(1)); assertThat(pvcNameCaptor.getValue(), is("data-pod-3")); assertThat(pvcCaptor.getAllValues().size(), is(1)); assertThat(pvcCaptor.getValue(), is(nullValue())); 
async.flag(); }); }
public RestLiAttachmentReader(final MultiPartMIMEReader multiPartMIMEReader) { _multiPartMIMEReader = multiPartMIMEReader; }
@Test public void testRestLiAttachmentReader() { //Create a mock MultiPartMIMEReader and pass to the RestLiAttachmentReader. Verify that API calls are propagated accordingly. final MultiPartMIMEReader multiPartMIMEReader = mock(MultiPartMIMEReader.class); final RestLiAttachmentReader attachmentReader = new RestLiAttachmentReader(multiPartMIMEReader); attachmentReader.drainAllAttachments(); attachmentReader.haveAllAttachmentsFinished(); final RestLiAttachmentReaderCallback dummyCallback = new RestLiAttachmentReaderCallback() { //None of these should be called. @Override public void onNewAttachment(RestLiAttachmentReader.SingleRestLiAttachmentReader singleRestLiAttachmentReader) { Assert.fail(); } @Override public void onFinished() { Assert.fail(); } @Override public void onDrainComplete() { Assert.fail(); } @Override public void onStreamError(Throwable throwable) { Assert.fail(); } }; attachmentReader.registerAttachmentReaderCallback(dummyCallback); //Verify the calls above made it correctly to the layer below verify(multiPartMIMEReader, times(1)).drainAllParts(); verify(multiPartMIMEReader, times(1)).haveAllPartsFinished(); verify(multiPartMIMEReader, times(1)).registerReaderCallback(isA(MultiPartMIMEReaderCallback.class)); verifyNoMoreInteractions(multiPartMIMEReader); }
@Override public EpoxyModel<?> set(int index, EpoxyModel<?> element) { EpoxyModel<?> previousModel = super.set(index, element); if (previousModel.id() != element.id()) { notifyRemoval(index, 1); notifyInsertion(index, 1); } return previousModel; }
@Test public void testSetSameIdDoesntNotify() { EpoxyModel<?> newModelWithSameId = new TestModel(); newModelWithSameId.id(modelList.get(0).id()); modelList.set(0, newModelWithSameId); verifyNoMoreInteractions(observer); assertEquals(newModelWithSameId, modelList.get(0)); }
@Override public void filter(final ContainerRequestContext requestContext, ContainerResponseContext response) { final Object entity = response.getEntity(); if (isEmptyOptional(entity)) { response.setStatus(Response.Status.NO_CONTENT.getStatusCode()); response.setEntity(null); } }
@Test void changesStatusToNoContentForEmptyOptional() { doReturn(Optional.empty()).when(response).getEntity(); toTest.filter(requestContext, response); verify(response).setStatus(204); verify(response).setEntity(null); }
@Override public void subscribe(Collection<String> topics) { subscribeInternal(topics, Optional.empty()); }
@Test public void testSubscriptionOnNullTopic() { consumer = newConsumer(); assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(singletonList(null))); }
@Override public Status check() { if (applicationContext == null) { SpringExtensionInjector springExtensionInjector = SpringExtensionInjector.get(applicationModel); applicationContext = springExtensionInjector.getContext(); } if (applicationContext == null) { return new Status(Status.Level.UNKNOWN); } Map<String, DataSource> dataSources = applicationContext.getBeansOfType(DataSource.class, false, false); if (CollectionUtils.isEmptyMap(dataSources)) { return new Status(Status.Level.UNKNOWN); } Status.Level level = Status.Level.OK; StringBuilder buf = new StringBuilder(); for (Map.Entry<String, DataSource> entry : dataSources.entrySet()) { DataSource dataSource = entry.getValue(); if (buf.length() > 0) { buf.append(", "); } buf.append(entry.getKey()); try (Connection connection = dataSource.getConnection()) { DatabaseMetaData metaData = connection.getMetaData(); try (ResultSet resultSet = metaData.getTypeInfo()) { if (!resultSet.next()) { level = Status.Level.ERROR; } } buf.append(metaData.getURL()); buf.append('('); buf.append(metaData.getDatabaseProductName()); buf.append('-'); buf.append(metaData.getDatabaseProductVersion()); buf.append(')'); } catch (Throwable e) { logger.warn(CONFIG_WARN_STATUS_CHECKER, "", "", e.getMessage(), e); return new Status(level, e.getMessage()); } } return new Status(level, buf.toString()); }
@Test void testWithDatasourceNotHasNextResult() throws SQLException { Map<String, DataSource> map = new HashMap<String, DataSource>(); DataSource dataSource = mock(DataSource.class); Connection connection = mock(Connection.class, Answers.RETURNS_DEEP_STUBS); given(dataSource.getConnection()).willReturn(connection); given(connection.getMetaData().getTypeInfo().next()).willReturn(false); map.put("mockDatabase", dataSource); given(applicationContext.getBeansOfType(eq(DataSource.class), anyBoolean(), anyBoolean())) .willReturn(map); Status status = dataSourceStatusChecker.check(); assertThat(status.getLevel(), is(Status.Level.ERROR)); }
public LogicalSlot allocateLogicalSlot() { LOG.debug("Allocating logical slot from shared slot ({})", physicalSlotRequestId); Preconditions.checkState( state == State.ALLOCATED, "The shared slot has already been released."); final LogicalSlot slot = new SingleLogicalSlot( new SlotRequestId(), physicalSlot, Locality.UNKNOWN, this, slotWillBeOccupiedIndefinitely); allocatedLogicalSlots.put(slot.getSlotRequestId(), slot); return slot; }
@Test void testReturnLogicalSlotTriggersExternalReleaseOnLastSlot() { final TestingPhysicalSlot physicalSlot = TestingPhysicalSlot.builder().build(); final AtomicBoolean externalReleaseInitiated = new AtomicBoolean(false); final SharedSlot sharedSlot = new SharedSlot( new SlotRequestId(), physicalSlot, false, () -> externalReleaseInitiated.set(true)); final LogicalSlot logicalSlot1 = sharedSlot.allocateLogicalSlot(); final LogicalSlot logicalSlot2 = sharedSlot.allocateLogicalSlot(); // this implicitly returns the slot logicalSlot1.releaseSlot(new Exception("test")); assertThat(externalReleaseInitiated).isFalse(); logicalSlot2.releaseSlot(new Exception("test")); assertThat(externalReleaseInitiated).isTrue(); }
@Override boolean putIfAbsent(String name, Mapping mapping) { return storage().putIfAbsent(name, mapping) == null; }
@Test public void when_putIfAbsent_then_doesNotOverwrite() { String name = randomName(); assertThat(storage.putIfAbsent(name, mapping(name, "type-1"))).isTrue(); assertThat(storage.putIfAbsent(name, mapping(name, "type-2"))).isFalse(); assertTrue(storage.allObjects().stream().anyMatch(m -> m instanceof Mapping && ((Mapping) m).connectorType().equals("type-1"))); assertTrue(storage.allObjects().stream().noneMatch(m -> m instanceof Mapping && ((Mapping) m).connectorType().equals("type-2"))); }
@Override public List<SelectorData> convert(final String json) { return GsonUtils.getInstance().fromList(json, SelectorData.class); }
@Test public void testConvert() { List<SelectorData> selectorDataList = new LinkedList<>(); ConditionData conditionData = new ConditionData(); conditionData.setParamName("conditionName-" + 0); List<ConditionData> conditionDataList = Collections.singletonList(conditionData); selectorDataList.add(SelectorData.builder().name("name1").enabled(true).continued(true).conditionList(conditionDataList).build()); selectorDataList.add(SelectorData.builder().name("name2").enabled(true).continued(true).build()); Gson gson = new Gson(); String json = gson.toJson(selectorDataList); List<SelectorData> convertedList = selectorDataHandler.convert(json); assertThat(convertedList, is(selectorDataList)); }
@Override public int write(ByteBuffer src) throws IOException { checkNotNull(src); checkOpen(); checkWritable(); int written = 0; // will definitely either be assigned or an exception will be thrown synchronized (this) { boolean completed = false; try { if (!beginBlocking()) { return 0; // AsynchronousCloseException will be thrown } file.writeLock().lockInterruptibly(); try { if (append) { position = file.size(); } written = file.write(position, src); position += written; file.setLastModifiedTime(fileSystemState.now()); completed = true; } finally { file.writeLock().unlock(); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); } finally { endBlocking(completed); } } return written; }
@Test public void testWrite() throws IOException { RegularFile file = regularFile(0); FileChannel channel = channel(file, WRITE); assertEquals(0, channel.position()); ByteBuffer buf = buffer("1234567890"); ByteBuffer buf2 = buffer("1234567890"); assertEquals(10, channel.write(buf)); assertEquals(10, channel.position()); buf.flip(); assertEquals(20, channel.write(new ByteBuffer[] {buf, buf2})); assertEquals(30, channel.position()); buf.flip(); buf2.flip(); assertEquals(20, channel.write(new ByteBuffer[] {buf, buf2}, 0, 2)); assertEquals(50, channel.position()); buf.flip(); assertEquals(10, channel.write(buf, 5)); assertEquals(50, channel.position()); }
@Override public URL use(ApplicationId applicationId, String resourceKey) throws YarnException { Path resourcePath = null; UseSharedCacheResourceRequest request = Records.newRecord( UseSharedCacheResourceRequest.class); request.setAppId(applicationId); request.setResourceKey(resourceKey); try { UseSharedCacheResourceResponse response = this.scmClient.use(request); if (response != null && response.getPath() != null) { resourcePath = new Path(response.getPath()); } } catch (Exception e) { // Just catching IOException isn't enough. // RPC call can throw ConnectionException. // We don't handle different exceptions separately at this point. throw new YarnException(e); } if (resourcePath != null) { URL pathURL = URL.fromPath(resourcePath); return pathURL; } else { // The resource was not in the cache. return null; } }
@Test(expected = YarnException.class) public void testUseError() throws Exception { String message = "Mock IOExcepiton!"; when(cProtocol.use(isA(UseSharedCacheResourceRequest.class))).thenThrow( new IOException(message)); client.use(mock(ApplicationId.class), "key"); }
@SuppressWarnings("unchecked") static SelMap of(Map<String, Object> input) { if (input == null) { return new SelMap(null); } SelMap map = new SelMap(new HashMap<>()); for (Map.Entry<String, Object> entry : input.entrySet()) { if (entry.getValue() instanceof String) { map.val.put(SelString.of(entry.getKey()), SelString.of((String) entry.getValue())); } else if (entry.getValue() instanceof Long || entry.getValue() instanceof Integer) { map.val.put( SelString.of(entry.getKey()), SelLong.of(((Number) entry.getValue()).longValue())); } else if (entry.getValue() instanceof Double || entry.getValue() instanceof Float) { map.val.put( SelString.of(entry.getKey()), SelDouble.of(((Number) entry.getValue()).doubleValue())); } else if (entry.getValue() instanceof Boolean) { map.val.put(SelString.of(entry.getKey()), SelBoolean.of((Boolean) entry.getValue())); } else if (entry.getValue() instanceof Map) { map.val.put( SelString.of(entry.getKey()), SelMap.of((Map<String, Object>) entry.getValue())); } else if (entry.getValue() instanceof String[]) { map.val.put( SelString.of(entry.getKey()), SelArray.of(entry.getValue(), SelTypes.STRING_ARRAY)); } else if (entry.getValue() instanceof long[]) { map.val.put( SelString.of(entry.getKey()), SelArray.of(entry.getValue(), SelTypes.LONG_ARRAY)); } else if (entry.getValue() instanceof double[]) { map.val.put( SelString.of(entry.getKey()), SelArray.of(entry.getValue(), SelTypes.DOUBLE_ARRAY)); } else if (entry.getValue() instanceof boolean[]) { map.val.put( SelString.of(entry.getKey()), SelArray.of(entry.getValue(), SelTypes.BOOLEAN_ARRAY)); } else { throw new IllegalArgumentException( "Invalid map entry type (" + (entry.getValue() == null ? "null" : entry.getValue().getClass().getName()) + ") for Map constructor"); } } return map; }
@Test(expected = IllegalArgumentException.class) public void testInvalidTypeNullValue() { SelMap.of(Collections.singletonMap("bar", null)); }
public static boolean isAssignableFrom(Class clazz, Class cls) { Objects.requireNonNull(cls, "cls"); return clazz.isAssignableFrom(cls); }
@Test public void isAssignableFromTest() { final boolean assignableFrom = ClassUtil.isAssignableFrom(TestClass.class, TestSubClass.class); Assert.isTrue(assignableFrom); }
public void setReactorNameSupplier(Supplier<String> reactorNameSupplier) { this.reactorNameSupplier = checkNotNull(reactorNameSupplier, "reactorNameSupplier"); }
@Test public void test_setReactorNameSupplier_whenNull() { ReactorBuilder builder = newBuilder(); assertThrows(NullPointerException.class, () -> builder.setReactorNameSupplier(null)); }
public static Class getJavaType(int sqlType) { switch (sqlType) { case Types.CHAR: case Types.VARCHAR: case Types.LONGVARCHAR: return String.class; case Types.BINARY: case Types.VARBINARY: case Types.LONGVARBINARY: return byte[].class; case Types.BIT: return Boolean.class; case Types.TINYINT: case Types.SMALLINT: return Short.class; case Types.INTEGER: return Integer.class; case Types.BIGINT: return Long.class; case Types.REAL: return Float.class; case Types.DOUBLE: case Types.FLOAT: return Double.class; case Types.DATE: return Date.class; case Types.TIME: return Time.class; case Types.TIMESTAMP: return Timestamp.class; default: throw new RuntimeException("We do not support tables with SqlType: " + getSqlTypeName(sqlType)); } }
@Test public void testError() { Exception e = assertThrows(Exception.class, () -> Util.getJavaType(Types.REF)); assertEquals("We do not support tables with SqlType: REF", e.getMessage()); e = assertThrows(Exception.class, () -> Util.getJavaType(-1000)); assertEquals("Unknown sqlType -1000", e.getMessage()); }
public static String getStackTrace(final Throwable t) { if (t == null) { return ""; } final ByteArrayOutputStream out = new ByteArrayOutputStream(); final PrintStream ps = new PrintStream(out); t.printStackTrace(ps); ps.flush(); return out.toString(); }
@Test void testGetStackTrace() { assertEquals("", ExceptionUtil.getStackTrace(null)); String stackTrace = ExceptionUtil.getStackTrace(nacosRuntimeException); assertTrue( stackTrace.contains("com.alibaba.nacos.api.exception.runtime.NacosRuntimeException: errCode: 500, errMsg: Test")); assertTrue(stackTrace.contains("at")); assertTrue(stackTrace.contains("Caused by: java.lang.RuntimeException: I'm caused exception.")); }
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception { return newGetter(object, parent, modifier, field.getType(), field::get, (t, et) -> new FieldGetter(parent, field, modifier, t, et)); }
@Test public void newFieldGetter_whenExtractingFromNull_Collection_AndReducerSuffixInNotEmpty_thenReturnNullGetter() throws Exception { OuterObject object = OuterObject.nullInner("name"); Getter getter = GetterFactory.newFieldGetter(object, null, innersCollectionField, "[any]"); assertSame(NullMultiValueGetter.NULL_MULTIVALUE_GETTER, getter); }
@Override public SnowflakeTableMetadata loadTableMetadata(SnowflakeIdentifier tableIdentifier) { Preconditions.checkArgument( tableIdentifier.type() == SnowflakeIdentifier.Type.TABLE, "loadTableMetadata requires a TABLE identifier, got '%s'", tableIdentifier); SnowflakeTableMetadata tableMeta; try { final String finalQuery = "SELECT SYSTEM$GET_ICEBERG_TABLE_INFORMATION(?) AS METADATA"; tableMeta = connectionPool.run( conn -> queryHarness.query( conn, finalQuery, TABLE_METADATA_RESULT_SET_HANDLER, tableIdentifier.toIdentifierString())); } catch (SQLException e) { throw snowflakeExceptionToIcebergException( tableIdentifier, e, String.format("Failed to get table metadata for '%s'", tableIdentifier)); } catch (InterruptedException e) { throw new UncheckedInterruptedException( e, "Interrupted while getting table metadata for '%s'", tableIdentifier); } return tableMeta; }
@SuppressWarnings("unchecked") @Test public void testGetTableMetadataInterruptedException() throws SQLException, InterruptedException { Exception injectedException = new InterruptedException("Fake interrupted exception"); when(mockClientPool.run(any(ClientPool.Action.class))).thenThrow(injectedException); assertThatExceptionOfType(UncheckedInterruptedException.class) .isThrownBy( () -> snowflakeClient.loadTableMetadata( SnowflakeIdentifier.ofTable("DB_1", "SCHEMA_1", "TABLE_1"))) .withMessageContaining( "Interrupted while getting table metadata for 'TABLE: 'DB_1.SCHEMA_1.TABLE_1''") .withCause(injectedException); }
public static <T> String render(ClassPluginDocumentation<T> classPluginDocumentation) throws IOException { return render("task", JacksonMapper.toMap(classPluginDocumentation)); }
@SuppressWarnings({"rawtypes", "unchecked"}) @Test void dag() throws IOException { PluginScanner pluginScanner = new PluginScanner(ClassPluginDocumentationTest.class.getClassLoader()); RegisteredPlugin scan = pluginScanner.scan(); Class dag = scan.findClass(Dag.class.getName()).orElseThrow(); ClassPluginDocumentation<? extends Task> doc = ClassPluginDocumentation.of(jsonSchemaGenerator, scan, dag, Task.class); String render = DocumentationGenerator.render(doc); assertThat(render, containsString("Dag")); assertThat(render, containsString("**Required:** ✔️")); assertThat(render, containsString("`concurrent`")); assertThat(render, not(containsString("requires an Enterprise Edition"))); int propertiesIndex = render.indexOf("Properties"); int definitionsIndex = render.indexOf("Definitions"); assertRequiredPropsAreFirst(render.substring(propertiesIndex, definitionsIndex)); String definitionsDoc = render.substring(definitionsIndex); Arrays.stream(definitionsDoc.split("[^#]### ")) // first is 'Definitions' header .skip(1) .forEach(DocumentationGeneratorTest::assertRequiredPropsAreFirst); }
public static MetricsReporter loadMetricsReporter(Map<String, String> properties) { String impl = properties.get(CatalogProperties.METRICS_REPORTER_IMPL); if (impl == null) { return LoggingMetricsReporter.instance(); } LOG.info("Loading custom MetricsReporter implementation: {}", impl); DynConstructors.Ctor<MetricsReporter> ctor; try { ctor = DynConstructors.builder(MetricsReporter.class) .loader(CatalogUtil.class.getClassLoader()) .impl(impl) .buildChecked(); } catch (NoSuchMethodException e) { throw new IllegalArgumentException( String.format("Cannot initialize MetricsReporter, missing no-arg constructor: %s", impl), e); } MetricsReporter reporter; try { reporter = ctor.newInstance(); } catch (ClassCastException e) { throw new IllegalArgumentException( String.format( "Cannot initialize MetricsReporter, %s does not implement MetricsReporter.", impl), e); } reporter.initialize(properties); return reporter; }
@Test public void loadCustomMetricsReporter_badClass() { assertThatThrownBy( () -> CatalogUtil.loadMetricsReporter( ImmutableMap.of( CatalogProperties.METRICS_REPORTER_IMPL, TestFileIONotImpl.class.getName()))) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining("does not implement MetricsReporter"); }
@Override public ReadwriteSplittingRuleConfiguration swapToObject(final YamlReadwriteSplittingRuleConfiguration yamlConfig) { Collection<ReadwriteSplittingDataSourceGroupRuleConfiguration> dataSources = yamlConfig.getDataSourceGroups().entrySet().stream() .map(entry -> swapToObject(entry.getKey(), entry.getValue())).collect(Collectors.toList()); Map<String, AlgorithmConfiguration> loadBalancerMap = null == yamlConfig.getLoadBalancers() ? Collections.emptyMap() : yamlConfig.getLoadBalancers().entrySet().stream().collect(Collectors.toMap(Entry::getKey, entry -> algorithmSwapper.swapToObject(entry.getValue()))); return new ReadwriteSplittingRuleConfiguration(dataSources, loadBalancerMap); }
@Test void assertSwapToObject() { ReadwriteSplittingRuleConfiguration actual = getSwapper().swapToObject(createYamlReadwriteSplittingRuleConfiguration()); assertThat(actual.getDataSourceGroups().size(), is(1)); assertThat(actual.getLoadBalancers().size(), is(1)); assertReadwriteSplittingRule(actual); }
@SuppressWarnings("MethodMayBeStatic") // Non-static to support DI. public long parse(final String text) { final String date; final String time; final String timezone; if (text.contains("T")) { date = text.substring(0, text.indexOf('T')); final String withTimezone = text.substring(text.indexOf('T') + 1); timezone = getTimezone(withTimezone); time = completeTime(withTimezone.substring(0, withTimezone.length() - timezone.length()) .replaceAll("Z$","")); } else { date = completeDate(text); time = completeTime(""); timezone = ""; } try { final ZoneId zoneId = parseTimezone(timezone); return PARSER.parse(date + "T" + time, zoneId); } catch (final RuntimeException e) { throw new KsqlException("Failed to parse timestamp '" + text + "': " + e.getMessage() + HELP_MESSAGE, e ); } }
@Test public void shouldParseYear() { // When: assertThat(parser.parse("2017"), is(fullParse("2017-01-01T00:00:00.000+0000"))); }
@DoNotSub public int size() { return size; }
@Test void shouldReportEmpty() { assertThat(list.size(), is(0)); assertThat(list.isEmpty(), is(true)); }
@Override public boolean isEmpty() { checkState(!destroyed, destroyedMessage); return size() == 0; }
@Test public void testIsEmpty() throws Exception { expectPeerMessage(clusterCommunicator); assertTrue(ecMap.isEmpty()); ecMap.put(KEY1, VALUE1); assertFalse(ecMap.isEmpty()); ecMap.remove(KEY1); assertTrue(ecMap.isEmpty()); }
@SneakyThrows(SQLException.class) public static boolean isMatched(final EqualsBuilder equalsBuilder, final Object thisColumnValue, final Object thatColumnValue) { equalsBuilder.reset(); if (thisColumnValue instanceof Number && thatColumnValue instanceof Number) { return isNumberEquals((Number) thisColumnValue, (Number) thatColumnValue); } if (thisColumnValue instanceof SQLXML && thatColumnValue instanceof SQLXML) { return ((SQLXML) thisColumnValue).getString().equals(((SQLXML) thatColumnValue).getString()); } if (thisColumnValue instanceof Array && thatColumnValue instanceof Array) { return Objects.deepEquals(((Array) thisColumnValue).getArray(), ((Array) thatColumnValue).getArray()); } return equalsBuilder.append(thisColumnValue, thatColumnValue).isEquals(); }
@Test void assertIsIntegerEquals() { EqualsBuilder equalsBuilder = new EqualsBuilder(); String value = "123"; Long longValue = Long.parseLong(value); assertTrue(DataConsistencyCheckUtils.isMatched(equalsBuilder, longValue, Integer.parseInt(value))); assertTrue(DataConsistencyCheckUtils.isMatched(equalsBuilder, longValue, Short.parseShort(value))); assertTrue(DataConsistencyCheckUtils.isMatched(equalsBuilder, longValue, Byte.parseByte(value))); }
@Override public void execute(Exchange exchange) throws SmppException { byte[] message = getShortMessage(exchange.getIn()); ReplaceSm replaceSm = createReplaceSmTempate(exchange); replaceSm.setShortMessage(message); if (log.isDebugEnabled()) { log.debug("Sending replacement command for a short message for exchange id '{}' and message id '{}'", exchange.getExchangeId(), replaceSm.getMessageId()); } try { session.replaceShortMessage( replaceSm.getMessageId(), TypeOfNumber.valueOf(replaceSm.getSourceAddrTon()), NumberingPlanIndicator.valueOf(replaceSm.getSourceAddrNpi()), replaceSm.getSourceAddr(), replaceSm.getScheduleDeliveryTime(), replaceSm.getValidityPeriod(), new RegisteredDelivery(replaceSm.getRegisteredDelivery()), replaceSm.getSmDefaultMsgId(), replaceSm.getShortMessage()); } catch (Exception e) { throw new SmppException(e); } if (log.isDebugEnabled()) { log.debug("Sent replacement command for a short message for exchange id '{}' and message id '{}'", exchange.getExchangeId(), replaceSm.getMessageId()); } Message rspMsg = ExchangeHelper.getResultMessage(exchange); rspMsg.setHeader(SmppConstants.ID, replaceSm.getMessageId()); }
@Test public void bodyWithLatin1DataCodingNarrowedToCharset() throws Exception { final int dataCoding = 0x03; /* ISO-8859-1 (Latin1) */ byte[] body = { (byte) 0xFF, 'A', 'B', (byte) 0x00, (byte) 0xFF, (byte) 0x7F, 'C', (byte) 0xFF }; byte[] bodyNarrowed = { '?', 'A', 'B', '\0', '?', (byte) 0x7F, 'C', '?' }; Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut); exchange.getIn().setHeader(SmppConstants.COMMAND, "ReplaceSm"); exchange.getIn().setHeader(SmppConstants.DATA_CODING, dataCoding); exchange.getIn().setBody(body); command.execute(exchange); verify(session).replaceShortMessage((String) isNull(), eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"), (String) isNull(), (String) isNull(), eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)), eq((byte) 0), eq(bodyNarrowed)); }
public Future<KafkaVersionChange> reconcile() { return getVersionFromController() .compose(i -> getPods()) .compose(this::detectToAndFromVersions) .compose(i -> prepareVersionChange()); }
@Test public void testDowngradeWithOlderSubversions(VertxTestContext context) { String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION; String oldInterBrokerProtocolVersion = "2.8"; String oldLogMessageFormatVersion = "2.8"; String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; String interBrokerProtocolVersion = "2.8"; String logMessageFormatVersion = "2.8"; VersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), mockNewCluster( null, mockSps(oldKafkaVersion), mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) ) ); Checkpoint async = context.checkpoint(); vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); assertThat(c.to(), is(VERSIONS.version(kafkaVersion))); assertThat(c.interBrokerProtocolVersion(), nullValue()); assertThat(c.logMessageFormatVersion(), nullValue()); async.flag(); }))); }
@Override public RestResponse<KsqlEntityList> makeKsqlRequest( final URI serverEndPoint, final String sql, final Map<String, ?> requestProperties) { final KsqlTarget target = sharedClient .target(serverEndPoint); return getTarget(target) .postKsqlRequest(sql, requestProperties, Optional.empty()); }
@Test public void shouldHandleNoAuthHeader() { // Given: client = new DefaultKsqlClient(Optional.empty(), sharedClient, ksqlConfig); // When: final RestResponse<KsqlEntityList> result = client.makeKsqlRequest(SERVER_ENDPOINT, "Sql", ImmutableMap.of()); // Then: verify(target, never()).authorizationHeader(any()); assertThat(result, is(response)); }
@Override public String toString() { final String nInfo = n == 1 ? "" : "(" + n + ")"; return getClass().getSimpleName() + nInfo + "[" + this.indicator + "]"; }
@Test public void testToStringMethodWithN1() { prevValueIndicator = new PreviousValueIndicator(openPriceIndicator); final String prevValueIndicatorAsString = prevValueIndicator.toString(); assertTrue(prevValueIndicatorAsString.startsWith("PreviousValueIndicator[")); assertTrue(prevValueIndicatorAsString.endsWith("]")); }
@Override public List<Input<int[][]>> divideData(int num) { if (this.data == null) { return null; } else { var divisions = makeDivisions(this.data, num); var result = new ArrayList<Input<int[][]>>(num); var rowsDone = 0; //number of rows divided so far for (var i = 0; i < num; i++) { var rows = divisions[i]; if (rows != 0) { var divided = new int[rows][this.data[0].length]; System.arraycopy(this.data, rowsDone, divided, 0, rows); rowsDone += rows; var dividedInput = new ArrayInput(divided); result.add(dividedInput); } else { break; //rest of divisions will also be 0 } } return result; } }
@Test
void divideDataTest() {
    // Build a random 10x10 matrix and verify it is split row-wise into four
    // divisions of sizes 3, 3, 2, 2 with row order preserved.
    var rows = 10;
    var columns = 10;
    var inputMatrix = new int[rows][columns];
    var rand = new Random();
    for (var i = 0; i < rows; i++) {
        for (var j = 0; j < columns; j++) {
            inputMatrix[i][j] = rand.nextInt(10);
        }
    }
    var i = new ArrayInput(inputMatrix);
    var table = i.divideData(4);
    var division1 = new int[][]{inputMatrix[0], inputMatrix[1], inputMatrix[2]};
    var division2 = new int[][]{inputMatrix[3], inputMatrix[4], inputMatrix[5]};
    var division3 = new int[][]{inputMatrix[6], inputMatrix[7]};
    var division4 = new int[][]{inputMatrix[8], inputMatrix[9]};
    assertTrue(matricesSame(table.get(0).data, division1)
            && matricesSame(table.get(1).data, division2)
            && matricesSame(table.get(2).data, division3)
            && matricesSame(table.get(3).data, division4));
}
/**
 * Returns whether an IP-check result string equals the expected OK marker.
 *
 * @param checkIPsResult result string to test; may be {@code null}.
 * @return {@code true} only for an exact match with {@code CHECK_OK}.
 */
public static boolean checkOK(String checkIPsResult) {
    return checkIPsResult != null && checkIPsResult.equals(CHECK_OK);
}
@Test
void testCheckOk() {
    // Only the exact literal "ok" is accepted; anything else is rejected.
    assertTrue(InternetAddressUtil.checkOK("ok"));
    assertFalse(InternetAddressUtil.checkOK("ojbk"));
}
/**
 * Extracts the "update" number from a Java runtime version string, e.g.
 * 252 from "1.8.0_252-...", 2 from "11.0.2+9", 8 from "17.0.8.1".
 * Returns 0 when no update number can be determined.
 */
protected static int parseUpdate(String runtimeVersion) {
    LOGGER.debug(runtimeVersion);
    try {
        // Four numeric components (e.g. "17.0.8.1"): the third is the update number.
        String[] parts = runtimeVersion.split("\\.");
        if (parts.length == 4 && isNumeric(parts)) {
            return Integer.parseInt(parts[2]);
        }
        // Legacy "1.8.0_252" style: the update follows '_'. Otherwise fall back to
        // the last '.' for modern "11.0.2" style versions.
        int pos = runtimeVersion.indexOf('_');
        if (pos <= 0) {
            pos = runtimeVersion.lastIndexOf('.');
            if (pos <= 0) {
                //unexpected java version - return 0
                return 0;
            }
        }
        // The update number ends at the first '+' or '-' suffix after pos, if any.
        int end = runtimeVersion.indexOf('+', pos);
        if (end < 0) {
            end = runtimeVersion.indexOf('-', pos);
        }
        if (end > pos) {
            return Integer.parseInt(runtimeVersion.substring(pos + 1, end));
        }
        return Integer.parseInt(runtimeVersion.substring(pos + 1));
    } catch (NumberFormatException nfe) {
        // If the update version is not available, return 0
        return 0;
    }
}
@Test
public void testParseUpdate() {
    // Legacy 1.8 scheme with distro suffix: update sits between '_' and the next '-'.
    String runtimeVersion = "1.8.0_252-8u252-b09-1~deb9u1-b09";
    int expResult = 252;
    int result = Utils.parseUpdate(runtimeVersion);
    assertEquals(expResult, result);

    // Legacy 1.8 scheme without a suffix.
    runtimeVersion = "1.8.0_144";
    expResult = 144;
    result = Utils.parseUpdate(runtimeVersion);
    assertEquals(expResult, result);

    // Modern scheme with a build number after '+'.
    runtimeVersion = "11.0.2+9";
    expResult = 2;
    result = Utils.parseUpdate(runtimeVersion);
    assertEquals(expResult, result);

    // Modern three-part scheme.
    runtimeVersion = "11.0.2";
    expResult = 2;
    result = Utils.parseUpdate(runtimeVersion);
    assertEquals(expResult, result);

    // Four-part numeric scheme: the third component is the update number.
    runtimeVersion = "17.0.8.1";
    expResult = 8;
    result = Utils.parseUpdate(runtimeVersion);
    assertEquals(expResult, result);
}
/**
 * Validates that a mobile number is present before an SMS can be sent.
 *
 * @param mobile the mobile number to validate.
 * @return the same mobile number when it is non-empty.
 * @throws the service exception mapped from SMS_SEND_MOBILE_NOT_EXISTS when empty.
 */
@VisibleForTesting
public String validateMobile(String mobile) {
    if (!StrUtil.isEmpty(mobile)) {
        return mobile;
    }
    throw exception(SMS_SEND_MOBILE_NOT_EXISTS);
}
@Test
public void testCheckMobile_notExists() {
    // Prepare parameters: none needed.
    // Mock methods: none needed.
    // Call with a null mobile and assert the "mobile not exists" service exception.
    assertServiceException(() -> smsSendService.validateMobile(null), SMS_SEND_MOBILE_NOT_EXISTS);
}
/**
 * Converts an incoming rule-engine message into a device RPC request and forwards it
 * via the RPC service. The message originator must be a DEVICE and the message body
 * must contain "method" and "params"; otherwise the message is failed immediately.
 * RPC options (oneway, persistence, timeout, retries, ...) are read from metadata.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    JsonObject json = JsonParser.parseString(msg.getData()).getAsJsonObject();
    String tmp;
    if (msg.getOriginator().getEntityType() != EntityType.DEVICE) {
        ctx.tellFailure(msg, new RuntimeException("Message originator is not a device entity!"));
    } else if (!json.has("method")) {
        ctx.tellFailure(msg, new RuntimeException("Method is not present in the message!"));
    } else if (!json.has("params")) {
        ctx.tellFailure(msg, new RuntimeException("Params are not present in the message!"));
    } else {
        // Use the caller-supplied requestId when given, otherwise generate a random one.
        int requestId = json.has("requestId") ? json.get("requestId").getAsInt() : random.nextInt();
        boolean restApiCall = msg.isTypeOf(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE);

        // Each optional RPC flag/value is read from metadata with a sensible default.
        tmp = msg.getMetaData().getValue("oneway");
        boolean oneway = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp);

        tmp = msg.getMetaData().getValue(DataConstants.PERSISTENT);
        boolean persisted = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp);

        tmp = msg.getMetaData().getValue("requestUUID");
        UUID requestUUID = !StringUtils.isEmpty(tmp) ? UUID.fromString(tmp) : Uuids.timeBased();
        tmp = msg.getMetaData().getValue("originServiceId");
        String originServiceId = !StringUtils.isEmpty(tmp) ? tmp : null;

        // Default expiration is "now + configured timeout" when not provided explicitly.
        tmp = msg.getMetaData().getValue(DataConstants.EXPIRATION_TIME);
        long expirationTime = !StringUtils.isEmpty(tmp) ? Long.parseLong(tmp)
                : (System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(config.getTimeoutInSeconds()));

        tmp = msg.getMetaData().getValue(DataConstants.RETRIES);
        Integer retries = !StringUtils.isEmpty(tmp) ? Integer.parseInt(tmp) : null;

        String params = parseJsonData(json.get("params"));
        String additionalInfo = parseJsonData(json.get(DataConstants.ADDITIONAL_INFO));

        RuleEngineDeviceRpcRequest request = RuleEngineDeviceRpcRequest.builder()
                .oneway(oneway)
                .method(json.get("method").getAsString())
                .body(params)
                .tenantId(ctx.getTenantId())
                .deviceId(new DeviceId(msg.getOriginator().getId()))
                .requestId(requestId)
                .requestUUID(requestUUID)
                .originServiceId(originServiceId)
                .expirationTime(expirationTime)
                .retries(retries)
                .restApiCall(restApiCall)
                .persisted(persisted)
                .additionalInfo(additionalInfo)
                .build();

        // Route the RPC response: success -> SUCCESS relation with the response body,
        // error -> FAILURE relation with the error name wrapped as JSON.
        ctx.getRpcService().sendRpcRequestToDevice(request, ruleEngineDeviceRpcResponse -> {
            if (ruleEngineDeviceRpcResponse.getError().isEmpty()) {
                TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), ruleEngineDeviceRpcResponse.getResponse().orElse(TbMsg.EMPTY_JSON_OBJECT));
                ctx.enqueueForTellNext(next, TbNodeConnectionType.SUCCESS);
            } else {
                TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), wrap("error", ruleEngineDeviceRpcResponse.getError().get().name()));
                ctx.enqueueForTellFailure(next, ruleEngineDeviceRpcResponse.getError().get().name());
            }
        });
        // The incoming message is acked once the request has been dispatched.
        ctx.ack(msg);
    }
}
@Test
public void givenRequestId_whenOnMsg_thenVerifyRequest() {
    // An explicit requestId in the message body must be propagated to the RPC request.
    given(ctxMock.getRpcService()).willReturn(rpcServiceMock);
    given(ctxMock.getTenantId()).willReturn(TENANT_ID);
    String data = """
            {
                "method": "setGpio",
                "params": {
                    "pin": "23",
                    "value": 1
                },
                "requestId": 12345
            }
            """;
    TbMsg msg = TbMsg.newMsg(TbMsgType.TO_SERVER_RPC_REQUEST, DEVICE_ID, TbMsgMetaData.EMPTY, data);

    node.onMsg(ctxMock, msg);

    ArgumentCaptor<RuleEngineDeviceRpcRequest> requestCaptor = captureRequest();
    assertThat(requestCaptor.getValue().getRequestId()).isEqualTo(12345);
}
/**
 * Builds a hash ring for the given host-to-points mapping.
 * Pure delegation: the configured factory decides the ring implementation.
 *
 * @param pointsMap hosts mapped to their number of points on the ring.
 * @return the ring produced by the delegate factory.
 */
@Override
public Ring<T> createRing(Map<T, Integer> pointsMap) {
    final Ring<T> ring = _ringFactory.createRing(pointsMap);
    return ring;
}
@Test(groups = { "small", "back-end" })
public void testFactoryWithHashMethod() {
    // With a "uriRegex" hash config the delegating factory builds a
    // multi-probe consistent hash ring.
    RingFactory<String> factory = new DelegatingRingFactory<>(configBuilder(null, "uriRegex"));
    Ring<String> ring = factory.createRing(buildPointsMap(10));
    assertTrue(ring instanceof MPConsistentHashRing);
}
/**
 * Fetches the broker cluster topology from the name server.
 *
 * @param timeoutMillis RPC timeout in milliseconds.
 * @return the decoded {@code ClusterInfo} on success.
 * @throws MQBrokerException when the server answers with a non-SUCCESS code.
 */
public ClusterInfo getBrokerClusterInfo(
    final long timeoutMillis) throws InterruptedException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException, MQBrokerException {
    // A null address lets the remoting client pick a connected name server.
    final RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_BROKER_CLUSTER_INFO, null);
    final RemotingCommand response = this.remotingClient.invokeSync(null, request, timeoutMillis);
    assert response != null;
    if (response.getCode() == ResponseCode.SUCCESS) {
        return ClusterInfo.decode(response.getBody(), ClusterInfo.class);
    }
    throw new MQBrokerException(response.getCode(), response.getRemark());
}
@Test
public void assertGetBrokerClusterInfo() throws RemotingException, InterruptedException, MQBrokerException {
    // Stub a SUCCESS response whose body is a ClusterInfo with one cluster and one broker.
    mockInvokeSync();
    ClusterInfo responseBody = new ClusterInfo();
    Map<String, Set<String>> clusterAddrTable = new HashMap<>();
    clusterAddrTable.put(clusterName, new HashSet<>());
    Map<String, BrokerData> brokerAddrTable = new HashMap<>();
    brokerAddrTable.put(brokerName, new BrokerData());
    responseBody.setClusterAddrTable(clusterAddrTable);
    responseBody.setBrokerAddrTable(brokerAddrTable);
    setResponseBody(responseBody);
    // The decoded result must mirror the stubbed tables.
    ClusterInfo actual = mqClientAPI.getBrokerClusterInfo(defaultTimeout);
    assertNotNull(actual);
    assertEquals(1, actual.getClusterAddrTable().size());
    assertEquals(1, actual.getBrokerAddrTable().size());
}
/**
 * Encodes a long into a byte array of the given length using the default
 * (big-endian) byte order by delegating to the three-argument overload.
 *
 * @param longValue value to encode.
 * @param length number of bytes in the resulting array.
 * @return the encoded bytes.
 */
public static byte[] longToByteArray(long longValue, int length) {
    final boolean bigEndian = true;
    return longToByteArray(longValue, length, bigEndian);
}
@Test
public void testLongToByteArray() {
    // Encoding 10 into a single byte must yield that byte value.
    BeaconParser parser = new BeaconParser();
    byte[] bytes = parser.longToByteArray(10, 1);
    assertEquals("first byte should be 10", 10, bytes[0]);
}
/**
 * Registers a data table type. An existing registration for the same target type may
 * only be overwritten when it is marked replaceable; otherwise a
 * {@code DuplicateTypeException} describing both registrations is thrown.
 *
 * @param dataTableType the type to register.
 */
public void defineDataTableType(DataTableType dataTableType) {
    DataTableType existing = tableTypeByType.get(dataTableType.getTargetType());
    boolean conflicting = existing != null && !existing.isReplaceable();
    if (conflicting) {
        String message = format("" +
                "There already is a data table type registered that can supply %s.\n" +
                "You are trying to register a %s for %s.\n" +
                "The existing data table type registered a %s for %s.\n",
                dataTableType.getElementType(),
                dataTableType.getTransformerType().getSimpleName(),
                dataTableType.getElementType(),
                existing.getTransformerType().getSimpleName(),
                existing.getElementType());
        throw new DuplicateTypeException(message);
    }
    tableTypeByType.put(dataTableType.getTargetType(), dataTableType);
}
@Test
void throws_duplicate_type_exception() {
    // Registering a second non-replaceable transformer for the same target type fails
    // with a message describing both the new and the existing registration.
    registry.defineDataTableType(new DataTableType(
        Place.class,
        (TableTransformer<Place>) table -> new Place(table.cell(0, 0))));
    DuplicateTypeException exception = assertThrows(DuplicateTypeException.class,
        () -> registry.defineDataTableType(new DataTableType(
            Place.class,
            (TableTransformer<Place>) table -> new Place(table.cell(0, 0)))));

    assertThat(exception.getMessage(), is("" +
            "There already is a data table type registered that can supply class io.cucumber.datatable.Place.\n" +
            "You are trying to register a TableTransformer for class io.cucumber.datatable.Place.\n" +
            "The existing data table type registered a TableTransformer for class io.cucumber.datatable.Place.\n"));
}
/**
 * Fails if the map does not contain the given key (which may be null).
 */
public final void containsKey(@Nullable Object key) {
    // Assert against keySet() so the failure message reports "map.keySet()".
    check("keySet()").that(checkNotNull(actual).keySet()).contains(key);
}
@Test
public void failMapContainsKeyWithNull() {
    // containsKey(null) on a map without a null key must fail, reporting
    // the assertion as being about map.keySet().
    ImmutableMap<String, String> actual = ImmutableMap.of("a", "A");
    expectFailureWhenTestingThat(actual).containsKey(null);
    assertFailureKeys("value of", "expected to contain", "but was", "map was");
    assertFailureValue("value of", "map.keySet()");
    assertFailureValue("expected to contain", "null");
    assertFailureValue("but was", "[a]");
}
/**
 * Creates sharding conditions for an INSERT statement, from either its VALUES clause
 * or its INSERT ... SELECT clause, then appends generated-key conditions.
 *
 * @param sqlStatementContext the insert statement context.
 * @param params the statement parameters.
 * @return the created sharding conditions.
 */
public List<ShardingCondition> createShardingConditions(final InsertStatementContext sqlStatementContext, final List<Object> params) {
    final List<ShardingCondition> result;
    if (null == sqlStatementContext.getInsertSelectContext()) {
        result = createShardingConditionsWithInsertValues(sqlStatementContext, params);
    } else {
        result = createShardingConditionsWithInsertSelect(sqlStatementContext, params);
    }
    appendGeneratedKeyConditions(sqlStatementContext, result);
    return result;
}
@Test
void assertCreateShardingConditionsWithParameterMarkers() {
    // A parameter-marker insert value should produce one condition whose single
    // value records parameter marker index 0.
    InsertValueContext insertValueContext = new InsertValueContext(Collections.singleton(new ParameterMarkerExpressionSegment(0, 0, 0)), Collections.singletonList(1), 0);
    when(insertStatementContext.getInsertValueContexts()).thenReturn(Collections.singletonList(insertValueContext));
    when(shardingRule.findShardingColumn("foo_col_1", "foo_table")).thenReturn(Optional.of("foo_col_1"));
    List<ShardingCondition> shardingConditions = shardingConditionEngine.createShardingConditions(insertStatementContext, Collections.singletonList(1));
    assertThat(shardingConditions.size(), is(1));
    assertThat(shardingConditions.get(0).getValues().size(), is(1));
    assertThat(shardingConditions.get(0).getValues().get(0).getParameterMarkerIndexes(), is(Collections.singletonList(0)));
}
/**
 * Schedules a command after the given delay, wrapping it so the returned handle
 * can sever the reference to the command on cancellation.
 *
 * @param delay the delay before execution.
 * @param unit the unit of the delay.
 * @param command the task to run.
 * @return a cancellable handle for the scheduled task.
 */
@Override
public Cancellable schedule(final long delay, final TimeUnit unit, final Runnable command) {
    final IndirectRunnable wrapped = new IndirectRunnable(command);
    return new IndirectCancellable(_executor.schedule(delay, unit, wrapped), wrapped);
}
@Test
public void testSchedule() throws InterruptedException {
    // Schedule a task 50ms out; once it has executed, cancellation must fail.
    final CountDownLatch latch = new CountDownLatch(1);
    final Cancellable cancellable = _executor.schedule(50, TimeUnit.MILLISECONDS, new Runnable() {
        @Override
        public void run() {
            latch.countDown();
        }
    });
    assertTrue(latch.await(100, TimeUnit.MILLISECONDS));
    // Should not be able to cancel now
    assertFalse(cancellable.cancel(new Exception()));
}
/**
 * Returns whether the current page is the last one.
 * Pages are zero-based, so the last page index is totalPage - 1.
 */
public boolean isLast() {
    final int lastPageIndex = this.totalPage - 1;
    return this.page >= lastPageIndex;
}
@Test
public void isLastTest(){
    // 2 items per page, 10 items total -> 5 pages; pages are zero-based,
    // so the last page is index 4.
    final PageResult<String> result = new PageResult<>(4, 2, 10);
    assertTrue(result.isLast());
}
/**
 * Extracts the producer process id from a unique message id.
 * The pid is stored as a short located 12 bytes (2 + 4 + 4 + 2) from the end of
 * the decoded id; it is returned zero-extended as an unsigned 16-bit value.
 *
 * @param msgID the unique message id string.
 * @return the pid in the range 0..65535.
 */
public static int getPidFromID(String msgID) {
    byte[] decoded = UtilAll.string2bytes(msgID);
    ByteBuffer buffer = ByteBuffer.wrap(decoded);
    short pid = buffer.getShort(decoded.length - 2 - 4 - 4 - 2);
    return pid & 0x0000FFFF;
}
@Test
public void testGetPidFromID() {
    // Temporary fix on MacOS: compare only the low 16 bits of the pid.
    short pid = (short) UtilAll.getPid();
    String uniqID = MessageClientIDSetter.createUniqID();
    short pidFromID = (short) MessageClientIDSetter.getPidFromID(uniqID);
    assertThat(pid).isEqualTo(pidFromID);
}
/**
 * Returns the embedded metastore held by this object.
 */
public EmbeddedMetaStore getEmbeddedMetaStore() {
    return this.embeddedMetaStore;
}
@Test
public void testGetEmbeddedMetaStore() {
    // The meta object must always expose a non-null embedded metastore.
    assertNotNull( meta.getEmbeddedMetaStore() );
}
/**
 * Finds the first configured user whose grantee pattern accepts the given grantee.
 *
 * @param grantee the grantee (user/host) to match.
 * @return the matching user, or empty when none matches.
 */
@HighFrequencyInvocation
public Optional<ShardingSphereUser> findUser(final Grantee grantee) {
    for (ShardingSphereUser each : configuration.getUsers()) {
        if (each.getGrantee().accept(grantee)) {
            return Optional.of(each);
        }
    }
    return Optional.empty();
}
@Test
void assertNotFindUser() {
    // No configured user matches grantee admin@127.0.0.1, so the lookup is empty.
    assertFalse(createAuthorityRule().findUser(new Grantee("admin", "127.0.0.1")).isPresent());
}
/**
 * Invoked before the checkpoint barrier for the given checkpoint is emitted, giving
 * implementations a chance to perform pre-barrier work (e.g. flushing pending output).
 *
 * @param checkpointId id of the checkpoint the upcoming barrier belongs to
 * @throws Exception if the pre-barrier work fails
 */
public abstract void prepareSnapshotPreBarrier(long checkpointId) throws Exception;
@Test
void testPrepareCheckpointPreBarrier() throws Exception {
    // Each operator increments the shared counter when notified; after the
    // chain-wide call all three operators must have run.
    final AtomicInteger intRef = new AtomicInteger();

    final OneInputStreamOperator<String, String> one = new ValidatingOperator(intRef, 0);
    final OneInputStreamOperator<String, String> two = new ValidatingOperator(intRef, 1);
    final OneInputStreamOperator<String, String> three = new ValidatingOperator(intRef, 2);

    final OperatorChain<?, ?> chain = setupOperatorChain(one, two, three);

    chain.prepareSnapshotPreBarrier(ValidatingOperator.CHECKPOINT_ID);

    assertThat(intRef).hasValue(3);
}
/**
 * Refreshes the presence entry for the given account/device by executing the
 * renewal script against the corresponding presence key with this manager's id
 * and the configured expiration in seconds.
 *
 * @param accountUuid the account identifier.
 * @param deviceId the device identifier.
 */
public void renewPresence(final UUID accountUuid, final byte deviceId) {
    final String presenceKey = getPresenceKey(accountUuid, deviceId);
    renewPresenceScript.execute(
        List.of(presenceKey),
        List.of(managerId, String.valueOf(PRESENCE_EXPIRATION_SECONDS)));
}
@Test
void testRenewPresence() {
    final UUID accountUuid = UUID.randomUUID();
    final byte deviceId = 1;

    final String presenceKey = ClientPresenceManager.getPresenceKey(accountUuid, deviceId);

    // Seed the presence key without any TTL (ttl == -1 means "no expiry").
    REDIS_CLUSTER_EXTENSION.getRedisCluster().useCluster(connection ->
        connection.sync().set(presenceKey, clientPresenceManager.getManagerId()));

    {
        final int ttl = REDIS_CLUSTER_EXTENSION.getRedisCluster().withCluster(connection ->
            connection.sync().ttl(presenceKey).intValue());
        assertEquals(-1, ttl);
    }

    // Renewing presence must apply a positive expiration to the key.
    clientPresenceManager.renewPresence(accountUuid, deviceId);

    {
        final int ttl = REDIS_CLUSTER_EXTENSION.getRedisCluster().withCluster(connection ->
            connection.sync().ttl(presenceKey).intValue());
        assertTrue(ttl > 0);
    }
}
/**
 * Fluent setter: records the server name and returns this builder for chaining.
 *
 * @param aServerName the server name (may be a full JMX service URL).
 * @return this builder.
 */
public JMXUriBuilder withServerName(String aServerName) {
    this.setServerName(aServerName);
    return this;
}
@Test
public void withServerName() {
    // A full JMX service URL passed as the server name is used verbatim in the URI.
    assertEquals("jmx:service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi",
            new JMXUriBuilder().withServerName("service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi").toString());
}
/**
 * Serializes the key/value and writes them to the wrapped store, recording the
 * put latency (and optionally end-to-end latency) in metrics.
 *
 * @param key the key to store; must not be null.
 * @param value the value to store.
 * @throws NullPointerException if {@code key} is null.
 * @throws ProcessorStateException rethrown with the key/value resolved into the message.
 */
@Override
public void put(final K key, final V value) {
    Objects.requireNonNull(key, "key cannot be null");
    try {
        maybeMeasureLatency(
            () -> wrapped().put(keyBytes(key), serdes.rawValue(value)),
            time,
            putSensor);
        maybeRecordE2ELatency();
    } catch (final ProcessorStateException e) {
        // The wrapped exception's message is a format string; fill in key and value.
        throw new ProcessorStateException(String.format(e.getMessage(), key, value), e);
    }
}
@Test
public void shouldThrowNullPointerOnPutIfKeyIsNull() {
    // A null key must be rejected before any store interaction.
    setUpWithoutContext();
    assertThrows(NullPointerException.class, () -> metered.put(null, VALUE));
}
/**
 * Merges an optical port's configuration into its port description: a configured
 * name overrides the port-number name, configured annotations are combined in,
 * and a requested type override is logged but ignored.
 */
@Override
public PortDescription combine(ConnectPoint cp, PortDescription descr) {
    checkNotNull(cp);

    // short-circuit for non-optical ports
    // must be removed if we need type override
    if (descr != null && !optical.contains(descr.type())) {
        return descr;
    }

    // Without both a base description and a config there is nothing to merge.
    OpticalPortConfig opc = lookupConfig(cp);
    if (descr == null || opc == null) {
        return descr;
    }

    PortNumber number = descr.portNumber();
    // handle PortNumber "name" portion: a non-empty configured name replaces the
    // description's name while keeping the numeric part unchanged
    if (!opc.name().isEmpty()) {
        number = PortNumber.portNumber(descr.portNumber().toLong(), opc.name());
    }

    // handle additional annotations
    SparseAnnotations annotations = combine(opc, descr.annotations());

    // (Future work) handle type overwrite?
    Type type = firstNonNull(opc.type(), descr.type());
    if (type != descr.type()) {
        // TODO: Do we need to be able to overwrite Port.Type?
        log.warn("Port type overwrite requested for {}. Ignoring.", cp);
    }

    return updateDescription(number, annotations, descr);
}
@Test
public void testConfigAddStaticLambda() {
    // Configure an ODUCLT port with a name and a static lambda; combining with the
    // base description must keep the description's port name and expose the
    // static-port and static-lambda annotations.
    opc.portType(Port.Type.ODUCLT)
            .portNumberName(PORT_NUMBER)
            .staticLambda(CFG_STATIC_LAMBDA);

    PortDescription res;
    res = oper.combine(CP, N_DESC);
    assertEquals("Original port name expected", DESC_PORT_NAME, res.portNumber().name());
    assertEquals(DESC_STATIC_PORT, res.annotations().value(AnnotationKeys.STATIC_PORT));
    long sl = Long.valueOf(res.annotations().value(AnnotationKeys.STATIC_LAMBDA));
    assertEquals(CFG_STATIC_LAMBDA, sl);
}
/**
 * Fetches the status of a job by requesting its report via the client protocol,
 * filling in a job-file path and tracking URL before converting the report.
 * Returns null when no report is available for the job.
 */
public JobStatus getJobStatus(JobID oldJobID) throws IOException {
    org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
        TypeConverter.toYarn(oldJobID);
    GetJobReportRequest request =
        recordFactory.newRecordInstance(GetJobReportRequest.class);
    request.setJobId(jobId);
    JobReport report = ((GetJobReportResponse) invoke("getJobReport",
        GetJobReportRequest.class, request)).getJobReport();
    JobStatus jobStatus = null;
    if (report != null) {
        // Backfill a job-file path when the report came back without one.
        if (StringUtils.isEmpty(report.getJobFile())) {
            String jobFile = MRApps.getJobFile(conf, report.getUser(), oldJobID);
            report.setJobFile(jobFile);
        }
        // Prefer the report's tracking URL; fall back to the configured one.
        String historyTrackingUrl = report.getTrackingUrl();
        String url = StringUtils.isNotEmpty(historyTrackingUrl)
            ? historyTrackingUrl : trackingUrl;
        jobStatus = TypeConverter.fromYarn(report, url);
    }
    return jobStatus;
}
@Test
public void testUnknownAppInRM() throws Exception {
    // When the RM does not know the application, the history server's report
    // must still yield a non-null job status.
    MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
    when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(
        getJobReportResponse());
    ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
        historyServerProxy, getRMDelegate());

    JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
    Assert.assertNotNull(jobStatus);
}
/**
 * Computes total daily energy expenditure as BMR times the activity level's
 * multiplier, rounded to two decimal places (HALF_DOWN).
 *
 * @param activeLevel the activity level; a null level yields 0.
 * @return the TDEE value.
 */
public BigDecimal calculateTDEE(ActiveLevel activeLevel) {
    if (activeLevel == null) {
        return BigDecimal.valueOf(0);
    }
    // Note: getMultiplayer() is the (misspelled) accessor on ActiveLevel.
    final BigDecimal multiplier = BigDecimal.valueOf(activeLevel.getMultiplayer());
    return multiplier.multiply(BMR).setScale(2, RoundingMode.HALF_DOWN);
}
@Test
void calculateTDEE_VERY_ACTIVE() {
    // TDEE = BMR * the VERY activity level's multiplier, scaled to 2 decimals.
    BigDecimal TDEE = bmrCalculator.calculate(attributes).calculateTDEE(ActiveLevel.VERY);
    assertEquals(new BigDecimal("3523.31"), TDEE);
}
/**
 * Creates a thread pool via {@code ExecutorBuilder}.
 * Non-positive sizes fall back to the builder's default core pool size.
 *
 * @param corePoolSize requested core pool size; ignored when {@code <= 0}.
 * @return the built executor.
 */
public static ThreadPoolExecutor newExecutor(int corePoolSize) {
    final ExecutorBuilder builder = ExecutorBuilder.create();
    final boolean explicitSize = corePoolSize > 0;
    if (explicitSize) {
        builder.setCorePoolSize(corePoolSize);
    }
    return builder.build();
}
@Test
public void newExecutorTest(){
    ThreadPoolExecutor executor = ThreadUtil.newExecutor(5);
    // Verify the pool's core thread count matches the requested size.
    assertEquals(5, executor.getCorePoolSize());
}
/**
 * Checks whether a PEN request is allowed by looking up prior requests for the same
 * bsn/docType/sequenceNo and enforcing the too-soon/too-often rules.
 *
 * @param request the incoming PEN request.
 * @return the OK status map when the request is allowed.
 * @throws PenRequestException when the request violates the rate rules.
 * @throws SharedServiceClientException propagated from downstream checks.
 */
public Map<String, String> penRequestAllowed(PenRequest request) throws PenRequestException, SharedServiceClientException {
    final List<PenRequestStatus> previousRequests = repository.findByBsnAndDocTypeAndSequenceNo(
        request.getBsn(), request.getDocType(), request.getSequenceNo());
    checkIfTooSoonOrTooOften(previousRequests);
    return statusOK;
}
@Test
public void pinRequestTooSoonThrowsDWS1Exception() throws PenRequestException, SharedServiceClientException {
    // create a pinRequestStatus with a RequestDateTime in the repo
    status.setRequestDatetime(LocalDateTime.now(clock));
    mockStatusList.add(status);

    // return arraylist with one dummy pinrequest
    Mockito.when(mockRepository.findByBsnAndDocTypeAndSequenceNo(request.getBsn(), request.getDocType(),
        request.getSequenceNo())).thenReturn(mockStatusList);

    // A request made "now" is too soon and must be rejected with error code DWS1.
    Exception exception = assertThrows(PenRequestException.class, () -> {
        service.penRequestAllowed(request);
    });
    assertEquals("DWS1", exception.getMessage());
}
/**
 * Parses MLSD reply lines into path objects for the given directory. Each line is a
 * semicolon-separated fact list ("Type=...;Perm=...;... name"); entries of type
 * file/dir/symlink are mapped to paths with size, owner, permission and timestamp
 * attributes. Throws when no line could be parsed successfully.
 */
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
    final AttributedList<Path> children = new AttributedList<>();
    if(replies.isEmpty()) {
        return children;
    }
    // At least one entry successfully parsed
    boolean success = false;
    for(String line : replies) {
        final Map<String, Map<String, String>> file = this.parseFacts(line);
        if(null == file) {
            log.error(String.format("Error parsing line %s", line));
            continue;
        }
        for(Map.Entry<String, Map<String, String>> f : file.entrySet()) {
            final String name = f.getKey();
            // size -- Size in octets
            // modify -- Last modification time
            // create -- Creation time
            // type -- Entry type
            // unique -- Unique id of file/directory
            // perm -- File permissions, whether read, write, execute is allowed for the login id.
            // lang -- Language of the file name per IANA [11] registry.
            // media-type -- MIME media-type of file contents per IANA registry.
            // charset -- Character set per IANA registry (if not UTF-8)
            final Map<String, String> facts = f.getValue();
            if(!facts.containsKey("type")) {
                log.error(String.format("No type fact in line %s", line));
                continue;
            }
            final Path parsed;
            if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory));
            }
            else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file));
            }
            else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink));
                // Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar
                final String[] type = facts.get("type").split(":");
                if(type.length == 2) {
                    final String target = type[1];
                    // Absolute targets are used as-is; relative ones resolve against the directory.
                    if(target.startsWith(String.valueOf(Path.DELIMITER))) {
                        parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
                    }
                    else {
                        parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file)));
                    }
                }
                else {
                    log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line));
                    continue;
                }
            }
            else {
                log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line));
                continue;
            }
            if(!success) {
                // A directory entry carrying the listed directory's own name suggests
                // the server echoed the request rather than listing contents.
                if(parsed.isDirectory() && directory.getName().equals(name)) {
                    log.warn(String.format("Possibly bogus response line %s", line));
                }
                else {
                    success = true;
                }
            }
            if(name.equals(".") || name.equals("..")) {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Skip %s", name));
                }
                continue;
            }
            if(facts.containsKey("size")) {
                parsed.attributes().setSize(Long.parseLong(facts.get("size")));
            }
            if(facts.containsKey("unix.uid")) {
                parsed.attributes().setOwner(facts.get("unix.uid"));
            }
            if(facts.containsKey("unix.owner")) {
                parsed.attributes().setOwner(facts.get("unix.owner"));
            }
            if(facts.containsKey("unix.gid")) {
                parsed.attributes().setGroup(facts.get("unix.gid"));
            }
            if(facts.containsKey("unix.group")) {
                parsed.attributes().setGroup(facts.get("unix.group"));
            }
            if(facts.containsKey("unix.mode")) {
                parsed.attributes().setPermission(new Permission(facts.get("unix.mode")));
            }
            else if(facts.containsKey("perm")) {
                // Mapping "perm" flags to permissions is optional (preference-controlled).
                if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) {
                    Permission.Action user = Permission.Action.none;
                    final String flags = facts.get("perm");
                    if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) {
                        // RETR command may be applied to that object
                        // Listing commands, LIST, NLST, and MLSD may be applied
                        user = user.or(Permission.Action.read);
                    }
                    if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) {
                        user = user.or(Permission.Action.write);
                    }
                    if(StringUtils.contains(flags, 'e')) {
                        // CWD command naming the object should succeed
                        user = user.or(Permission.Action.execute);
                        if(parsed.isDirectory()) {
                            user = user.or(Permission.Action.read);
                        }
                    }
                    final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none);
                    parsed.attributes().setPermission(permission);
                }
            }
            if(facts.containsKey("modify")) {
                // Time values are always represented in UTC
                parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify")));
            }
            if(facts.containsKey("create")) {
                // Time values are always represented in UTC
                parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create")));
            }
            children.add(parsed);
        }
    }
    if(!success) {
        throw new FTPInvalidListException(children);
    }
    return children;
}
@Test
public void testMlsd() throws Exception {
    // Parse three MLSD fact lines: a writable file, a file whose name starts with
    // a space, and a directory.
    Path path = new Path(
        "/www", EnumSet.of(Path.Type.directory));

    String[] replies = new String[]{
        "Type=file;Perm=awr;Unique=keVO1+8G4; writable",
        "Type=file;Perm=r;Unique=keVO1+IH4; leading space",
        "Type=dir;Perm=cpmel;Unique=keVO1+7G4; incoming",
    };

    final AttributedList<Path> children = new FTPMlsdListResponseReader()
        .read(path, Arrays.asList(replies));
    assertEquals(3, children.size());
    assertEquals("writable", children.get(0).getName());
    assertTrue(children.get(0).isFile());
    assertTrue(children.get(0).attributes().getPermission().isReadable());
    assertTrue(children.get(0).attributes().getPermission().isWritable());
    // Leading space in the entry name must be preserved.
    assertEquals(" leading space", children.get(1).getName());
    assertTrue(children.get(1).isFile());
    assertEquals(Permission.EMPTY, children.get(1).attributes().getPermission());
    assertTrue(children.get(2).isDirectory());
    assertEquals(Permission.EMPTY, children.get(2).attributes().getPermission());
}
/**
 * Metadata-injection setter for the IGNORE_INSERT_ERRORS flag.
 * Only the literal "Y"/"y" enables ignoring insert errors; anything else disables it.
 *
 * @param value the injected string value.
 */
@Injection( name = "IGNORE_INSERT_ERRORS" )
public void metaSetIgnoreInsertErrors( String value ) {
    final boolean ignore = "Y".equalsIgnoreCase( value );
    setIgnoreErrors( ignore );
}
@Test
public void metaSetIgnoreInsertErrors() {
    // Only "Y" (case-insensitive, exact) enables ignoring insert errors.
    TableOutputMeta tableOutputMeta = new TableOutputMeta();

    tableOutputMeta.metaSetIgnoreInsertErrors( "Y" );
    assertTrue( tableOutputMeta.ignoreErrors() );
    tableOutputMeta.metaSetIgnoreInsertErrors( "N" );
    assertFalse( tableOutputMeta.ignoreErrors() );
    tableOutputMeta.metaSetIgnoreInsertErrors( "Ynot" );
    assertFalse( tableOutputMeta.ignoreErrors() );
}
/**
 * Sorts the URL's query parameters lexicographically by their raw "key=value"
 * strings, preserving any fragment and leaving the parameter encoding untouched.
 *
 * @return this normalizer, for chaining.
 */
public URLNormalizer sortQueryParameters() {
    // Does it have query parameters?
    if (!url.contains("?")) {
        return this;
    }
    // It does, so proceed
    List<String> keyValues = new ArrayList<>();
    String queryString = StringUtils.substringAfter(url, "?");

    // extract and remove any fragments
    String fragment = StringUtils.substringAfter(queryString, "#");
    if (StringUtils.isNotBlank(fragment)) {
        fragment = "#" + fragment;
    }
    queryString = StringUtils.substringBefore(queryString, "#");

    // Split the query string on '&' into raw "key=value" tokens.
    String[] params = StringUtils.split(queryString, '&');
    for (String param : params) {
        keyValues.add(param);
    }
    // sort it so that query string are in order
    Collections.sort(keyValues);
    String sortedQueryString = StringUtils.join(keyValues, '&');
    if (StringUtils.isNotBlank(sortedQueryString)) {
        url = StringUtils.substringBefore(
                url, "?") + "?" + sortedQueryString + fragment;
    }
    return this;
}
@Test
public void testSortQueryParameters() {
    // test with fragment
    s = "http://example.com?z=1&a=1#frag";
    t = "http://example.com?a=1&z=1#frag";
    assertEquals(t, n(s).sortQueryParameters().toString());

    // test duplicate params
    s = "http://example.com?z=1&a=1&a=1";
    t = "http://example.com?a=1&a=1&z=1";
    assertEquals(t, n(s).sortQueryParameters().toString());

    s = "http://www.example.com/display?lang=en&article=fred";
    t = "http://www.example.com/display?article=fred&lang=en";
    assertEquals(t, n(s).sortQueryParameters().toString());
    s = "http://www.example.com/?z=bb&y=cc&z=aa";
    t = "http://www.example.com/?y=cc&z=aa&z=bb";
    assertEquals(t, n(s).sortQueryParameters().toString());
    // Sorting should not change encoding
    s = "http://www.example.com/spa ce?z=b%2Fb&y=c c&z=a/a";
    t = "http://www.example.com/spa ce?y=c c&z=a/a&z=b%2Fb";
    assertEquals(t, n(s).sortQueryParameters().toString());
    // Valueless and empty-valued parameters keep their raw form after sorting.
    s = "http://www.example.com/?z&y=c c&y=c c&a&d=&";
    t = "http://www.example.com/?a&d=&y=c c&y=c c&z";
    assertEquals(t, n(s).sortQueryParameters().toString());
}
/**
 * Returns the confirmed meeting for the given meeting uuid, wrapped in the
 * standard API response envelope.
 *
 * @param uuid the meeting's uuid path variable.
 * @return the confirmed meeting response.
 */
@GetMapping("/api/v1/meetings/{uuid}/confirm")
public MomoApiResponse<ConfirmedMeetingResponse> findConfirmedMeeting(@PathVariable String uuid) {
    return new MomoApiResponse<>(meetingConfirmService.findByUuid(uuid));
}
@DisplayName("확정된 약속을 조회하면 200 상태 코드를 응답한다.")
@Test
void findConfirmedMeeting() {
    // Persist a locked meeting with a confirmed entry, then query the confirm endpoint
    // and expect HTTP 200.
    Meeting meeting = MeetingFixture.MOVIE.create();
    meeting.lock();
    meeting = meetingRepository.save(meeting);
    confirmedMeetingRepository.save(ConfirmedMeetingFixture.MOVIE.create(meeting));

    RestAssured.given().log().all()
            .pathParam("uuid", meeting.getUuid())
            .contentType(ContentType.JSON)
            .when().get("/api/v1/meetings/{uuid}/confirm")
            .then().log().all()
            .statusCode(HttpStatus.OK.value());
}
/**
 * Determines whether a share of the given type can be created for the file.
 */
@Override
public boolean isSupported(final Path file, final Type type) {
    // The trash folder can never be shared.
    if(StringUtils.equals(EueResourceIdProvider.TRASH, file.attributes().getFileId())) {
        return false;
    }
    // Upload (filedrop) links only apply to directories.
    if(type == Type.upload) {
        return file.isDirectory();
    }
    // Other share types are only offered when no link exists yet.
    return DescriptiveUrl.EMPTY == file.attributes().getLink();
}
@Test
public void testUploadUrlForFile() {
    // Upload (filedrop) shares are only supported for folders, not plain files.
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final Path file = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final EueShareFeature f = new EueShareFeature(session, fileid);
    assertFalse(f.isSupported(file, Share.Type.upload));
}
/**
 * Registers a rate+total sensor for the given scope/entity/operation, tagged for
 * the current thread.
 *
 * @param scopeName metric scope (mapped to the metrics group name).
 * @param entityName entity the sensor belongs to.
 * @param operationName operation the sensor measures.
 * @param recordingLevel level at which the sensor records.
 * @param tags additional tags, given as key-value pairs.
 * @return the registered sensor.
 */
@Override
public Sensor addRateTotalSensor(final String scopeName,
                                 final String entityName,
                                 final String operationName,
                                 final Sensor.RecordingLevel recordingLevel,
                                 final String... tags) {
    final String threadId = Thread.currentThread().getName();
    return customInvocationRateAndCountSensor(
        threadId,
        groupNameFromScope(scopeName),
        entityName,
        operationName,
        customizedTags(threadId, scopeName, entityName, tags),
        recordingLevel
    );
}
@Test
public void shouldThrowIfRateTotalSensorIsAddedWithOddTags() {
    // Tags must be supplied as key-value pairs; an odd count is rejected.
    final IllegalArgumentException exception = assertThrows(
        IllegalArgumentException.class,
        () -> streamsMetrics.addRateTotalSensor(
            SCOPE_NAME,
            ENTITY_NAME,
            OPERATION_NAME,
            RecordingLevel.DEBUG,
            "bad-tag")
    );
    assertThat(exception.getMessage(), is("Tags needs to be specified in key-value pairs"));
}
public List<ChangeStreamRecord> toChangeStreamRecords( PartitionMetadata partition, ChangeStreamResultSet resultSet, ChangeStreamResultSetMetadata resultSetMetadata) { if (this.isPostgres()) { // In PostgresQL, change stream records are returned as JsonB. return Collections.singletonList( toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata)); } // In GoogleSQL, change stream records are returned as an array of structs. return resultSet.getCurrentRowAsStruct().getStructList(0).stream() .flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata)) .collect(Collectors.toList()); }
@Test
public void testMappingStructRowWithUnknownModTypeAndValueCaptureTypeToDataChangeRecord() {
    // A struct row carrying UNKNOWN mod type and UNKNOWN value capture type must
    // still round-trip into an equal DataChangeRecord.
    final DataChangeRecord dataChangeRecord =
        new DataChangeRecord(
            "partitionToken",
            Timestamp.ofTimeSecondsAndNanos(10L, 20),
            "transactionId",
            false,
            "1",
            "tableName",
            Arrays.asList(
                new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
                new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
            Collections.singletonList(
                new Mod("{\"column1\":\"value1\"}", null, "{\"column2\":\"newValue2\"}")),
            ModType.UNKNOWN,
            ValueCaptureType.UNKNOWN,
            10L,
            2L,
            "transactionTag",
            true,
            null);
    final Struct struct = recordsWithUnknownModTypeAndValueCaptureType(dataChangeRecord);
    ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
    when(resultSet.getCurrentRowAsStruct()).thenReturn(struct);

    assertEquals(
        Collections.singletonList(dataChangeRecord),
        mapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
}
/**
 * Builds the rule configuration fragment describing what the DROP statement removes:
 * the named data source groups plus any load balancers left unused after the drop.
 *
 * @param sqlStatement the DROP READWRITE_SPLITTING RULE statement
 * @return configuration holding only the rules and load balancers to be dropped
 */
@Override
public ReadwriteSplittingRuleConfiguration buildToBeDroppedRuleConfiguration(final DropReadwriteSplittingRuleStatement sqlStatement) {
    Collection<ReadwriteSplittingDataSourceGroupRuleConfiguration> toBeDroppedDataSourceGroups = new LinkedList<>();
    Map<String, AlgorithmConfiguration> toBeDroppedLoadBalancers = new HashMap<>();
    for (String each : sqlStatement.getNames()) {
        // Only the name matters for a drop; the remaining fields are irrelevant.
        toBeDroppedDataSourceGroups.add(new ReadwriteSplittingDataSourceGroupRuleConfiguration(each, null, null, null));
        dropRule(each);
    }
    // After removing the rules, collect load balancers no longer referenced by any rule.
    findUnusedLoadBalancers().forEach(each -> toBeDroppedLoadBalancers.put(each, rule.getConfiguration().getLoadBalancers().get(each)));
    return new ReadwriteSplittingRuleConfiguration(toBeDroppedDataSourceGroups, toBeDroppedLoadBalancers);
}
// A load balancer still referenced by a remaining rule must not be included in the drop set.
@Test
void assertBuildToBeDroppedRuleConfigurationWithInUsedLoadBalancer() {
    ReadwriteSplittingRuleConfiguration ruleConfig = createMultipleCurrentRuleConfigurations();
    ReadwriteSplittingRule rule = mock(ReadwriteSplittingRule.class);
    when(rule.getConfiguration()).thenReturn(ruleConfig);
    executor.setRule(rule);
    ReadwriteSplittingRuleConfiguration actual = executor.buildToBeDroppedRuleConfiguration(createSQLStatement());
    assertThat(actual.getDataSourceGroups().size(), is(1));
    assertTrue(actual.getLoadBalancers().isEmpty());
}
/**
 * Initializes the servlet: resolves the {@code MetricRegistry} from the servlet context
 * (unless one was injected via the constructor), then reads optional CORS/JSONP settings.
 *
 * @throws ServletException if no registry was injected and the context attribute is
 *         missing or of the wrong type
 */
@Override
public void init(ServletConfig config) throws ServletException {
    super.init(config);
    final ServletContext context = config.getServletContext();
    if (null == registry) {
        // Fall back to the registry published in the servlet context.
        final Object registryAttr = context.getAttribute(METRICS_REGISTRY);
        if (registryAttr instanceof MetricRegistry) {
            this.registry = (MetricRegistry) registryAttr;
        } else {
            throw new ServletException("Couldn't find a MetricRegistry instance.");
        }
    }
    this.allowedOrigin = context.getInitParameter(ALLOWED_ORIGIN);
    this.jsonpParamName = context.getInitParameter(CALLBACK_PARAM);
    setupMetricsModule(context);
}
// A context attribute of the wrong type must not be silently accepted: init() throws.
@Test(expected = ServletException.class)
public void constructorWithRegistryAsArgumentUsesServletConfigWhenNullButWrongTypeInContext() throws Exception {
    final ServletContext servletContext = mock(ServletContext.class);
    final ServletConfig servletConfig = mock(ServletConfig.class);
    when(servletConfig.getServletContext()).thenReturn(servletContext);
    when(servletContext.getAttribute(eq(MetricsServlet.METRICS_REGISTRY)))
            .thenReturn("IRELLEVANT_STRING");

    final MetricsServlet metricsServlet = new MetricsServlet(null);
    metricsServlet.init(servletConfig);
}
/**
 * Inserts a copy of the given mapping at the requested position.
 *
 * @param index position at which the clone is inserted
 * @param toClone the mapping to copy
 * @return the newly inserted clone
 */
public FactMapping addFactMapping(int index, FactMapping toClone) {
    final FactMapping cloned = toClone.cloneFactMapping();
    factMappings.add(index, cloned);
    return cloned;
}
// Adding at an out-of-range index (1 on an empty descriptor, presumably) must be rejected.
@Test
public void addFactMappingByIndexAndFactIdentifierAndExpressionIdentifierFail() {
    assertThatIllegalArgumentException().isThrownBy(() -> modelDescriptor.addFactMapping(1, factIdentifier, expressionIdentifier));
}
/**
 * Authenticates the iRODS session using the bookmark's stored credentials.
 *
 * @throws LoginFailureException if the server rejects the credentials
 * @throws BackgroundException for any other Jargon failure, mapped to a Cyberduck exception
 */
@Override
public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
    try {
        final IRODSAccount account = client.getIRODSAccount();
        // Copy the bookmark credentials into the Jargon account before authenticating.
        final Credentials credentials = host.getCredentials();
        account.setUserName(credentials.getUsername());
        account.setPassword(credentials.getPassword());
        final AuthResponse response = client.getIRODSAccessObjectFactory().authenticateIRODSAccount(account);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Connected to %s", response.getStartupResponse()));
        }
        if(!response.isSuccessful()) {
            throw new LoginFailureException(MessageFormat.format(LocaleFactory.localizedString(
                "Login {0} with username and password", "Credentials"), BookmarkNameProvider.toString(host)));
        }
    }
    catch(JargonException e) {
        throw new IRODSExceptionMappingService().map(e);
    }
}
// Integration test: login must succeed even when the default path contains whitespace.
@Test
public void testLoginWhitespaceHomeDirectory() throws Exception {
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
    final Profile profile = new ProfilePlistReader(factory).read(
        this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
    final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
        PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
    ));
    // Path with an embedded space — the condition under test.
    host.setDefaultPath("/cyber duck");
    final IRODSSession session = new IRODSSession(host);
    assertNotNull(session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()));
    assertTrue(session.isConnected());
    assertNotNull(session.getClient());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    assertTrue(session.isConnected());
    session.close();
    assertFalse(session.isConnected());
}
/**
 * Schedules an asynchronous check of the target, throttled so that a check is skipped when
 * one is already in flight or when the previous result is newer than the minimum gap.
 *
 * @return the future for the newly scheduled check, or empty if the check was skipped
 */
@Override
public synchronized Optional<ListenableFuture<V>> schedule(
    Checkable<K, V> target, K context) {
  // Never run two concurrent checks of the same target.
  if (checksInProgress.containsKey(target)) {
    return Optional.empty();
  }

  // Throttle: skip if the last completed check is still fresh.
  final LastCheckResult<V> result = completedChecks.get(target);
  if (result != null) {
    final long msSinceLastCheck = timer.monotonicNow() - result.completedAt;
    if (msSinceLastCheck < minMsBetweenChecks) {
      LOG.debug("Skipped checking {}. Time since last check {}ms "
          + "is less than the min gap {}ms.",
          target, msSinceLastCheck, minMsBetweenChecks);
      return Optional.empty();
    }
  }

  LOG.info("Scheduling a check for {}", target);
  final ListenableFuture<V> lfWithoutTimeout = executorService.submit(
      new Callable<V>() {
        @Override
        public V call() throws Exception {
          return target.check(context);
        }
      });
  // Wrap with a timeout only when one is configured (diskCheckTimeout > 0).
  final ListenableFuture<V> lf;
  if (diskCheckTimeout > 0) {
    lf = TimeoutFuture
        .create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS,
            scheduledExecutorService);
  } else {
    lf = lfWithoutTimeout;
  }
  checksInProgress.put(target, lf);
  // Callback records the result and removes the in-progress entry when the check finishes.
  addResultCachingCallback(target, lf);
  return Optional.of(lf);
}
// While a check is in flight, a second schedule() for the same target returns empty.
@Test (timeout=60000)
public void testConcurrentChecks() throws Exception {
    final StalledCheckable target = new StalledCheckable();
    final FakeTimer timer = new FakeTimer();
    ThrottledAsyncChecker<Boolean, Boolean> checker =
        new ThrottledAsyncChecker<>(timer, MIN_ERROR_CHECK_GAP, 0,
            getExecutorService());
    final Optional<ListenableFuture<Boolean>> olf1 =
        checker.schedule(target, true);
    final Optional<ListenableFuture<Boolean>> olf2 =
        checker.schedule(target, true);
    // Ensure that concurrent requests return the future object
    // for the first caller.
    assertTrue(olf1.isPresent());
    assertFalse(olf2.isPresent());
}
/**
 * Removes the entry for the given key from the off-heap slot array.
 *
 * @return the value previously stored at the key, or {@code nullValue} if the key was absent
 */
@Override
public long remove(long key) {
    final long valueAddr = hsa.get(key);
    if (valueAddr == NULL_ADDRESS) {
        // Key not present in the slot array.
        return nullValue;
    }
    // Read the value out of native memory before releasing the slot.
    final long oldValue = mem.getLong(valueAddr);
    hsa.remove(key);
    return oldValue;
}
// With assertions enabled, remove(key, value) with the sentinel MISSING_VALUE trips an assert.
@Test(expected = AssertionError.class)
@RequireAssertEnabled
public void test_removeIfEquals_Value() {
    map.remove(newKey(), MISSING_VALUE);
}
/**
 * Prepares an application for activation: preprocesses the package, builds models,
 * and — unless this is a dry run — distributes the package and writes state,
 * endpoint certificate metadata and container endpoints to ZooKeeper.
 *
 * @return the result of the preparation
 * @throws InvalidApplicationException if model building rejects the application package
 */
public PrepareResult prepare(HostValidator hostValidator, DeployLogger logger, PrepareParams params,
                             Optional<ApplicationVersions> activeApplicationVersions, Instant now,
                             File serverDbSessionDir, ApplicationPackage applicationPackage,
                             SessionZooKeeperClient sessionZooKeeperClient) {
    ApplicationId applicationId = params.getApplicationId();
    Preparation preparation = new Preparation(hostValidator, logger, params, activeApplicationVersions,
                                              TenantRepository.getTenantPath(applicationId.tenant()),
                                              serverDbSessionDir, applicationPackage, sessionZooKeeperClient,
                                              onnxModelCost, endpointCertificateSecretStores);
    preparation.preprocess();
    try {
        AllocatedHosts allocatedHosts = preparation.buildModels(now);
        preparation.makeResult(allocatedHosts);
        if ( ! params.isDryRun()) {
            // Persist only for real deployments; dry runs must leave no state behind.
            FileReference fileReference = preparation.triggerDistributionOfApplicationPackage();
            preparation.writeStateZK(fileReference);
            preparation.writeEndpointCertificateMetadataZK();
            preparation.writeContainerEndpointsZK();
        }
        log.log(Level.FINE, () -> "time used " + params.getTimeoutBudget().timesUsed() + " : " + applicationId);
        return preparation.result();
    }
    catch (IllegalArgumentException e) {
        // Preserve InvalidApplicationException as-is; wrap any other argument failure.
        if (e instanceof InvalidApplicationException) throw e;
        throw new InvalidApplicationException("Invalid application package", e);
    }
}
// Verifies that container endpoints supplied via PrepareParams are written to ZooKeeper,
// surfaced to the model context, kept when omitted, and cleared when explicitly empty.
@Test
public void require_that_container_endpoints_are_written_and_used() throws Exception {
    var modelFactory = new TestModelFactory(version123);
    preparer = createPreparer(new ModelFactoryRegistry(List.of(modelFactory)),
                              HostProvisionerProvider.empty());
    var endpoints = "[\n" +
                    "  {\n" +
                    "    \"clusterId\": \"foo\",\n" +
                    "    \"names\": [\n" +
                    "      \"foo.app1.tenant1.global.vespa.example.com\",\n" +
                    "      \"rotation-042.vespa.global.routing\"\n" +
                    "    ],\n" +
                    "    \"scope\": \"global\", \n" +
                    "    \"routingMethod\": \"shared\"\n" +
                    "  },\n" +
                    "  {\n" +
                    "    \"clusterId\": \"bar\",\n" +
                    "    \"names\": [\n" +
                    "      \"bar.app1.tenant1.global.vespa.example.com\",\n" +
                    "      \"rotation-043.vespa.global.routing\"\n" +
                    "    ],\n" +
                    "    \"scope\": \"global\",\n" +
                    "    \"routingMethod\": \"sharedLayer4\"\n" +
                    "  }\n" +
                    "]";
    var applicationId = applicationId("test");
    var params = new PrepareParams.Builder().applicationId(applicationId)
            .containerEndpoints(endpoints)
            .build();
    prepare(new File("src/test/resources/deploy/hosted-app"), params);

    var expected = List.of(new ContainerEndpoint("foo",
                                                 ApplicationClusterEndpoint.Scope.global,
                                                 List.of("foo.app1.tenant1.global.vespa.example.com",
                                                         "rotation-042.vespa.global.routing"),
                                                 OptionalInt.empty(),
                                                 ApplicationClusterEndpoint.RoutingMethod.shared),
                           new ContainerEndpoint("bar",
                                                 ApplicationClusterEndpoint.Scope.global,
                                                 List.of("bar.app1.tenant1.global.vespa.example.com",
                                                         "rotation-043.vespa.global.routing"),
                                                 OptionalInt.empty(),
                                                 ApplicationClusterEndpoint.RoutingMethod.sharedLayer4));
    assertEquals(expected, readContainerEndpoints(applicationId));
    var modelContext = modelFactory.getModelContext();
    var containerEndpointsFromModel = modelContext.properties().endpoints();
    assertEquals(Set.copyOf(expected), containerEndpointsFromModel);

    // Preparing with null container endpoints keeps old value. This is what happens when deployments happen from
    // an existing session (e.g. internal redeployment).
    params = new PrepareParams.Builder().applicationId(applicationId).build();
    prepare(new File("src/test/resources/deploy/hosted-app"), params);
    assertEquals(expected, readContainerEndpoints(applicationId));

    // Preparing with empty container endpoints clears endpoints
    params = new PrepareParams.Builder().applicationId(applicationId).containerEndpoints("[]").build();
    prepare(new File("src/test/resources/deploy/hosted-app"), params);
    assertEquals(List.of(), readContainerEndpoints(applicationId));
}
/**
 * Counts the characters against the write limit before delegating to the wrapped handler.
 *
 * @throws SAXException if the underlying handler fails or the limit is exceeded
 */
@Override
public void characters(char[] ch, int start, int length) throws SAXException {
    // advance() tracks progress toward the configured limit — TODO confirm its limit semantics.
    advance(length);
    super.characters(ch, start, length);
}
// Feeding many character chunks without any prior input must not trigger a SAXException.
@Test
public void testSomeCharactersWithoutInput() throws IOException {
    try {
        char[] ch = new char[100];
        for (int i = 0; i < 100; i++) {
            handler.characters(ch, 0, ch.length);
        }
    } catch (SAXException e) {
        fail("Unexpected SAXException");
    }
}
/**
 * Replaces every character not in the allow-list with the configured replacement
 * character, producing a URL-safe string.
 *
 * @param string the raw input to sanitize
 * @return the sanitized string, same length as the input
 */
public static String urlSanitize(String string) {
    final StringBuilder out = new StringBuilder(string.length());
    for (int i = 0; i < string.length(); i++) {
        final char ch = string.charAt(i);
        out.append(ALLOWED_CHARS_SET.contains(ch) ? ch : REPLACE_CHAR);
    }
    return out.toString();
}
// '|' and '~' are not URL-safe and must be replaced; '.', '-' and letters pass through.
@Test
void testUrlSanitize() {
    String url = Sanitizer.urlSanitize("weird|~url.json");
    assertEquals("weird--url.json", url);
}
/**
 * Validates a sink-config update against the existing config and returns the merged result.
 * Identity fields (tenant/namespace/name) and operationally immutable fields (subscription
 * name, input topics, regex-ness, processing guarantees, ordering flags, auto-ack) must not
 * change; all other non-empty fields from {@code newConfig} overwrite the existing values.
 *
 * @param existingConfig the currently deployed sink config
 * @param newConfig the requested update (mutated in place to normalize its input specs)
 * @return a merged copy of the existing config with the allowed updates applied
 * @throws IllegalArgumentException if any immutable field differs
 */
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) {
    SinkConfig mergedConfig = clone(existingConfig);
    // Identity must match exactly — an update cannot move the sink.
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Sink Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName()) && !newConfig.getSourceSubscriptionName()
            .equals(existingConfig.getSourceSubscriptionName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    // Normalize: ensure both specs maps exist before the various topic fields are folded in.
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    // Fold the convenience fields (inputs, topicsPattern, serde/schema maps) into inputSpecs.
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().putIfAbsent(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
                ConsumerConfig.builder()
                        .isRegexPattern(true)
                        .build());
    }
    if (newConfig.getTopicToSerdeClassName() != null) {
        newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .serdeClassName(serdeClassName)
                            .isRegexPattern(false)
                            .build());
        });
    }
    if (newConfig.getTopicToSchemaType() != null) {
        newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .schemaType(schemaClassname)
                            .isRegexPattern(false)
                            .build());
        });
    }
    // Every requested input topic must already exist with the same regex-ness.
    if (!newConfig.getInputSpecs().isEmpty()) {
        SinkConfig finalMergedConfig = mergedConfig;
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            finalMergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getResources() != null) {
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getTransformFunction() != null) {
        mergedConfig.setTransformFunction(newConfig.getTransformFunction());
    }
    if (newConfig.getTransformFunctionClassName() != null) {
        mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName());
    }
    if (newConfig.getTransformFunctionConfig() != null) {
        mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig());
    }
    return mergedConfig;
}
// Merging a config with an identical copy must be a no-op (JSON-equality check).
@Test
public void testMergeEqual() {
    SinkConfig sinkConfig = createSinkConfig();
    SinkConfig newSinkConfig = createSinkConfig();
    SinkConfig mergedConfig = SinkConfigUtils.validateUpdate(sinkConfig, newSinkConfig);
    assertEquals(
            new Gson().toJson(sinkConfig),
            new Gson().toJson(mergedConfig)
    );
}
/**
 * Parses one line of {@code mount} output into a {@link UnixMountInfo}.
 * Unparseable lines are logged at WARN and yield a builder with no fields set.
 *
 * @param line a single line from the mount command output
 * @return the parsed mount info (possibly empty on parse failure)
 */
public static UnixMountInfo parseMountInfo(String line) {
  // Example mount lines:
  // ramfs on /mnt/ramdisk type ramfs (rw,relatime,size=1gb)
  // map -hosts on /net (autofs, nosuid, automounted, nobrowse)
  UnixMountInfo.Builder builder = new UnixMountInfo.Builder();

  // First get and remove the mount type if it's provided.
  Matcher matcher = Pattern.compile(".* (type \\w+ ).*").matcher(line);
  String lineWithoutType;
  if (matcher.matches()) {
    String match = matcher.group(1);
    builder.setFsType(match.replace("type", "").trim());
    // Strip the "type <fs> " fragment so the remaining pattern matches both formats.
    lineWithoutType = line.replace(match, "");
  } else {
    lineWithoutType = line;
  }
  // Now parse the rest
  matcher = Pattern.compile("(.*) on (.*) \\((.*)\\)").matcher(lineWithoutType);
  if (!matcher.matches()) {
    // Best-effort: return an empty result rather than failing the whole listing.
    LOG.warn("Unable to parse output of '{}': {}", MOUNT_COMMAND, line);
    return builder.build();
  }
  builder.setDeviceSpec(matcher.group(1));
  builder.setMountPoint(matcher.group(2));
  builder.setOptions(parseUnixMountOptions(matcher.group(3)));
  return builder.build();
}
// Garbage input produces an empty (all-absent) UnixMountInfo instead of throwing.
@Test
public void parseMountInfoInvalidOutput() throws Exception {
    UnixMountInfo info = ShellUtils.parseMountInfo("invalid output");
    assertFalse(info.getDeviceSpec().isPresent());
    assertFalse(info.getMountPoint().isPresent());
    assertFalse(info.getFsType().isPresent());
    assertFalse(info.getOptions().getSize().isPresent());
}
/**
 * Returns whether every element of {@code c} is contained in this set.
 * Delegates to the primitive {@code IntSet} overload when possible to avoid boxing.
 */
@Override
public boolean containsAll(Collection<?> c) {
    if (c instanceof IntSet) {
        return containsAll((IntSet) c);
    }
    // Short-circuits on the first missing element, like the original loop.
    return c.stream().allMatch(this::contains);
}
// A RangeSet of size N contains every RangeSet of size <= N, but not a larger one.
@Test
public void containsAll() throws Exception {
    RangeSet rs = new RangeSet(4);
    RangeSet rs2 = new RangeSet(4);
    assertTrue(rs.containsAll(rs2));
    RangeSet rs3 = new RangeSet(3);
    assertTrue(rs.containsAll(rs3));
    assertFalse(rs3.containsAll(rs));
}
/**
 * HTTP endpoint creating a flow from its raw YAML source; the source string is parsed,
 * then persisted alongside the parsed flow.
 *
 * @param flow the raw YAML definition of the flow
 * @return 200 with the created flow and its source
 * @throws ConstraintViolationException if the flow fails validation
 */
@ExecuteOn(TaskExecutors.IO)
@Post(consumes = MediaType.APPLICATION_YAML)
@Operation(tags = {"Flows"}, summary = "Create a flow from yaml source")
public HttpResponse<FlowWithSource> create(
    @Parameter(description = "The flow") @Body String flow
) throws ConstraintViolationException {
    Flow flowParsed = yamlFlowParser.parse(flow, Flow.class);
    return HttpResponse.ok(doCreate(flowParsed, flow));
}
// Updating a flow with a body whose id/namespace differ from the URL must be rejected
// with 422 and validation errors naming both mismatched fields.
@SuppressWarnings("OptionalGetWithoutIsPresent")
@Test
void invalidUpdateFlow() {
    String flowId = IdUtils.create();

    Flow flow = generateFlow(flowId, "io.kestra.unittest", "a");
    Flow result = client.toBlocking().retrieve(POST("/api/v1/flows", flow), Flow.class);

    assertThat(result.getId(), is(flow.getId()));

    Flow finalFlow = generateFlow(IdUtils.create(), "io.kestra.unittest2", "b");
    ;

    HttpClientResponseException e = assertThrows(HttpClientResponseException.class, () -> {
        client.toBlocking().exchange(
            PUT("/api/v1/flows/" + flow.getNamespace() + "/" + flowId, finalFlow),
            Argument.of(Flow.class),
            Argument.of(JsonError.class)
        );
    });

    String jsonError = e.getResponse().getBody(String.class).get();

    assertThat(e.getStatus(), is(UNPROCESSABLE_ENTITY));
    assertThat(jsonError, containsString("flow.id"));
    assertThat(jsonError, containsString("flow.namespace"));
}
/**
 * Builds the serde registry for one cluster: explicitly configured serdes first (names must
 * be set and unique), then auto-configurable built-in serdes, then topic-related serdes.
 * Also resolves the default key/value serdes, falling back for values to the schema-registry
 * serde, then the protobuf-file serde.
 *
 * @throws ValidationException if a configured serde has no name or a duplicate name
 */
public ClusterSerdes init(Environment env,
                          ClustersProperties clustersProperties,
                          int clusterIndex) {
  ClustersProperties.Cluster clusterProperties = clustersProperties.getClusters().get(clusterIndex);
  log.debug("Configuring serdes for cluster {}", clusterProperties.getName());

  var globalPropertiesResolver = new PropertyResolverImpl(env);
  var clusterPropertiesResolver = new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex);

  // LinkedHashMap: registration order matters for serde selection later.
  Map<String, SerdeInstance> registeredSerdes = new LinkedHashMap<>();

  // initializing serdes from config
  if (clusterProperties.getSerde() != null) {
    for (int i = 0; i < clusterProperties.getSerde().size(); i++) {
      ClustersProperties.SerdeConfig serdeConfig = clusterProperties.getSerde().get(i);
      if (Strings.isNullOrEmpty(serdeConfig.getName())) {
        throw new ValidationException("'name' property not set for serde: " + serdeConfig);
      }
      if (registeredSerdes.containsKey(serdeConfig.getName())) {
        throw new ValidationException("Multiple serdes with same name: " + serdeConfig.getName());
      }
      var instance = createSerdeFromConfig(
          serdeConfig,
          new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex + ".serde." + i + ".properties"),
          clusterPropertiesResolver,
          globalPropertiesResolver
      );
      registeredSerdes.put(serdeConfig.getName(), instance);
    }
  }

  // initializing remaining built-in serdes with empty selection patters
  builtInSerdeClasses.forEach((name, clazz) -> {
    if (!registeredSerdes.containsKey(name)) {
      BuiltInSerde serde = createSerdeInstance(clazz);
      // Only register built-ins that can configure themselves from available properties.
      if (autoConfigureSerde(serde, clusterPropertiesResolver, globalPropertiesResolver)) {
        registeredSerdes.put(name, new SerdeInstance(name, serde, null, null, null));
      }
    }
  });

  registerTopicRelatedSerde(registeredSerdes);

  return new ClusterSerdes(
      registeredSerdes,
      Optional.ofNullable(clusterProperties.getDefaultKeySerde())
          .map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default key serde not found"))
          .orElse(null),
      Optional.ofNullable(clusterProperties.getDefaultValueSerde())
          .map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default value serde not found"))
          // Value serde fallback chain: schema registry, then protobuf files.
          .or(() -> Optional.ofNullable(registeredSerdes.get(SchemaRegistrySerde.name())))
          .or(() -> Optional.ofNullable(registeredSerdes.get(ProtobufFileSerde.name())))
          .orElse(null),
      createFallbackSerde()
  );
}
// A built-in serde name without explicit properties fails init when the serde cannot
// auto-configure itself.
@Test
void serdeWithBuiltInNameAndNoPropertiesCantBeInitializedIfSerdeNotSupportAutoConfigure() {
    ClustersProperties.SerdeConfig serdeConfig = new ClustersProperties.SerdeConfig();
    serdeConfig.setName("BuiltIn2"); //auto-configuration not supported
    serdeConfig.setTopicKeysPattern("keys");
    serdeConfig.setTopicValuesPattern("vals");

    assertThatCode(() -> initializer.init(env, createProperties(serdeConfig), 0))
        .isInstanceOf(ValidationException.class);
}
/**
 * Reads the markdown resource, parses it, and walks the AST with a visitor that
 * splits the content into {@link Document}s according to the reader config.
 *
 * @return the extracted documents
 * @throws RuntimeException wrapping any {@link IOException} from reading the resource
 */
@Override
public List<Document> get() {
    // try-with-resources guarantees the resource stream is closed even on parse failure.
    try (var input = markdownResource.getInputStream()) {
        Node node = parser.parseReader(new InputStreamReader(input));

        DocumentVisitor documentVisitor = new DocumentVisitor(config);
        node.accept(documentVisitor);

        return documentVisitor.getDocuments();
    }
    catch (IOException e) {
        throw new RuntimeException(e);
    }
}
// Ordered and unordered list sections each become one document, with the enclosing
// header captured as metadata (category + title).
@Test
void testLists() {
    MarkdownDocumentReader reader = new MarkdownDocumentReader("classpath:/lists.md");

    List<Document> documents = reader.get();

    assertThat(documents).hasSize(2)
        .extracting(Document::getMetadata, Document::getContent)
        .containsOnly(tuple(Map.of("category", "header_2", "title", "Ordered list"),
                "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur diam eros, laoreet sit amet cursus vitae, varius sed nisi. Cras sit amet quam quis velit commodo porta consectetur id nisi. Phasellus tincidunt pulvinar augue. Proin vel laoreet leo, sed luctus augue. Sed et ligula commodo, commodo lacus at, consequat turpis. Maecenas eget sapien odio. Pellentesque auctor pharetra eros, viverra sodales lorem aliquet id. Curabitur semper nisi vel sem interdum suscipit. Maecenas urna lectus, pellentesque in accumsan aliquam, congue eu libero. Ut rhoncus nec justo a porttitor."),
            tuple(Map.of("category", "header_2", "title", "Unordered list"),
                "Aenean eu leo eu nibh tristique posuere quis quis massa. Aenean imperdiet libero dui, nec malesuada dui maximus vel. Vestibulum sed dui condimentum, cursus libero in, dapibus tortor. Etiam facilisis enim in egestas dictum."));
}
/**
 * Spring bean exposing the Tars metadata handler.
 *
 * @return a new {@link TarsMetaDataHandler}
 */
@Bean
public MetaDataHandler tarsMetaDataHandler() {
    return new TarsMetaDataHandler();
}
// The bean must be registered under the name "tarsMetaDataHandler" and be non-null.
@Test
public void testTarsMetaDataHandler() {
    applicationContextRunner.run(context -> {
            MetaDataHandler handler = context.getBean("tarsMetaDataHandler", MetaDataHandler.class);
            assertNotNull(handler);
        }
    );
}
/**
 * Looks up the scheduling view of an intermediate result partition by id.
 *
 * @param intermediateResultPartitionId id of the partition to resolve
 * @return the matching {@link DefaultResultPartition}
 * @throws IllegalArgumentException if no partition with the given id is registered
 */
@Override
public DefaultResultPartition getResultPartition(
        final IntermediateResultPartitionID intermediateResultPartitionId) {
    final DefaultResultPartition partition =
            resultPartitionsById.get(intermediateResultPartitionId);
    if (partition == null) {
        throw new IllegalArgumentException(
                "can not find partition: " + intermediateResultPartitionId);
    }
    return partition;
}
// Every produced partition in the execution graph must resolve to an equivalent
// scheduling partition through the adapter.
@Test
void testGetResultPartition() {
    for (ExecutionVertex vertex : executionGraph.getAllExecutionVertices()) {
        for (Map.Entry<IntermediateResultPartitionID, IntermediateResultPartition> entry :
                vertex.getProducedPartitions().entrySet()) {
            IntermediateResultPartition partition = entry.getValue();
            DefaultResultPartition schedulingResultPartition =
                    adapter.getResultPartition(entry.getKey());

            assertPartitionEquals(partition, schedulingResultPartition);
        }
    }
}
/**
 * Reads a single byte from the underlying stream, updating position and metrics.
 *
 * @return the byte read, or -1 at end of stream
 * @throws IOException on stream failure
 * @throws IllegalStateException if the stream was already closed
 */
@Override
public int read() throws IOException {
    Preconditions.checkState(!closed, "Cannot read: already closed");
    // (Re)position the underlying stream if a seek happened since the last read.
    positionStream();

    pos += 1;
    next += 1;
    readBytes.increment();
    readOperations.increment();

    return stream.read();
}
// Two sequential single-byte reads return the exact bytes written, including a value > 127.
@Test
public void testReadSingle() throws Exception {
    int i0 = 1;
    int i1 = 255;
    byte[] data = {(byte) i0, (byte) i1};

    setupData(data);

    try (SeekableInputStream in =
             new ADLSInputStream(fileClient(), null, azureProperties, MetricsContext.nullMetrics())) {
        assertThat(in.read()).isEqualTo(i0);
        assertThat(in.read()).isEqualTo(i1);
    }
}
/**
 * Writes the value as a MySQL length-encoded field: byte arrays are written raw,
 * all other values are written as their string representation.
 */
@Override
public void write(final MySQLPacketPayload payload, final Object value) {
    if (!(value instanceof byte[])) {
        payload.writeStringLenenc(value.toString());
        return;
    }
    payload.writeBytesLenenc((byte[]) value);
}
// A non-byte[] value goes out via writeStringLenenc with its toString() representation.
@Test
void assertWriteString() {
    new MySQLStringLenencBinaryProtocolValue().write(payload, "value");
    verify(payload).writeStringLenenc("value");
}
/**
 * @return the plugin name this handler is registered for (logging-elasticsearch)
 */
@Override
public String pluginNamed() {
    return PluginEnum.LOGGING_ELASTIC_SEARCH.getName();
}
// The handler must report the canonical plugin name used for dispatch.
@Test
public void testPluginNamed() {
    Assertions.assertEquals(loggingElasticSearchPluginDataHandler.pluginNamed(), "loggingElasticSearch");
}
/**
 * Parses the date string into a {@link DateTime} using the given format.
 *
 * @param dateStr the date string to parse
 * @param dateFormat the format to parse with
 * @return the parsed {@link DateTime}
 */
public static DateTime parse(CharSequence dateStr, DateFormat dateFormat) {
    return new DateTime(dateStr, dateFormat);
}
// The same instant written with '-', '/', '.' and Chinese date separators must all
// parse to an equal DateTime.
@Test
public void parseToDateTimeTest3() {
    final String dateStr1 = "2017-02-01 12:23:45";
    final String dateStr2 = "2017/02/01 12:23:45";
    final String dateStr3 = "2017.02.01 12:23:45";
    final String dateStr4 = "2017年02月01日 12时23分45秒";

    final DateTime dt1 = DateUtil.parse(dateStr1);
    final DateTime dt2 = DateUtil.parse(dateStr2);
    final DateTime dt3 = DateUtil.parse(dateStr3);
    final DateTime dt4 = DateUtil.parse(dateStr4);
    assertEquals(dt1, dt2);
    assertEquals(dt2, dt3);
    assertEquals(dt3, dt4);
}
/**
 * Runs a single iteration of the sender loop: handles transactional/idempotent
 * bookkeeping first (possibly short-circuiting the iteration), then drains the
 * accumulator, sends produce requests, and polls the network client.
 */
void runOnce() {
    if (transactionManager != null) {
        try {
            transactionManager.maybeResolveSequences();

            RuntimeException lastError = transactionManager.lastError();

            // do not continue sending if the transaction manager is in a failed state
            if (transactionManager.hasFatalError()) {
                if (lastError != null)
                    maybeAbortBatches(lastError);
                client.poll(retryBackoffMs, time.milliseconds());
                return;
            }

            if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
                return;
            }

            // Check whether we need a new producerId. If so, we will enqueue an InitProducerId
            // request which will be sent below
            transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();

            // A pending transactional request takes priority over produce data this iteration.
            if (maybeSendAndPollTransactionalRequest()) {
                return;
            }
        } catch (AuthenticationException e) {
            // This is already logged as error, but propagated here to perform any clean ups.
            log.trace("Authentication exception while processing transactional request", e);
            transactionManager.authenticationFailed(e);
        }
    }

    long currentTimeMs = time.milliseconds();
    long pollTimeout = sendProducerData(currentTimeMs);
    client.poll(pollTimeout, currentTimeMs);
}
// When the leader's supported produce version drops after batching, the sender must
// down-convert the record set (magic v1) to match the older protocol version.
@Test
public void testMessageFormatDownConversion() throws Exception {
    // this test case verifies the behavior when the version of the produce request supported by the
    // broker changes after the record set is created

    long offset = 0;

    // start off support produce request v3
    apiVersions.update("0", NodeApiVersions.create());

    Future<RecordMetadata> future = appendToAccumulator(tp0, 0L, "key", "value");

    // now the partition leader supports only v2
    apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2));

    client.prepareResponse(body -> {
        ProduceRequest request = (ProduceRequest) body;
        if (request.version() != 2)
            return false;

        MemoryRecords records = partitionRecords(request).get(tp0);
        return records != null &&
            records.sizeInBytes() > 0 &&
            records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1);
    }, produceResponse(tp0, offset, Errors.NONE, 0));

    sender.runOnce(); // connect
    sender.runOnce(); // send produce request

    assertTrue(future.isDone(), "Request should be completed");
    assertEquals(offset, future.get().offset());
}
/**
 * Records the elapsed statement time (nanoseconds) in the timer derived from the
 * statement context's naming strategy.
 */
@Override
public void collect(long elapsedTime, StatementContext ctx) {
    final Timer timer = getTimer(ctx);
    timer.update(elapsedTime, TimeUnit.NANOSECONDS);
}
// Raw SQL that isn't parseable as SQL falls under the "sql.raw.<text>" timer name,
// and the recorded duration must reach the timer's snapshot.
@Test
public void updatesTimerForNonSqlishRawSql() throws Exception {
    final StatementNameStrategy strategy = new SmartNameStrategy();
    final InstrumentedTimingCollector collector = new InstrumentedTimingCollector(registry,
            strategy);
    final StatementContext ctx = mock(StatementContext.class);
    doReturn("don't know what it is but it's not SQL").when(ctx).getRawSql();

    collector.collect(TimeUnit.SECONDS.toNanos(3), ctx);

    final String name = strategy.getStatementName(ctx);
    final Timer timer = registry.timer(name);

    assertThat(name)
            .isEqualTo(name("sql", "raw", "don't know what it is but it's not SQL"));
    assertThat(timer.getSnapshot().getMax())
            .isEqualTo(3000000000L);
}
/**
 * @return the string representation built by {@code buildStringRepresentation()}
 */
@Override
public String toString() {
    return buildStringRepresentation();
}
// toString() must surface both the keys and the values of the document's properties.
@Test
void shouldIncludePropsInToString() {
    var props = Map.of(KEY, (Object) VALUE);
    var document = new DocumentImplementation(props);
    assertTrue(document.toString().contains(KEY));
    assertTrue(document.toString().contains(VALUE));
}
public static String basicEscape(String s) { char c; int len = s.length(); StringBuilder sbuf = new StringBuilder(len); int i = 0; while (i < len) { c = s.charAt(i++); if (c == '\\' && i < len ) { c = s.charAt(i++); if (c == 'n') { c = '\n'; } else if (c == 'r') { c = '\r'; } else if (c == 't') { c = '\t'; } else if (c == 'f') { c = '\f'; } else if (c == '\b') { c = '\b'; } else if (c == '\"') { c = '\"'; } else if (c == '\'') { c = '\''; } else if (c == '\\') { c = '\\'; } ///// } sbuf.append(c); } // while return sbuf.toString(); }
// Escapes are resolved ("\t" -> tab, "\\\\" -> "\\"); a trailing lone backslash survives.
@Test
public void basicEscape() {
    assertEquals("a", RegularEscapeUtil.basicEscape("a"));
    assertEquals("a\t", RegularEscapeUtil.basicEscape("a\t"));
    assertEquals("a\\", RegularEscapeUtil.basicEscape("a\\"));
    assertEquals("a\\", RegularEscapeUtil.basicEscape("a\\\\"));
}
/**
 * Creates a sliding time window of per-second partial aggregations.
 * One bucket is pre-created per second of the window, stamped with consecutive
 * epoch seconds starting at the clock's current instant; {@code headIndex} marks
 * the bucket for the current second.
 *
 * @param timeWindowSizeInSeconds window length in seconds (one bucket per second)
 * @param clock clock used to timestamp the buckets (injectable for testing)
 */
public SlidingTimeWindowMetrics(int timeWindowSizeInSeconds, Clock clock) {
    this.clock = clock;
    this.timeWindowSizeInSeconds = timeWindowSizeInSeconds;
    this.partialAggregations = new PartialAggregation[timeWindowSizeInSeconds];
    this.headIndex = 0;
    long epochSecond = clock.instant().getEpochSecond();
    for (int i = 0; i < timeWindowSizeInSeconds; i++) {
        // Each bucket owns one distinct second within the window.
        partialAggregations[i] = new PartialAggregation(epochSecond);
        epochSecond++;
    }
    this.totalAggregation = new TotalAggregation();
}
/**
 * Drives a 5-second sliding window with a mock clock: aggregates accumulate while
 * calls fall inside the window, old one-second buckets are evicted as the clock
 * advances past them, and a 5-second jump expires the entire window.
 */
@Test
public void testSlidingTimeWindowMetrics() {
    MockClock clock = MockClock.at(2019, 8, 4, 12, 0, 0, ZoneId.of("UTC"));
    Metrics metrics = new SlidingTimeWindowMetrics(5, clock);

    // t=0.0s: first call fails.
    Snapshot result = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.ERROR);
    assertThat(result.getTotalNumberOfCalls()).isEqualTo(1);
    assertThat(result.getNumberOfSuccessfulCalls()).isZero();
    assertThat(result.getNumberOfFailedCalls()).isEqualTo(1);
    assertThat(result.getTotalDuration().toMillis()).isEqualTo(100);

    // t=0.1s: same one-second bucket, aggregates grow.
    clock.advanceByMillis(100);
    result = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.SUCCESS);
    assertThat(result.getTotalNumberOfCalls()).isEqualTo(2);
    assertThat(result.getNumberOfSuccessfulCalls()).isEqualTo(1);
    assertThat(result.getNumberOfFailedCalls()).isEqualTo(1);
    assertThat(result.getTotalDuration().toMillis()).isEqualTo(200);

    // t=0.2s: still the same bucket.
    clock.advanceByMillis(100);
    result = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.SUCCESS);
    assertThat(result.getTotalNumberOfCalls()).isEqualTo(3);
    assertThat(result.getNumberOfSuccessfulCalls()).isEqualTo(2);
    assertThat(result.getNumberOfFailedCalls()).isEqualTo(1);
    assertThat(result.getTotalDuration().toMillis()).isEqualTo(300);

    // t=1.2s: next bucket; window still covers everything recorded so far.
    clock.advanceBySeconds(1);
    result = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.SUCCESS);
    assertThat(result.getTotalNumberOfCalls()).isEqualTo(4);
    assertThat(result.getNumberOfSuccessfulCalls()).isEqualTo(3);
    assertThat(result.getNumberOfFailedCalls()).isEqualTo(1);
    assertThat(result.getTotalDuration().toMillis()).isEqualTo(400);

    // t=2.2s.
    clock.advanceBySeconds(1);
    result = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.SUCCESS);
    assertThat(result.getTotalNumberOfCalls()).isEqualTo(5);
    assertThat(result.getNumberOfSuccessfulCalls()).isEqualTo(4);
    assertThat(result.getNumberOfFailedCalls()).isEqualTo(1);
    assertThat(result.getTotalDuration().toMillis()).isEqualTo(500);

    // t=3.2s: second failure enters the window.
    clock.advanceBySeconds(1);
    result = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.ERROR);
    assertThat(result.getTotalNumberOfCalls()).isEqualTo(6);
    assertThat(result.getNumberOfSuccessfulCalls()).isEqualTo(4);
    assertThat(result.getNumberOfFailedCalls()).isEqualTo(2);
    assertThat(result.getTotalDuration().toMillis()).isEqualTo(600);

    // t=4.2s: window now spans all 5 buckets; nothing evicted yet.
    clock.advanceBySeconds(1);
    result = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.SUCCESS);
    assertThat(result.getTotalNumberOfCalls()).isEqualTo(7);
    assertThat(result.getNumberOfSuccessfulCalls()).isEqualTo(5);
    assertThat(result.getNumberOfFailedCalls()).isEqualTo(2);
    assertThat(result.getTotalDuration().toMillis()).isEqualTo(700);

    // t=5.2s: the oldest bucket (with its 3 calls) falls out of the window.
    clock.advanceBySeconds(1);
    result = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.SUCCESS);
    assertThat(result.getTotalNumberOfCalls()).isEqualTo(5);
    assertThat(result.getNumberOfSuccessfulCalls()).isEqualTo(4);
    assertThat(result.getNumberOfFailedCalls()).isEqualTo(1);
    assertThat(result.getTotalDuration().toMillis()).isEqualTo(500);

    // t=6.2s: one bucket evicted, one new call recorded — totals stay level.
    clock.advanceBySeconds(1);
    result = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.SUCCESS);
    assertThat(result.getTotalNumberOfCalls()).isEqualTo(5);
    assertThat(result.getNumberOfSuccessfulCalls()).isEqualTo(4);
    assertThat(result.getNumberOfFailedCalls()).isEqualTo(1);
    assertThat(result.getTotalDuration().toMillis()).isEqualTo(500);

    // t=11.2s: jump past the whole window — only the new call remains.
    clock.advanceBySeconds(5);
    result = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.SUCCESS);
    assertThat(result.getTotalNumberOfCalls()).isEqualTo(1);
    assertThat(result.getNumberOfSuccessfulCalls()).isEqualTo(1);
    assertThat(result.getNumberOfFailedCalls()).isZero();
    assertThat(result.getTotalDuration().toMillis()).isEqualTo(100);
}
/**
 * Returns a new container holding {@code arr}'s values with each run of equal
 * adjacent values collapsed to a single occurrence (e.g. [5,5,6,6,5] -> [5,6,5]).
 * The input is not modified; non-adjacent repeats are kept.
 */
public static IntIndexedContainer withoutConsecutiveDuplicates(IntIndexedContainer arr) {
    final IntArrayList deduplicated = new IntArrayList();
    final int size = arr.size();
    if (size > 0) {
        // The first element always survives; afterwards keep only values that
        // differ from their immediate predecessor.
        int previous = arr.get(0);
        deduplicated.add(previous);
        for (int i = 1; i < size; i++) {
            final int current = arr.get(i);
            if (current != previous) {
                deduplicated.add(current);
            }
            previous = current;
        }
    }
    return deduplicated;
}
@Test
public void testWithoutConsecutiveDuplicates() {
    // Empty and single-element inputs pass through unchanged.
    assertEquals(from(), ArrayUtil.withoutConsecutiveDuplicates(from()));
    assertEquals(from(1), ArrayUtil.withoutConsecutiveDuplicates(from(1)));
    // Runs of equal adjacent values collapse to a single occurrence.
    assertEquals(from(1), ArrayUtil.withoutConsecutiveDuplicates(from(1, 1)));
    assertEquals(from(1), ArrayUtil.withoutConsecutiveDuplicates(from(1, 1, 1)));
    assertEquals(from(1, 2), ArrayUtil.withoutConsecutiveDuplicates(from(1, 1, 2)));
    // Only *adjacent* duplicates are removed; repeats separated by other values stay.
    assertEquals(from(1, 2, 1), ArrayUtil.withoutConsecutiveDuplicates(from(1, 2, 1)));
    // Mixed case: multiple runs, including negatives, collapse independently.
    assertEquals(
        from(5, 6, 5, 8, 9, 11, 2, -1, 3),
        ArrayUtil.withoutConsecutiveDuplicates(from(5, 5, 5, 6, 6, 5, 5, 8, 9, 11, 11, 2, 2, -1, 3, 3)));
}
/**
 * Best-effort title for an Activity, trying in order: the toolbar title
 * (API >= 11, via {@link SensorsDataUtils#getToolbarTitle}), then
 * {@link Activity#getTitle()}, then the label declared in the manifest
 * ({@link ActivityInfo#loadLabel}).
 *
 * <p>Fixes two issues in the original: (1) {@code activity.getTitle().toString()}
 * threw an NPE when {@code getTitle()} returned null, which the inner catch turned
 * into an immediate {@code null} return, skipping the manifest-label fallback;
 * (2) the outer try/catch was dead code because the inner one already caught
 * {@code Exception}, so the {@code SALog} logging never ran.
 *
 * @param activity activity to inspect; may be null
 * @return the resolved title, or null if none is available or resolution fails
 */
public static String getActivityTitle(Activity activity) {
    if (activity == null) {
        return null;
    }
    try {
        String activityTitle = null;
        if (Build.VERSION.SDK_INT >= 11) {
            String toolbarTitle = SensorsDataUtils.getToolbarTitle(activity);
            if (!TextUtils.isEmpty(toolbarTitle)) {
                activityTitle = toolbarTitle;
            }
        }
        if (TextUtils.isEmpty(activityTitle)) {
            // getTitle() may legitimately return null — guard instead of NPE-ing,
            // so the manifest-label fallback below still gets a chance to run.
            CharSequence title = activity.getTitle();
            if (title != null) {
                activityTitle = title.toString();
            }
        }
        if (TextUtils.isEmpty(activityTitle)) {
            PackageManager packageManager = activity.getPackageManager();
            if (packageManager != null) {
                ActivityInfo activityInfo = packageManager.getActivityInfo(activity.getComponentName(), 0);
                if (!TextUtils.isEmpty(activityInfo.loadLabel(packageManager))) {
                    activityTitle = activityInfo.loadLabel(packageManager).toString();
                }
            }
        }
        return activityTitle;
    } catch (Exception e) {
        // Now actually reachable: log and fall back to "no title".
        SALog.printStackTrace(e);
        return null;
    }
}
@Test
public void getActivityTitle() {
    TestActivity activity = Robolectric.setupActivity(TestActivity.class);
    String title = SensorsDataUtils.getActivityTitle(activity);
    System.out.println("ActivityTitle = " + title);
    // NOTE(review): with no explicit window/toolbar title, the expected value appears
    // to be the activity's label, which Robolectric defaults to the fully-qualified
    // class name — confirm against the test manifest.
    Assert.assertEquals("com.sensorsdata.analytics.android.sdk.util.SensorsDataUtilsTest$TestActivity", title);
}
/**
 * Compares two {@link Number}s of different runtime classes for numeric equality,
 * widening to double or long as appropriate; falls back to {@link Object#equals}
 * when either side is neither double- nor long-representable.
 *
 * <p>Precondition (asserted): {@code lhs} and {@code rhs} have different classes.
 */
public static boolean equal(Number lhs, Number rhs) {
    final Class lhsClass = lhs.getClass();
    final Class rhsClass = rhs.getClass();
    assert lhsClass != rhsClass;
    if (isDoubleRepresentable(lhsClass)) {
        // double vs double / double vs long
        if (isDoubleRepresentable(rhsClass)) {
            return equalDoubles(lhs.doubleValue(), rhs.doubleValue());
        }
        if (isLongRepresentable(rhsClass)) {
            return equalLongAndDouble(rhs.longValue(), lhs.doubleValue());
        }
    } else if (isLongRepresentable(lhsClass)) {
        // long vs double / long vs long
        if (isDoubleRepresentable(rhsClass)) {
            return equalLongAndDouble(lhs.longValue(), rhs.doubleValue());
        }
        if (isLongRepresentable(rhsClass)) {
            return lhs.longValue() == rhs.longValue();
        }
    }
    // Exotic Number subclasses (e.g. BigDecimal/BigInteger): defer to equals().
    return lhs.equals(rhs);
}
@SuppressWarnings("ConstantConditions")
@Test(expected = Throwable.class)
public void testNullRhsInEqualThrows() {
    // A null rhs must fail fast (rhs.getClass() NPEs, or the class-inequality
    // assertion trips when -ea is on) rather than return a comparison result.
    equal(1, null);
}
/**
 * Creates a reader over this synthetic source, starting either at the source's
 * configured start offset (no checkpoint) or at the position recorded in
 * {@code checkpoint} when resuming.
 */
@Override
public UnboundedReader<KV<byte[], byte[]>> createReader(
        PipelineOptions options, @Nullable SyntheticRecordsCheckpoint checkpoint) {
    return new SyntheticUnboundedReader(
            this,
            checkpoint == null ? this.startOffset : checkpoint.getCurrentCheckMarkPosition());
}
@Test
public void startPositionShouldBeExclusive() throws IOException {
    int startPosition = 0;
    checkpoint = new SyntheticRecordsCheckpoint(startPosition);
    UnboundedSource.UnboundedReader<KV<byte[], byte[]>> reader =
        source.createReader(pipeline.getOptions(), checkpoint);
    reader.start();
    KV<byte[], byte[]> currentElement = reader.getCurrent();
    // The checkpointed position itself must NOT be re-read: the first element
    // after start() is the record at startPosition + 1.
    KV<byte[], byte[]> expectedElement = sourceOptions.genRecord(startPosition + 1).kv;
    assertEquals(expectedElement, currentElement);
}
/**
 * Converts a Flink {@link RowType} into a {@link TableSchema}, mapping each row
 * field's logical type to its corresponding {@code DataType}.
 */
public static TableSchema toSchema(RowType rowType) {
    final TableSchema.Builder builder = TableSchema.builder();
    rowType.getFields().forEach(field ->
            builder.field(field.getName(), TypeConversions.fromLogicalToDataType(field.getType())));
    return builder.build();
}
@Test
public void testConvertFlinkSchemaWithNestedColumnInPrimaryKeys() {
    // Iceberg identifier field id 2 points at struct.inner — a *nested* column.
    Schema icebergSchema =
        new Schema(
            Lists.newArrayList(
                Types.NestedField.required(
                    1,
                    "struct",
                    Types.StructType.of(
                        Types.NestedField.required(2, "inner", Types.IntegerType.get())))),
            Sets.newHashSet(2));
    // Flink's TableSchema only supports top-level primary-key columns, so the
    // conversion must reject a nested identifier column.
    assertThatThrownBy(() -> FlinkSchemaUtil.toSchema(icebergSchema))
        .isInstanceOf(ValidationException.class)
        .hasMessageStartingWith("Could not create a PRIMARY KEY")
        .hasMessageContaining("Column 'struct.inner' does not exist.");
}
/**
 * Parses raw cruise-config XML into a {@link CruiseConfig} ready for editing:
 * computes the content's MD5, parses and binds the XML into a
 * {@link BasicCruiseConfig}, stamps the MD5 onto it, and marks its origin as the
 * config file.
 *
 * @param content the full config XML
 * @return the parsed config with MD5 and file origin set
 * @throws Exception on parse or binding failure
 */
public CruiseConfig deserializeConfig(String content) throws Exception {
    final String md5 = md5Hex(content);
    // NOTE(review): content.getBytes() uses the platform default charset — confirm
    // the surrounding code guarantees UTF-8 (or pass StandardCharsets.UTF_8).
    final Element rootElement = parseInputStream(new ByteArrayInputStream(content.getBytes()));
    LOGGER.debug("[Config Save] Updating config cache with new XML");
    final CruiseConfig configForEdit =
            classParser(rootElement, BasicCruiseConfig.class, configCache, new GoCipher(),
                    registry, new ConfigReferenceElements())
                    .parse();
    setMd5(configForEdit, md5);
    configForEdit.setOrigins(new FileConfigOrigin());
    return configForEdit;
}
@Test
void shouldThrowExceptionWhenCommandsContainTrailingSpaces() {
    // <exec command='bundle '> — note the deliberate trailing space in the command.
    String configXml = ("""
            <cruise schemaVersion='%d'>
              <pipelines group='first'>
                <pipeline name='Test'>
                  <materials>
                    <hg url='../manual-testing/ant_hg/dummy' />
                  </materials>
                  <stage name='Functional'>
                    <jobs>
                      <job name='Functional'>
                        <tasks>
                          <exec command='bundle ' args='arguments' />
                        </tasks>
                      </job>
                    </jobs>
                  </stage>
                </pipeline>
              </pipelines>
            </cruise>""").formatted(CONFIG_SCHEMA_VERSION);

    // Deserialization must reject the command with the schema's no-surrounding-
    // whitespace pattern \S(.*\S)?.
    assertThatThrownBy(() -> xmlLoader.deserializeConfig(configXml))
        .as("Should not allow command with trailing spaces")
        .hasMessageContaining("Command is invalid. \"bundle \" should conform to the pattern - \\S(.*\\S)?");
}
/**
 * Tracks Ring of endurance charges from chat messages: decrements the stored count
 * when the ring's stamina-doubling message appears (warning once when it drops
 * below the passive-effect threshold), and resets the count when a charge/load
 * message reports the new total.
 */
@Subscribe
public void onChatMessage(ChatMessage event) {
    // Only server-generated game/spam messages carry the ring texts we parse.
    if (event.getType() != ChatMessageType.GAMEMESSAGE && event.getType() != ChatMessageType.SPAM) {
        return;
    }
    String message = event.getMessage();
    if (message.equals("Your Ring of endurance doubles the duration of your stamina potion's effect.")) {
        Integer charges = getRingOfEnduranceCharges();
        if (charges == null) {
            // We have never seen a charge message, so the count is unknown — skip.
            log.debug("Ring of endurance charge with no known charges");
            return;
        }
        // subtract the used charge
        charges--;
        setRingOfEnduranceCharges(charges);
        // Warn once (until re-armed below) when charges fall under the threshold
        // needed for the ring's passive stamina effect, if the user opted in.
        if (!roeWarningSent && charges < RING_OF_ENDURANCE_PASSIVE_EFFECT && energyConfig.ringOfEnduranceChargeMessage()) {
            String chatMessage = new ChatMessageBuilder()
                .append(ChatColorType.HIGHLIGHT)
                .append("Your Ring of endurance now has less than " + RING_OF_ENDURANCE_PASSIVE_EFFECT + " charges. Add more charges to regain its passive stamina effect.")
                .build();
            chatMessageManager.queue(QueuedMessage.builder()
                .type(ChatMessageType.CONSOLE)
                .runeLiteFormattedMessage(chatMessage)
                .build());
            roeWarningSent = true;
        }
    } else if (message.startsWith("Your Ring of endurance is charged with") || message.startsWith("You load your Ring of endurance with")) {
        // Take the last number in the message (commas stripped) as the new total.
        // NOTE(review): Pattern.compile runs per message — could be a static final field.
        Matcher matcher = Pattern.compile("([0-9,]+)").matcher(message);
        int charges = -1;
        while (matcher.find()) {
            charges = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
        }
        setRingOfEnduranceCharges(charges);
        // Re-arm the low-charge warning once the ring is back at/above threshold.
        if (charges >= RING_OF_ENDURANCE_PASSIVE_EFFECT) {
            roeWarningSent = false;
        }
    }
}
@Test
public void testPotionMessage() {
    // The stamina-doubling message consumes exactly one stored charge.
    String potionMessage = "Your Ring of endurance doubles the duration of your stamina potion's effect.";
    ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.SPAM, "", potionMessage, "", 0);
    runEnergyPlugin.onChatMessage(chatMessage);
    // NOTE(review): expecting 0 implies the mocked stored charge count is 1 —
    // confirm against the test's setup.
    verify(configManager).setRSProfileConfiguration(RunEnergyConfig.GROUP_NAME, "ringOfEnduranceCharges", 0);
}
/**
 * Shuts this component down by delegating to the embedded DLedger server.
 */
@Override
public void shutdown() {
    dLedgerServer.shutdown();
}
/**
 * Abnormal-shutdown recovery: write messages, zero the checkpoint timestamps,
 * leave an abort file and delete a consume-queue file, then restart and verify
 * all messages are readable again from both queues.
 */
@Test
public void testDLedgerAbnormallyRecover() throws Exception {
    String base = createBaseDir();
    String peers = String.format("n0-localhost:%d", nextPort());
    String group = UUID.randomUUID().toString();
    String topic = UUID.randomUUID().toString();
    int messageNumPerQueue = 100;

    // Start a single-node DLedger store and write 100 messages to queues 0 and 1.
    DefaultMessageStore messageStore = createDledgerMessageStore(base, group, "n0", peers, null, false, 0);
    Thread.sleep(1000);
    doPutMessages(messageStore, topic, 0, messageNumPerQueue, 0);
    doPutMessages(messageStore, topic, 1, messageNumPerQueue, 0);
    Thread.sleep(1000);
    Assert.assertEquals(0, messageStore.getMinOffsetInQueue(topic, 0));
    Assert.assertEquals(messageNumPerQueue, messageStore.getMaxOffsetInQueue(topic, 0));
    Assert.assertEquals(0, messageStore.dispatchBehindBytes());
    doGetMessages(messageStore, topic, 0, messageNumPerQueue, 0);

    // Zero the checkpoint timestamps so recovery cannot rely on them.
    StoreCheckpoint storeCheckpoint = messageStore.getStoreCheckpoint();
    storeCheckpoint.setPhysicMsgTimestamp(0);
    storeCheckpoint.setLogicsMsgTimestamp(0);
    messageStore.shutdown();

    // Simulate an abnormal exit: leave the abort file and delete a consume-queue
    // file so the restart must rebuild/recover it.
    String fileName = StorePathConfigHelper.getAbortFile(base);
    makeSureFileExists(fileName);
    File file = new File(base + File.separator + "consumequeue" + File.separator + topic + File.separator + "0" + File.separator + "00000000000000001040");
    file.delete();
    // truncateAllConsumeQueue(base + File.separator + "consumequeue" + File.separator + topic + File.separator);

    // Restart: abnormal recovery must restore both queues completely.
    messageStore = createDledgerMessageStore(base, group, "n0", peers, null, false, 0);
    Thread.sleep(1000);
    doGetMessages(messageStore, topic, 0, messageNumPerQueue, 0);
    doGetMessages(messageStore, topic, 1, messageNumPerQueue, 0);
    messageStore.shutdown();
}
/**
 * Builds the column statistics for this builder: an {@link IntegerColumnStatistics}
 * when integer min/max/sum statistics are available, otherwise a plain
 * {@link ColumnStatistics} carrying only the counts and sizes.
 */
@Override
public ColumnStatistics buildColumnStatistics() {
    return buildIntegerStatistics()
            .<ColumnStatistics>map(integerStatistics ->
                    new IntegerColumnStatistics(nonNullValueCount, null, rawSize, storageSize, integerStatistics))
            .orElseGet(() -> new ColumnStatistics(nonNullValueCount, null, rawSize, storageSize));
}
@Test
public void testMergeOverflow() {
    List<ColumnStatistics> statisticsList = new ArrayList<>();
    // Empty builder: zero values, sum 0.
    statisticsList.add(new IntegerStatisticsBuilder().buildColumnStatistics());
    assertMergedIntegerStatistics(statisticsList, 0, 0L);
    // One value of Long.MAX_VALUE: sum is exactly MAX_VALUE.
    statisticsList.add(singleValueIntegerStatistics(MAX_VALUE));
    assertMergedIntegerStatistics(statisticsList, 1, MAX_VALUE);
    // Adding 1 overflows the running sum, so the merged sum must be dropped (null).
    statisticsList.add(singleValueIntegerStatistics(1));
    assertMergedIntegerStatistics(statisticsList, 2, null);
}