focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Converts this quantity's value into base units by scaling with the unit's
 * conversion factor (e.g. cm -> metres).
 *
 * @return the value expressed in base units
 */
public double toBaseUnits() {
    final double conversionFactor = unit.factor();
    return conversionFactor * value;
}
/** One centimetre must convert to 0.01 in base units (metres). */
@Test
public void calculateValueInBaseUnits() throws Exception {
    final Quantity<Metrics> oneCentimetre = new Quantity<Metrics>(1, Metrics.cm);
    assertThat(oneCentimetre.toBaseUnits()).isEqualTo(0.01);
}
// Accessor for the configured H3 index resolution.
// May return null when the index config omits the resolution (see the empty-config test).
public H3IndexResolution getResolution() { return _resolution; }
/** An empty JSON object must deserialize to the defaults: not disabled, no resolution. */
@Test
public void withEmptyConf() throws JsonProcessingException {
    final String emptyJson = "{}";
    final H3IndexConfig parsed = JsonUtils.stringToObject(emptyJson, H3IndexConfig.class);
    assertFalse(parsed.isDisabled(), "Unexpected disabled");
    assertNull(parsed.getResolution(), "Unexpected resolution");
}
/**
 * Evaluates a map-based Efesto input against a PMML model.
 *
 * @param toEvaluate     the input carrying the model URI and the raw input-data map
 * @param runtimeContext the runtime context; reused as-is when it is already PMML-aware
 * @return the PMML output, if evaluation produced one
 */
public static Optional<EfestoOutputPMML> executeEfestoInputFromMap(EfestoInput<Map<String, Object>> toEvaluate, EfestoRuntimeContext runtimeContext) {
    // Reuse the caller's context when it is already a PMMLRuntimeContext; otherwise derive one
    // from the input data and the generated resources.
    final PMMLRuntimeContext pmmlContext = runtimeContext instanceof PMMLRuntimeContext
            ? (PMMLRuntimeContext) runtimeContext
            : getPMMLRuntimeContext(toEvaluate.getInputData(), runtimeContext.getGeneratedResourcesMap());
    final EfestoInputPMML pmmlInput = getEfestoInputPMML(toEvaluate.getModelLocalUriId(), pmmlContext);
    return executeEfestoInputPMML(pmmlInput, pmmlContext);
}
/** End-to-end: evaluating a map-based input must echo back the model URI it was invoked with. */
@Test
void executeEfestoInputFromMap() {
    modelLocalUriId = getModelLocalUriIdFromPmmlIdFactory(FILE_NAME, MODEL_NAME);
    final BaseEfestoInput<Map<String, Object>> input =
            new BaseEfestoInput<>(modelLocalUriId, getInputData(MODEL_NAME, FILE_NAME));
    final Optional<EfestoOutputPMML> result = PMMLRuntimeHelper.executeEfestoInputFromMap(
            input, getPMMLContext(FILE_NAME, MODEL_NAME, memoryCompilerClassLoader));
    assertThat(result).isNotNull().isPresent();
    assertThat(result.get().getModelLocalUriId()).isNotNull();
    assertThat(result.get().getModelLocalUriId()).isEqualTo(input.getModelLocalUriId());
}
/**
 * Parses a string into a typed {@code SchemaAndValue}.
 *
 * @param value the raw string; null and empty inputs short-circuit
 * @return the null sentinel for null input, a STRING schema for empty input,
 *         otherwise the parser's inferred schema and value
 */
public static SchemaAndValue parseString(String value) {
    if (value == null) {
        return NULL_SCHEMA_AND_VALUE;
    }
    if (value.isEmpty()) {
        // Empty string is valid data, not absence of data.
        return new SchemaAndValue(Schema.STRING_SCHEMA, value);
    }
    return new ValueParser(new Parser(value)).parse(false);
}
// An ISO-8601 time embedded in array syntax must parse to an ARRAY of the Time logical type
// (physical INT32). The expected java.util.Date is derived with the same format pattern the
// production parser uses, so both sides agree on sub-second handling.
// NOTE(review): SimpleDateFormat is legacy and not thread-safe; acceptable for a test-local instance.
@Test public void shouldParseTimeStringAsTimeInArray() throws Exception { String timeStr = "14:34:54.346Z"; String arrayStr = "[" + timeStr + "]"; SchemaAndValue result = Values.parseString(arrayStr); assertEquals(Type.ARRAY, result.schema().type()); Schema elementSchema = result.schema().valueSchema(); assertEquals(Type.INT32, elementSchema.type()); assertEquals(Time.LOGICAL_NAME, elementSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr); assertEquals(Collections.singletonList(expected), result.value()); }
@Override public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) { for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) { Map<String, ?> sourceOffset = offsetEntry.getValue(); if (sourceOffset == null) { // We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't // want to prevent users from being able to clean it up using the REST API continue; } Map<String, ?> sourcePartition = offsetEntry.getKey(); if (sourcePartition == null) { throw new ConnectException("Source partitions may not be null"); } MirrorUtils.validateSourcePartitionString(sourcePartition, SOURCE_CLUSTER_KEY); MirrorUtils.validateSourcePartitionString(sourcePartition, TOPIC_KEY); MirrorUtils.validateSourcePartitionPartition(sourcePartition); MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, false); } // We never commit offsets with our source consumer, so no additional effort is required beyond just validating // the format of the user-supplied offsets return true; }
// Happy path: a well-formed offsets map is accepted, and so is an empty map
// (which can mean offsets were reset previously or nothing has been committed yet).
@Test public void testSuccessfulAlterOffsets() { MirrorSourceConnector connector = new MirrorSourceConnector(); Map<Map<String, ?>, Map<String, ?>> offsets = Collections.singletonMap( sourcePartition("t2", 0, "backup"), MirrorUtils.wrapOffset(5) ); // Expect no exception to be thrown when a valid offsets map is passed. An empty offsets map is treated as valid // since it could indicate that the offsets were reset previously or that no offsets have been committed yet // (for a reset operation) assertTrue(connector.alterOffsets(null, offsets)); assertTrue(connector.alterOffsets(null, Collections.emptyMap())); }
/**
 * Appends {@code value} as this record's last field, growing the field count by one.
 *
 * @param value the value to append
 */
public void addField(Value value) {
    // Capture the insertion slot before growing, since setNumFields mutates the count.
    final int insertPosition = this.numFields;
    setNumFields(insertPosition + 1);
    internallySetField(insertPosition, value);
}
// Exercises addField across three scenarios: appending to an empty record, appending 100
// random ints (count and round-tripped values checked each iteration), and appending a
// third field of a different type to a pre-populated record.
@Test void testAddField() { // Add a value to an empty record Record record = new Record(); assertThat(record.getNumFields()).isZero(); record.addField(this.origVal1); assertThat(record.getNumFields()).isOne(); assertThat(record.getField(0, StringValue.class).getValue()) .as("The value of the first field has changed") .isEqualTo(origVal1.getValue()); // Add 100 random integers to the record record = new Record(); for (int i = 0; i < 100; i++) { IntValue orig = new IntValue(this.rand.nextInt()); record.addField(orig); IntValue rec = record.getField(i, IntValue.class); assertThat(i + 1).isEqualTo(record.getNumFields()); assertThat(rec.getValue()).isEqualTo(orig.getValue()); } // Add 3 values of different type to the record record = new Record(this.origVal1, this.origVal2); record.addField(this.origVal3); assertThat(record.getNumFields()).isEqualTo(3); StringValue recVal1 = record.getField(0, StringValue.class); DoubleValue recVal2 = record.getField(1, DoubleValue.class); IntValue recVal3 = record.getField(2, IntValue.class); assertThat((Object) recVal1) .as("The value of the first field has changed") .isEqualTo(this.origVal1); assertThat(recVal2).as("The value of the second field changed").isEqualTo(this.origVal2); assertThat(recVal3).as("The value of the third field has changed").isEqualTo(this.origVal3); }
/**
 * Routes the statement to its type-specific authorization validator.
 * Statement types without a validator require no authorization checks and fall through.
 */
@Override
public void checkAuthorization(
    final KsqlSecurityContext securityContext,
    final MetaStore metaStore,
    final Statement statement
) {
  if (statement instanceof Query) {
    validateQuery(securityContext, metaStore, (Query) statement);
    return;
  }
  if (statement instanceof InsertInto) {
    validateInsertInto(securityContext, metaStore, (InsertInto) statement);
    return;
  }
  if (statement instanceof CreateAsSelect) {
    validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect) statement);
    return;
  }
  if (statement instanceof PrintTopic) {
    validatePrintTopic(securityContext, (PrintTopic) statement);
    return;
  }
  if (statement instanceof CreateSource) {
    validateCreateSource(securityContext, (CreateSource) statement);
  }
}
// Denying READ on the right-hand join topic must surface a KsqlTopicAuthorizationException
// whose message names the denied topic.
@Test public void shouldThrowWhenJoinWithOneRightTopicWithReadPermissionsDenied() { // Given: givenTopicAccessDenied(AVRO_TOPIC, AclOperation.READ); final Statement statement = givenStatement(String.format( "SELECT * FROM %s A JOIN %s B ON A.F1 = B.F1;", KAFKA_STREAM_TOPIC, AVRO_STREAM_TOPIC) ); // When: final Exception e = assertThrows( KsqlTopicAuthorizationException.class, () -> authorizationValidator.checkAuthorization(securityContext, metaStore, statement) ); // Then: assertThat(e.getMessage(), containsString(String.format( "Authorization denied to Read on topic(s): [%s]", AVRO_TOPIC ))); }
// Flushes pending outbound messages through the pipeline (only while the channel is open).
// executingStackCnt tracks re-entrant invocations so maybeRunPendingTasks() does meaningful
// work only as the call stack unwinds; checkException() afterwards rethrows anything a
// handler raised during the flush. Statement order here is load-bearing — do not reorder.
public EmbeddedChannel flushOutbound() { executingStackCnt++; try { if (checkOpen(true)) { flushOutbound0(); } } finally { executingStackCnt--; maybeRunPendingTasks(); } checkException(voidPromise()); return this; }
/** flushOutbound() must propagate flush() to outbound handlers within the timeout. */
@Test
public void testFlushOutbound() throws InterruptedException {
    final CountDownLatch flushObserved = new CountDownLatch(1);
    final EmbeddedChannel channel = new EmbeddedChannel(new ChannelOutboundHandlerAdapter() {
        @Override
        public void flush(ChannelHandlerContext ctx) throws Exception {
            flushObserved.countDown();
        }
    });
    channel.flushOutbound();
    if (!flushObserved.await(1L, TimeUnit.SECONDS)) {
        fail("Nobody called #flush() in time.");
    }
}
/**
 * Checks whether {@code targetAddress} refers to this machine, matching either the
 * local host name or the local IP address.
 *
 * @param targetAddress the address to test
 * @param timeoutMs     resolution timeout in milliseconds
 * @return true when the address equals the local host name or local IP
 */
public static boolean isLocalAddress(String targetAddress, int timeoutMs) {
    // Cheap check first; only resolve the IP when the host name does not match
    // (preserves the original short-circuit behavior).
    if (getLocalHostName(timeoutMs).equals(targetAddress)) {
        return true;
    }
    return getLocalIpAddress(timeoutMs).equals(targetAddress);
}
/**
 * isLocalAddress must accept both the machine's own host name and IP address,
 * and reject any perturbed (non-local) address.
 */
// Renamed from the non-idiomatic "TestisLocalAddress" (methods are lowerCamelCase;
// JUnit discovers tests by annotation, so the rename is safe).
@Test
public void testIsLocalAddress() {
    int resolveTimeout = (int) mConfiguration.getMs(PropertyKey.NETWORK_HOST_RESOLUTION_TIMEOUT_MS);
    String localHostName = NetworkAddressUtils.getLocalHostName(resolveTimeout);
    String localIp = NetworkAddressUtils.getLocalIpAddress(resolveTimeout);
    // Positive cases: the machine's own identifiers are local.
    assertTrue(NetworkAddressUtils.isLocalAddress(localHostName, resolveTimeout));
    assertTrue(NetworkAddressUtils.isLocalAddress(localIp, resolveTimeout));
    // Negative cases: suffixed addresses must not match.
    assertFalse(NetworkAddressUtils.isLocalAddress(localHostName + "false", resolveTimeout));
    assertFalse(NetworkAddressUtils.isLocalAddress(localIp + "false", resolveTimeout));
}
/**
 * Builds the list of requirements that must hold on the base table metadata for the
 * given updates to commit safely.
 *
 * @param base            current table metadata; must not be null
 * @param metadataUpdates updates being applied; must not be null
 * @return the derived update requirements
 */
public static List<UpdateRequirement> forUpdateTable(
    TableMetadata base, List<MetadataUpdate> metadataUpdates) {
  Preconditions.checkArgument(null != base, "Invalid table metadata: null");
  Preconditions.checkArgument(null != metadataUpdates, "Invalid metadata updates: null");
  Builder requirements = new Builder(base, false);
  // Every table update must at minimum assert the table UUID is unchanged.
  requirements.require(new UpdateRequirement.AssertTableUUID(base.uuid()));
  for (MetadataUpdate update : metadataUpdates) {
    requirements.update(update);
  }
  return requirements.build();
}
// Both AddSnapshot and RemoveSnapshot updates must derive exactly one requirement:
// the table-UUID assertion (which must also validate against the base metadata).
@Test public void addAndRemoveSnapshot() { List<UpdateRequirement> requirements = UpdateRequirements.forUpdateTable( metadata, ImmutableList.of(new MetadataUpdate.AddSnapshot(mock(Snapshot.class)))); requirements.forEach(req -> req.validate(metadata)); assertThat(requirements) .hasSize(1) .hasOnlyElementsOfTypes(UpdateRequirement.AssertTableUUID.class); assertTableUUID(requirements); requirements = UpdateRequirements.forUpdateTable( metadata, ImmutableList.of(new MetadataUpdate.RemoveSnapshot(0L))); assertThat(requirements) .hasSize(1) .hasOnlyElementsOfTypes(UpdateRequirement.AssertTableUUID.class); assertTableUUID(requirements); }
/**
 * Returns the distinct elements of {@code left} that do not occur in {@code right},
 * preserving the order in which they first appear in {@code left}.
 * A null argument on either side yields null (SQL NULL-array semantics).
 */
@Udf
public <T> List<T> except(
    @UdfParameter(description = "Array of values") final List<T> left,
    @UdfParameter(description = "Array of exceptions") final List<T> right) {
  if (left == null || right == null) {
    return null;
  }
  // HashSet gives O(1) membership tests for the exclusions;
  // LinkedHashSet de-duplicates the left side while keeping first-seen order.
  final Set<T> toExclude = new HashSet<>(right);
  final Set<T> orderedDistinctLeft = new LinkedHashSet<>(left);
  return orderedDistinctLeft.stream()
      .filter(element -> !toExclude.contains(element))
      .collect(Collectors.toList());
}
/**
 * A null argument must propagate as a null result.
 * NOTE(review): despite the method name, this passes null as the RIGHT argument — confirm intent.
 */
@Test
public void shouldReturnNullForNullLeftInput() {
    final List<String> source = Arrays.asList("foo");
    final List<String> result = udf.except(source, null);
    assertThat(result, is(nullValue()));
}
// Scans the ideal state for partition groups that have no CONSUMING segment.
// Pass 1: bucket each LLC segment per partition group into "latest consuming" vs "latest
// completed" (non-LLC uploaded segments are skipped). Pass 2: when stream offsets are known,
// a partition group with no consuming segment is missing if it has no completed segment either
// (new partition group, or first consuming segment manually deleted) or if its last completed
// segment's end offset trails the stream's largest offset (unconsumed messages remain).
// Without stream offsets (e.g. stream connection failed), any group lacking a consuming
// segment but having a completed one is counted as missing.
@VisibleForTesting MissingSegmentInfo findMissingSegments(Map<String, Map<String, String>> idealStateMap, Instant now) { // create the maps Map<Integer, LLCSegmentName> partitionGroupIdToLatestConsumingSegmentMap = new HashMap<>(); Map<Integer, LLCSegmentName> partitionGroupIdToLatestCompletedSegmentMap = new HashMap<>(); idealStateMap.forEach((segmentName, instanceToStatusMap) -> { LLCSegmentName llcSegmentName = LLCSegmentName.of(segmentName); if (llcSegmentName != null) { // Skip the uploaded realtime segments that don't conform to llc naming if (instanceToStatusMap.containsValue(SegmentStateModel.CONSUMING)) { updateMap(partitionGroupIdToLatestConsumingSegmentMap, llcSegmentName); } else if (instanceToStatusMap.containsValue(SegmentStateModel.ONLINE)) { updateMap(partitionGroupIdToLatestCompletedSegmentMap, llcSegmentName); } } }); MissingSegmentInfo missingSegmentInfo = new MissingSegmentInfo(); if (!_partitionGroupIdToLargestStreamOffsetMap.isEmpty()) { _partitionGroupIdToLargestStreamOffsetMap.forEach((partitionGroupId, largestStreamOffset) -> { if (!partitionGroupIdToLatestConsumingSegmentMap.containsKey(partitionGroupId)) { LLCSegmentName latestCompletedSegment = partitionGroupIdToLatestCompletedSegmentMap.get(partitionGroupId); if (latestCompletedSegment == null) { // There's no consuming or completed segment for this partition group. Possibilities: // 1) it's a new partition group that has not yet been detected // 2) the first consuming segment has been deleted from ideal state manually missingSegmentInfo._newPartitionGroupCount++; missingSegmentInfo._totalCount++; } else { // Completed segment is available, but there's no consuming segment. // Note that there is no problem in case the partition group has reached its end of life. 
SegmentZKMetadata segmentZKMetadata = _segmentMetadataFetcher .fetchSegmentZkMetadata(_realtimeTableName, latestCompletedSegment.getSegmentName()); StreamPartitionMsgOffset completedSegmentEndOffset = _streamPartitionMsgOffsetFactory.create(segmentZKMetadata.getEndOffset()); if (completedSegmentEndOffset.compareTo(largestStreamOffset) < 0) { // there are unconsumed messages available on the stream missingSegmentInfo._totalCount++; updateMaxDurationInfo(missingSegmentInfo, partitionGroupId, segmentZKMetadata.getCreationTime(), now); } } } }); } else { partitionGroupIdToLatestCompletedSegmentMap.forEach((partitionGroupId, latestCompletedSegment) -> { if (!partitionGroupIdToLatestConsumingSegmentMap.containsKey(partitionGroupId)) { missingSegmentInfo._totalCount++; long segmentCompletionTimeMillis = _segmentMetadataFetcher .fetchSegmentCompletionTime(_realtimeTableName, latestCompletedSegment.getSegmentName()); updateMaxDurationInfo(missingSegmentInfo, partitionGroupId, segmentCompletionTimeMillis, now); } }); } return missingSegmentInfo; }
// Scenario 2: every partition group (0-3) has a CONSUMING segment and stream offsets are
// available, so the finder must report zero missing segments, zero new partition groups,
// and zero max duration.
@Test public void noMissingConsumingSegmentsScenario2() { // scenario 2: no missing segments and there's no exception in connecting to stream Map<String, Map<String, String>> idealStateMap = new HashMap<>(); // partition 0 idealStateMap.put("tableA__0__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE")); idealStateMap.put("tableA__0__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE")); idealStateMap.put("tableA__0__2__20220601T1500Z", ImmutableMap.of("ServerX", "CONSUMING", "ServerY", "CONSUMING")); // partition 1 idealStateMap.put("tableA__1__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE")); idealStateMap.put("tableA__1__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE")); idealStateMap.put("tableA__1__2__20220601T1500Z", ImmutableMap.of("ServerX", "CONSUMING", "ServerY", "CONSUMING")); // partition 2 idealStateMap.put("tableA__2__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE")); idealStateMap.put("tableA__2__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE")); idealStateMap.put("tableA__2__2__20220601T1500Z", ImmutableMap.of("ServerX", "CONSUMING", "ServerY", "CONSUMING")); // partition 3 idealStateMap.put("tableA__3__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE")); idealStateMap.put("tableA__3__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE")); idealStateMap.put("tableA__3__2__20220601T1500Z", ImmutableMap.of("ServerX", "CONSUMING", "ServerY", "CONSUMING")); Map<Integer, StreamPartitionMsgOffset> partitionGroupIdToLargestStreamOffsetMap = ImmutableMap.of( 0, new LongMsgOffset(1000), 1, new LongMsgOffset(1001), 2, new LongMsgOffset(1002), 3, new LongMsgOffset(1003) ); Instant now = Instant.parse("2022-06-01T18:00:00.00Z"); MissingConsumingSegmentFinder finder = new MissingConsumingSegmentFinder("tableA", null, 
partitionGroupIdToLargestStreamOffsetMap, null); MissingConsumingSegmentFinder.MissingSegmentInfo info = finder.findMissingSegments(idealStateMap, now); assertEquals(info._totalCount, 0); assertEquals(info._newPartitionGroupCount, 0); assertEquals(info._maxDurationInMinutes, 0); }
// Delegates to the backing directory: the cluster invoker exposes the same service interface
// as the invokers it aggregates.
@Override public Class<T> getInterface() { return directory.getInterface(); }
// Merges results from two group invokers for addMenu(); since addMenu returns void, the
// merged result value must be null.
// NOTE(review): invocation.getInvoker() is stubbed with firstInvoker BEFORE firstInvoker is
// reassigned to the proxy below — confirm the stub captures the intended reference.
// NOTE(review): given(directory.getConsumerUrl()) is stubbed twice; the duplicate is redundant.
@Test void testAddMenu1() { // setup url = url.addParameter(MERGER_KEY, ".merge"); String menu = "first"; List<String> menuItems = new ArrayList<String>() { { add("1"); add("2"); } }; given(invocation.getMethodName()).willReturn("addMenu"); given(invocation.getParameterTypes()).willReturn(new Class<?>[] {String.class, List.class}); given(invocation.getArguments()).willReturn(new Object[] {menu, menuItems}); given(invocation.getObjectAttachments()).willReturn(new HashMap<>()); given(invocation.getInvoker()).willReturn(firstInvoker); firstInvoker = (Invoker) Proxy.newProxyInstance( getClass().getClassLoader(), new Class<?>[] {Invoker.class}, (proxy, method, args) -> { if ("getUrl".equals(method.getName())) { return url.addParameter(GROUP_KEY, "first"); } if ("getInterface".equals(method.getName())) { return MenuService.class; } if ("invoke".equals(method.getName())) { return AsyncRpcResult.newDefaultAsyncResult(firstMenu, invocation); } return null; }); secondInvoker = (Invoker) Proxy.newProxyInstance( getClass().getClassLoader(), new Class<?>[] {Invoker.class}, (proxy, method, args) -> { if ("getUrl".equals(method.getName())) { return url.addParameter(GROUP_KEY, "second"); } if ("getInterface".equals(method.getName())) { return MenuService.class; } if ("invoke".equals(method.getName())) { return AsyncRpcResult.newDefaultAsyncResult(secondMenu, invocation); } return null; }); given(directory.list(invocation)).willReturn(new ArrayList() { { add(firstInvoker); add(secondInvoker); } }); given(directory.getUrl()).willReturn(url); given(directory.getConsumerUrl()).willReturn(url); given(directory.getConsumerUrl()).willReturn(url); given(directory.getInterface()).willReturn(MenuService.class); mergeableClusterInvoker = new MergeableClusterInvoker<MenuService>(directory); Result result = mergeableClusterInvoker.invoke(invocation); Assertions.assertNull(result.getValue()); }
// Records a state-machine error and rolls the iterator's currentIndex back by ntail entries.
// When the current entry is a DATA entry it is itself being retried, so only ntail-1 earlier
// entries are rewound. The rollback is clamped so it never goes below fsmCommittedIndex + 1
// (already-committed entries must not be re-applied). The resulting error message embeds the
// post-rollback currentIndex.
public void setErrorAndRollback(final long ntail, final Status st) { Requires.requireTrue(ntail > 0, "Invalid ntail=" + ntail); if (this.currEntry == null || this.currEntry.getType() != EnumOutter.EntryType.ENTRY_TYPE_DATA) { this.currentIndex -= ntail; } else { this.currentIndex -= ntail - 1; } if (fsmCommittedIndex >= 0) { // can't roll back before fsmCommittedIndex. this.currentIndex = Math.max(this.currentIndex, fsmCommittedIndex + 1); } this.currEntry = null; getOrCreateError().setType(EnumOutter.ErrorType.ERROR_TYPE_STATE_MACHINE); getOrCreateError().getStatus().setError(RaftError.ESTATEMACHINE, "StateMachine meet critical error when applying one or more tasks since index=%d, %s", this.currentIndex, st != null ? st.toString() : "none"); }
// After advancing via testNext(), rolling back 5 entries must set the state-machine error
// (type, code and formatted message embedding the post-rollback index) and leave the
// iterator positioned at index 6.
@Test public void testSetErrorAndRollback() { testNext(); assertFalse(iter.hasError()); this.iter.setErrorAndRollback(5, new Status(-1, "test")); assertTrue(iter.hasError()); Assert.assertEquals(EnumOutter.ErrorType.ERROR_TYPE_STATE_MACHINE, iter.getError().getType()); Assert.assertEquals(RaftError.ESTATEMACHINE.getNumber(), iter.getError().getStatus().getCode()); Assert .assertEquals( "StateMachine meet critical error when applying one or more tasks since index=6, Status[UNKNOWN<-1>: test]", iter.getError().getStatus().getErrorMsg()); assertEquals(6, iter.getIndex()); }
@Udf(description = "Returns a masked version of the input string. All characters of the input" + " will be replaced according to the default masking rules.") @SuppressWarnings("MethodMayBeStatic") // Invoked via reflection public String mask( @UdfParameter("input STRING to be masked") final String input ) { return doMask(new Masker(), input); }
/** With all four mask overrides null, the input must pass through unchanged. */
@Test
public void shouldMaskNothingIfNullMasks() {
    final String masked = udf.mask("AbCd#$123xy Z", null, null, null, null);
    assertThat(masked, is("AbCd#$123xy Z"));
}
@Override public <R extends MessageResponse<?>> R chat(Prompt<R> prompt, ChatOptions options) { Map<String, String> headers = new HashMap<>(); headers.put("Content-Type", "application/json"); headers.put("Authorization", "Bearer " + config.getApiKey()); String endpoint = config.getEndpoint(); String payload = OllamaLlmUtil.promptToPayload(prompt, config, false); String response = httpClient.post(endpoint + "/api/chat", headers, payload); if (StringUtil.noText(response)) { return null; } if (config.isDebug()) { System.out.println(">>>>receive payload:" + response); } JSONObject jsonObject = JSON.parseObject(response); String error = jsonObject.getString("error"); AbstractBaseMessageResponse<?> messageResponse; if (prompt instanceof FunctionPrompt) { throw new IllegalStateException("OLlama not support function calling"); } else { messageResponse = new AiMessageResponse(aiMessageParser.parse(jsonObject)); } if (error != null && !error.isEmpty()) { messageResponse.setError(true); messageResponse.setErrorMessage(error); } //noinspection unchecked return (R) messageResponse; }
/** Smoke test against a locally running Ollama server (model llama3); prints the reply. */
@Test
public void test01() {
    final OllamaLlmConfig config = new OllamaLlmConfig();
    config.setEndpoint("http://localhost:11434");
    config.setModel("llama3");
    config.setDebug(true);

    final Llm llm = new OllamaLlm(config);
    final String answer = llm.chat("who are your");
    System.out.println(answer);
}
/**
 * Handles the prepared statement when it is a Query; any other statement type is
 * reported as unhandled so the caller can route it elsewhere.
 */
@SuppressWarnings("unchecked")
public QueryMetadataHolder handleStatement(
    final ServiceContext serviceContext,
    final Map<String, Object> configOverrides,
    final Map<String, Object> requestProperties,
    final PreparedStatement<?> statement,
    final Optional<Boolean> isInternalRequest,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Context context,
    final boolean excludeTombstones
) {
  // Guard clause: only queries are handled here.
  if (!(statement.getStatement() instanceof Query)) {
    return QueryMetadataHolder.unhandled();
  }
  return handleQuery(
      serviceContext,
      (PreparedStatement<Query>) statement,
      isInternalRequest,
      metricsCallbackHolder,
      configOverrides,
      requestProperties,
      context,
      excludeTombstones
  );
}
// Handling a push query must log exactly one "Transient query created" entry with the query
// text; QueryLogger is static, so it is mocked with MockedStatic scoped to this test.
@Test public void queryLoggerShouldReceiveStatementsWhenHandlePushQuery() { when(ksqlEngine.executeTransientQuery(any(), any(), anyBoolean())) .thenReturn(transientQueryMetadata); try (MockedStatic<QueryLogger> logger = Mockito.mockStatic(QueryLogger.class)) { queryExecutor.handleStatement(serviceContext, ImmutableMap.of(), ImmutableMap.of(), pushQuery, Optional.empty(), metricsCallbackHolder, context, false); logger.verify(() -> QueryLogger.info("Transient query created", PUSH_QUERY_STRING), times(1)); } }
/**
 * Determines whether the given container-like object is empty. Supports Collection, Map,
 * Object[], Iterator, Enumeration, and (via reflection) primitive arrays.
 *
 * @param object the object to inspect; must not be null
 * @return true when the object contains no elements
 * @throws IllegalArgumentException if object is null or is not a supported container type
 */
public static boolean sizeIsEmpty(Object object) {
    if (object == null) {
        // Fail fast: null is rejected up front instead of being checked after all
        // instanceof branches (null fails instanceof anyway, but this reads clearer).
        throw new IllegalArgumentException("Unsupported object type: null");
    } else if (object instanceof Collection) {
        return ((Collection) object).isEmpty();
    } else if (object instanceof Map) {
        return ((Map) object).isEmpty();
    } else if (object instanceof Object[]) {
        return ((Object[]) object).length == 0;
    } else if (object instanceof Iterator) {
        return !((Iterator) object).hasNext();
    } else if (object instanceof Enumeration) {
        return !((Enumeration) object).hasMoreElements();
    } else {
        try {
            // Covers primitive arrays (int[], long[], ...) which Object[] cannot match.
            return Array.getLength(object) == 0;
        } catch (IllegalArgumentException ex) {
            // Preserve the original cause instead of dropping it.
            throw new IllegalArgumentException(
                "Unsupported object type: " + object.getClass().getName(), ex);
        }
    }
}
@Test void testSizeIsEmpty() { // collection assertTrue(CollectionUtils.sizeIsEmpty(Collections.emptyList())); assertFalse(CollectionUtils.sizeIsEmpty(Collections.singletonList(""))); // map Map<String, String> map = new HashMap<>(); map.put("key1", "value1"); map.put("key2", "value2"); assertTrue(CollectionUtils.sizeIsEmpty(Collections.emptyMap())); assertFalse(CollectionUtils.sizeIsEmpty(map)); // array assertTrue(CollectionUtils.sizeIsEmpty(new Object[] {})); assertFalse(CollectionUtils.sizeIsEmpty(new Object[] {"1", "2"})); // primitive array assertTrue(CollectionUtils.sizeIsEmpty(new int[] {})); assertFalse(CollectionUtils.sizeIsEmpty(new int[] {1, 2})); // iterator assertTrue(CollectionUtils.sizeIsEmpty(Collections.emptyIterator())); assertFalse(CollectionUtils.sizeIsEmpty(Arrays.asList("1", "2").iterator())); // enumeration assertTrue(CollectionUtils.sizeIsEmpty(asEnumeration(Collections.emptyIterator()))); assertFalse(CollectionUtils.sizeIsEmpty(asEnumeration(Collections.singleton("").iterator()))); }
/**
 * Looks up the database's mask rule configuration.
 *
 * @return the configuration derived from the single MaskRule if present,
 *         otherwise a fresh, empty configuration
 */
@Override
public MaskRuleConfiguration findRuleConfiguration(final ShardingSphereDatabase database) {
    return database.getRuleMetaData()
            .findSingleRule(MaskRule.class)
            .map(rule -> getConfiguration(rule.getConfiguration()))
            // No mask rule configured: fall back to an empty (mutable) configuration.
            .orElseGet(() -> new MaskRuleConfiguration(new LinkedList<>(), new LinkedHashMap<>()));
}
// When the database's MaskRule carries an empty configuration, findRuleConfiguration must
// surface a configuration with no mask algorithms.
@Test void assertFindRuleConfigurationWhenMaskAlgorithmDoesNotExist() { ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); when(database.getRuleMetaData().findSingleRule(MaskRule.class)).thenReturn(Optional.of(new MaskRule(new MaskRuleConfiguration(Collections.emptyList(), Collections.emptyMap())))); assertTrue(new MaskAlgorithmChangedProcessor().findRuleConfiguration(database).getMaskAlgorithms().isEmpty()); }
@Override public T deserialize(final String topic, final byte[] bytes) { try { if (bytes == null) { return null; } // don't use the JsonSchemaConverter to read this data because // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS, // which is not currently available in the standard converters final JsonNode value = isJsonSchema ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class) : MAPPER.readTree(bytes); final Object coerced = enforceFieldType( "$", new JsonValueContext(value, schema) ); if (LOG.isTraceEnabled()) { LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced); } return SerdeUtils.castToTargetType(coerced, targetType); } catch (final Exception e) { // Clear location in order to avoid logging data, for security reasons if (e instanceof JsonParseException) { ((JsonParseException) e).clearLocation(); } throw new SerializationException( "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e); } }
// An INT64 target must coerce integer, decimal and quoted-numeric JSON inputs to 42L
// (the decimal case exercises truncation during coercion).
@Test public void shouldDeserializedJsonNumberAsBigInt() { // Given: final KsqlJsonDeserializer<Long> deserializer = givenDeserializerForSchema(Schema.OPTIONAL_INT64_SCHEMA, Long.class); final List<String> validCoercions = ImmutableList.of( "42", "42.456", "\"42\"" ); validCoercions.forEach(value -> { final byte[] bytes = addMagic(value.getBytes(StandardCharsets.UTF_8)); // When: final Object result = deserializer.deserialize(SOME_TOPIC, bytes); // Then: assertThat(result, is(42L)); }); }
/**
 * Evaluates this predicate against the input map.
 *
 * @param values input field values keyed by name
 * @return the predicate result for the named field, or false when the field is absent
 */
@Override
public boolean evaluate(Map<String, Object> values) {
    // Absent parameter: the predicate cannot match.
    if (!values.containsKey(name)) {
        return false;
    }
    logger.debug("found matching parameter, evaluating... ");
    return evaluation(values.get(name));
}
// NOT_IN over a REAL set: an absent key and a value contained in the set must both evaluate
// to false; a value outside the set must evaluate to true.
@Test void evaluateRealNotIn() { ARRAY_TYPE arrayType = ARRAY_TYPE.REAL; List<Object> values = getObjects(arrayType, 4); KiePMMLSimpleSetPredicate kiePMMLSimpleSetPredicate = getKiePMMLSimpleSetPredicate(values, arrayType, IN_NOTIN.NOT_IN); Map<String, Object> inputData = new HashMap<>(); inputData.put("FAKE", "23.4"); assertThat(kiePMMLSimpleSetPredicate.evaluate(inputData)).isFalse(); inputData.put(SIMPLE_SET_PREDICATE_NAME, values.get(0)); assertThat(kiePMMLSimpleSetPredicate.evaluate(inputData)).isFalse(); inputData.put(SIMPLE_SET_PREDICATE_NAME, "4.32"); assertThat(kiePMMLSimpleSetPredicate.evaluate(inputData)).isTrue(); }
// Decorator: evaluates (and, per its name, presumably attaches) disrupt context for the
// request, then delegates to the wrapped client unchanged.
@Override public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext) { doEvaluateDisruptContext(request, requestContext); return _client.sendRequest(request, requestContext); }
// The disrupt-aware client must delegate exactly once to the underlying client and record
// both the disrupt context and its source in the request context's local attributes.
@Test public void testSendRequest9() { when(_builder.build()).thenReturn(_request); when(_controller.getDisruptContext(any(String.class), any(ResourceMethod.class))).thenReturn(_disrupt); _client.sendRequest(_builder, _context, _callback); verify(_underlying, times(1)).sendRequest(eq(_request), eq(_context), eq(_callback)); verify(_context, times(1)).putLocalAttr(eq(DISRUPT_CONTEXT_KEY), eq(_disrupt)); verify(_context, times(1)).putLocalAttr(eq(DISRUPT_SOURCE_KEY), any(String.class)); }
// Checks (at most once per day) whether a newer tool version is available.
// Flow: if the lastUpdateCheck marker file is fresh (< 1 day), skip; a corrupt/unreadable
// marker is deleted and the check proceeds. The version endpoint is queried with a 3s
// timeout, the marker is refreshed atomically (temp file + REPLACE_EXISTING move to survive
// concurrent checks), and the latest version is returned only when it differs from the
// current one. All IO failures are logged at debug and swallowed — update checking is
// strictly best-effort and must never break the build.
@VisibleForTesting static Optional<String> performUpdateCheck( Path configDir, String currentVersion, String versionUrl, String toolName, Consumer<LogEvent> log) { Path lastUpdateCheck = configDir.resolve(LAST_UPDATE_CHECK_FILENAME); try { // Check time of last update check if (Files.exists(lastUpdateCheck)) { try { String fileContents = new String(Files.readAllBytes(lastUpdateCheck), StandardCharsets.UTF_8); Instant modifiedTime = Instant.parse(fileContents); if (modifiedTime.plus(Duration.ofDays(1)).isAfter(Instant.now())) { return Optional.empty(); } } catch (DateTimeParseException | IOException ex) { // If reading update time failed, file might be corrupt, so delete it log.accept(LogEvent.debug("Failed to read lastUpdateCheck; " + ex.getMessage())); Files.delete(lastUpdateCheck); } } // Check for update FailoverHttpClient httpClient = new FailoverHttpClient(true, false, ignored -> {}); try { Response response = httpClient.get( new URL(versionUrl), Request.builder() .setHttpTimeout(3000) .setUserAgent("jib " + currentVersion + " " + toolName) .build()); VersionJsonTemplate version = JsonTemplateMapper.readJson(response.getBody(), VersionJsonTemplate.class); Path lastUpdateCheckTemp = Files.createTempFile(configDir, LAST_UPDATE_CHECK_FILENAME, null); lastUpdateCheckTemp.toFile().deleteOnExit(); Files.write(lastUpdateCheckTemp, Instant.now().toString().getBytes(StandardCharsets.UTF_8)); Files.move(lastUpdateCheckTemp, lastUpdateCheck, StandardCopyOption.REPLACE_EXISTING); if (currentVersion.equals(version.latest)) { return Optional.empty(); } return Optional.of(version.latest); } finally { httpClient.shutDown(); } } catch (IOException ex) { log.accept(LogEvent.debug("Update check failed; " + ex.getMessage())); } return Optional.empty(); }
// Already on the latest version: no message is returned, the expected User-Agent is sent,
// and the lastUpdateCheck marker is rewritten with a fresh timestamp (hence the pre-test
// sleep to guarantee a strictly later instant).
@Test public void testPerformUpdateCheck_onLatest() throws IOException, InterruptedException { Instant before = Instant.now(); Thread.sleep(100); setupLastUpdateCheck(); Optional<String> message = UpdateChecker.performUpdateCheck( configDir, "2.0.0", testWebServer.getEndpoint(), "tool-name", ignored -> {}); assertThat(message).isEmpty(); String modifiedTime = new String( Files.readAllBytes(configDir.resolve("lastUpdateCheck")), StandardCharsets.UTF_8); assertThat(testWebServer.getInputRead()).contains("User-Agent: jib 2.0.0 tool-name"); assertThat(Instant.parse(modifiedTime)).isGreaterThan(before); }
/**
 * Handles persistent-instance register/deregister requests.
 *
 * @throws NacosException with INVALID_PARAM when the request type is unsupported
 */
@Override
@TpsControl(pointName = "RemoteNamingInstanceRegisterDeregister", name = "RemoteNamingInstanceRegisterDeregister")
@Secured(action = ActionTypes.WRITE)
@ExtractorManager.Extractor(rpcExtractor = PersistentInstanceRequestParamExtractor.class)
public InstanceResponse handle(PersistentInstanceRequest request, RequestMeta meta) throws NacosException {
    // false -> persistent (non-ephemeral) service.
    final Service service = Service.newService(request.getNamespace(), request.getGroupName(),
            request.getServiceName(), false);
    InstanceUtil.setInstanceIdIfEmpty(request.getInstance(), service.getGroupedServiceName());
    final String requestType = request.getType();
    switch (requestType) {
        case NamingRemoteConstants.REGISTER_INSTANCE:
            return registerInstance(service, request, meta);
        case NamingRemoteConstants.DE_REGISTER_INSTANCE:
            return deregisterInstance(service, request, meta);
        default:
            throw new NacosException(NacosException.INVALID_PARAM,
                    String.format("Unsupported request type %s", requestType));
    }
}
/**
 * handle() must route REGISTER/DE_REGISTER to the client operation service and reject
 * unknown request types with INVALID_PARAM.
 */
@Test
void testHandle() throws NacosException {
    PersistentInstanceRequest instanceRequest = new PersistentInstanceRequest();
    instanceRequest.setType(NamingRemoteConstants.REGISTER_INSTANCE);
    instanceRequest.setServiceName("service1");
    instanceRequest.setGroupName("group1");
    Instance instance = new Instance();
    instanceRequest.setInstance(instance);
    RequestMeta requestMeta = new RequestMeta();
    persistentInstanceRequestHandler.handle(instanceRequest, requestMeta);
    Mockito.verify(clientOperationService).registerInstance(Mockito.any(), Mockito.any(), Mockito.anyString());
    instanceRequest.setType(NamingRemoteConstants.DE_REGISTER_INSTANCE);
    persistentInstanceRequestHandler.handle(instanceRequest, requestMeta);
    Mockito.verify(clientOperationService).deregisterInstance(Mockito.any(), Mockito.any(), Mockito.anyString());
    instanceRequest.setType("xxx");
    try {
        persistentInstanceRequestHandler.handle(instanceRequest, requestMeta);
        // Bug fix: the original version passed silently when no exception was thrown.
        throw new AssertionError("Expected NacosException for unsupported request type");
    } catch (NacosException e) {
        assertEquals(NacosException.INVALID_PARAM, e.getErrCode());
    }
}
// Resolves the configured checkpoint storage: absent config -> empty Optional (caller picks a
// default); "jobmanager"/"filesystem" -> built-in storages; any other value is treated as a
// fully-qualified CheckpointStorageFactory class name loaded dynamically, with load/instantiate
// failures converted to DynamicCodeLoadingException.
// NOTE(review): Class#newInstance() is deprecated; migrating to
// getDeclaredConstructor().newInstance() would change the thrown exception types
// (InvocationTargetException wrapping) and hence the catch list — left as-is deliberately.
public static Optional<CheckpointStorage> fromConfig( ReadableConfig config, ClassLoader classLoader, @Nullable Logger logger) throws IllegalStateException, DynamicCodeLoadingException { Preconditions.checkNotNull(config, "config"); Preconditions.checkNotNull(classLoader, "classLoader"); final String storageName = config.get(CheckpointingOptions.CHECKPOINT_STORAGE); if (storageName == null) { if (logger != null) { logger.debug( "The configuration {} has not be set in the current" + " sessions config.yaml. Falling back to a default CheckpointStorage" + " type. Users are strongly encouraged explicitly set this configuration" + " so they understand how their applications are checkpointing" + " snapshots for fault-tolerance.", CheckpointingOptions.CHECKPOINT_STORAGE.key()); } return Optional.empty(); } switch (storageName.toLowerCase()) { case JOB_MANAGER_STORAGE_NAME: return Optional.of(createJobManagerCheckpointStorage(config, classLoader, logger)); case FILE_SYSTEM_STORAGE_NAME: return Optional.of(createFileSystemCheckpointStorage(config, classLoader, logger)); default: if (logger != null) { logger.info("Loading state backend via factory '{}'", storageName); } CheckpointStorageFactory<?> factory; try { @SuppressWarnings("rawtypes") Class<? extends CheckpointStorageFactory> clazz = Class.forName(storageName, false, classLoader) .asSubclass(CheckpointStorageFactory.class); factory = clazz.newInstance(); } catch (ClassNotFoundException e) { throw new DynamicCodeLoadingException( "Cannot find configured state backend factory class: " + storageName, e); } catch (ClassCastException | InstantiationException | IllegalAccessException e) { throw new DynamicCodeLoadingException( "The class configured under '" + CheckpointingOptions.CHECKPOINT_STORAGE.key() + "' is not a valid checkpoint storage factory (" + storageName + ')', e); } return Optional.of(factory.createFromConfig(config, classLoader)); } }
// Configuring the storage by its plain string name must yield a
// JobManagerCheckpointStorage carrying the configured savepoint path.
@Test
void testLoadJobManagerStorageWithParameters() throws Exception {
    final String savepointDir = new Path(TempDirUtils.newFolder(tmp).toURI()).toString();
    final Path expectedSavepointPath = new Path(savepointDir);

    // we configure with the explicit string (rather than
    // AbstractStateBackend#X_STATE_BACKEND_NAME)
    // to guard against config-breaking changes of the name
    final Configuration config1 = new Configuration();
    config1.set(CheckpointingOptions.CHECKPOINT_STORAGE, "jobmanager");
    config1.set(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir);

    CheckpointStorage storage1 = CheckpointStorageLoader.fromConfig(config1, cl, null).get();

    assertThat(storage1).isInstanceOf(JobManagerCheckpointStorage.class);
    assertThat(((JobManagerCheckpointStorage) storage1).getSavepointPath())
            .isEqualTo(expectedSavepointPath);
}
// Bit stuffing: re-emits the input in wordSize-bit words, forcing the last
// bit of a word whenever the leading bits would otherwise be all ones or all
// zeros, and backing up one input bit so nothing is lost.
static BitArray stuffBits(BitArray bits, int wordSize) {
    BitArray out = new BitArray();
    int n = bits.getSize();
    // Every bit of a word set except the least-significant one.
    int mask = (1 << wordSize) - 2;
    for (int i = 0; i < n; i += wordSize) {
        int word = 0;
        for (int j = 0; j < wordSize; j++) {
            // Bits read past the end of the input are treated as set.
            if (i + j >= n || bits.get(i + j)) {
                word |= 1 << (wordSize - 1 - j);
            }
        }
        if ((word & mask) == mask) {
            // Leading bits all ones: emit with a forced trailing 0 and
            // re-consume the last input bit on the next iteration.
            out.appendBits(word & mask, wordSize);
            i--;
        } else if ((word & mask) == 0) {
            // Leading bits all zeros: emit with a forced trailing 1 and back up.
            out.appendBits(word | 1, wordSize);
            i--;
        } else {
            out.appendBits(word, wordSize);
        }
    }
    return out;
}
// Table-driven cases: input bit pattern vs. expected stuffed output for
// several word sizes, including all-ones and all-zeros words.
@Test
public void testStuffBits() {
    testStuffBits(5, ".X.X. X.X.X .X.X.", ".X.X. X.X.X .X.X.");
    testStuffBits(5, ".X.X. ..... .X.X", ".X.X. ....X ..X.X");
    testStuffBits(3, "XX. ... ... ..X XXX .X. ..", "XX. ..X ..X ..X ..X .XX XX. .X. ..X");
    testStuffBits(6, ".X.X.. ...... ..X.XX", ".X.X.. .....X. ..X.XX XXXX.");
    testStuffBits(6, ".X.X.. ...... ...... ..X.X.", ".X.X.. .....X .....X ....X. X.XXXX");
    testStuffBits(6, ".X.X.. XXXXXX ...... ..X.XX", ".X.X.. XXXXX. X..... ...X.X XXXXX.");
    testStuffBits(6,
        "...... ..XXXX X..XX. .X.... .X.X.X .....X .X.... ...X.X .....X ....XX ..X... ....X. X..XXX X.XX.X",
        ".....X ...XXX XX..XX ..X... ..X.X. X..... X.X... ....X. X..... X....X X..X.. .....X X.X..X XXX.XX .XXXXX");
}
/**
 * Parses a duration of the form {@code "<value> <unit>"}, e.g. {@code "10 minutes"}.
 *
 * @param text the textual duration; surrounding whitespace is ignored
 * @return the parsed {@link Duration}
 * @throws IllegalArgumentException if the text is not exactly two tokens or
 *         either token is invalid; the cause carries the underlying failure
 */
public static Duration parse(final String text) {
    try {
        // Trim and split on any run of whitespace so inputs such as
        // " 10  minutes " are accepted, not only a single-space separator.
        final String[] parts = text.trim().split("\\s+");
        if (parts.length != 2) {
            throw new IllegalArgumentException("Expected 2 tokens, got: " + parts.length);
        }

        final long size = parseNumeric(parts[0]);
        return buildDuration(size, parts[1]);
    } catch (final Exception e) {
        // Wrap every failure (including NPE on null input) with the offending text.
        throw new IllegalArgumentException("Invalid duration: '" + text + "'. " + e.getMessage(), e);
    }
}
// A plural unit with a multi-digit value parses to the expected Duration.
@Test
public void shouldParseMultiple() {
    assertThat(DurationParser.parse("10 minutes"), is(Duration.ofMinutes(10)));
}
/**
 * Creates init args for an aggregate function call using no initial argument
 * values and an empty {@code KsqlConfig}, delegating to the full overload.
 */
public static AggregateFunctionInitArguments createAggregateFunctionInitArgs(
    final int numInitArgs,
    final FunctionCall functionCall
) {
    return createAggregateFunctionInitArgs(
        numInitArgs,
        Collections.emptyList(),
        functionCall,
        KsqlConfig.empty()
    );
}
// A non-literal expression in an init-arg position must be rejected with a
// message naming the offending parameter.
@Test
public void shouldThrowIfSecondParamIsInitArgAndNotALiteral() {
    // Given:
    when(functionCall.getArguments()).thenReturn(ImmutableList.of(
        new UnqualifiedColumnReferenceExp(ColumnName.of("Bob")),
        new UnqualifiedColumnReferenceExp(ColumnName.of("Not good!")),
        new StringLiteral("No issue here")
    ));

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> UdafUtil.createAggregateFunctionInitArgs(
            Math.max(0, functionCall.getArguments().size() - 1),
            Collections.singletonList(0),
            functionCall,
            KsqlConfig.empty()
        )
    );

    // Then:
    assertThat(e.getMessage(), is("Parameter 2 passed to function AGG must be a literal constant, "
        + "but was expression: 'Not good!'"));
}
// Checks whether a partition with the given values exists for the Hive table.
@Override
public boolean partitionExists(Table table, List<String> partitionValues) {
    HiveTable hiveTable = (HiveTable) table;
    String dbName = hiveTable.getDbName();
    String tableName = hiveTable.getTableName();
    // NOTE(review): for Glue with a boolean partition column the lookup lists
    // all partition names and matches the rendered name locally rather than
    // filtering by value — presumably a Glue filtering limitation; confirm.
    if (metastoreType == MetastoreType.GLUE && hiveTable.hasBooleanTypePartitionColumn()) {
        List<String> allPartitionNames = client.getPartitionKeys(dbName, tableName);
        String hivePartitionName = toHivePartitionName(hiveTable.getPartitionColumnNames(), partitionValues);
        return allPartitionNames.contains(hivePartitionName);
    } else {
        return !client.getPartitionKeysByValue(dbName, tableName, partitionValues).isEmpty();
    }
}
// An empty partition-value list against the mocked HMS client reports the
// partition as existing.
@Test
public void testPartitionExists() {
    HiveMetaClient client = new MockedHiveMetaClient();
    HiveMetastore metastore = new HiveMetastore(client, "hive_catalog", MetastoreType.HMS);
    Assert.assertTrue(metastore.partitionExists(metastore.getTable("db1", "tbl1"), new ArrayList<>()));
}
/**
 * Registers a {@link Rule} under the given ref, or returns the already
 * registered one when the ref is known.
 *
 * @param ref unique reference the rule is stored under
 * @param ruleKey repository/key pair identifying the rule
 * @return the registered (possibly pre-existing) rule
 * @throws IllegalArgumentException if the ref is already bound to a different RuleKey
 */
@Override
public Rule register(String ref, RuleKey ruleKey) {
    // Validate both inputs up front; previously only ruleKey was checked.
    requireNonNull(ref, "ref can not be null");
    requireNonNull(ruleKey, "ruleKey can not be null");
    Rule rule = rulesByUuid.get(ref);
    if (rule != null) {
        // A ref must always resolve to the same repository/key pair.
        if (!ruleKey.repository().equals(rule.repository()) || !ruleKey.rule().equals(rule.key())) {
            throw new IllegalArgumentException(format(
                "Specified RuleKey '%s' is not equal to the one already registered in repository for ref %s: '%s'",
                ruleKey, ref, RuleKey.of(rule.repository(), rule.key())));
        }
        return rule;
    }

    rule = new Rule(ref, ruleKey.repository(), ruleKey.rule());
    rulesByUuid.put(ref, rule);
    return rule;
}
// Re-registering the same ref with a different repository must be rejected
// with an IllegalArgumentException naming both keys.
@Test
public void register_fails_IAE_if_RuleKey_is_not_the_same_repository_for_a_specific_ref() {
    underTest.register(SOME_UUID, RuleKey.of(SOME_REPOSITORY, SOME_RULE_KEY));

    assertThatThrownBy(() -> underTest.register(SOME_UUID, RuleKey.of("other repo", SOME_RULE_KEY)))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Specified RuleKey 'other repo:key' is not equal to the one already registered in repository for ref "
            + SOME_UUID + ": 'rep:key'");
}
/**
 * Closes the delegate resolver when (and only when) it is closeable;
 * non-closeable delegates make this a no-op.
 */
public void close() {
    final Object delegate = delegateResolver;
    if (!(delegate instanceof AutoCloseable)) {
        return;
    }
    CloseHelper.close((AutoCloseable) delegate);
}
// Closing a resolver whose delegate is not AutoCloseable must not touch the
// delegate, clock, or duty-cycle tracker at all.
@Test
void closeIsANoOpIfDelegateResolverIsNotCloseable() {
    final NameResolver delegateResolver = mock(NameResolver.class);
    final NanoClock clock = mock(NanoClock.class);
    final DutyCycleTracker maxTime = mock(DutyCycleTracker.class);
    final TimeTrackingNameResolver resolver = new TimeTrackingNameResolver(delegateResolver, clock, maxTime);

    resolver.close();

    verifyNoInteractions(delegateResolver, clock, maxTime);
}
// Encodes an outbound database packet and, at debug level, hex-dumps the
// bytes written to the client channel.
@Override
protected void encode(final ChannelHandlerContext context, final DatabasePacket message, final ByteBuf out) {
    databasePacketCodecEngine.encode(context, message, out);
    if (log.isDebugEnabled()) {
        log.debug("Write to client {} :\n{}", context.channel().id().asShortText(), ByteBufUtil.prettyHexDump(out));
    }
}
// Encoding must delegate straight to the underlying codec engine.
@Test
void assertEncode() {
    DatabasePacket databasePacket = mock(DatabasePacket.class);
    packetCodec.encode(context, databasePacket, byteBuf);
    verify(databasePacketCodecEngine).encode(context, databasePacket, byteBuf);
}
/**
 * Builds a security context for the incoming proxy event and remembers it
 * as the current context before returning it.
 */
@Override
public SecurityContext writeSecurityContext(AwsProxyRequest event, Context lambdaContext) {
    final AwsProxySecurityContext securityContext =
            new AwsProxySecurityContext(lambdaContext, event);
    currentContext = securityContext;
    return securityContext;
}
// A request without auth data yields a non-null context with no scheme and
// an insecure transport.
@Test
void write_noAuth_emptySecurityContext() {
    AwsProxyRequest request = new AwsProxyRequestBuilder("/test").build();
    SecurityContext context = writer.writeSecurityContext(request, null);

    assertNotNull(context);
    assertNull(context.getAuthenticationScheme());
    assertFalse(context.isSecure());
}
/**
 * Converts a Condition plus its evaluated status into the report
 * QualityGate.Condition representation.
 *
 * @throws IllegalStateException if no ConditionStatus was recorded for the condition
 */
@Override
public QualityGate.Condition apply(Condition input) {
    String metricKey = input.getMetric().getKey();
    ConditionStatus conditionStatus = statusPerConditions.get(input);
    checkState(conditionStatus != null, "Missing ConditionStatus for condition on metric key %s", metricKey);
    return builder
        .setStatus(convert(conditionStatus.getStatus()))
        .setMetricKey(metricKey)
        .setOperator(convert(input.getOperator()))
        .setErrorThreshold(input.getErrorThreshold())
        .setValue(conditionStatus.getValue())
        .build();
}
// The converted condition carries the status value; a NO_VALUE status makes
// getValue() throw.
@Test
public void apply_copies_value() {
    Condition otherCondition = new Condition(newMetric(METRIC_KEY), Condition.Operator.LESS_THAN.getDbValue(), ERROR_THRESHOLD);
    ConditionToCondition underTest = new ConditionToCondition(of(
        SOME_CONDITION, SOME_CONDITION_STATUS,
        otherCondition, ConditionStatus.NO_VALUE_STATUS));

    assertThat(underTest.apply(SOME_CONDITION).getValue()).isEqualTo(SOME_VALUE);

    QualityGate.Condition res = underTest.apply(otherCondition);
    assertThatThrownBy(res::getValue)
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("There is no value when status is NO_VALUE");
}
// Factory for the collect-list UDAF: aggregates a column's values into a list.
@UdafFactory(description = "collect values of a field into a single Array")
public static <T> TableUdaf<T, List<T>, List<T>> createCollectListT() {
    return new Collect<>();
}
// ByteBuffer values are collected in arrival order.
@Test
public void shouldCollectBytes() {
    final TableUdaf<ByteBuffer, List<ByteBuffer>, List<ByteBuffer>> udaf = CollectListUdaf.createCollectListT();
    final ByteBuffer[] values = new ByteBuffer[] {ByteBuffer.wrap(new byte[] {1}), ByteBuffer.wrap(new byte[] {2})};
    List<ByteBuffer> runningList = udaf.initialize();
    for (final ByteBuffer i : values) {
        runningList = udaf.aggregate(i, runningList);
    }
    assertThat(runningList, contains(ByteBuffer.wrap(new byte[] {1}), ByteBuffer.wrap(new byte[] {2})));
}
// Renders the schema as a SQL type string; when AS_COLUMN_LIST is set the
// top-level STRUCT wrapper is stripped so only the column list remains.
@Override
public String format(final Schema schema) {
    final String converted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema);
    return options.contains(Option.AS_COLUMN_LIST) ? stripTopLevelStruct(converted) : converted;
}
// INT64 maps to BIGINT; strict mode appends the NOT NULL qualifier.
@Test
public void shouldFormatBigint() {
    assertThat(DEFAULT.format(Schema.INT64_SCHEMA), is("BIGINT"));
    assertThat(STRICT.format(Schema.INT64_SCHEMA), is("BIGINT NOT NULL"));
}
/**
 * Resolves {@code path} against {@code root}: absolute paths are normalized
 * and mounted as-is, paths starting with the home marker are made relative
 * to root, and anything else is appended to root. Backslashes are treated as
 * path separators throughout; a result of "/" collapses back to root.
 */
public static Path compose(final Path root, final String path) {
    if(StringUtils.startsWith(path, String.valueOf(Path.DELIMITER))) {
        // Mount absolute path
        final String normalized = normalize(StringUtils.replace(path, "\\", String.valueOf(Path.DELIMITER)), true);
        if(StringUtils.equals(normalized, String.valueOf(Path.DELIMITER))) {
            return root;
        }
        return new Path(normalized, normalized.equals(String.valueOf(Path.DELIMITER)) ?
            EnumSet.of(Path.Type.volume, Path.Type.directory) : EnumSet.of(Path.Type.directory));
    }
    else {
        final String normalized;
        if(StringUtils.startsWith(path, String.format("%s%s", Path.HOME, Path.DELIMITER))) {
            // Relative path to the home directory
            normalized = normalize(StringUtils.removeStart(StringUtils.removeStart(
                StringUtils.replace(path, "\\", String.valueOf(Path.DELIMITER)), Path.HOME), String.valueOf(Path.DELIMITER)), false);
        }
        else {
            // Relative path
            normalized = normalize(StringUtils.replace(path, "\\", String.valueOf(Path.DELIMITER)), false);
        }
        if(StringUtils.equals(normalized, String.valueOf(Path.DELIMITER))) {
            return root;
        }
        // Append to root, avoiding a doubled delimiter when root is "/".
        return new Path(String.format("%s%s%s", root.getAbsolute(), root.isRoot() ? StringUtils.EMPTY : Path.DELIMITER, normalized),
            EnumSet.of(Path.Type.directory));
    }
}
// Delimiter-only and empty inputs collapse to the workdir itself; a single
// space is kept as a (valid) child name.
@Test
public void testComposeInvalidName() {
    final Path workdir = new Path("/workdir", EnumSet.of(Path.Type.directory));
    assertSame(workdir, PathNormalizer.compose(workdir, "/"));
    assertSame(workdir, PathNormalizer.compose(workdir, "//"));
    assertSame(workdir, PathNormalizer.compose(workdir, ""));
    assertEquals(new Path(workdir, " ", EnumSet.of(Path.Type.directory)), PathNormalizer.compose(workdir, " "));
}
// Exposes the type actually resolved by the superclass.
@Override
public String type() {
    return super.actualType;
}
// The singleton negotiator builder must report the expected type property key.
@Test
void testType() {
    String type = ClusterProtocolNegotiatorBuilderSingleton.getSingleton().type();
    assertNotNull(type);
    assertEquals(ClusterProtocolNegotiatorBuilderSingleton.TYPE_PROPERTY_KEY, type);
}
// Blocks until an exclusive lock on the underlying channel is acquired and
// stores it in flock. Calling lock() again while the lock is held results in
// FileChannel.lock throwing OverlappingFileLockException.
public synchronized void lock() throws IOException {
    LOGGER.trace("Acquiring lock on {}", file.getAbsolutePath());
    flock = channel.lock();
}
// A held lock blocks both re-locking from the same FileLock and locking from
// a second FileLock on the same file, until it is released.
@Test
void testLock() throws IOException {
    File tempFile = TestUtils.tempFile();
    FileLock lock1 = new FileLock(tempFile);
    try {
        lock1.lock();
        assertThrows(OverlappingFileLockException.class, lock1::lock);
        FileLock lock2 = new FileLock(tempFile);
        assertThrows(OverlappingFileLockException.class, lock2::lock);
        assertFalse(lock2.tryLock());
        lock1.unlock();
    } finally {
        lock1.destroy();
    }
}
// Delegates directly to the superclass implementation; overridden here
// (unchanged) so the subject exposes the nullable-parameter signature.
@Override
public void isNotEqualTo(@Nullable Object expected) {
    super.isNotEqualTo(expected);
}
// Arrays of different lengths are not equal, even when the shared prefix matches.
@Test
public void isNotEqualTo_WithoutToleranceParameter_Success_Longer() {
    assertThat(array(2.2f, 3.3f)).isNotEqualTo(array(2.2f, 3.3f, 4.4f));
}
/**
 * Builds the token-introspection response map for an access token.
 *
 * <p>Always reports {@code active=true}. Tokens carrying UMA permissions get a
 * {@code permissions} entry; otherwise the response scope is the intersection
 * of the token scopes and the authorized scopes. Expiry, subject, user id,
 * client id and token type are added when available.
 */
@Override
public Map<String, Object> assembleFrom(OAuth2AccessTokenEntity accessToken, UserInfo userInfo, Set<String> authScopes) {
    Map<String, Object> result = newLinkedHashMap();
    OAuth2Authentication authentication = accessToken.getAuthenticationHolder().getAuthentication();
    result.put(ACTIVE, true);

    if (accessToken.getPermissions() != null && !accessToken.getPermissions().isEmpty()) {
        // UMA-style token: expose resource-set permissions instead of a scope string.
        Set<Object> permissions = Sets.newHashSet();
        for (Permission perm : accessToken.getPermissions()) {
            Map<String, Object> o = newLinkedHashMap();
            o.put("resource_set_id", perm.getResourceSet().getId().toString());
            Set<String> scopes = Sets.newHashSet(perm.getScopes());
            o.put("scopes", scopes);
            permissions.add(o);
        }
        result.put("permissions", permissions);
    } else {
        // Only report scopes the caller is authorized to see.
        Set<String> scopes = Sets.intersection(authScopes, accessToken.getScope());
        result.put(SCOPE, Joiner.on(SCOPE_SEPARATOR).join(scopes));
    }

    if (accessToken.getExpiration() != null) {
        try {
            // Both a formatted timestamp and epoch seconds are emitted.
            result.put(EXPIRES_AT, dateFormat.valueToString(accessToken.getExpiration()));
            result.put(EXP, accessToken.getExpiration().getTime() / 1000L);
        } catch (ParseException e) {
            logger.error("Parse exception in token introspection", e);
        }
    }

    if (userInfo != null) {
        // if we have a UserInfo, use that for the subject
        result.put(SUB, userInfo.getSub());
    } else {
        // otherwise, use the authentication's username
        result.put(SUB, authentication.getName());
    }

    if(authentication.getUserAuthentication() != null) {
        result.put(USER_ID, authentication.getUserAuthentication().getName());
    }

    result.put(CLIENT_ID, authentication.getOAuth2Request().getClientId());
    result.put(TOKEN_TYPE, accessToken.getTokenType());
    return result;
}
// A refresh token with no expiry yields a response without exp fields; scope
// is the intersection of token scopes and authorized scopes.
@Test
public void shouldAssembleExpectedResultForRefreshTokenWithoutExpiry() {
    // given
    OAuth2RefreshTokenEntity refreshToken = refreshToken(null,
        oauth2AuthenticationWithUser(oauth2Request("clientId", scopes("foo", "bar")), "name"));
    UserInfo userInfo = userInfo("sub");
    Set<String> authScopes = scopes("foo", "bar", "baz");

    // when
    Map<String, Object> result = assembler.assembleFrom(refreshToken, userInfo, authScopes);

    // then
    Map<String, Object> expected = new ImmutableMap.Builder<String, Object>()
        .put("sub", "sub")
        .put("scope", "bar foo")
        .put("active", Boolean.TRUE)
        .put("user_id", "name")
        .put("client_id", "clientId")
        .build();
    assertThat(result, is(equalTo(expected)));
}
// Serializes the attributes as a single JSON object on the generator.
public void write(Attributes attrs) {
    gen.writeStartObject();
    writeAttributes(attrs);
    gen.writeEnd();
}
// Non-finite FD/FL values must serialize to the expected JSON representation.
@Test
public void testInfinityAndNaN() {
    Attributes dataset = new Attributes();
    dataset.setDouble(Tag.SelectorFDValue, VR.FD, Double.NEGATIVE_INFINITY, Double.NaN, Double.POSITIVE_INFINITY);
    dataset.setFloat(Tag.SelectorFLValue, VR.FL, Float.NEGATIVE_INFINITY, Float.NaN, Float.POSITIVE_INFINITY);
    StringWriter writer = new StringWriter();
    JsonGenerator gen = Json.createGenerator(writer);
    new JSONWriter(gen).write(dataset);
    gen.flush();
    assertEquals(INFINITY_AND_NAN, writer.toString());
}
/**
 * Seeks every given partition to the requested offset, after clamping it
 * into the partition's valid [beginning, end] range.
 */
public void resetOffsetsTo(final Consumer<byte[], byte[]> client,
                           final Set<TopicPartition> inputTopicPartitions,
                           final Long offset) {
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);

    // Request the same target offset for every partition.
    final Map<TopicPartition, Long> requestedOffsets = new HashMap<>(inputTopicPartitions.size());
    inputTopicPartitions.forEach(partition -> requestedOffsets.put(partition, offset));

    // Clamp each requested offset into the partition's valid range.
    final Map<TopicPartition, Long> validatedOffsets =
        checkOffsetRange(requestedOffsets, beginningOffsets, endOffsets);

    inputTopicPartitions.forEach(partition -> client.seek(partition, validatedOffsets.get(partition)));
}
// Resetting to offset 2 in a [0, 4) partition leaves 3 records to consume.
@Test
public void testResetToSpecificOffsetWhenBetweenBeginningAndEndOffset() {
    final Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(topicPartition, 4L);
    consumer.updateEndOffsets(endOffsets);
    final Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(topicPartition, 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    streamsResetter.resetOffsetsTo(consumer, inputTopicPartitions, 2L);

    final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
    assertEquals(3, records.count());
}
// Builds the combine operator inside a tracing scope so plan construction
// is attributed to CombinePlanNode.
@Override
public BaseCombineOperator run() {
    try (InvocationScope ignored = Tracing.getTracer().createScope(CombinePlanNode.class)) {
        return getCombineOperator();
    }
}
// Cancelling the future running CombinePlanNode.run() while one sub plan node
// blocks must surface as a QueryCancelledException.
@Test
public void testCancelPlanNode() {
    CountDownLatch ready = new CountDownLatch(20);
    List<PlanNode> planNodes = new ArrayList<>();
    for (int i = 0; i < 20; i++) {
        planNodes.add(() -> {
            ready.countDown();
            return null;
        });
    }
    // This planNode will keep the planning running and wait to be cancelled.
    CountDownLatch hold = new CountDownLatch(1);
    planNodes.add(() -> {
        try {
            hold.await();
            return null;
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    });
    _queryContext.setEndTimeMs(System.currentTimeMillis() + Server.DEFAULT_QUERY_EXECUTOR_TIMEOUT_MS);
    CombinePlanNode combinePlanNode = new CombinePlanNode(planNodes, _queryContext, _executorService, null);
    AtomicReference<Exception> exp = new AtomicReference<>();
    // Avoid early finalization by not using Executors.newSingleThreadExecutor (java <= 20, JDK-8145304)
    ExecutorService combineExecutor = Executors.newFixedThreadPool(1);
    try {
        Future<?> future = combineExecutor.submit(() -> {
            try {
                return combinePlanNode.run();
            } catch (Exception e) {
                exp.set(e);
                throw e;
            }
        });
        ready.await();
        // At this point, the combinePlanNode is or will be waiting on future.get() for all sub planNodes, and the
        // waiting can be cancelled as below.
        future.cancel(true);
    } catch (Exception e) {
        Assert.fail();
    } finally {
        combineExecutor.shutdownNow();
    }
    TestUtils.waitForCondition((aVoid) -> exp.get() instanceof QueryCancelledException, 10_000,
        "Should have been cancelled");
}
// Reads per-data-type counts for a job from Datastore: the entity key name is
// the data type and COUNTS_FIELD holds the count (narrowed from long to int).
@Override
public Map<String, Integer> getCounts(UUID jobId) {
    Query<Entity> query = getCountsQuery(jobId);
    QueryResults<Entity> results = datastore.run(query);
    ImmutableMap.Builder<String, Integer> countsMapBuilder = ImmutableMap.builder();
    while (results.hasNext()) {
        Entity result = results.next();
        String dataType = result.getKey().getName();
        long count = result.getLong(COUNTS_FIELD);
        countsMapBuilder.put(dataType, (int) count);
    }
    return countsMapBuilder.build();
}
// Adding the same item twice accumulates its count rather than creating a
// second entry.
@Test
public void canAddExistingKeysToCurrentCountsTest() throws IOException {
    addItemToJobStoreCounts(ITEM_NAME);
    addItemToJobStoreCounts(ITEM_NAME);
    final Map<String, Integer> counts = googleJobStore.getCounts(JOB_ID);
    Truth.assertThat(counts.size()).isEqualTo(1);
    Truth.assertThat(counts.get(ITEM_NAME)).isEqualTo(2);
}
// Returns the configured name.
@Override
public String getName() {
    return name;
}
// Every setter of the read-only config wrapper must reject mutation with
// UnsupportedOperationException (surfaced via reflective invocation).
@Test
public void testReadOnly() throws Exception {
    Config config = new Config();
    DurableExecutorConfig durableExecutorConfig = config.getDurableExecutorConfig(randomString());
    DurableExecutorConfig readOnly = new DurableExecutorConfigReadOnly(durableExecutorConfig);

    Method[] methods = DurableExecutorConfig.class.getMethods();
    for (Method method : methods) {
        if (method.getName().startsWith("set")) {
            try {
                Object param = newParameter(method);
                method.invoke(readOnly, param);
                fail();
            } catch (InvocationTargetException e) {
                // Reflection wraps the UnsupportedOperationException.
                assertTrue(e.getCause() instanceof UnsupportedOperationException);
            }
        }
    }
}
/**
 * Binds the property under the given key as a typed map.
 *
 * @return the bound map, or empty when no binding exists for the key
 */
@Override
public <K, V> Optional<Map<K, V>> getMapProperty(String key, Class<K> keyType, Class<V> valueType) {
    // Translate the public key to its bound name, then attempt a typed map binding.
    var boundResult = binder.bind(targetPropertyName(key), Bindable.mapOf(keyType, valueType));
    if (!boundResult.isBound()) {
        return Optional.empty();
    }
    return Optional.of(boundResult.get());
}
// Map-shaped properties resolve with both String and numeric key/value types.
@Test
void resolvesMapProperties() {
    env.setProperty("prop.0.strMap.k1", "v1");
    env.setProperty("prop.0.strMap.k2", "v2");
    env.setProperty("prop.0.intToLongMap.100", "111");
    env.setProperty("prop.0.intToLongMap.200", "222");
    var resolver = new PropertyResolverImpl(env);
    assertThat(resolver.getMapProperty("prop.0.strMap", String.class, String.class))
        .hasValue(Map.of("k1", "v1", "k2", "v2"));
    assertThat(resolver.getMapProperty("prop.0.intToLongMap", Integer.class, Long.class))
        .hasValue(Map.of(100, 111L, 200, 222L));
}
// Resolves discovery nodes from the Kubernetes API, preferring service name,
// then service label, then pod label; with no filter configured, all
// endpoints are used.
@Override
List<DiscoveryNode> resolveNodes() {
    if (serviceName != null && !serviceName.isEmpty()) {
        logger.fine("Using service name to discover nodes.");
        return getSimpleDiscoveryNodes(client.endpointsByName(serviceName));
    } else if (serviceLabel != null && !serviceLabel.isEmpty()) {
        logger.fine("Using service label to discover nodes.");
        return getSimpleDiscoveryNodes(client.endpointsByServiceLabel(serviceLabel, serviceLabelValue));
    } else if (podLabel != null && !podLabel.isEmpty()) {
        logger.fine("Using pod label to discover nodes.");
        return getSimpleDiscoveryNodes(client.endpointsByPodLabel(podLabel, podLabelValue));
    }
    return getSimpleDiscoveryNodes(client.endpoints());
}
// With resolve-not-ready enabled, not-ready endpoints still produce nodes.
@Test
public void resolveWithServiceNameWhenNotReadyAddressesAndNotReadyEnabled() {
    // given
    List<Endpoint> endpoints = createNotReadyEndpoints(2);
    given(client.endpointsByName(SERVICE_NAME)).willReturn(endpoints);

    KubernetesApiEndpointResolver sut = new KubernetesApiEndpointResolver(LOGGER, SERVICE_NAME, 0,
        null, null, null, null, RESOLVE_NOT_READY_ADDRESSES, client);

    // when
    List<DiscoveryNode> nodes = sut.resolveNodes();

    // then
    assertEquals(1, nodes.size());
}
/**
 * Returns whether the two existing files reside on the same file store.
 *
 * <p>Implemented via reflection on java.nio.file so this class still loads on
 * pre-Java-7 runtimes — presumably for backward compatibility; callers gate
 * on the JDK version (see the accompanying test).
 *
 * @throws IllegalArgumentException if either file does not exist
 * @throws RolloverFailure if the reflective file-store lookup fails
 */
static public boolean areOnSameFileStore(File a, File b) throws RolloverFailure {
    if (!a.exists()) {
        throw new IllegalArgumentException("File [" + a + "] does not exist.");
    }
    if (!b.exists()) {
        throw new IllegalArgumentException("File [" + b + "] does not exist.");
    }

    // Implements the following by reflection
    // Path pathA = a.toPath();
    // Path pathB = b.toPath();
    //
    // FileStore fileStoreA = Files.getFileStore(pathA);
    // FileStore fileStoreB = Files.getFileStore(pathB);
    //
    // return fileStoreA.equals(fileStoreB);

    try {
        Class<?> pathClass = Class.forName(PATH_CLASS_STR);
        Class<?> filesClass = Class.forName(FILES_CLASS_STR);

        Method toPath = File.class.getMethod("toPath");
        Method getFileStoreMethod = filesClass.getMethod("getFileStore", pathClass);

        Object pathA = toPath.invoke(a);
        Object pathB = toPath.invoke(b);

        Object fileStoreA = getFileStoreMethod.invoke(null, pathA);
        Object fileStoreB = getFileStoreMethod.invoke(null, pathB);
        return fileStoreA.equals(fileStoreB);
    } catch (Exception e) {
        throw new RolloverFailure("Failed to check file store equality for [" + a + "] and [" + b + "]", e);
    }
}
// A file and its parent directory share a file store (skipped below JDK 7).
@Test
public void filesOnSameFolderShouldBeOnTheSameFileStore() throws RolloverFailure, IOException {
    if(!EnvUtil.isJDK7OrHigher())
        return;
    File parent = new File(pathPrefix);
    File file = new File(pathPrefix+"filesOnSameFolderShouldBeOnTheSameFileStore");
    FileUtil.createMissingParentDirectories(file);
    file.createNewFile();
    assertTrue(FileStoreUtil.areOnSameFileStore(parent, file));
}
// Display name of this CI integration.
@Override
public String getName() {
    return "Bitbucket Pipelines";
}
// The CI integration must report its expected display name.
@Test
public void getName() {
    assertThat(underTest.getName()).isEqualTo("Bitbucket Pipelines");
}
/**
 * Removes every single and double quote from the string; null/empty inputs
 * are returned unchanged.
 */
public static String removeQuotes(final String s) {
    if (ObjectHelper.isEmpty(s)) {
        return s;
    }
    // One regex pass drops both quote characters wherever they appear.
    return s.replaceAll("['\"]", "");
}
// Quotes are stripped wherever they occur — paired, leading, trailing, mixed —
// and null/empty/whitespace inputs pass through unchanged.
@Test
public void testRemoveQuotes() {
    assertEquals("Hello World", StringHelper.removeQuotes("Hello World"));
    assertEquals("", StringHelper.removeQuotes(""));
    assertNull(StringHelper.removeQuotes(null));
    assertEquals(" ", StringHelper.removeQuotes(" "));
    assertEquals("foo", StringHelper.removeQuotes("'foo'"));
    assertEquals("foo", StringHelper.removeQuotes("'foo"));
    assertEquals("foo", StringHelper.removeQuotes("foo'"));
    assertEquals("foo", StringHelper.removeQuotes("\"foo\""));
    assertEquals("foo", StringHelper.removeQuotes("\"foo"));
    assertEquals("foo", StringHelper.removeQuotes("foo\""));
    assertEquals("foo", StringHelper.removeQuotes("'foo\""));
}
// Empties the queue and resets the memory accounting kept by the limiter.
@Override
public void clear() {
    super.clear();
    memoryLimiter.reset();
}
// Clearing the queue must release all tracked memory.
@Test
public void testClear() {
    MemoryLimitedLinkedBlockingQueue<Runnable> queue = new MemoryLimitedLinkedBlockingQueue<>(instrumentation);
    queue.offer(() -> {
    });
    queue.clear();
    assertEquals(0, queue.getCurrentMemory());
}
/**
 * Unwraps to the requested type: the underlying native cache is preferred,
 * then this proxy itself; anything else is unsupported.
 *
 * @throws IllegalArgumentException if neither the cache nor this proxy is assignable
 */
@Override
public <T> T unwrap(Class<T> clazz) {
    if (clazz.isInstance(cache)) {
        return clazz.cast(cache);
    }
    if (clazz.isInstance(this)) {
        return clazz.cast(this);
    }
    throw new IllegalArgumentException("Unwrapping to " + clazz
        + " is not supported by this implementation");
}
// unwrap must return the proxy for its own interfaces and the native cache
// for the Caffeine cache type.
@Test
public void unwrap() {
    assertThat(jcache.unwrap(Cache.class)).isSameInstanceAs(jcache);
    assertThat(jcache.unwrap(CacheProxy.class)).isSameInstanceAs(jcache);
    assertThat(jcache.unwrap(com.github.benmanes.caffeine.cache.Cache.class))
        .isSameInstanceAs(jcache.cache);
}
/**
 * Returns the subset of catalogs the identity is allowed to read.
 */
@Override
public Set<String> filterCatalogs(Identity identity, AccessControlContext context, Set<String> catalogs) {
    final ImmutableSet.Builder<String> accessible = ImmutableSet.builder();
    catalogs.stream()
            .filter(catalog -> canAccessCatalog(identity, catalog, READ_ONLY))
            .forEach(accessible::add);
    return accessible.build();
}
// Catalog filtering honors per-user rules from catalog.json, including a
// non-ASCII user with a non-ASCII catalog name.
@Test
public void testCatalogOperations() throws IOException {
    TransactionManager transactionManager = createTestTransactionManager();
    AccessControlManager accessControlManager = newAccessControlManager(transactionManager, "catalog.json");

    transaction(transactionManager, accessControlManager)
        .execute(transactionId -> {
            assertEquals(accessControlManager.filterCatalogs(admin, context, allCatalogs), allCatalogs);
            Set<String> aliceCatalogs = ImmutableSet.of("open-to-all", "alice-catalog", "all-allowed");
            assertEquals(accessControlManager.filterCatalogs(alice, context, allCatalogs), aliceCatalogs);
            Set<String> bobCatalogs = ImmutableSet.of("open-to-all", "all-allowed");
            assertEquals(accessControlManager.filterCatalogs(bob, context, allCatalogs), bobCatalogs);
            Set<String> nonAsciiUserCatalogs = ImmutableSet.of("open-to-all", "all-allowed", "\u0200\u0200\u0200");
            assertEquals(accessControlManager.filterCatalogs(nonAsciiUser, context, allCatalogs), nonAsciiUserCatalogs);
        });
}
/**
 * Resolves a JID to its Group.
 *
 * @return the group, or {@code null} when the JID is not a group JID
 * @throws GroupNotFoundException if the JID names a group that does not exist
 */
public Group getGroup(JID jid) throws GroupNotFoundException {
    JID groupJID = GroupJID.fromJID(jid);
    return (groupJID instanceof GroupJID) ? getGroup(((GroupJID)groupJID).getGroupName()) : null;
}
// A group-not-found miss from the provider must be negatively cached as
// CacheableOptional.of(null).
@Test
public void willCacheAMissIfNotAlreadyCached() throws Exception {
    doThrow(new GroupNotFoundException()).when(groupProvider).getGroup(GROUP_NAME);

    try {
        groupManager.getGroup(GROUP_NAME, false);
    } catch (final GroupNotFoundException ignored) {
        verify(groupProvider).getGroup(GROUP_NAME);
        assertThat(groupCache.get(GROUP_NAME), is(CacheableOptional.of(null)));
        return;
    }
    fail();
}
// Returns the shared coder instance.
static Coder<Message> of() {
    return INSTANCE;
}
// An encode/decode round trip through SqsMessageCoder must preserve the
// message, including attributes and message attributes.
@Test
public void testMessageDecodeEncodeEquals() throws Exception {
    Message message = new Message()
        .withMessageId("messageId")
        .withReceiptHandle("receiptHandle")
        .withBody("body")
        .withAttributes(
            ImmutableMap.of(SentTimestamp.name(), Long.toString(new Random().nextLong())))
        .withMessageAttributes(
            ImmutableMap.of(
                REQUEST_TIME,
                new MessageAttributeValue()
                    .withStringValue(Long.toString(new Random().nextLong()))));

    Message clone = CoderUtils.clone(SqsMessageCoder.of(), message);
    assertThat(clone).isEqualTo(message);
}
@Override public void put(String key, String value) { // Assume any header property that begins with 'Camel' is for internal use if (!key.startsWith("Camel")) { this.map.put(encodeDash(key), value); } }
// Dashes in header keys are replaced with the JMS dash-encoding marker.
@Test
public void propertyWithDash() {
    CamelMessagingHeadersInjectAdapter adapter = new CamelMessagingHeadersInjectAdapter(map, true);
    adapter.put("-key-1-", "value1");
    assertEquals("value1", map.get(JMS_DASH + "key" + JMS_DASH + "1" + JMS_DASH));
}
// Renders a human-readable summary of the execution-step tree, starting with
// no indentation prefix.
public String summarize(final ExecutionStep<?> step) {
    return summarize(step, "").summary;
}
// A join over two sources summarizes as a JOIN line with one indented line
// per source, each carrying its schema and logger id.
@Test
public void shouldSummarizePlanWithMultipleSources() {
    // Given:
    final LogicalSchema sourceSchema2 = LogicalSchema.builder()
        .keyColumn(SystemColumns.ROWKEY_NAME, SqlTypes.STRING)
        .valueColumn(ColumnName.of("L0_2"), SqlTypes.STRING)
        .build();
    final LogicalSchema schema = LogicalSchema.builder()
        .keyColumn(SystemColumns.ROWKEY_NAME, SqlTypes.STRING)
        .valueColumn(ColumnName.of("L1"), SqlTypes.STRING)
        .build();
    final ExecutionStep<?> sourceStep2 = givenStep(StreamSource.class, "src2", sourceSchema2);
    final ExecutionStep<?> step = givenStep(StreamStreamJoin.class, "child", schema, sourceStep, sourceStep2);

    // When:
    final String summary = planSummaryBuilder.summarize(step);

    // Then:
    assertThat(summary, is(
        " > [ JOIN ] | Schema: ROWKEY STRING KEY, L1 STRING | Logger: QID.child"
            + "\n\t\t > [ SOURCE ] | Schema: ROWKEY STRING KEY, L0 INTEGER | Logger: QID.src"
            + "\n\t\t > [ SOURCE ] | Schema: ROWKEY STRING KEY, L0_2 STRING | Logger: QID.src2\n"
    ));
}
// Destroys without a destroy hook.
@Override
public void destroy() {
    destroy(null);
}
// A destroy hook's pre and post callbacks must run exactly once, in order.
@Test
public void testDestroyWithDestroyHook() {
    List<String> hookActionResult = new ArrayList<>(2);
    Destroyable.DestroyHook destroyHook = new Destroyable.DestroyHook() {
        @Override
        public void preDestroy() {
            hookActionResult.add("preDestroy");
        }

        @Override
        public void postDestroy() {
            hookActionResult.add("postDestroy");
        }
    };
    abstractCluster.destroy(destroyHook);
    Assert.assertEquals(2, hookActionResult.size());
    Assert.assertEquals("preDestroy", hookActionResult.get(0));
    Assert.assertEquals("postDestroy", hookActionResult.get(1));
}
/**
 * Converts {@code value} to {@code type} quietly, delegating with a
 * {@code null} default returned when conversion is not possible.
 */
public static <T> T convertQuietly(Type type, Object value) {
    return convertQuietly(type, value, null);
}
// Converting a list to Integer is impossible, so the supplied default (-1)
// must be returned instead of throwing.
@Test
public void toIntTest2() {
    final ArrayList<String> array = new ArrayList<>();
    final Integer aInt = Convert.convertQuietly(Integer.class, array, -1);
    assertEquals(Integer.valueOf(-1), aInt);
}
// Adapts a videos container to a media container: video albums are converted
// to media albums, photos are absent, and videos are carried over unchanged.
public static MediaContainerResource videoToMedia(VideosContainerResource videosContainer) {
    return new MediaContainerResource(
        videosContainer
            .getAlbums()
            .stream()
            .map(MediaAlbum::videoToMediaAlbum)
            .collect(Collectors.toList()),
        null /*photos*/,
        videosContainer.getVideos());
}
// Converting a videos container yields equivalent media albums, null photos,
// and the same video list.
@Test
public void verifyVideoToMediaContainer() {
    List<MediaAlbum> mediaAlbums = ImmutableList.of(new MediaAlbum("id1", "albumb1", "This:a fake album!"));
    List<VideoAlbum> videoAlbums = ImmutableList.of(new VideoAlbum("id1", "albumb1", "This:a fake album!"));
    List<VideoModel> videos = ImmutableList.of(
        new VideoModel(
            "Vid1", "http://fake.com/1.mp4", "A vid", "mediatype", "p1", "id1", false, null),
        new VideoModel(
            "Vid3", "http://fake.com/2.mp4", "A vid", "mediatype", "p3", "id1", false, null));

    VideosContainerResource data = new VideosContainerResource(videoAlbums, videos);
    MediaContainerResource expected = new MediaContainerResource(mediaAlbums, null, videos);
    MediaContainerResource actual = MediaContainerResource.videoToMedia(data);
    assertEquals(expected, actual);
}
/**
 * Derives a safe file name from an arbitrary string: accents are stripped,
 * characters outside the allowed set are dropped, and runs of whitespace are
 * collapsed. An empty result falls back to a random name; an overlong result
 * is truncated and suffixed with the MD5 of the full name for uniqueness.
 */
public static String generateFileName(String string) {
    string = StringUtils.stripAccents(string);
    StringBuilder buf = new StringBuilder();
    for (int i = 0; i < string.length(); i++) {
        char c = string.charAt(i);
        // Skip leading whitespace and collapse consecutive whitespace.
        if (Character.isSpaceChar(c)
                && (buf.length() == 0 || Character.isSpaceChar(buf.charAt(buf.length() - 1)))) {
            continue;
        }
        if (ArrayUtils.contains(validChars, c)) {
            buf.append(c);
        }
    }
    String filename = buf.toString().trim();
    if (TextUtils.isEmpty(filename)) {
        return randomString(8);
    } else if (filename.length() >= MAX_FILENAME_LENGTH) {
        // Keep names unique after truncation by appending a digest of the full name.
        return filename.substring(0, MAX_FILENAME_LENGTH - MD5_HEX_LENGTH - 1) + "_" + md5(filename);
    } else {
        return filename;
    }
}
// Over-length names that differ only in their (truncated) suffix must still
// produce distinct file names.
@Test
public void testLongFilenameNotEquals() {
    // Verify that the name is not just trimmed and different suffixes end up with the same name
    String longName = StringUtils.repeat("x", 20 + FileNameGenerator.MAX_FILENAME_LENGTH);
    String result1 = FileNameGenerator.generateFileName(longName + "a");
    String result2 = FileNameGenerator.generateFileName(longName + "b");
    assertNotEquals(result1, result2);
}
// Recursively registers with the serializer config every class reachable from
// the given TypeInformation: generic types directly, composite types via their
// contained generic fields, and object arrays via their component type.
public static void recursivelyRegisterType(
        TypeInformation<?> typeInfo, SerializerConfig config, Set<Class<?>> alreadySeen) {
    if (typeInfo instanceof GenericTypeInfo) {
        GenericTypeInfo<?> genericTypeInfo = (GenericTypeInfo<?>) typeInfo;
        Serializers.recursivelyRegisterType(
                genericTypeInfo.getTypeClass(), config, alreadySeen);
    } else if (typeInfo instanceof CompositeType) {
        // Collect the generic types nested anywhere inside the composite,
        // then register each of their classes.
        List<GenericTypeInfo<?>> genericTypesInComposite = new ArrayList<>();
        getContainedGenericTypes((CompositeType<?>) typeInfo, genericTypesInComposite);

        for (GenericTypeInfo<?> gt : genericTypesInComposite) {
            Serializers.recursivelyRegisterType(gt.getTypeClass(), config, alreadySeen);
        }
    } else if (typeInfo instanceof ObjectArrayTypeInfo) {
        ObjectArrayTypeInfo<?, ?> objectArrayTypeInfo = (ObjectArrayTypeInfo<?, ?>) typeInfo;
        recursivelyRegisterType(objectArrayTypeInfo.getComponentInfo(), config, alreadySeen);
    }
}
// Verifies that recursively registering ClassWithNested registers nested, generic, and field
// types with Kryo, and that registering again leaves prior registrations intact.
@Test void testTypeRegistration() {
    SerializerConfigImpl conf = new SerializerConfigImpl();
    Serializers.recursivelyRegisterType(ClassWithNested.class, conf, new HashSet<Class<?>>());
    KryoSerializer<String> kryo = new KryoSerializer<>(String.class, conf); // we create Kryo from another type.
    assertThat(kryo.getKryo().getRegistration(FromNested.class).getId()).isPositive();
    assertThat(kryo.getKryo().getRegistration(ClassWithNested.class).getId()).isPositive();
    assertThat(kryo.getKryo().getRegistration(Path.class).getId()).isPositive();
    // check if the generic type from one field is also registered (its very likely that
    // generic types are also used as fields somewhere.
    assertThat(kryo.getKryo().getRegistration(FromGeneric1.class).getId()).isPositive();
    assertThat(kryo.getKryo().getRegistration(FromGeneric2.class).getId()).isPositive();
    assertThat(kryo.getKryo().getRegistration(Node.class).getId()).isPositive();
    // register again and make sure classes are still registered
    SerializerConfigImpl conf2 = new SerializerConfigImpl();
    Serializers.recursivelyRegisterType(ClassWithNested.class, conf2, new HashSet<Class<?>>());
    KryoSerializer<String> kryo2 = new KryoSerializer<>(String.class, conf);
    assertThat(kryo2.getKryo().getRegistration(FromNested.class).getId()).isPositive();
}
/**
 * Classifies a throwable as a USER error when it represents a missing-permissions
 * failure (an AuthorizationException, possibly wrapped in a StreamsException),
 * and as UNKNOWN otherwise.
 */
@Override
public Type classify(final Throwable e) {
    final boolean authorizationFailure = e instanceof AuthorizationException
        || (e instanceof StreamsException && e.getCause() instanceof AuthorizationException);
    final Type type = authorizationFailure ? Type.USER : Type.UNKNOWN;
    if (type == Type.USER) {
        // Surface the classification so operators can correlate it with the query.
        LOG.info(
            "Classified error as USER error based on missing access rights."
                + " Query ID: {} Exception: {}",
            queryId,
            e);
    }
    return type;
}
// Verifies that a plain Exception (no authorization cause) is classified as UNKNOWN rather than USER.
@Test public void shouldClassifyNoTransactionalIdAuthorizationExceptionAsUnknownError() { // Given: final Exception e = new Exception("foo"); // When: final Type type = new AuthorizationClassifier("").classify(e); // Then: assertThat(type, is(Type.UNKNOWN)); }
/**
 * Renders this RestValue for logging: the content type (or a placeholder when unset),
 * plus either the textual payload verbatim or just its byte length for non-text content.
 */
@Override
public String toString() {
    final String contentTypeStr = contentType == null
        ? "unknown-content-type"
        : new String(contentType, StandardCharsets.UTF_8);
    final String valueStr;
    if (value == null) {
        valueStr = "value.length=0";
    } else if (contentTypeStr.contains("text")) {
        // Text payloads are shown verbatim so logs are actually readable.
        valueStr = "value=\"" + new String(value, StandardCharsets.UTF_8) + "\"";
    } else {
        // Binary payloads are summarized by length only.
        valueStr = "value.length=" + value.length;
    }
    return "RestValue{" + "contentType='" + contentTypeStr + "', " + valueStr + '}';
}
// Verifies toString's placeholders for an unset content type and a null value.
@Test public void testToString() { assertContains(restValue.toString(), "unknown-content-type"); assertContains(restValue.toString(), "value.length=0"); }
/**
 * Repeatedly attempts to create an exclusive, non-batching producer on the given topic,
 * sleeping between attempts, for as long as this node remains the leader.
 *
 * @param client            the Pulsar client used to build the producer
 * @param topic             topic to attach the producer to
 * @param producerName      name assigned to the producer
 * @param isLeader          leadership check; retries stop once it returns false
 * @param sleepInBetweenMs  pause between attempts, in milliseconds
 * @return the created producer
 * @throws NotLeaderAnymore if leadership is lost before a producer could be created
 * @throws RuntimeException if the retry loop is interrupted (interrupt status is restored)
 */
public static Producer<byte[]> createExclusiveProducerWithRetry(PulsarClient client,
                                                                String topic,
                                                                String producerName,
                                                                Supplier<Boolean> isLeader,
                                                                int sleepInBetweenMs) throws NotLeaderAnymore {
    try {
        int tries = 0;
        do {
            try {
                return client.newProducer().topic(topic)
                        .accessMode(ProducerAccessMode.Exclusive)
                        .enableBatching(false)
                        .blockIfQueueFull(true)
                        .compressionType(CompressionType.LZ4)
                        .producerName(producerName)
                        .createAsync().get(10, TimeUnit.SECONDS);
            } catch (Exception e) {
                // Best-effort retry: another node may currently hold the exclusive producer.
                log.info("Encountered exception while at creating exclusive producer to topic {}", topic, e);
            }
            tries++;
            if (tries % 6 == 0) {
                if (log.isDebugEnabled()) {
                    log.debug(
                            "Failed to acquire exclusive producer to topic {} after {} attempts. "
                                    + "Will retry if we are still the leader.",
                            topic, tries);
                }
            }
            Thread.sleep(sleepInBetweenMs);
        } while (isLeader.get());
    } catch (InterruptedException e) {
        // Fix: restore the interrupt flag (Thread.sleep clears it) so callers
        // further up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Failed to create exclusive producer on topic " + topic, e);
    }
    throw new NotLeaderAnymore();
}
// Verifies three retry paths: immediate success returns the producer with the expected builder calls; loss of leadership with a fenced producer throws NotLeaderAnymore; and a leader that steps down after several attempts also throws NotLeaderAnymore.
@Test public void testCreateExclusiveProducerWithRetry() { Producer<byte[]> producer = mock(Producer.class); ProducerBuilder<byte[]> builder = mock(ProducerBuilder.class); when(builder.topic(anyString())).thenReturn(builder); when(builder.producerName(anyString())).thenReturn(builder); when(builder.enableBatching(anyBoolean())).thenReturn(builder); when(builder.blockIfQueueFull(anyBoolean())).thenReturn(builder); when(builder.compressionType(any(CompressionType.class))).thenReturn(builder); when(builder.sendTimeout(anyInt(), any(TimeUnit.class))).thenReturn(builder); when(builder.accessMode(any())).thenReturn(builder); when(builder.createAsync()).thenReturn(CompletableFuture.completedFuture(producer)); PulsarClient pulsarClient = mock(PulsarClient.class); when(pulsarClient.newProducer()).thenReturn(builder); Producer<byte[]> p = null; try { p = WorkerUtils .createExclusiveProducerWithRetry(pulsarClient, "test-topic", "test-producer", () -> true, 0); } catch (WorkerUtils.NotLeaderAnymore notLeaderAnymore) { fail(); } assertNotNull(p); verify(builder, times(1)).topic(eq("test-topic")); verify(builder, times(1)).producerName(eq("test-producer")); verify(builder, times(1)).accessMode(eq(ProducerAccessMode.Exclusive)); CompletableFuture completableFuture = new CompletableFuture(); completableFuture.completeExceptionally(new PulsarClientException.ProducerFencedException("test")); when(builder.createAsync()).thenReturn(completableFuture); try { WorkerUtils.createExclusiveProducerWithRetry(pulsarClient, "test-topic", "test-producer", () -> false, 0); fail(); } catch (WorkerUtils.NotLeaderAnymore notLeaderAnymore) { } AtomicInteger i = new AtomicInteger(); try { WorkerUtils.createExclusiveProducerWithRetry(pulsarClient, "test-topic", "test-producer", new Supplier<Boolean>() { @Override public Boolean get() { if (i.getAndIncrement() < 6) { return true; } return false; } }, 0); fail(); } catch (WorkerUtils.NotLeaderAnymore notLeaderAnymore) { } }
/**
 * Atomically-styled compare-and-set for the off-heap map: replaces the value stored
 * under {@code key} with {@code newValue} only if the key exists and currently holds
 * {@code oldValue}.
 *
 * @return true when the replacement happened, false when the key is absent or holds
 *         a different value
 */
@Override
public boolean replace(long key, long oldValue, long newValue) {
    assert oldValue != nullValue : "replace() called with null-sentinel oldValue " + nullValue;
    assert newValue != nullValue : "replace() called with null-sentinel newValue " + nullValue;
    // Resolve the slot address; NULL_ADDRESS means the key is not present.
    final long slotAddr = hsa.get(key);
    if (slotAddr == NULL_ADDRESS) {
        return false;
    }
    // Only write when the current value matches the expected one.
    if (mem.getLong(slotAddr) != oldValue) {
        return false;
    }
    mem.putLong(slotAddr, newValue);
    return true;
}
// Verifies that replace() rejects the null-sentinel as newValue via an AssertionError (assertions enabled).
@Test(expected = AssertionError.class) @RequireAssertEnabled public void test_replaceIfEquals_invalidNewValue() { map.replace(newKey(), newValue(), MISSING_VALUE); }
/**
 * Registers the application's helper bundle as a macOS login item.
 *
 * <p>The helper bundle is expected under {@code Library/LoginItems} inside the main
 * bundle; registration with launch services is attempted first (a failure there is
 * only logged), but the SMLoginItemSetEnabled call is authoritative for the result.
 *
 * @return true when the login item was enabled, false otherwise
 */
@Override
public boolean register(final Application application) {
    final Local helper = new FinderLocal(new BundleApplicationResourcesFinder().find().getParent(),
            String.format("Library/LoginItems/%s.app", application.getName()));
    final boolean registered = finder.register(helper);
    if (!registered) {
        // Non-fatal: log and continue to the login-item step.
        log.warn(String.format("Failed to register %s (%s) with launch services", helper,
                finder.getDescription(application.getIdentifier())));
    }
    final boolean enabled =
            ServiceManagementFunctions.library.SMLoginItemSetEnabled(application.getIdentifier(), true);
    if (!enabled) {
        log.warn(String.format("Failed to register %s as login item", application));
        return false;
    }
    return true;
}
// Verifies that registering an unknown helper bundle identifier fails (returns false).
@Test public void testRegister() { assertFalse(new ServiceManagementApplicationLoginRegistry().register( new Application("bundle.helper"))); }
public Map<TopicPartition, Long> endOffsets(Set<TopicPartition> partitions) { if (partitions == null || partitions.isEmpty()) { return Collections.emptyMap(); } Map<TopicPartition, OffsetSpec> offsetSpecMap = partitions.stream().collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest())); ListOffsetsResult resultFuture = admin.listOffsets(offsetSpecMap, new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED)); // Get the individual result for each topic partition so we have better error messages Map<TopicPartition, Long> result = new HashMap<>(); for (TopicPartition partition : partitions) { try { ListOffsetsResultInfo info = resultFuture.partitionResult(partition).get(); result.put(partition, info.offset()); } catch (ExecutionException e) { Throwable cause = e.getCause(); String topic = partition.topic(); if (cause instanceof AuthorizationException) { String msg = String.format("Not authorized to get the end offsets for topic '%s' on brokers at %s", topic, bootstrapServers); throw new ConnectException(msg, cause); } else if (cause instanceof UnsupportedVersionException) { // Should theoretically never happen, because this method is the same as what the consumer uses and therefore // should exist in the broker since before the admin client was added String msg = String.format("API to get the get the end offsets for topic '%s' is unsupported on brokers at %s", topic, bootstrapServers); throw new UnsupportedVersionException(msg, cause); } else if (cause instanceof TimeoutException) { String msg = String.format("Timed out while waiting to get end offsets for topic '%s' on brokers at %s", topic, bootstrapServers); throw new TimeoutException(msg, cause); } else if (cause instanceof LeaderNotAvailableException) { String msg = String.format("Unable to get end offsets during leader election for topic '%s' on brokers at %s", topic, bootstrapServers); throw new LeaderNotAvailableException(msg, cause); } else if (cause instanceof 
org.apache.kafka.common.errors.RetriableException) { throw (org.apache.kafka.common.errors.RetriableException) cause; } else { String msg = String.format("Error while getting end offsets for topic '%s' on brokers at %s", topic, bootstrapServers); throw new ConnectException(msg, cause); } } catch (InterruptedException e) { Thread.interrupted(); String msg = String.format("Interrupted while attempting to read end offsets for topic '%s' on brokers at %s", partition.topic(), bootstrapServers); throw new RetriableException(msg, e); } } return result; }
// Verifies that an unknown error from listOffsets surfaces as a non-retriable ConnectException
// whose message identifies the topic.
@Test public void endOffsetsShouldFailWithNonRetriableWhenUnknownErrorOccurs() {
    String topicName = "myTopic";
    TopicPartition tp1 = new TopicPartition(topicName, 0);
    Set<TopicPartition> tps = Collections.singleton(tp1);
    Long offset = null; // response should use error
    Cluster cluster = createCluster(1, topicName, 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        env.kafkaClient().prepareResponse(listOffsetsResultWithUnknownError(tp1, offset));
        TopicAdmin admin = new TopicAdmin(env.adminClient());
        ConnectException e = assertThrows(ConnectException.class, () -> admin.endOffsets(tps));
        assertTrue(e.getMessage().contains("Error while getting end offsets for topic"));
    }
}
/**
 * Forwards a device-state message to the state service, recording it in the
 * statistics collector first when stats collection is enabled.
 */
void forwardToStateService(DeviceStateServiceMsgProto deviceStateServiceMsg, TbCallback callback) {
    if (statsEnabled) {
        // Count the message before handing it off.
        stats.log(deviceStateServiceMsg);
    }
    stateService.onQueueMsg(deviceStateServiceMsg, callback);
}
// Verifies that forwardToStateService records the connect message in the stats collector
// when statsEnabled is true.
@Test public void givenStatsEnabled_whenForwardingConnectMsgToStateService_thenStatsAreRecorded() {
    // GIVEN
    ReflectionTestUtils.setField(defaultTbCoreConsumerServiceMock, "stats", statsMock);
    ReflectionTestUtils.setField(defaultTbCoreConsumerServiceMock, "statsEnabled", true);
    var connectMsg = TransportProtos.DeviceConnectProto.newBuilder()
            .setTenantIdMSB(tenantId.getId().getMostSignificantBits())
            .setTenantIdLSB(tenantId.getId().getLeastSignificantBits())
            .setDeviceIdMSB(deviceId.getId().getMostSignificantBits())
            .setDeviceIdLSB(deviceId.getId().getLeastSignificantBits())
            .setLastConnectTime(time)
            .build();
    doCallRealMethod().when(defaultTbCoreConsumerServiceMock).forwardToStateService(connectMsg, tbCallbackMock);
    // WHEN
    defaultTbCoreConsumerServiceMock.forwardToStateService(connectMsg, tbCallbackMock);
    // THEN
    then(statsMock).should().log(connectMsg);
}
/**
 * Asserts that the subject string does not match the given regular expression.
 * Fails when the subject is null, and fails when the subject matches the regex.
 */
public void doesNotMatch(@Nullable String regex) {
    checkNotNull(regex);
    if (actual == null) {
        // A null subject cannot satisfy a "does not match" claim.
        failWithActual("expected a string that does not match", regex);
        return;
    }
    if (actual.matches(regex)) {
        failWithActual("expected not to match", regex);
    }
}
// Verifies that doesNotMatch passes when the subject truly does not match the regex.
@Test public void stringDoesNotMatchString() { assertThat("abcaqadev").doesNotMatch(".*aaa.*"); }
/**
 * Tokenizes a "k1=v1 k2=v2 ..." style string into a map of key/value pairs.
 *
 * <p>Null or empty input is passed through unchanged; input without any '='
 * yields an empty map. Quotes around keys or values are stripped.
 */
@Override
public Object convert(String value) {
    if (isNullOrEmpty(value)) {
        return value;
    }
    if (!value.contains("=")) {
        // No key=value separator anywhere: nothing to tokenize.
        return Collections.emptyMap();
    }
    final Map<String, String> fields = new HashMap<>();
    final Matcher matcher = PATTERN.matcher(value);
    while (matcher.find()) {
        if (matcher.groupCount() != 2) {
            continue;
        }
        fields.put(removeQuotes(matcher.group(1)), removeQuotes(matcher.group(2)));
    }
    return fields;
}
// Verifies that single quotes nested inside double-quoted values survive tokenization, including surrounding whitespace.
@Test public void testFilterRetainsNestedSingleQuotesInDoubleQuotedValues() { TokenizerConverter f = new TokenizerConverter(new HashMap<String, Object>()); @SuppressWarnings("unchecked") Map<String, String> result = (Map<String, String>) f.convert("otters in k1= v1 k2=\" 'v2'\" k3=\" 'v3' \" more otters"); assertThat(result) .hasSize(3) .containsEntry("k1", "v1") .containsEntry("k2", " 'v2'") .containsEntry("k3", " 'v3' "); }
/**
 * Lists all buckets on the Spectra/DS3 endpoint as top-level volume paths.
 *
 * <p>Failed requests are mapped to backend-specific exceptions; plain I/O errors are
 * mapped through the default IO exception mapper.
 *
 * @throws BackgroundException when the service call fails
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    try {
        final Ds3Client client = new SpectraClientBuilder().wrap(session.getClient(), session.getHost());
        final GetServiceResponse response = client.getService(new GetServiceRequest());
        final AttributedList<Path> buckets = new AttributedList<Path>();
        for (final BucketDetails details : response.getListAllMyBucketsResult().getBuckets()) {
            final Path bucket = new Path(PathNormalizer.normalize(details.getName()),
                    EnumSet.of(Path.Type.volume, Path.Type.directory));
            // Carry the bucket creation timestamp into the path attributes.
            bucket.attributes().setCreationDate(details.getCreationDate().getTime());
            buckets.add(bucket);
        }
        return buckets;
    } catch (FailedRequestException e) {
        throw new SpectraExceptionMappingService().map("Listing directory {0} failed", e, directory);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
}
// Smoke test: listing the root path against the Spectra bucket service completes without throwing.
@Test public void testListWithRootDefaultPath() throws Exception { final AttributedList<Path> list = new SpectraBucketListService(session).list( new Path(String.valueOf(Path.DELIMITER), EnumSet.of(Path.Type.volume, Path.Type.directory)), new DisabledListProgressListener()); }
/**
 * Starts the telemetry daemon: records an opt-out the first time telemetry is seen
 * disabled, clears a stale opt-out when telemetry is re-enabled, and schedules the
 * periodic telemetry command when telemetry is active.
 *
 * @throws IllegalStateException when the telemetry-enable setting is missing
 */
@Override
public void start() {
  // Fix: the failure message previously referenced SONAR_TELEMETRY_URL although the
  // setting actually read (and missing) is SONAR_TELEMETRY_ENABLE.
  boolean isTelemetryActivated = config.getBoolean(SONAR_TELEMETRY_ENABLE.getKey())
    .orElseThrow(() -> new IllegalStateException(String.format("Setting '%s' must be provided.", SONAR_TELEMETRY_ENABLE.getKey())));
  boolean hasOptOut = internalProperties.read(I_PROP_OPT_OUT).isPresent();

  if (!isTelemetryActivated && !hasOptOut) {
    // First time telemetry is seen disabled: notify the server and persist the opt-out.
    optOut();
    internalProperties.write(I_PROP_OPT_OUT, String.valueOf(system2.now()));
    LOG.info("Sharing of SonarQube statistics is disabled.");
  }

  if (isTelemetryActivated && hasOptOut) {
    // Telemetry re-enabled: clear the stale opt-out marker.
    internalProperties.write(I_PROP_OPT_OUT, null);
  }

  if (!isTelemetryActivated) {
    return;
  }

  LOG.info("Sharing of SonarQube statistics is enabled.");
  int frequencyInSeconds = frequency();
  scheduleWithFixedDelay(telemetryCommand(), frequencyInSeconds, frequencyInSeconds, TimeUnit.SECONDS);
}
// Verifies the telemetry message sequence number is persisted unchanged on the first ping
// and incremented on the next forced ping.
@Test void write_sequence_correctly_incremented() {
    initTelemetrySettingsToDefaultValues();
    when(lockManager.tryLock(any(), anyInt())).thenReturn(true);
    settings.setProperty("sonar.telemetry.frequencyInSeconds", "1");
    internalProperties.write("telemetry.messageSeq", "10");
    mockDataJsonWriterDoingSomething();
    underTest.start();
    verify(internalProperties, timeout(4_000)).write("telemetry.messageSeq", "10");
    // force another ping
    internalProperties.write("telemetry.lastPing", String.valueOf(system2.now() - ONE_DAY));
    verify(internalProperties, timeout(4_000)).write("telemetry.messageSeq", "11");
}
/**
 * Parses the configured remote-region property into a region-name to URL map.
 *
 * <p>The property is a comma-separated list of {@code name;url} pairs. Because the URL
 * itself may contain ';', everything after the first separator is stitched back
 * together as the URL. Malformed entries (no separator) are logged and skipped.
 *
 * @return a map of region name to region URL; empty when the property is unset
 */
@Override
public Map<String, String> getRemoteRegionUrlsWithName() {
    final String propName = namespace + "remoteRegionUrlsWithName";
    final String rawValue = configInstance.getStringProperty(propName, null).get();
    if (null == rawValue) {
        return Collections.emptyMap();
    }

    final String pairSplitChar = ";";
    final String[] pairs = rawValue.split(",");
    final Map<String, String> regionToUrl = new HashMap<String, String>(pairs.length);
    for (String pair : pairs) {
        final String[] parts = pair.split(pairSplitChar);
        if (parts.length < 2) {
            logger.error("Error reading eureka remote region urls from property {}. "
                            + "Invalid entry {} for remote region url. The entry must contain region name and url "
                            + "separated by a {}. Ignoring this entry.",
                    propName, pair, pairSplitChar);
            continue;
        }
        String regionUrl = parts[1];
        if (parts.length > 2) {
            // The URL contained the separator itself; rebuild it from the remaining parts.
            final StringBuilder urlBuilder = new StringBuilder();
            for (int i = 1; i < parts.length; i++) {
                if (urlBuilder.length() != 0) {
                    urlBuilder.append(pairSplitChar);
                }
                urlBuilder.append(parts[i]);
            }
            regionUrl = urlBuilder.toString();
        }
        regionToUrl.put(parts[0], regionUrl);
    }
    return regionToUrl;
}
// Verifies that two comma-separated name;url entries are parsed into a two-entry map with the expected names and URLs.
@Test public void testRemoteRegionUrlsWithName2Regions() throws Exception { String region1 = "myregion1"; String region1url = "http://local:888/eee"; String region2 = "myregion2"; String region2url = "http://local:888/eee"; ConfigurationManager.getConfigInstance().setProperty("eureka.remoteRegionUrlsWithName", region1 + ';' + region1url + ',' + region2 + ';' + region2url); DefaultEurekaServerConfig config = new DefaultEurekaServerConfig(); Map<String, String> remoteRegionUrlsWithName = config.getRemoteRegionUrlsWithName(); Assert.assertEquals("Unexpected remote region url count.", 2, remoteRegionUrlsWithName.size()); Assert.assertTrue("Remote region 1 not found.", remoteRegionUrlsWithName.containsKey(region1)); Assert.assertTrue("Remote region 2 not found.", remoteRegionUrlsWithName.containsKey(region2)); Assert.assertEquals("Unexpected remote region 1 url.", region1url, remoteRegionUrlsWithName.get(region1)); Assert.assertEquals("Unexpected remote region 2 url.", region2url, remoteRegionUrlsWithName.get(region2)); }
/**
 * Returns a live-object proxy for the entity with the given id, or null when the
 * backing entry does not exist.
 */
@Override
public <T> T get(Class<T> entityClass, Object id) {
    addExpireListener(commandExecutor);
    final T proxy = createLiveObject(entityClass, id);
    // Only hand out the proxy when the underlying entry actually exists.
    return asLiveObject(proxy).isExists() ? proxy : null;
}
// Verifies that get() returns null before persisting and a non-null live object after persisting and after mutation.
@Test public void testGet() { RLiveObjectService service = redisson.getLiveObjectService(); assertNull(service.get(TestClass.class, new ObjectId(100))); TestClass ts = new TestClass(new ObjectId(100)); TestClass persisted = service.persist(ts); assertNotNull(service.get(TestClass.class, new ObjectId(100))); persisted.setCode("CODE"); assertNotNull(service.get(TestClass.class, new ObjectId(100))); }
/**
 * Returns a SchemaBuilder for this logical type: an INT32 carrying the logical
 * name and schema version 1.
 */
public static SchemaBuilder builder() {
    final SchemaBuilder base = SchemaBuilder.int32();
    return base.name(LOGICAL_NAME).version(1);
}
// Verifies that the Time schema carries the expected logical name and version 1.
@Test public void testBuilder() { Schema plain = Time.SCHEMA; assertEquals(Time.LOGICAL_NAME, plain.name()); assertEquals(1, (Object) plain.version()); }
/**
 * Builds a BackoffIdleStrategy from a comma-separated configuration string.
 *
 * <p>The string must contain exactly {@code ARG_COUNT} fields; the positional
 * arguments supply max spins, max yields, and the min/max park periods (ns).
 *
 * @throws IllegalArgumentException when the argument count is wrong
 */
public static BackoffIdleStrategy createBackoffIdleStrategy(String config) {
    final String[] args = config.split(",");
    if (args.length != ARG_COUNT) {
        throw new IllegalArgumentException(
                format("Invalid backoff configuration '%s', 4 arguments expected", config));
    }
    // Map each positional field onto its tuning knob.
    final long maxSpins = parseLong(args[ARG_MAX_SPINS]);
    final long maxYields = parseLong(args[ARG_MAX_YIELDS]);
    final long minParkPeriodNs = parseLong(args[ARG_MIN_PARK_PERIOD]);
    final long maxParkPeriodNs = parseLong(args[ARG_MAX_PARK_PERIOD]);
    return new BackoffIdleStrategy(maxSpins, maxYields, minParkPeriodNs, maxParkPeriodNs);
}
// Verifies the positional parsing of "name,maxSpins,maxYields,minPark,maxPark" into the strategy's thresholds.
@Test public void test_createBackoffIdleStrategy() { BackoffIdleStrategy idleStrategy = BackoffIdleStrategy.createBackoffIdleStrategy("foo,1,2,10,15"); assertEquals(1, idleStrategy.yieldThreshold); assertEquals(3, idleStrategy.parkThreshold); assertEquals(10, idleStrategy.minParkPeriodNs); assertEquals(15, idleStrategy.maxParkPeriodNs); }
/**
 * Reports whether webkit is unavailable: true when no webkit library is on the
 * path while running on the supported distribution.
 */
public synchronized boolean isWebkitUnavailable() {
    final String webkitPath = getWebkitPath();
    final String environmentName = getEnvironmentName();
    final boolean webkitMissing = webkitPath == null || !webkitPath.contains("webkit");
    return webkitMissing && environmentName.contains(SUPPORTED_DISTRIBUTION_NAME);
}
// Verifies that on macOS (not the supported distribution) webkit is never reported unavailable.
@Test public void testIsWebkitUnavailable_mac() { EnvironmentUtilsMock mock = new EnvironmentUtilsMock( Case.MAC_OS_X ); assertFalse( mock.getMockedInstance().isWebkitUnavailable() ); }
/**
 * Returns the zero-based index of the given page within the page tree,
 * or -1 when the page is not part of this tree.
 */
public int indexOf(PDPage page) {
    final SearchContext context = new SearchContext(page);
    // Walk the tree from the root; the context accumulates the index.
    return findPage(context, root) ? context.index : -1;
}
@Test void positiveMultipleLevel() throws IOException { doc = Loader.loadPDF(RandomAccessReadBuffer.createBufferFromStream( TestPDPageTree.class.getResourceAsStream("page_tree_multiple_levels.pdf"))); for (int i = 0; i < doc.getNumberOfPages(); i++) { assertEquals(i, doc.getPages().indexOf(doc.getPage(i))); } }
// Fetches the job identified by project/region/jobId and maps its raw state to a JobState.
// Delegates entirely to getJob and handleJobState; IOExceptions from the fetch propagate.
@Override public JobState getJobStatus(String project, String region, String jobId) throws IOException { return handleJobState(getJob(project, region, jobId)); }
// Verifies that an IOException from the underlying jobs.get call surfaces as a FailsafeException from getJobStatus.
@Test public void testGetJobThrowsException() throws IOException { when(getLocationJobs(client).get(any(), any(), any())).thenThrow(new IOException()); assertThrows( FailsafeException.class, () -> new FakePipelineLauncher(client).getJobStatus(PROJECT, REGION, JOB_ID)); }
/**
 * Checks whether a file name is a valid schema file: it must carry the schema
 * suffix and its final path component must not be hidden (leading dot).
 */
public static boolean validSchemaFilename(String fn) {
    if (!fn.endsWith(SD_NAME_SUFFIX)) {
        return false;
    }
    // Only the last path component decides validity.
    final int lastSlash = fn.lastIndexOf('/');
    final String baseName = lastSlash >= 0 ? fn.substring(lastSlash + 1) : fn;
    return !baseName.startsWith(".");
}
// Verifies acceptance of .sd files (with or without directories) and rejection of wrong suffixes and hidden files.
@Test public void testValidSchemaFilename() { checkValid("foo.sd"); checkValid("schemas/foo.sd"); checkValid("./foo.sd"); checkValid("./schemas/foo.sd"); checkInvalid("foo"); checkInvalid("foo.ds"); checkInvalid(".foo.sd"); checkInvalid("schemas/.foo.sd"); checkInvalid("schemas/subdir/._foo.sd"); }
/**
 * Reads the value of the named field from the given object, which may also be a
 * Class (for static access). Returns null when the object or field name is blank.
 */
public static Object getFieldValue(Object obj, String fieldName) {
    if (null == obj || StringUtil.isBlank(fieldName)) {
        return null;
    }
    // Accept either an instance or a Class literal as the lookup target.
    final Class<?> targetClass = obj instanceof Class ? (Class<?>) obj : obj.getClass();
    final Field field = getField(targetClass, fieldName);
    return getFieldValue(obj, field);
}
// Verifies field reads on both the subclass's own field and an inherited field.
@Test public void getFieldValueTest() { TestSubClass testSubClass = new TestSubClass(); Object privateField = ReflectUtil.getFieldValue(testSubClass, "privateField"); Assert.assertEquals("privateField", privateField); Object field = ReflectUtil.getFieldValue(testSubClass, "field"); Assert.assertEquals("field", field); }
// Loads the stored application data for the id from the history store and converts it
// into an ApplicationReport. Pure delegation; IOExceptions from the store propagate.
@Override public ApplicationReport getApplication(ApplicationId appId) throws IOException { return convertToApplicationReport(historyStore.getApplication(appId)); }
// Verifies that a written application + attempt round-trips through the history manager into a report with the expected id, attempt, host, type and queue.
@Test void testApplicationReport() throws IOException, YarnException { ApplicationId appId = null; appId = ApplicationId.newInstance(0, 1); writeApplicationStartData(appId); writeApplicationFinishData(appId); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); writeApplicationAttemptStartData(appAttemptId); writeApplicationAttemptFinishData(appAttemptId); ApplicationReport appReport = applicationHistoryManagerImpl.getApplication(appId); assertNotNull(appReport); assertEquals(appId, appReport.getApplicationId()); assertEquals(appAttemptId, appReport.getCurrentApplicationAttemptId()); assertEquals(appAttemptId.toString(), appReport.getHost()); assertEquals("test type", appReport.getApplicationType().toString()); assertEquals("test queue", appReport.getQueue().toString()); }
/**
 * Returns a snapshot copy of the currently stored completed checkpoints,
 * so callers cannot mutate the internal collection.
 */
@Override
public List<CompletedCheckpoint> getAllCheckpoints() {
    final List<CompletedCheckpoint> snapshot = new ArrayList<>(completedCheckpoints);
    return snapshot;
}
@Test void testRecoverSortedCheckpoints() throws Exception { final TestingStateHandleStore<CompletedCheckpoint> stateHandleStore = builder.setGetAllSupplier(() -> createStateHandles(3)).build(); final CompletedCheckpointStore completedCheckpointStore = createCompletedCheckpointStore(stateHandleStore); final List<CompletedCheckpoint> recoveredCompletedCheckpoint = completedCheckpointStore.getAllCheckpoints(); assertThat(recoveredCompletedCheckpoint).hasSize(3); final List<Long> checkpointIds = recoveredCompletedCheckpoint.stream() .map(CompletedCheckpoint::getCheckpointID) .collect(Collectors.toList()); assertThat(checkpointIds).containsExactly(1L, 2L, 3L); }
public Page<Instance> findInstancesByNamespaceAndInstanceAppId(String instanceAppId, String appId, String clusterName, String namespaceName, Pageable pageable) { Page<Object> instanceIdResult = instanceConfigRepository .findInstanceIdsByNamespaceAndInstanceAppId(instanceAppId, appId, clusterName, namespaceName, getValidInstanceConfigDate(), pageable); List<Instance> instances = Collections.emptyList(); if (instanceIdResult.hasContent()) { Set<Long> instanceIds = instanceIdResult.getContent().stream().map((Object o) -> { if (o == null) { return null; } if (o instanceof Integer) { return ((Integer)o).longValue(); } if (o instanceof Long) { return (Long) o; } //for h2 test if (o instanceof BigInteger) { return ((BigInteger) o).longValue(); } return null; }).filter(Objects::nonNull).collect(Collectors.toSet()); instances = findInstancesByIds(instanceIds); } return new PageImpl<>(instances, pageable, instanceIdResult.getTotalElements()); }
// Verifies that filtering by instanceAppId returns only the instances belonging to that app id for the shared namespace config.
@Test @Rollback public void testFindInstancesByNamespaceAndInstanceAppId() throws Exception { String someConfigAppId = "someConfigAppId"; String someConfigClusterName = "someConfigClusterName"; String someConfigNamespaceName = "someConfigNamespaceName"; String someReleaseKey = "someReleaseKey"; Date someValidDate = new Date(); String someAppId = "someAppId"; String anotherAppId = "anotherAppId"; String someClusterName = "someClusterName"; String someDataCenter = "someDataCenter"; String someIp = "someIp"; Instance someInstance = instanceService.createInstance(assembleInstance(someAppId, someClusterName, someDataCenter, someIp)); Instance anotherInstance = instanceService.createInstance(assembleInstance(anotherAppId, someClusterName, someDataCenter, someIp)); prepareInstanceConfigForInstance(someInstance.getId(), someConfigAppId, someConfigClusterName, someConfigNamespaceName, someReleaseKey, someValidDate); prepareInstanceConfigForInstance(anotherInstance.getId(), someConfigAppId, someConfigClusterName, someConfigNamespaceName, someReleaseKey, someValidDate); Page<Instance> result = instanceService.findInstancesByNamespaceAndInstanceAppId(someAppId, someConfigAppId, someConfigClusterName, someConfigNamespaceName, PageRequest.of(0, 10)); Page<Instance> anotherResult = instanceService.findInstancesByNamespaceAndInstanceAppId(anotherAppId, someConfigAppId, someConfigClusterName, someConfigNamespaceName, PageRequest.of(0, 10)); assertEquals(Lists.newArrayList(someInstance), result.getContent()); assertEquals(Lists.newArrayList(anotherInstance), anotherResult.getContent()); }
// Creates a fresh Extractors.Builder bound to the given serialization service.
public static Extractors.Builder newBuilder(InternalSerializationService ss) { return new Extractors.Builder(ss); }
// Verifies that a builder configured with the simple getter-cache supplier produces Extractors backed by a SimpleGetterCache.
@Test public void when_creatingWithBuilderWithSimpleGetterCache_then_simpleGetterCacheIsUsed() { Extractors extractors = Extractors.newBuilder(ss).setGetterCacheSupplier(SIMPLE_GETTER_CACHE_SUPPLIER).build(); assertInstanceOf(SimpleGetterCache.class, extractors.getterCache); }
// Convenience overload: builds the field-access descriptor from field-name strings and delegates.
public static <T> Inner<T> fields(String... fields) { return fields(FieldAccessDescriptor.withFieldNames(fields)); }
// Verifies that dropping a top-level field plus one nested sub-field leaves only the remaining nested field in the output schema and rows.
@Test @Category(NeedsRunner.class) public void testDropNestedFieldKeepingOnlyNested() { Schema expectedSchema = Schema.builder().addStringField("field2").build(); PCollection<Row> result = pipeline .apply( Create.of( nestedRow(simpleRow(1, "one")), nestedRow(simpleRow(2, "two")), nestedRow(simpleRow(3, "three"))) .withRowSchema(NESTED_SCHEMA)) .apply(DropFields.fields("string", "nested.field1")); assertEquals(expectedSchema, result.getSchema()); List<Row> expectedRows = Lists.newArrayList( Row.withSchema(expectedSchema).addValue("one").build(), Row.withSchema(expectedSchema).addValue("two").build(), Row.withSchema(expectedSchema).addValue("three").build()); PAssert.that(result).containsInAnyOrder(expectedRows); pipeline.run(); }
// Assembles the async REST template: builds the client config, IO reactor and connection
// manager, then wires them into an async HTTP client. NOTE(review): the builder call
// order appears deliberate (interceptor before thread factory/reactor config) — keep as is.
@Override public NacosAsyncRestTemplate createNacosAsyncRestTemplate() { final HttpClientConfig originalRequestConfig = buildHttpClientConfig(); final DefaultConnectingIOReactor ioreactor = getIoReactor(ASYNC_IO_REACTOR_NAME); final RequestConfig defaultConfig = getRequestConfig(); final NHttpClientConnectionManager connectionManager = getConnectionManager(originalRequestConfig, ioreactor); monitorAndExtension(connectionManager); return new NacosAsyncRestTemplate(assignLogger(), new DefaultAsyncHttpClientRequest( HttpAsyncClients.custom().addInterceptorLast(new RequestContent(true)) .setThreadFactory(new NameThreadFactory(ASYNC_THREAD_NAME)) .setDefaultIOReactorConfig(getIoReactorConfig()).setDefaultRequestConfig(defaultConfig) .setMaxConnTotal(originalRequestConfig.getMaxConnTotal()) .setMaxConnPerRoute(originalRequestConfig.getMaxConnPerRoute()) .setUserAgent(originalRequestConfig.getUserAgent()) .setConnectionManager(connectionManager).build(), ioreactor, defaultConfig)); }
// Verifies that a factory with a minimal custom config produces a non-null async REST template.
@Test void testCreateNacosAsyncRestTemplate() { HttpClientFactory httpClientFactory = new AbstractHttpClientFactory() { @Override protected HttpClientConfig buildHttpClientConfig() { return HttpClientConfig.builder().setMaxConnTotal(10).setMaxConnPerRoute(10).build(); } @Override protected Logger assignLogger() { return logger; } }; NacosAsyncRestTemplate nacosRestTemplate = httpClientFactory.createNacosAsyncRestTemplate(); assertNotNull(nacosRestTemplate); }
/**
 * Decides whether the buffered batch should be flushed: either the batch reached the
 * configured size, or it is non-empty and the time window has elapsed. In both cases
 * the next deadline is scheduled before returning true.
 */
@Override
public boolean test(Collection<V> buffer) {
    final int size = buffer.size();
    if (size >= this.batchSize) {
        // Batch is full.
        this.nextDate();
        return true;
    }
    if (size > 0 && this.next.isBefore(Instant.now())) {
        // Deadline passed with pending items.
        this.nextDate();
        return true;
    }
    return false;
}
// Verifies the time-based trigger: does not fire immediately or after 100ms, but fires once the window has elapsed (~600ms).
@Test public void testByDuration() throws InterruptedException { HashMap<String, String> map = generateHashMap(10); assertFalse(trigger.test(map.values())); Thread.sleep(100L); assertFalse(trigger.test(map.values())); Thread.sleep(500L); assertTrue(trigger.test(map.values())); }
/**
 * REST endpoint returning the advanced metadata of the addressed configuration.
 *
 * @param dataId config data id
 * @param group  config group
 * @param tenant optional tenant/namespace (defaults to empty)
 * @return a successful RestResult wrapping the advance info found by the persist
 *         service (payload may be null when nothing matches — TODO confirm contract)
 */
@GetMapping("/catalog")
@Secured(action = ActionTypes.READ, signType = SignType.CONFIG)
public RestResult<ConfigAdvanceInfo> getConfigAdvanceInfo(@RequestParam("dataId") String dataId,
        @RequestParam("group") String group,
        @RequestParam(value = "tenant", required = false, defaultValue = StringUtils.EMPTY) String tenant) {
    final ConfigAdvanceInfo advanceInfo = configInfoPersistService.findConfigAdvanceInfo(dataId, group, tenant);
    return RestResultUtils.success(advanceInfo);
}
// Verifies the /catalog endpoint returns HTTP-level code 200 and a payload matching the persist service's advance info.
@Test void testGetConfigAdvanceInfo() throws Exception { ConfigAdvanceInfo configAdvanceInfo = new ConfigAdvanceInfo(); configAdvanceInfo.setCreateIp("localhost"); configAdvanceInfo.setCreateUser("test"); configAdvanceInfo.setDesc("desc"); when(configInfoPersistService.findConfigAdvanceInfo("test", "test", "")).thenReturn(configAdvanceInfo); MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(Constants.CONFIG_CONTROLLER_PATH + "/catalog") .param("dataId", "test").param("group", "test").param("tenant", ""); String actualValue = mockmvc.perform(builder).andReturn().getResponse().getContentAsString(); String code = JacksonUtils.toObj(actualValue).get("code").toString(); String data = JacksonUtils.toObj(actualValue).get("data").toString(); ConfigAdvanceInfo resConfigAdvanceInfo = JacksonUtils.toObj(data, ConfigAdvanceInfo.class); assertEquals("200", code); assertEquals(configAdvanceInfo.getCreateIp(), resConfigAdvanceInfo.getCreateIp()); assertEquals(configAdvanceInfo.getCreateUser(), resConfigAdvanceInfo.getCreateUser()); assertEquals(configAdvanceInfo.getDesc(), resConfigAdvanceInfo.getDesc()); }
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } boolean result = false; boolean containsNull = false; // Spec. definition: return true if any item is true, else false if all items are false, else null for ( final Object element : list ) { if (element != null && !(element instanceof Boolean)) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean")); } else { if (element != null) { result |= (Boolean) element; } else if (!containsNull) { containsNull = true; } } } if (containsNull && !result) { return FEELFnResult.ofResult( null ); } else { return FEELFnResult.ofResult( result ); } }
// Verifies that any() rejects lists containing non-Boolean elements, regardless of accompanying booleans or nulls.
@Test void invokeListParamTypeHeterogenousArray() { FunctionTestUtil.assertResultError(anyFunction.invoke(Arrays.asList(Boolean.FALSE, 1)), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(anyFunction.invoke(Arrays.asList(Boolean.TRUE, 1)), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(anyFunction.invoke(Arrays.asList(Boolean.TRUE, null, 1)), InvalidParametersEvent.class); }
public static KafkaPrincipal parseKafkaPrincipal(String str) {
    // Expected form is "principalType:principalName"; only the FIRST ':' separates the
    // two parts, so the name itself may contain further ':' characters.
    if (str != null && !str.isEmpty()) {
        String[] parts = str.split(":", 2);
        if (parts.length == 2) {
            return new KafkaPrincipal(parts[0], parts[1]);
        }
    }
    throw new IllegalArgumentException("expected a string in format principalType:principalName but got " + str);
}
@Test
public void testPrincipalNameCanContainSeparator() {
    // The parser splits on the first ':' only, so ':' is legal inside the principal name.
    final String nameWithSeparators = "name:with:separator:in:it";
    final KafkaPrincipal parsed =
            SecurityUtils.parseKafkaPrincipal(KafkaPrincipal.USER_TYPE + ":" + nameWithSeparators);
    assertEquals(KafkaPrincipal.USER_TYPE, parsed.getPrincipalType());
    assertEquals(nameWithSeparators, parsed.getName());
}
/**
 * Creates a {@code TypeRef} describing {@code Map<K, V>} for the given key and value classes.
 * Convenience overload that wraps both classes in {@code TypeRef} instances and delegates to
 * the {@code TypeRef}-based {@code mapOf} overload.
 *
 * @param keyType the map key class
 * @param valueType the map value class
 * @return a type reference for {@code Map<K, V>}
 */
public static <K, V> TypeRef<Map<K, V>> mapOf(Class<K> keyType, Class<V> valueType) { return mapOf(TypeRef.of(keyType), TypeRef.of(valueType)); }
@Test
public void testMapOf() {
    // Class-based factories must produce type references equal to the anonymous-subclass form.
    Assert.assertEquals(
        TypeUtils.mapOf(String.class, Integer.class), new TypeRef<Map<String, Integer>>() {});
    Assert.assertEquals(
        TypeUtils.mapOf(HashMap.class, String.class, Integer.class),
        new TypeRef<HashMap<String, Integer>>() {});
    Assert.assertEquals(
        TypeUtils.hashMapOf(String.class, Integer.class),
        new TypeRef<HashMap<String, Integer>>() {});
    Assert.assertEquals(
        TypeUtils.mapOf(LinkedHashMap.class, String.class, Integer.class),
        new TypeRef<LinkedHashMap<String, Integer>>() {});
}
@Override public CRFModel train(SequenceDataset<Label> sequenceExamples, Map<String, Provenance> runProvenance) { if (sequenceExamples.getOutputInfo().getUnknownCount() > 0) { throw new IllegalArgumentException("The supplied Dataset contained unknown Outputs, and this Trainer is supervised."); } // Creates a new RNG, adds one to the invocation count, generates a local optimiser. SplittableRandom localRNG; TrainerProvenance trainerProvenance; StochasticGradientOptimiser localOptimiser; synchronized(this) { localRNG = rng.split(); localOptimiser = optimiser.copy(); trainerProvenance = getProvenance(); trainInvocationCounter++; } ImmutableOutputInfo<Label> labelIDMap = sequenceExamples.getOutputIDInfo(); ImmutableFeatureMap featureIDMap = sequenceExamples.getFeatureIDMap(); SGDVector[][] sgdFeatures = new SGDVector[sequenceExamples.size()][]; int[][] sgdLabels = new int[sequenceExamples.size()][]; double[] weights = new double[sequenceExamples.size()]; int n = 0; for (SequenceExample<Label> example : sequenceExamples) { weights[n] = example.getWeight(); Pair<int[],SGDVector[]> pair = CRFModel.convertToVector(example,featureIDMap,labelIDMap); sgdFeatures[n] = pair.getB(); sgdLabels[n] = pair.getA(); n++; } logger.info(String.format("Training SGD CRF with %d examples", n)); CRFParameters crfParameters = new CRFParameters(featureIDMap.size(),labelIDMap.size()); localOptimiser.initialise(crfParameters); double loss = 0.0; int iteration = 0; for (int i = 0; i < epochs; i++) { if (shuffle) { Util.shuffleInPlace(sgdFeatures, sgdLabels, weights, localRNG); } if (minibatchSize == 1) { /* * Special case a minibatch of size 1. Directly updates the parameters after each * example rather than aggregating. 
*/ for (int j = 0; j < sgdFeatures.length; j++) { Pair<Double,Tensor[]> output = crfParameters.valueAndGradient(sgdFeatures[j],sgdLabels[j]); loss += output.getA()*weights[j]; //Update the gradient with the current learning rates Tensor[] updates = localOptimiser.step(output.getB(),weights[j]); //Apply the update to the current parameters. crfParameters.update(updates); iteration++; if ((iteration % loggingInterval == 0) && (loggingInterval != -1)) { logger.info("At iteration " + iteration + ", average loss = " + loss/loggingInterval); loss = 0.0; } } } else { Tensor[][] gradients = new Tensor[minibatchSize][]; for (int j = 0; j < sgdFeatures.length; j += minibatchSize) { double tempWeight = 0.0; int curSize = 0; //Aggregate the gradient updates for each example in the minibatch for (int k = j; k < j+minibatchSize && k < sgdFeatures.length; k++) { Pair<Double,Tensor[]> output = crfParameters.valueAndGradient(sgdFeatures[j],sgdLabels[j]); loss += output.getA()*weights[k]; tempWeight += weights[k]; gradients[k-j] = output.getB(); curSize++; } //Merge the values into a single gradient update Tensor[] updates = crfParameters.merge(gradients,curSize); for (Tensor update : updates) { update.scaleInPlace(minibatchSize); } tempWeight /= minibatchSize; //Update the gradient with the current learning rates updates = localOptimiser.step(updates,tempWeight); //Apply the gradient. 
crfParameters.update(updates); iteration++; if ((loggingInterval != -1) && (iteration % loggingInterval == 0)) { logger.info("At iteration " + iteration + ", average loss = " + loss/loggingInterval); loss = 0.0; } } } } localOptimiser.finalise(); //public CRFModel(String name, String description, ImmutableInfoMap featureIDMap, ImmutableInfoMap outputIDInfo, CRFParameters parameters) { ModelProvenance provenance = new ModelProvenance(CRFModel.class.getName(),OffsetDateTime.now(),sequenceExamples.getProvenance(),trainerProvenance,runProvenance); CRFModel model = new CRFModel("crf-sgd-model",provenance,featureIDMap,labelIDMap,crfParameters); localOptimiser.reset(); return model; }
@Test
public void testOtherInvalidExample() {
    // A model trained on the gorilla dataset must reject the "other" invalid
    // sequence example at prediction time with an IllegalArgumentException.
    assertThrows(IllegalArgumentException.class, () -> {
        SequenceDataset<Label> dataset = SequenceDataGenerator.generateGorillaDataset(5);
        SequenceModel<Label> model = t.train(dataset);
        model.predict(SequenceDataGenerator.generateOtherInvalidExample());
    });
}
@Override public String updateUserAvatar(Long id, InputStream avatarFile) { validateUserExists(id); // 存储文件 String avatar = fileApi.createFile(IoUtil.readBytes(avatarFile)); // 更新路径 AdminUserDO sysUserDO = new AdminUserDO(); sysUserDO.setId(id); sysUserDO.setAvatar(avatar); userMapper.updateById(sysUserDO); return avatar; }
@Test public void testUpdateUserAvatar_success() throws Exception { // mock 数据 AdminUserDO dbUser = randomAdminUserDO(); userMapper.insert(dbUser); // 准备参数 Long userId = dbUser.getId(); byte[] avatarFileBytes = randomBytes(10); ByteArrayInputStream avatarFile = new ByteArrayInputStream(avatarFileBytes); // mock 方法 String avatar = randomString(); when(fileApi.createFile(eq( avatarFileBytes))).thenReturn(avatar); // 调用 userService.updateUserAvatar(userId, avatarFile); // 断言 AdminUserDO user = userMapper.selectById(userId); assertEquals(avatar, user.getAvatar()); }
public void update(String namespaceName, String extensionName) throws InterruptedException {
    // Built-in extensions are excluded from public-id assignment entirely.
    if (BuiltInExtensionUtil.isBuiltIn(namespaceName)) {
        LOGGER.debug("SKIP BUILT-IN EXTENSION {}", NamingUtil.toExtensionId(namespaceName, extensionName));
        return;
    }

    var extension = repositories.findPublicId(namespaceName, extensionName);

    // Collect extension-level public id changes and persist them only when present.
    var changedExtensionIds = new HashMap<Long, String>();
    updateExtensionPublicId(extension, changedExtensionIds, false);
    if (!changedExtensionIds.isEmpty()) {
        repositories.updateExtensionPublicIds(changedExtensionIds);
    }

    // Same for namespace-level public id changes.
    var changedNamespaceIds = new HashMap<Long, String>();
    updateNamespacePublicId(extension, changedNamespaceIds, false);
    if (!changedNamespaceIds.isEmpty()) {
        repositories.updateNamespacePublicIds(changedNamespaceIds);
    }
}
// Scenario: extension "foo.bar" receives public ids from upstream, but another
// extension "baz.foobar" already holds exactly those ids locally. The service must
// hand the upstream ids to "foo.bar" and re-assign fresh random ids to the clashing
// "baz.foobar" and its namespace. NOTE(review): the consecutive thenReturn stubbing
// of getRandomPublicId() encodes the exact order the service requests random ids;
// do not reorder these stubs.
@Test public void testMustUpdateRandomExists() throws InterruptedException {
    // Extension 1 ("foo.bar"): no public ids assigned yet.
    var namespaceName1 = "foo";
    var namespacePublicId1 = UUID.randomUUID().toString();
    var extensionName1 = "bar";
    var extensionPublicId1 = UUID.randomUUID().toString();
    var namespace1 = new Namespace();
    namespace1.setId(1L);
    namespace1.setName(namespaceName1);
    var extension1 = new Extension();
    extension1.setId(2L);
    extension1.setName(extensionName1);
    extension1.setNamespace(namespace1);
    // Extension 2 ("baz.foobar"): already owns the public ids that upstream
    // assigns to extension 1, forcing a random re-assignment for it.
    var namespaceName2 = "baz";
    var namespacePublicId2 = UUID.randomUUID().toString();
    var extensionName2 = "foobar";
    var extensionPublicId2 = UUID.randomUUID().toString();
    var namespace2 = new Namespace();
    namespace2.setId(3L);
    namespace2.setName(namespaceName2);
    namespace2.setPublicId(namespacePublicId1);
    var extension2 = new Extension();
    extension2.setId(4L);
    extension2.setName(extensionName2);
    extension2.setPublicId(extensionPublicId1);
    extension2.setNamespace(namespace2);
    // Repository lookups: target extension, plus the holders of the clashing ids.
    Mockito.when(repositories.findPublicId(namespaceName1, extensionName1)).thenReturn(extension1);
    Mockito.when(repositories.findPublicId(extensionPublicId1)).thenReturn(extension2);
    Mockito.when(repositories.findNamespacePublicId(namespacePublicId1)).thenReturn(extension2);
    // Upstream supplies the ids currently held by extension2/namespace2.
    var upstreamPublicIds = new PublicIds(namespacePublicId1, extensionPublicId1);
    Mockito.when(idService.getUpstreamPublicIds(extension1)).thenReturn(upstreamPublicIds);
    Mockito.when(idService.getUpstreamPublicIds(extension2)).thenReturn(upstreamPublicIds);
    // Random ids are dispensed in this exact call order (consecutive stubbing).
    Mockito.when(idService.getRandomPublicId()).thenReturn(extensionPublicId1, extensionPublicId2, namespacePublicId1, namespacePublicId2);
    updateService.update(namespaceName1, extensionName1);
    // extension1 keeps the upstream id; clashing extension2 got a fresh random id.
    Mockito.verify(repositories).updateExtensionPublicIds(Mockito.argThat((Map<Long, String> map) -> {
        return map.size() == 2 && map.get(extension1.getId()).equals(extensionPublicId1) && map.get(extension2.getId()).equals(extensionPublicId2);
    }));
    // namespace1 keeps the upstream id; clashing namespace2 got a fresh random id.
    Mockito.verify(repositories).updateNamespacePublicIds(Mockito.argThat((Map<Long, String> map) -> {
        return map.size() == 2 && map.get(namespace1.getId()).equals(namespacePublicId1) && map.get(namespace2.getId()).equals(namespacePublicId2);
    }));
}
// Returns a fresh mutable list containing the existing fields followed by the additions;
// neither input collection is modified.
private static List<Types.NestedField> addFields(
    List<Types.NestedField> fields, Collection<Types.NestedField> adds) {
  List<Types.NestedField> combined = Lists.newArrayList(fields);
  combined.addAll(adds);
  return combined;
}
// Verifies SchemaUpdate.addColumn for every nested position: a new top-level column, a field
// inside a map-value struct ("locations"), a field inside a list-element struct ("points"),
// and a field whose name contains a literal '.' ("t.t"). Field IDs 24-27 are allocated
// sequentially past SCHEMA_LAST_COLUMN_ID; added fields are always optional.
@Test public void testAddFields() {
  // Expected result: the base SCHEMA with "toplevel" (24), "locations.alt" (25),
  // "points.z" (26) and "points.`t.t`" (27) appended in their respective structs.
  Schema expected = new Schema( required(1, "id", Types.IntegerType.get()), optional(2, "data", Types.StringType.get()), optional( 3, "preferences", Types.StructType.of( required(8, "feature1", Types.BooleanType.get()), optional(9, "feature2", Types.BooleanType.get())), "struct of named boolean options"), required( 4, "locations", Types.MapType.ofRequired( 10, 11, Types.StructType.of( required(20, "address", Types.StringType.get()), required(21, "city", Types.StringType.get()), required(22, "state", Types.StringType.get()), required(23, "zip", Types.IntegerType.get())), Types.StructType.of( required(12, "lat", Types.FloatType.get()), required(13, "long", Types.FloatType.get()), optional(25, "alt", Types.FloatType.get()))), "map of address to coordinate"), optional( 5, "points", Types.ListType.ofOptional( 14, Types.StructType.of( required(15, "x", Types.LongType.get()), required(16, "y", Types.LongType.get()), optional(26, "z", Types.LongType.get()), optional(27, "t.t", Types.LongType.get()))), "2-D cartesian points"), required(6, "doubles", Types.ListType.ofRequired(17, Types.DoubleType.get())), optional( 7, "properties", Types.MapType.ofOptional(18, 19, Types.StringType.get(), Types.StringType.get()), "string map of properties"), optional(24, "toplevel", Types.DecimalType.of(9, 2)));
  // Apply the four additions in one update.
  Schema added = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID) .addColumn("toplevel", Types.DecimalType.of(9, 2)) .addColumn("locations", "alt", Types.FloatType.get()) // map of structs
      .addColumn("points", "z", Types.LongType.get()) // list of structs
      .addColumn("points", "t.t", Types.LongType.get()) // name with '.'
      .apply();
  // Compare as structs so schema-level metadata does not affect equality.
  assertThat(added.asStruct()).isEqualTo(expected.asStruct());
}
public static List<Tab> getTabsFromJson(@Nullable final String tabsJson)
        throws InvalidJsonException {
    // Missing or blank settings fall back to the default tab set.
    if (tabsJson == null || tabsJson.isEmpty()) {
        return getDefaultTabs();
    }

    final List<Tab> parsedTabs = new ArrayList<>();
    try {
        final JsonObject rootObject = JsonParser.object().from(tabsJson);
        if (!rootObject.has(JSON_TABS_ARRAY_KEY)) {
            throw new InvalidJsonException("JSON doesn't contain \"" + JSON_TABS_ARRAY_KEY
                    + "\" array");
        }
        // Keep only array entries that are JSON objects and deserialize cleanly.
        for (final Object entry : rootObject.getArray(JSON_TABS_ARRAY_KEY)) {
            if (entry instanceof JsonObject) {
                final Tab tab = Tab.from((JsonObject) entry);
                if (tab != null) {
                    parsedTabs.add(tab);
                }
            }
        }
    } catch (final JsonParserException e) {
        throw new InvalidJsonException(e);
    }

    // An array that yielded no usable tabs also falls back to the defaults.
    return parsedTabs.isEmpty() ? getDefaultTabs() : parsedTabs;
}
@Test
public void testInvalidRead() {
    // Content missing the tabs array, malformed JSON, and an empty object must all
    // raise InvalidJsonException rather than being parsed.
    final String[] invalidContents = {
            "{\"notTabsArray\":[]}",
            "{invalidJSON]}",
            "{}"
    };
    for (final String invalidContent : invalidContents) {
        try {
            TabsJsonHelper.getTabsFromJson(invalidContent);
            fail("didn't throw exception");
        } catch (final Exception e) {
            assertTrue("\"" + e.getClass().getSimpleName() + "\" is not the expected exception",
                    e instanceof TabsJsonHelper.InvalidJsonException);
        }
    }
}
/**
 * Serializes a DOM {@code Document} to a pretty-printed XML string.
 *
 * @param toRead the document to serialize
 * @return the document as indented XML text
 * @throws TransformerException if the transformation fails
 */
public static String getString(Document toRead) throws TransformerException {
    // Harden the factory against XXE: forbid external DTDs/stylesheets and
    // enable secure processing before creating the transformer.
    TransformerFactory transformerFactory = TransformerFactory.newInstance();
    transformerFactory.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, "");
    transformerFactory.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, "");
    transformerFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);

    // Pretty-print with a two-space indent (Xalan-specific output property).
    Transformer transformer = transformerFactory.newTransformer();
    transformer.setOutputProperty(OutputKeys.INDENT, "yes");
    transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");

    StringWriter writer = new StringWriter();
    transformer.transform(new DOMSource(toRead), new StreamResult(writer));
    return writer.toString();
}
@Test
public void getString() throws Exception {
    // A document with one empty element must serialize to XML containing its tag name.
    final Document document = DocumentBuilderFactory.newInstance()
            .newDocumentBuilder()
            .newDocument();
    document.appendChild(document.createElement("CREATED"));
    final String serialized = DOMParserUtil.getString(document);
    assertThat(serialized).isNotNull().contains("CREATED");
}