focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Visits one GraphQL field and mirrors it into the JSON-Schema tree rooted at the
// mutable visitor state (currentNode/parentNode). NOTE(review): relies on the
// visitor's traversal order to keep parentNode/currentNode pointing at the right
// schema objects when descending into objects and lists — confirm against the
// enclosing class.
@Override
public void visitField(QueryVisitorFieldEnvironment queryVisitorFieldEnvironment) {
    // Each new property should put as required and we should not allow additional properties.
    ArrayNode required = getRequiredArrayNode();
    required.add(queryVisitorFieldEnvironment.getFieldDefinition().getName());
    // Even if type is marked as optional in the GraphQL Schema, it must be present and
    // serialized as null into the Json response. We have to unwrap it first.
    GraphQLOutputType outputType = queryVisitorFieldEnvironment.getFieldDefinition().getType();
    Type definitionType = queryVisitorFieldEnvironment.getFieldDefinition().getDefinition().getType();
    if (TypeUtil.isNonNull(definitionType)) {
        definitionType = TypeUtil.unwrapOne(definitionType);
    }
    // Add this field to current node.
    ObjectNode fieldNode = currentNode.putObject(queryVisitorFieldEnvironment.getFieldDefinition().getName());
    TypeInfo definitionTypeInfo = TypeInfo.typeInfo(definitionType);
    // Treat most common case first: we've got a scalar property.
    if (ScalarInfo.isGraphqlSpecifiedScalar(definitionTypeInfo.getName())) {
        fieldNode.put(JSON_SCHEMA_TYPE, getJsonScalarType(definitionTypeInfo.getName()));
    } else if (outputType instanceof GraphQLObjectType) {
        // Then we deal with objects.
        fieldNode.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_OBJECT_TYPE);
        ObjectNode properties = fieldNode.putObject(JSON_SCHEMA_PROPERTIES);
        parentNode.put(JSON_SCHEMA_ADDITIONAL_PROPERTIES, false);
        fieldNode.put(JSON_SCHEMA_ADDITIONAL_PROPERTIES, false);
        // Descend: fields visited next belong to this nested object.
        parentNode = fieldNode;
        currentNode = properties;
    } else if (TypeUtil.isList(definitionType)) {
        // Then we deal with lists.
        fieldNode.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_ARRAY_TYPE);
        ObjectNode items = fieldNode.putObject(JSON_SCHEMA_ITEMS);
        // Depending on item type, we should initialize an object structure.
        TypeName itemTypeInfo = TypeUtil.unwrapAll(definitionType);
        if (!ScalarInfo.isGraphqlSpecifiedScalar(itemTypeInfo.getName())) {
            // Non-scalar items: model each element as an object and descend.
            items.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_OBJECT_TYPE);
            ObjectNode properties = items.putObject(JSON_SCHEMA_PROPERTIES);
            items.put(JSON_SCHEMA_ADDITIONAL_PROPERTIES, false);
            parentNode = items;
            currentNode = properties;
        }
    } else if (outputType instanceof GraphQLEnumType enumType) {
        // Then we deal with enumerations.
        // Enums serialize as strings constrained to the declared value names.
        fieldNode.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_STRING_TYPE);
        ArrayNode enumNode = fieldNode.putArray(JSON_SCHEMA_ENUM);
        for (GraphQLEnumValueDefinition valDef : enumType.getValues()) {
            enumNode.add(valDef.getName());
        }
    }
}
// Visiting a field whose output type is a GraphQLObjectType must emit a
// JSON-Schema node of type "object" with additionalProperties=false.
@Test
public void testVisitFieldWithObjectType() {
    QueryVisitorFieldEnvironment environment = mock(QueryVisitorFieldEnvironment.class);
    GraphQLOutputType outputType = mock(GraphQLObjectType.class);
    TypeName definitionType = TypeName.newTypeName().name("Object").build();
    // Stub the chain: getFieldDefinition() returns the same mock each call, so
    // further stubbing on the chained getters sticks.
    when(environment.getFieldDefinition()).thenReturn(mock(GraphQLFieldDefinition.class));
    when(environment.getFieldDefinition().getDefinition()).thenReturn(mock(FieldDefinition.class));
    when(environment.getFieldDefinition().getType()).thenReturn(outputType);
    when(environment.getFieldDefinition().getDefinition().getType()).thenReturn(definitionType);
    when(environment.getFieldDefinition().getName()).thenReturn("objectField");
    visitor.visitField(environment);
    JsonNode fieldNode = jsonSchemaData.get(JsonSchemaBuilderQueryVisitor.JSON_SCHEMA_PROPERTIES).get("objectField");
    assertEquals("object", fieldNode.get(JsonSchemaBuilderQueryVisitor.JSON_SCHEMA_TYPE).asText());
    assertFalse(fieldNode.get(JsonSchemaBuilderQueryVisitor.JSON_SCHEMA_ADDITIONAL_PROPERTIES).asBoolean());
}
/**
 * Reads the next byte from the buffered stream.
 *
 * @return the next byte as an unsigned value in [0, 255], or -1 at end of stream
 * @throws IOException if refilling the buffer fails
 */
@Override
public int read() throws IOException {
    // Refill the buffer when exhausted; a failed refill means end of stream.
    if (!ensureDataInBuffer()) {
        return -1;
    }
    // Mask so the byte is returned unsigned, per the InputStream contract.
    int value = buf[position] & BYTE_MASK;
    position++;
    return value;
}
// Drains the stream one byte at a time, checking each against the fixture,
// then confirms the -1 end-of-stream sentinel.
@Test
public void readByteByByte() throws Exception {
    for (byte b : mockInput) {
        // Narrow the unsigned int back to byte for comparison with the fixture.
        assertEquals(b, (byte) in.read());
    }
    assertEquals(-1, in.read());
}
/**
 * Replaces the first occurrence of {@code search} in {@code source} with
 * {@code replace}.
 *
 * @param source  string to scan
 * @param search  substring to find; if absent, {@code source} is returned unchanged
 * @param replace replacement text for the first match
 * @return the spliced string, or {@code source} when there is no match
 */
public static String replaceFirst(String source, String search, String replace) {
    int start = source.indexOf(search);
    if (start == -1) {
        return source;
    }
    // Splice: prefix + replacement + suffix. This also covers a match at index 0,
    // since substring(0, 0) is the empty string — no special case needed.
    return source.substring(0, start) + replace + source.substring(start + search.length());
}
// The search string is not present in the source, so replaceFirst must return
// the source unchanged.
@Test
public void testReplace6() {
    assertEquals("abcdef", JOrphanUtils.replaceFirst("abcdef", "alt=\"\" ", ""));
}
// Requests termination by interrupting this thread; the run loop is expected
// to observe the interrupt and exit.
public void stopIt() {
    super.interrupt();
}
// The worker's hardStop() blocks indefinitely; stopIt() must interrupt it so
// the stopper thread terminates within the await window.
@Test
public void stopIt_interrupts_worker() {
    doAnswer(invocationOnMock -> {
        // Simulate a hang: wait on a condition that never becomes true.
        await().atMost(10, TimeUnit.SECONDS).until(() -> false);
        return null;
    }).when(monitored).hardStop();
    // max stop timeout is 100 milliseconds
    AbstractStopperThread stopper = new AbstractStopperThread("theThreadName", () -> monitored.hardStop(), 5000L) {};
    stopper.start();
    // Wait until the worker is actually inside hardStop() before interrupting.
    verify(monitored, timeout(3_000)).hardStop();
    stopper.stopIt();
    int timeout = 10;
    try {
        await().atMost(timeout, TimeUnit.SECONDS).until(() -> !stopper.isAlive());
    } catch (ConditionTimeoutException conditionTimeoutException) {
        fail(String.format("Thread was still alive after %d seconds.", timeout));
    }
}
/**
 * Tells whether the given class is a concrete, directly instantiable class:
 * not abstract, not an interface, and not flagged native.
 *
 * @param clazz class to inspect
 * @return {@code true} when the class is concrete
 */
public static boolean isProperClass(Class<?> clazz) {
    final int modifiers = clazz.getModifiers();
    boolean abstractOrInterface = Modifier.isAbstract(modifiers) || Modifier.isInterface(modifiers);
    return !abstractOrInterface && !Modifier.isNative(modifiers);
}
// StringValue is a concrete, instantiable class, so it qualifies as "proper".
@Test
void testClassIsProper() {
    assertThat(InstantiationUtil.isProperClass(StringValue.class)).isTrue();
}
// Static factory: starts a CoGroup in which every tupled input is joined using
// the same clause.
public static Impl join(By clause) {
    return new Impl(new JoinArguments(clause));
}
// Three PCollections sharing CG_SCHEMA_1 are co-grouped on ("user", "country").
// Each of the four (user, country) keys must yield one output row carrying the
// key plus one iterable of matching rows per input.
@Test
@Category(NeedsRunner.class)
public void testCoGroupByFieldNames() {
    // Input
    PCollection<Row> pc1 =
        pipeline
            .apply(
                "Create1",
                Create.of(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 1, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 2, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 3, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 4, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 5, "fr").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 6, "fr").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 7, "ar").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 8, "ar").build()))
            .setRowSchema(CG_SCHEMA_1);
    PCollection<Row> pc2 =
        pipeline
            .apply(
                "Create2",
                Create.of(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 9, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 10, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 11, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 12, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 13, "fr").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 14, "fr").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 15, "ar").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 16, "ar").build()))
            .setRowSchema(CG_SCHEMA_1);
    PCollection<Row> pc3 =
        pipeline
            .apply(
                "Create3",
                Create.of(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 17, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 18, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 19, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 20, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 21, "fr").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 22, "fr").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 23, "ar").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 24, "ar").build()))
            .setRowSchema(CG_SCHEMA_1);
    // Output
    Schema expectedSchema =
        Schema.builder()
            .addRowField("key", SIMPLE_CG_KEY_SCHEMA)
            .addIterableField("pc1", FieldType.row(CG_SCHEMA_1))
            .addIterableField("pc2", FieldType.row(CG_SCHEMA_1))
            .addIterableField("pc3", FieldType.row(CG_SCHEMA_1))
            .build();
    // Key (user1, us): values 1-2 from pc1, 9-10 from pc2, 17-18 from pc3.
    Row key1Joined =
        Row.withSchema(expectedSchema)
            .addValue(Row.withSchema(SIMPLE_CG_KEY_SCHEMA).addValues("user1", "us").build())
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 1, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 2, "us").build()))
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 9, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 10, "us").build()))
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 17, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 18, "us").build()))
            .build();
    // Key (user1, il): values 3-4, 11-12, 19-20.
    Row key2Joined =
        Row.withSchema(expectedSchema)
            .addValue(Row.withSchema(SIMPLE_CG_KEY_SCHEMA).addValues("user1", "il").build())
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 3, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 4, "il").build()))
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 11, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 12, "il").build()))
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 19, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 20, "il").build()))
            .build();
    // Key (user2, fr): values 5-6, 13-14, 21-22.
    Row key3Joined =
        Row.withSchema(expectedSchema)
            .addValue(Row.withSchema(SIMPLE_CG_KEY_SCHEMA).addValues("user2", "fr").build())
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 5, "fr").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 6, "fr").build()))
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 13, "fr").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 14, "fr").build()))
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 21, "fr").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 22, "fr").build()))
            .build();
    // Key (user2, ar): values 7-8, 15-16, 23-24.
    Row key4Joined =
        Row.withSchema(expectedSchema)
            .addValue(Row.withSchema(SIMPLE_CG_KEY_SCHEMA).addValues("user2", "ar").build())
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 7, "ar").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 8, "ar").build()))
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 15, "ar").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 16, "ar").build()))
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 23, "ar").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user2", 24, "ar").build()))
            .build();
    PCollection<Row> joined =
        PCollectionTuple.of("pc1", pc1, "pc2", pc2, "pc3", pc3)
            .apply("CoGroup", CoGroup.join(By.fieldNames("user", "country")));
    List<Row> expected = ImmutableList.of(key1Joined, key2Joined, key3Joined, key4Joined);
    PAssert.that(joined).satisfies(actual -> containsJoinedFields(expected, actual));
    pipeline.run();
}
// Static factory: builds a QueryablePipeline restricted to the given transform
// ids within the supplied components.
public static QueryablePipeline forTransforms(
        Collection<String> transformIds, Components components) {
    return new QueryablePipeline(transformIds, components);
}
// The "consumer" transform reads a PCollection whose producer ("root") is not
// in the queried subset, so construction must fail fast.
@Test
public void forTransformsWithMalformedGraph() {
    Components components = Components.newBuilder()
        .putTransforms("root", PTransform.newBuilder().putOutputs("output", "output.out").build())
        .putPcollections("output.out", RunnerApi.PCollection.newBuilder().setUniqueName("output.out").build())
        .putTransforms("consumer", PTransform.newBuilder().putInputs("input", "output.out").build())
        .build();
    thrown.expect(IllegalArgumentException.class);
    // Consumer consumes a PCollection which isn't produced.
    QueryablePipeline.forTransforms(ImmutableSet.of("consumer"), components);
}
/**
 * Always forbidden for sandboxed plugins: attempting to change the system
 * version raises an {@link IllegalAccessError} identifying the plugin.
 */
@Override
public void setSystemVersion(String version) {
    String message = PLUGIN_PREFIX + currentPluginId + " tried to execute setSystemVersion!";
    throw new IllegalAccessError(message);
}
// Plugins must not be able to change the system version through the wrapper.
@Test
public void setSystemVersion() {
    assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.setSystemVersion("1.0.0"));
}
// Redirects logged-in users flagged for a password reset to the reset page.
// NOTE(review): chain.doFilter still runs after the redirect is issued; if
// downstream processing should be short-circuited, an early return would be
// needed — confirm this is intentional.
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) throws IOException {
    if (userSession.hasSession() && userSession.isLoggedIn() && userSession.shouldResetPassword()) {
        redirectTo(response, request.getContextPath() + RESET_PASSWORD_PATH);
    }
    chain.doFilter(request, response);
}
// With a non-root web context, the redirect target must be prefixed with it.
// NOTE(review): userSession stubbing (logged in + shouldResetPassword) is
// assumed to happen in the shared setup — confirm against the test class.
@Test
public void redirect_if_reset_password_set_and_web_context_configured() throws Exception {
    when(request.getContextPath()).thenReturn("/sonarqube");
    underTest.doFilter(request, response, chain);
    verify(response).sendRedirect("/sonarqube/account/reset_password");
}
// Entry point for code splitting: delegates to splitImpl and converts any
// failure (including Errors) into a RuntimeException flagged as a bug, with
// the original failure preserved as the cause.
public static String split(String code, int maxMethodLength, int maxClassMemberCount) {
    try {
        return splitImpl(code, maxMethodLength, maxClassMemberCount);
    } catch (Throwable t) {
        throw new RuntimeException(
            "JavaCodeSplitter failed. This is a bug. Please file an issue.", t);
    }
}
// Empty input is rejected inside splitImpl; the wrapper rethrows it, so the
// original message is found on the cause.
@Test
public void testEmptyCode() {
    assertThatThrownBy(() -> JavaCodeSplitter.split("", 4000, 10000))
        .cause()
        .hasMessage("code cannot be empty");
}
// Checks existence, preferring the security-scoped bookmark URL when one can
// be acquired; falls back to the superclass check when no bookmark resolves or
// access is denied. The acquired lock is always released in the finally block
// (release must tolerate null).
@Override
public boolean exists(final LinkOption... options) {
    NSURL resolved = null;
    try {
        resolved = this.lock(false);
        if (null == resolved) {
            // No bookmark available; use the default implementation.
            return super.exists(options);
        }
        return Files.exists(Paths.get(resolved.path()));
    } catch (AccessDeniedException e) {
        // Not permitted to resolve the bookmark; fall back.
        return super.exists(options);
    } finally {
        this.release(resolved);
    }
}
// mkdir must succeed and be idempotent: creating an already-existing
// directory is not an error.
@Test
public void testMkdir() throws Exception {
    FinderLocal l = new FinderLocal(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    new DefaultLocalDirectoryFeature().mkdir(l);
    assertTrue(l.exists());
    new DefaultLocalDirectoryFeature().mkdir(l);
    assertTrue(l.exists());
    l.delete();
}
// Partitions the operator's key groups into at least minNumSplits input
// splits, one split per computed KeyGroupRange.
@Override
public KeyGroupRangeInputSplit[] createInputSplits(int minNumSplits) throws IOException {
    final int maxParallelism = operatorState.getMaxParallelism();
    final List<KeyGroupRange> keyGroups = sortedKeyGroupRanges(minNumSplits, maxParallelism);
    return CollectionUtil.mapWithIndex(
            keyGroups,
            (keyGroupRange, index) -> createKeyGroupRangeInputSplit(
                operatorState, maxParallelism, keyGroupRange, index))
        .toArray(KeyGroupRangeInputSplit[]::new);
}
// A single-subtask operator state with max parallelism 128 must still be
// partitioned into the 4 requested splits.
@Test
public void testCreatePartitionedInputSplits() throws Exception {
    OperatorID operatorID = OperatorIDGenerator.fromUid("uid");
    OperatorSubtaskState state = createOperatorSubtaskState(new StreamFlatMap<>(new StatefulFunction()));
    OperatorState operatorState = new OperatorState(operatorID, 1, 128);
    operatorState.putState(0, state);
    KeyedStateInputFormat<?, ?, ?> format = new KeyedStateInputFormat<>(
        operatorState,
        new MemoryStateBackend(),
        new Configuration(),
        new KeyedStateReaderOperator<>(new ReaderFunction(), Types.INT),
        new ExecutionConfig());
    KeyGroupRangeInputSplit[] splits = format.createInputSplits(4);
    Assert.assertEquals("Failed to properly partition operator state into input splits", 4, splits.length);
}
// Synchronous wrapper: blocks on the async variant and returns its result.
@Override
public Boolean clearExpire(K key) {
    return get(clearExpireAsync(key));
}
// An object-level expireAt(+100ms) is cancelled by clearExpire(), so after a
// 500ms sleep the map (whose entry TTL is a full second) must still hold its
// entry.
@Test
public void testClearExpire() throws InterruptedException {
    RMapCacheNative<String, String> cache = redisson.getMapCacheNative("simple");
    cache.put("0", "8", Duration.ofSeconds(1));
    cache.expireAt(System.currentTimeMillis() + 100);
    cache.clearExpire();
    Thread.sleep(500);
    Assertions.assertEquals(1, cache.size());
    cache.destroy();
}
// Static factory over the Projection constructor; item validation happens in
// the constructor itself.
public static Projection of(final Collection<? extends SelectItem> selectItems) {
    return new Projection(selectItems);
}
// A bare SelectItem mock is not one of the supported column types, so the
// Projection constructor must reject it.
@Test(expected = UnsupportedOperationException.class)
public void shouldThrowOnUnsupportedColumnType() {
    Projection.of(ImmutableList.of(mock(SelectItem.class)));
}
// Builds the CRITICAL alarm raised when an SNMP walk of the device fails.
// The raise timestamp is also embedded in the alarm id for uniqueness.
@Override
public DefaultAlarm buildWalkFailedAlarm(DeviceId deviceId) {
    long timeRaised = System.currentTimeMillis();
    return new DefaultAlarm.Builder(
        AlarmId.alarmId(deviceId, Long.toString(timeRaised)),
        deviceId,
        "SNMP alarm retrieval failed",
        Alarm.SeverityLevel.CRITICAL,
        timeRaised).build();
}
// NOTE(review): the expected alarm is built in setup; equality here assumes
// DefaultAlarm.equals ignores the millisecond raise time — confirm, otherwise
// this test is timing-sensitive.
@Test
public void walkFailedAlarm() {
    assertEquals("Alarms should be equals", alarm, snmpController.buildWalkFailedAlarm(device.deviceId()));
}
// The filter node does not alter the schema; expose the source's schema as-is.
@Override
public LogicalSchema getSchema() {
    return getSource().getSchema();
}
// A WHERE clause covering only K1 of a two-column key must be rejected with a
// message listing both the specified and the expected key columns.
@Test
public void shouldThrowMultiKeyExpressionsThatDontCoverAllKeys() {
    // Given:
    when(source.getSchema()).thenReturn(MULTI_KEY_SCHEMA);
    final Expression expression = new ComparisonExpression(
        Type.EQUAL,
        new UnqualifiedColumnReferenceExp(ColumnName.of("K1")),
        new IntegerLiteral(1)
    );
    // When:
    final KsqlException e = assertThrows(
        KsqlException.class,
        () -> new QueryFilterNode(NODE_ID, source, expression, metaStore, ksqlConfig, false, plannerOptions));
    // Then:
    assertThat(e.getMessage(), containsString(
        "Multi-column sources must specify every key in the WHERE clause. "
            + "Specified: [`K1`] Expected: [`K1` INTEGER KEY, `K2` INTEGER KEY]"));
}
/**
 * Renders the throwable's stack trace and returns at most {@code maxDepth}
 * lines of it, split on the platform line separator.
 *
 * @param t        throwable whose trace is rendered
 * @param maxDepth maximum number of lines to return
 * @return up to {@code maxDepth} trace lines, in order
 */
public static List<String> getStackFrameList(final Throwable t, int maxDepth) {
    final String stackTrace = getStackTrace(t);
    final StringTokenizer frames = new StringTokenizer(stackTrace, System.lineSeparator());
    final List<String> list = new ArrayList<>();
    int taken = 0;
    // Stop at whichever comes first: the depth cap or the end of the trace.
    while (taken < maxDepth && frames.hasMoreTokens()) {
        list.add(frames.nextToken());
        taken++;
    }
    return list;
}
// With a cap of 10 the returned list must contain exactly 10 lines
// (the fixture's trace is deeper than that).
@Test
void getStackFrameList() {
    List<String> stackFrameList = ExceptionUtils.getStackFrameList(exception, 10);
    Assertions.assertEquals(10, stackFrameList.size());
}
// Builds a FlatMapElements from an InferableFunction, recovering the element
// type OutputT from the Iterable<OutputT> in the function's output descriptor.
public static <InputT, OutputT> FlatMapElements<InputT, OutputT> via(
        InferableFunction<? super InputT, ? extends Iterable<OutputT>> fn) {
    // Extract OutputT from the Iterable type parameter of the output descriptor.
    TypeDescriptor<OutputT> outputType =
        TypeDescriptors.extractFromTypeParameters(
            (TypeDescriptor<Iterable<OutputT>>) fn.getOutputTypeDescriptor(),
            Iterable.class,
            new TypeDescriptors.TypeVariableExtractor<Iterable<OutputT>, OutputT>() {});
    TypeDescriptor<InputT> inputType = (TypeDescriptor<InputT>) fn.getInputTypeDescriptor();
    return new FlatMapElements<>(fn, inputType, outputType);
}
// Each input element n maps to the pair (-n, n); flattening {1, 2, 3} yields
// the six values asserted below in any order.
@Test
@Category(NeedsRunner.class)
public void testFlatMapSimpleFunction() throws Exception {
    PCollection<Integer> output =
        pipeline
            .apply(Create.of(1, 2, 3))
            // Note that FlatMapElements takes an InferableFunction<InputT, ? extends
            // Iterable<OutputT>>
            // so the use of List<Integer> here (as opposed to Iterable<Integer>) deliberately
            // exercises
            // the use of an upper bound.
            .apply(
                FlatMapElements.via(
                    new SimpleFunction<Integer, List<Integer>>() {
                        @Override
                        public List<Integer> apply(Integer input) {
                            return ImmutableList.of(-input, input);
                        }
                    }));
    PAssert.that(output).containsInAnyOrder(1, -2, -1, -3, 2, 3);
    pipeline.run();
}
// Validates that a dict-data row exists for the given id; a null id means
// "nothing to check" and passes silently.
@VisibleForTesting
public void validateDictDataExists(Long id) {
    if (id == null) {
        return;
    }
    DictDataDO dictData = dictDataMapper.selectById(id);
    if (dictData == null) {
        throw exception(DICT_DATA_NOT_EXISTS);
    }
}
// After inserting a row, validating its id must complete without throwing.
@Test
public void testValidateDictDataExists_success() {
    // Seed data
    DictDataDO dbDictData = randomDictDataDO();
    dictDataMapper.insert(dbDictData);// insert an existing row first
    // Call succeeds (no exception expected)
    dictDataService.validateDictDataExists(dbDictData.getId());
}
/**
 * Returns the pipeline's metrics: the captured terminal snapshot once the
 * pipeline has finished, otherwise the live metrics from the delegate.
 */
@Override
public MetricResults metrics() {
    return terminalMetrics != null ? terminalMetrics : delegate.metrics();
}
// While the pipeline has not terminated, metrics() must pass through to the
// delegate rather than a cached terminal snapshot.
@Test
public void givenNotTerminated_reportsMetrics() {
    PipelineResult delegate = mock(PipelineResult.class);
    when(delegate.metrics()).thenReturn(mock(MetricResults.class));
    PrismPipelineResult underTest = new PrismPipelineResult(delegate, exec::stop);
    assertThat(underTest.metrics()).isNotNull();
    exec.stop();
}
/**
 * Tells whether enough bytes are readable to hold a complete packet header.
 * During the startup phase the packet has no message-type byte, only the
 * payload-length field.
 */
@Override
public boolean isValidHeader(final int readableBytes) {
    final int minimumLength = (startupPhase ? 0 : MESSAGE_TYPE_LENGTH) + PAYLOAD_LENGTH;
    return readableBytes >= minimumLength;
}
// 50 readable bytes comfortably exceeds the minimum header size.
@Test
void assertIsValidHeader() {
    assertTrue(new PostgreSQLPacketCodecEngine().isValidHeader(50));
}
// Convenience overload: operates at the given index with unspecified (NaN)
// price and amount, delegating defaults to the full overload.
public Trade operate(int index) {
    return operate(index, NaN, NaN);
}
// Positions with matching entry trades (same type, same index) must be equal;
// a differing entry index must break equality.
@Test
public void testEqualsForEntryOrders() {
    Position trLeft = newPosition;
    Position trRightEquals = new Position();
    Position trRightNotEquals = new Position();
    assertEquals(TradeType.BUY, trRightNotEquals.operate(2).getType());
    assertNotEquals(trLeft, trRightNotEquals);
    assertEquals(TradeType.BUY, trLeft.operate(1).getType());
    assertEquals(TradeType.BUY, trRightEquals.operate(1).getType());
    assertEquals(trLeft, trRightEquals);
    assertNotEquals(trLeft, trRightNotEquals);
}
// Returns an immutable snapshot of all configured interfaces carrying the
// given VLAN.
@Override
public Set<Interface> getInterfacesByVlan(VlanId vlan) {
    return interfaces.values()
        .stream()
        .flatMap(Collection::stream)
        .filter(intf -> intf.vlan().equals(vlan))
        .collect(collectingAndThen(toSet(), ImmutableSet::copyOf));
}
// The single configured interface on VLAN 1 must be returned for that VLAN.
@Test
public void testGetInterfacesByVlan() throws Exception {
    VlanId vlanId = VlanId.vlanId((short) 1);
    Set<Interface> byVlan = Collections.singleton(createInterface(1));
    assertEquals(byVlan, interfaceManager.getInterfacesByVlan(vlanId));
}
/**
 * Tells whether a JSON node is "empty", dispatching on its concrete kind:
 * arrays and objects use their dedicated helpers, everything else falls back
 * to the blank-text check.
 */
protected boolean isNodeEmpty(JsonNode json) {
    if (json.isArray()) {
        return isListEmpty((ArrayNode) json);
    }
    if (json.isObject()) {
        return isObjectEmpty((ObjectNode) json);
    }
    // Scalar / value node.
    return isEmptyText(json);
}
// An object node whose only member is an empty array is itself considered empty.
@Test
public void isNodeEmpty_objectNodeWithArrayNode() {
    ObjectNode objectNode = new ObjectNode(factory);
    objectNode.set("empty array", new ArrayNode(factory));
    assertThat(expressionEvaluator.isNodeEmpty(objectNode)).isTrue();
}
// Convenience overload: builds a ColumnIndex with no repetition/definition
// level histograms (the trailing arguments passed as null).
public static ColumnIndex build(
        PrimitiveType type,
        BoundaryOrder boundaryOrder,
        List<Boolean> nullPages,
        List<Long> nullCounts,
        List<ByteBuffer> minValues,
        List<ByteBuffer> maxValues) {
    return build(type, boundaryOrder, nullPages, nullCounts, minValues, maxValues, null, null);
}
// Float column with three all-null pages followed by three value pages in
// ascending boundary order; min/max buffers for the null pages stay null.
@Test
public void testStaticBuildFloat() {
    ColumnIndex columnIndex = ColumnIndexBuilder.build(
        Types.required(FLOAT).named("test_float"),
        BoundaryOrder.ASCENDING,
        asList(true, true, true, false, false, false),
        asList(9l, 8l, 7l, 6l, 0l, 0l),
        toBBList(null, null, null, -3.0f, -2.0f, 0.1f),
        toBBList(null, null, null, -2.0f, 0.0f, 6.0f));
    assertEquals(BoundaryOrder.ASCENDING, columnIndex.getBoundaryOrder());
    assertCorrectNullCounts(columnIndex, 9, 8, 7, 6, 0, 0);
    assertCorrectNullPages(columnIndex, true, true, true, false, false, false);
    assertCorrectValues(columnIndex.getMaxValues(), null, null, null, -2.0f, 0.0f, 6.0f);
    assertCorrectValues(columnIndex.getMinValues(), null, null, null, -3.0f, -2.0f, 0.1f);
}
// Looks up a tag-scoped config row. Blank tenant/tag normalize to the empty
// string (the stored default); tag is additionally trimmed. Returns null when
// no row matches; connection failures are logged FATAL and rethrown.
@Override
public ConfigInfoTagWrapper findConfigInfo4Tag(final String dataId, final String group, final String tenant,
        final String tag) {
    String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
    String tagTmp = StringUtils.isBlank(tag) ? StringUtils.EMPTY : tag.trim();
    try {
        ConfigInfoTagMapper configInfoTagMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.CONFIG_INFO_TAG);
        return this.jt.queryForObject(configInfoTagMapper.select(
                Arrays.asList("id", "data_id", "group_id", "tenant_id", "tag_id", "app_name", "content", "gmt_modified"),
                Arrays.asList("data_id", "group_id", "tenant_id", "tag_id")),
            new Object[] {dataId, group, tenantTmp, tagTmp}, CONFIG_INFO_TAG_WRAPPER_ROW_MAPPER);
    } catch (EmptyResultDataAccessException e) {
        // Indicates that the data does not exist, returns null.
        return null;
    } catch (CannotGetJdbcConnectionException e) {
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
// Covers the three outcomes of findConfigInfo4Tag: a matching row is returned,
// EmptyResultDataAccessException maps to null, and a connection error
// propagates to the caller.
@Test
void testFindConfigInfo4Tag() {
    String dataId = "dataId1112222";
    String group = "group22";
    String tenant = "tenant2";
    String tag = "tag123345";
    //mock query tag return obj
    ConfigInfoTagWrapper configInfoTagWrapperMocked = new ConfigInfoTagWrapper();
    configInfoTagWrapperMocked.setLastModified(System.currentTimeMillis());
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant, tag}),
        eq(CONFIG_INFO_TAG_WRAPPER_ROW_MAPPER))).thenReturn(configInfoTagWrapperMocked);
    ConfigInfoTagWrapper configInfo4TagReturn = externalConfigInfoTagPersistService.findConfigInfo4Tag(dataId, group, tenant, tag);
    assertEquals(configInfoTagWrapperMocked, configInfo4TagReturn);
    //mock query tag throw EmptyResultDataAccessException
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant, tag}),
        eq(CONFIG_INFO_TAG_WRAPPER_ROW_MAPPER))).thenThrow(new EmptyResultDataAccessException(1));
    ConfigInfoTagWrapper configInfo4Tag = externalConfigInfoTagPersistService.findConfigInfo4Tag(dataId, group, tenant, tag);
    assertNull(configInfo4Tag);
    //mock query tag throw CannotGetJdbcConnectionException
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant, tag}),
        eq(CONFIG_INFO_TAG_WRAPPER_ROW_MAPPER))).thenThrow(new CannotGetJdbcConnectionException("con error"));
    try {
        externalConfigInfoTagPersistService.findConfigInfo4Tag(dataId, group, tenant, tag);
        assertTrue(false);
    } catch (Exception e) {
        assertEquals("con error", e.getMessage());
    }
}
// Parses strings like "10", "10 s", "250ms" into a Duration. A missing unit
// label defaults to milliseconds. Rejects empty input, input without leading
// digits, unknown unit labels, and values that overflow Duration.
public static Duration parseDuration(String text) {
    checkNotNull(text);
    final String trimmed = text.trim();
    checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string");
    final int len = trimmed.length();
    int pos = 0;
    char current;
    // Scan the leading run of ASCII digits.
    while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') {
        pos++;
    }
    final String number = trimmed.substring(0, pos);
    // Unit labels are matched case-insensitively (US locale).
    final String unitLabel = trimmed.substring(pos).trim().toLowerCase(Locale.US);
    if (number.isEmpty()) {
        throw new NumberFormatException("text does not start with a number");
    }
    final BigInteger value;
    try {
        value = new BigInteger(number); // this throws a NumberFormatException
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException(
            "The value '" + number + "' cannot be represented as an integer number.", e);
    }
    final ChronoUnit unit;
    if (unitLabel.isEmpty()) {
        // No label: interpret the number as milliseconds.
        unit = ChronoUnit.MILLIS;
    } else {
        unit = LABEL_TO_UNIT_MAP.get(unitLabel);
    }
    if (unit == null) {
        throw new IllegalArgumentException(
            "Time interval unit label '" + unitLabel
                + "' does not match any of the recognized units: " + TimeUnit.getAllUnits());
    }
    try {
        return convertBigIntToDuration(value, unit);
    } catch (ArithmeticException e) {
        throw new IllegalArgumentException(
            "The value '" + number + "' cannot be represented as java.time.Duration (numeric overflow).", e);
    }
}
// A value far beyond Duration's capacity must surface as
// IllegalArgumentException (numeric overflow), not a raw ArithmeticException.
@Test
void testParseDurationNumberOverflow() {
    assertThatThrownBy(() -> TimeUtils.parseDuration("100000000000000000000000000000000 ms"))
        .isInstanceOf(IllegalArgumentException.class);
}
// Pre-update validation for LOAD SINGLE TABLE: storage units exist, no
// duplicate tables in the default schema, and the actual tables are present.
@Override
public void checkBeforeUpdate(final LoadSingleTableStatement sqlStatement) {
    checkStorageUnits(sqlStatement);
    String defaultSchemaName = new DatabaseTypeRegistry(database.getProtocolType()).getDefaultSchemaName(database.getName());
    checkDuplicatedTables(sqlStatement, defaultSchemaName);
    checkActualTableExist(sqlStatement, defaultSchemaName);
}
// With no storage units registered, validation must fail before any table checks.
@Test
void assertCheckWithEmptyStorageUnits() {
    when(database.getName()).thenReturn("foo_db");
    when(database.getResourceMetaData().getStorageUnits().isEmpty()).thenReturn(true);
    executor.setDatabase(database);
    LoadSingleTableStatement sqlStatement = new LoadSingleTableStatement(Collections.singleton(new SingleTableSegment("*", null, "*")));
    assertThrows(EmptyStorageUnitException.class, () -> executor.checkBeforeUpdate(sqlStatement));
}
// Builds the per-request share-fetch context. Full-epoch requests tear down
// (FINAL_EPOCH) or replace (INITIAL_EPOCH) the cached session; otherwise the
// existing session is validated against the request epoch and updated in place
// under the cache lock.
public ShareFetchContext newContext(String groupId, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData,
        List<TopicIdPartition> toForget, ShareRequestMetadata reqMetadata, Boolean isAcknowledgeDataPresent) {
    ShareFetchContext context;
    // TopicPartition with maxBytes as 0 should not be added in the cachedPartitions
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchDataWithMaxBytes = new HashMap<>();
    shareFetchData.forEach((tp, sharePartitionData) -> {
        if (sharePartitionData.maxBytes > 0) shareFetchDataWithMaxBytes.put(tp, sharePartitionData);
    });
    // If the request's epoch is FINAL_EPOCH or INITIAL_EPOCH, we should remove the existing sessions. Also, start a
    // new session in case it is INITIAL_EPOCH. Hence, we need to treat them as special cases.
    if (reqMetadata.isFull()) {
        ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId());
        if (reqMetadata.epoch() == ShareRequestMetadata.FINAL_EPOCH) {
            // If the epoch is FINAL_EPOCH, don't try to create a new session.
            if (!shareFetchDataWithMaxBytes.isEmpty()) {
                throw Errors.INVALID_REQUEST.exception();
            }
            if (cache.remove(key) == null) {
                log.error("Share session error for {}: no such share session found", key);
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            } else {
                log.debug("Removed share session with key " + key);
            }
            context = new FinalContext();
        } else {
            // INITIAL_EPOCH: acknowledgements are illegal on the first fetch.
            if (isAcknowledgeDataPresent) {
                log.error("Acknowledge data present in Initial Fetch Request for group {} member {}", groupId, reqMetadata.memberId());
                throw Errors.INVALID_REQUEST.exception();
            }
            // Drop any stale session for this member before creating a new one.
            if (cache.remove(key) != null) {
                log.debug("Removed share session with key {}", key);
            }
            ImplicitLinkedHashCollection<CachedSharePartition> cachedSharePartitions = new ImplicitLinkedHashCollection<>(shareFetchDataWithMaxBytes.size());
            shareFetchDataWithMaxBytes.forEach((topicIdPartition, reqData) ->
                cachedSharePartitions.mustAdd(new CachedSharePartition(topicIdPartition, reqData, false)));
            ShareSessionKey responseShareSessionKey = cache.maybeCreateSession(groupId, reqMetadata.memberId(),
                time.milliseconds(), cachedSharePartitions);
            if (responseShareSessionKey == null) {
                log.error("Could not create a share session for group {} member {}", groupId, reqMetadata.memberId());
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            }
            context = new ShareSessionContext(reqMetadata, shareFetchDataWithMaxBytes);
            log.debug("Created a new ShareSessionContext with key {} isSubsequent {} returning {}. A new share " +
                "session will be started.", responseShareSessionKey, false,
                partitionsToLogString(shareFetchDataWithMaxBytes.keySet()));
        }
    } else {
        // We update the already existing share session.
        synchronized (cache) {
            ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId());
            ShareSession shareSession = cache.get(key);
            if (shareSession == null) {
                log.error("Share session error for {}: no such share session found", key);
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            }
            if (shareSession.epoch != reqMetadata.epoch()) {
                log.debug("Share session error for {}: expected epoch {}, but got {} instead", key,
                    shareSession.epoch, reqMetadata.epoch());
                throw Errors.INVALID_SHARE_SESSION_EPOCH.exception();
            }
            Map<ShareSession.ModifiedTopicIdPartitionType, List<TopicIdPartition>> modifiedTopicIdPartitions = shareSession.update(
                shareFetchDataWithMaxBytes, toForget);
            // Mark the session recently used and advance its epoch for the next request.
            cache.touch(shareSession, time.milliseconds());
            shareSession.epoch = ShareRequestMetadata.nextEpoch(shareSession.epoch);
            log.debug("Created a new ShareSessionContext for session key {}, epoch {}: " +
                "added {}, updated {}, removed {}", shareSession.key(), shareSession.epoch,
                partitionsToLogString(modifiedTopicIdPartitions.get(
                    ShareSession.ModifiedTopicIdPartitionType.ADDED)),
                partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.UPDATED)),
                partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.REMOVED))
            );
            context = new ShareSessionContext(reqMetadata, shareSession);
        }
    }
    return context;
}
@Test
public void testShareSessionExpiration() {
    // Cache holds at most 2 sessions; eviction considers entries idle for >= 1000 ms.
    Time time = new MockTime();
    ShareSessionCache cache = new ShareSessionCache(2, 1000);
    SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder()
        .withCache(cache).withTime(time).build();
    Map<Uuid, String> topicNames = new HashMap<>();
    Uuid fooId = Uuid.randomUuid();
    topicNames.put(fooId, "foo");
    TopicIdPartition foo0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0));
    TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1));
    // Create a new share session, session 1
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> session1req = new LinkedHashMap<>();
    session1req.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100));
    session1req.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100));
    String groupId = "grp";
    ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH);
    ShareFetchContext session1context = sharePartitionManager.newContext(groupId, session1req, EMPTY_PART_LIST, reqMetadata1, false);
    assertEquals(session1context.getClass(), ShareSessionContext.class);
    LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData1 = new LinkedHashMap<>();
    respData1.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition()));
    respData1.put(foo1, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo1.partition()));
    ShareFetchResponse session1resp = session1context.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData1);
    assertEquals(Errors.NONE, session1resp.error());
    assertEquals(2, session1resp.responseData(topicNames).size());
    ShareSessionKey session1Key = new ShareSessionKey(groupId, reqMetadata1.memberId());
    // check share session entered into cache
    assertNotNull(cache.get(session1Key));
    time.sleep(500);
    // Create a second new share session
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> session2req = new LinkedHashMap<>();
    session2req.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100));
    session2req.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100));
    ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH);
    ShareFetchContext session2context = sharePartitionManager.newContext(groupId, session2req, EMPTY_PART_LIST, reqMetadata2, false);
    assertEquals(session2context.getClass(), ShareSessionContext.class);
    LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData2 = new LinkedHashMap<>();
    respData2.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition()));
    respData2.put(foo1, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo1.partition()));
    ShareFetchResponse session2resp = session2context.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData2);
    assertEquals(Errors.NONE, session2resp.error());
    assertEquals(2, session2resp.responseData(topicNames).size());
    ShareSessionKey session2Key = new ShareSessionKey(groupId, reqMetadata2.memberId());
    // both newly created entries are present in cache
    assertNotNull(cache.get(session1Key));
    assertNotNull(cache.get(session2Key));
    time.sleep(500);
    // Create a subsequent share fetch context for session 1
    // (a subsequent fetch must "touch" session 1 so that it becomes the most recently used entry)
    ShareFetchContext session1context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST,
        new ShareRequestMetadata(reqMetadata1.memberId(), 1), true);
    assertEquals(session1context2.getClass(), ShareSessionContext.class);
    // total sleep time will now be large enough that share session 1 will be evicted if not correctly touched
    time.sleep(501);
    // create one final share session to test that the least recently used entry is evicted
    // the second share session should be evicted because the first share session was incrementally fetched
    // more recently than the second session was created
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> session3req = new LinkedHashMap<>();
    session3req.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100));
    session3req.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100));
    ShareRequestMetadata reqMetadata3 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH);
    ShareFetchContext session3context = sharePartitionManager.newContext(groupId, session3req, EMPTY_PART_LIST, reqMetadata3, false);
    LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData3 = new LinkedHashMap<>();
    respData3.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition()));
    respData3.put(foo1, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo1.partition()));
    ShareFetchResponse session3resp = session3context.updateAndGenerateResponseData(groupId, reqMetadata3.memberId(), respData3);
    assertEquals(Errors.NONE, session3resp.error());
    assertEquals(2, session3resp.responseData(topicNames).size());
    ShareSessionKey session3Key = new ShareSessionKey(groupId, reqMetadata3.memberId());
    // session 1 survives because the incremental fetch refreshed its last-used timestamp
    assertNotNull(cache.get(session1Key));
    assertNull(cache.get(session2Key), "share session 2 should have been evicted by latest share session, " +
        "as share session 1 was used more recently");
    assertNotNull(cache.get(session3Key));
}
public static String decodeObjectIdentifier(byte[] data) { return decodeObjectIdentifier(data, 0, data.length); }
@Test public void decodeObjectIdentifierWithSingleBytes() { assertEquals("0.1.2", Asn1Utils.decodeObjectIdentifier(new byte[] { 0x01, 0x02 })); }
public static Read read() { return new AutoValue_RabbitMqIO_Read.Builder() .setQueueDeclare(false) .setExchangeDeclare(false) .setMaxReadTime(null) .setMaxNumRecords(Long.MAX_VALUE) .setUseCorrelationId(false) .build(); }
@Test public void testReadDeclaredFanoutExchange() throws Exception { doExchangeTest( new ExchangeTestPlan( RabbitMqIO.read().withExchange("DeclaredFanoutExchange", "fanout", "ignored"), 10)); }
public SmppCommand createSmppCommand(SMPPSession session, Exchange exchange) { SmppCommandType commandType = SmppCommandType.fromExchange(exchange); return commandType.createCommand(session, configuration); }
@Test public void createSmppReplaceSmCommand() { SMPPSession session = new SMPPSession(); Exchange exchange = new DefaultExchange(new DefaultCamelContext()); exchange.getIn().setHeader(SmppConstants.COMMAND, "ReplaceSm"); SmppCommand command = binding.createSmppCommand(session, exchange); assertTrue(command instanceof SmppReplaceSmCommand); }
boolean isWriteShareGroupStateSuccessful(List<PersisterStateBatch> stateBatches) { WriteShareGroupStateResult response; try { response = persister.writeState(new WriteShareGroupStateParameters.Builder() .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder<PartitionStateBatchData>() .setGroupId(this.groupId) .setTopicsData(Collections.singletonList(new TopicData<>(topicIdPartition.topicId(), Collections.singletonList(PartitionFactory.newPartitionStateBatchData( topicIdPartition.partition(), stateEpoch, startOffset, 0, stateBatches)))) ).build()).build()).get(); } catch (InterruptedException | ExecutionException e) { log.error("Failed to write the share group state for share partition: {}-{}", groupId, topicIdPartition, e); throw new IllegalStateException(String.format("Failed to write the share group state for share partition %s-%s", groupId, topicIdPartition), e); } if (response == null || response.topicsData() == null || response.topicsData().size() != 1) { log.error("Failed to write the share group state for share partition: {}-{}. Invalid state found: {}", groupId, topicIdPartition, response); throw new IllegalStateException(String.format("Failed to write the share group state for share partition %s-%s", groupId, topicIdPartition)); } TopicData<PartitionErrorData> state = response.topicsData().get(0); if (state.topicId() != topicIdPartition.topicId() || state.partitions().size() != 1 || state.partitions().get(0).partition() != topicIdPartition.partition()) { log.error("Failed to write the share group state for share partition: {}-{}. 
Invalid topic partition response: {}", groupId, topicIdPartition, response); throw new IllegalStateException(String.format("Failed to write the share group state for share partition %s-%s", groupId, topicIdPartition)); } PartitionErrorData partitionData = state.partitions().get(0); if (partitionData.errorCode() != Errors.NONE.code()) { Exception exception = Errors.forCode(partitionData.errorCode()).exception(partitionData.errorMessage()); log.error("Failed to write the share group state for share partition: {}-{} due to exception", groupId, topicIdPartition, exception); return false; } return true; }
@Test public void testIsWriteShareGroupStateSuccessful() { Persister persister = Mockito.mock(Persister.class); mockPersisterReadStateMethod(persister); SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); assertTrue(sharePartition.isWriteShareGroupStateSuccessful(Mockito.anyList())); }
@Override public List<String> getGroups(String user) { return Collections.emptyList(); }
@Test public void testGetGroups() { String user = "user"; List<String> expResult = Collections.emptyList(); List<String> result = ngm.getGroups(user); assertEquals("No groups should be returned", expResult, result); ngm.cacheGroupsAdd(Arrays.asList(new String[] {"group1", "group2"})); result = ngm.getGroups(user); assertEquals("No groups should be returned", expResult, result); ngm.cacheGroupsRefresh(); result = ngm.getGroups(user); assertEquals("No groups should be returned", expResult, result); }
@Override public KeyGroupRangeInputSplit[] createInputSplits(int minNumSplits) throws IOException { final int maxParallelism = operatorState.getMaxParallelism(); final List<KeyGroupRange> keyGroups = sortedKeyGroupRanges(minNumSplits, maxParallelism); return CollectionUtil.mapWithIndex( keyGroups, (keyGroupRange, index) -> createKeyGroupRangeInputSplit( operatorState, maxParallelism, keyGroupRange, index)) .toArray(KeyGroupRangeInputSplit[]::new); }
@Test public void testReadState() throws Exception { OperatorID operatorID = OperatorIDGenerator.fromUid("uid"); OperatorSubtaskState state = createOperatorSubtaskState(new StreamFlatMap<>(new StatefulFunction())); OperatorState operatorState = new OperatorState(operatorID, 1, 128); operatorState.putState(0, state); KeyedStateInputFormat<?, ?, ?> format = new KeyedStateInputFormat<>( operatorState, new MemoryStateBackend(), new Configuration(), new KeyedStateReaderOperator<>(new ReaderFunction(), Types.INT), new ExecutionConfig()); KeyGroupRangeInputSplit split = format.createInputSplits(1)[0]; KeyedStateReaderFunction<Integer, Integer> userFunction = new ReaderFunction(); List<Integer> data = readInputSplit(split, userFunction); Assert.assertEquals("Incorrect data read from input split", Arrays.asList(1, 2, 3), data); }
public static <K, V> boolean isNullOrEmpty(Map<K, V> map) { return map == null || map.isEmpty(); }
@Test public void isNullOrEmpty_whenEmpty() { assertTrue(MapUtil.isNullOrEmpty(new HashMap<>())); }
@Override public void createFloatingIp(KubevirtFloatingIp floatingIp) { checkNotNull(floatingIp, ERR_NULL_FLOATING_IP); checkArgument(!Strings.isNullOrEmpty(floatingIp.id()), ERR_NULL_FLOATING_IP_ID); kubevirtRouterStore.createFloatingIp(floatingIp); log.info(String.format(MSG_FLOATING_IP, floatingIp.floatingIp(), MSG_CREATED)); }
@Test(expected = NullPointerException.class) public void testCreateNullFloatingIp() { target.createFloatingIp(null); }
@VisibleForTesting static DwrfProto.Stream.Kind toStreamKind(StreamKind streamKind) { switch (streamKind) { case PRESENT: return DwrfProto.Stream.Kind.PRESENT; case DATA: return DwrfProto.Stream.Kind.DATA; case SECONDARY: return DwrfProto.Stream.Kind.NANO_DATA; case LENGTH: return DwrfProto.Stream.Kind.LENGTH; case DICTIONARY_DATA: return DwrfProto.Stream.Kind.DICTIONARY_DATA; case DICTIONARY_COUNT: return DwrfProto.Stream.Kind.DICTIONARY_COUNT; case ROW_INDEX: return DwrfProto.Stream.Kind.ROW_INDEX; case IN_MAP: return DwrfProto.Stream.Kind.IN_MAP; } throw new IllegalArgumentException("Unsupported stream kind: " + streamKind); }
@Test public void testToStreamKind() { assertEquals(toStreamKind(PRESENT), DwrfProto.Stream.Kind.PRESENT); assertEquals(toStreamKind(IN_MAP), DwrfProto.Stream.Kind.IN_MAP); assertEquals(toStreamKind(DATA), DwrfProto.Stream.Kind.DATA); assertEquals(toStreamKind(SECONDARY), DwrfProto.Stream.Kind.NANO_DATA); assertEquals(toStreamKind(LENGTH), DwrfProto.Stream.Kind.LENGTH); assertEquals(toStreamKind(DICTIONARY_DATA), DwrfProto.Stream.Kind.DICTIONARY_DATA); assertEquals(toStreamKind(DICTIONARY_COUNT), DwrfProto.Stream.Kind.DICTIONARY_COUNT); assertEquals(toStreamKind(ROW_INDEX), DwrfProto.Stream.Kind.ROW_INDEX); }
protected void reportThreadStackTrace(ProxyContext ctx, Status status, ThreadStackTrace request) { String nonce = request.getNonce(); String threadStack = request.getThreadStackTrace(); CompletableFuture<ProxyRelayResult<ConsumerRunningInfo>> responseFuture = this.grpcChannelManager.getAndRemoveResponseFuture(nonce); if (responseFuture != null) { try { if (status.getCode().equals(Code.OK)) { ConsumerRunningInfo runningInfo = new ConsumerRunningInfo(); runningInfo.setJstack(threadStack); responseFuture.complete(new ProxyRelayResult<>(ResponseCode.SUCCESS, "", runningInfo)); } else if (status.getCode().equals(Code.VERIFY_FIFO_MESSAGE_UNSUPPORTED)) { responseFuture.complete(new ProxyRelayResult<>(ResponseCode.NO_PERMISSION, "forbidden to verify message", null)); } else { responseFuture.complete(new ProxyRelayResult<>(ResponseCode.SYSTEM_ERROR, "verify message failed", null)); } } catch (Throwable t) { responseFuture.completeExceptionally(t); } } }
@Test public void testReportThreadStackTrace() { this.clientActivity = new ClientActivity(this.messagingProcessor, this.grpcClientSettingsManager, grpcChannelManagerMock); String jstack = "jstack"; String nonce = "123"; when(grpcChannelManagerMock.getAndRemoveResponseFuture(anyString())).thenReturn((CompletableFuture) runningInfoFutureMock); ProxyContext context = createContext(); ContextStreamObserver<TelemetryCommand> streamObserver = clientActivity.telemetry(new StreamObserver<TelemetryCommand>() { @Override public void onNext(TelemetryCommand value) { } @Override public void onError(Throwable t) { } @Override public void onCompleted() { } }); streamObserver.onNext(context, TelemetryCommand.newBuilder() .setThreadStackTrace(ThreadStackTrace.newBuilder() .setThreadStackTrace(jstack) .setNonce(nonce) .build()) .setStatus(ResponseBuilder.getInstance().buildStatus(Code.OK, Code.OK.name())) .build()); verify(runningInfoFutureMock, times(1)).complete(runningInfoArgumentCaptor.capture()); ProxyRelayResult<ConsumerRunningInfo> result = runningInfoArgumentCaptor.getValue(); assertThat(result.getCode()).isEqualTo(ResponseCode.SUCCESS); assertThat(result.getResult().getJstack()).isEqualTo(jstack); }
@Override public Output run(RunContext runContext) throws Exception { String renderedNamespace = runContext.render(this.namespace); FlowService flowService = ((DefaultRunContext) runContext).getApplicationContext().getBean(FlowService.class); flowService.checkAllowedNamespace(runContext.tenantId(), renderedNamespace, runContext.tenantId(), runContext.flowInfo().namespace()); String renderedPrefix = runContext.render(this.prefix); List<String> keys = runContext.namespaceKv(renderedNamespace).list().stream() .map(KVEntry::key) .filter(key -> key.startsWith(renderedPrefix)) .toList(); return Output.builder() .keys(keys) .build(); }
@Test void shouldGetNoKeysGivenEmptyKeyStore() throws Exception { // Given String namespace = IdUtils.create(); RunContext runContext = this.runContextFactory.of(Map.of( "flow", Map.of("namespace", namespace), "inputs", Map.of( "prefix", TEST_KEY_PREFIX_TEST ) )); GetKeys getKeys = GetKeys.builder() .id(GetKeys.class.getSimpleName()) .type(GetKeys.class.getName()) .prefix("{{ inputs.prefix }}") .build(); // When GetKeys.Output run = getKeys.run(runContext); // Then assertThat(run.getKeys(), empty()); }
@Override public void run(T configuration, Environment environment) throws Exception { final String name = name(); final String primaryName = name + PRIMARY; final String readerName = name + READER; final PooledDataSourceFactory primaryConfig = getDataSourceFactory(configuration); final SessionFactory primary = requireNonNull(sessionFactoryFactory.build(this, environment, primaryConfig, entities, primaryName)); final PooledDataSourceFactory readerConfig = getReadSourceFactory(configuration); final SessionFactory reader = requireNonNull(sessionFactoryFactory.build(this, environment, readerConfig, entities, readerName)); final DualSessionFactory factory = new DualSessionFactory(primary, reader); registerUnitOfWorkListenerIfAbsent(environment).registerSessionFactory(name, factory); final ExecutorService exec = environment.getHealthCheckExecutorService(); environment.healthChecks().register(primaryName, new SessionFactoryHealthCheck( exec, primaryConfig.getValidationQueryTimeout().orElse(Duration.seconds(5)), primary, primaryConfig.getValidationQuery())); environment.healthChecks().register(readerName, new SessionFactoryHealthCheck( exec, readerConfig.getValidationQueryTimeout().orElse(Duration.seconds(5)), reader, readerConfig.getValidationQuery())); this.sessionFactory = factory; }
@Test public void hasASessionFactory() throws Exception { bundle.run(configuration, environment); assertThat(bundle.getSessionFactory()).isInstanceOf(DualSessionFactory.class); }
static void setEnsemblePlacementPolicy(ClientConfiguration bkConf, ServiceConfiguration conf, MetadataStore store, Class<? extends EnsemblePlacementPolicy> policyClass) { bkConf.setEnsemblePlacementPolicy(policyClass); bkConf.setProperty(BookieRackAffinityMapping.METADATA_STORE_INSTANCE, store); if (conf.isBookkeeperClientRackawarePolicyEnabled() || conf.isBookkeeperClientRegionawarePolicyEnabled()) { bkConf.setProperty(REPP_DNS_RESOLVER_CLASS, conf.getProperties().getProperty(REPP_DNS_RESOLVER_CLASS, BookieRackAffinityMapping.class.getName())); bkConf.setMinNumRacksPerWriteQuorum(conf.getBookkeeperClientMinNumRacksPerWriteQuorum()); bkConf.setEnforceMinNumRacksPerWriteQuorum(conf.isBookkeeperClientEnforceMinNumRacksPerWriteQuorum()); bkConf.setProperty(NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, conf.getProperties().getProperty( NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "")); } }
@Test public void testSetEnsemblePlacementPolicys() { ClientConfiguration bkConf = new ClientConfiguration(); ServiceConfiguration conf = new ServiceConfiguration(); conf.setBookkeeperClientMinNumRacksPerWriteQuorum(3); conf.setBookkeeperClientEnforceMinNumRacksPerWriteQuorum(true); MetadataStore store = mock(MetadataStore.class); BookKeeperClientFactoryImpl.setEnsemblePlacementPolicy( bkConf, conf, store, ZkIsolatedBookieEnsemblePlacementPolicy.class); assertEquals(bkConf.getMinNumRacksPerWriteQuorum(), 3); assertTrue(bkConf.getEnforceMinNumRacksPerWriteQuorum()); }
public static NotificationDispatcherMetadata newMetadata() { return METADATA; }
@Test public void changeOnMyIssues_notification_is_enable_at_global_level() { NotificationDispatcherMetadata metadata = ChangesOnMyIssueNotificationHandler.newMetadata(); assertThat(metadata.getProperty(GLOBAL_NOTIFICATION)).isEqualTo("true"); }
public static Container createContainer( String name, String containerImage, List<String> args, SecurityContext securityContext, ResourceRequirements resources, List<EnvVar> envVars, List<ContainerPort> ports, List<VolumeMount> volumeMounts, Probe livenessProbe, Probe readinessProbe, ImagePullPolicy imagePullPolicy ) { return createContainer( name, containerImage, args, securityContext, resources, envVars, ports, volumeMounts, livenessProbe, readinessProbe, null, imagePullPolicy, null ); }
/**
 * The container factory wires through every supplied field and applies defaults for the
 * ones left out (no startup probe or lifecycle; image pull policy defaults to "Always"
 * when passed as null).
 */
@Test
public void createContainer() {
    Container cont = ContainerUtils.createContainer(
            "my-name",
            "my-image:latest",
            List.of("/startup.sh", "--port=8443"),
            new SecurityContextBuilder().withRunAsUser(1874L).build(),
            new ResourceRequirementsBuilder().withRequests(Map.of("memory", new Quantity("1Gi"), "cpu", new Quantity("1000m"))).build(),
            List.of(ContainerUtils.createEnvVar("VAR_1", "value1")),
            List.of(ContainerUtils.createContainerPort("my-port", 8443)),
            List.of(VolumeUtils.createVolumeMount("my-volume", "/my-volume")),
            ProbeUtils.execProbe(new Probe(), List.of("/liveness.sh")),
            ProbeUtils.execProbe(new Probe(), List.of("/readiness.sh")),
            null
    );
    assertThat(cont.getName(), is("my-name"));
    assertThat(cont.getImage(), is("my-image:latest"));
    assertThat(cont.getArgs().size(), is(2));
    assertThat(cont.getArgs().get(0), is("/startup.sh"));
    assertThat(cont.getArgs().get(1), is("--port=8443"));
    assertThat(cont.getSecurityContext().getRunAsUser(), is(1874L));
    assertThat(cont.getResources().getRequests().get("memory"), is(new Quantity("1Gi")));
    assertThat(cont.getResources().getRequests().get("cpu"), is(new Quantity("1000m")));
    assertThat(cont.getResources().getLimits(), is(Map.of()));
    assertThat(cont.getEnv().size(), is(1));
    assertThat(cont.getEnv().get(0).getName(), is("VAR_1"));
    assertThat(cont.getEnv().get(0).getValue(), is("value1"));
    assertThat(cont.getPorts().size(), is(1));
    assertThat(cont.getPorts().get(0).getName(), is("my-port"));
    assertThat(cont.getPorts().get(0).getContainerPort(), is(8443));
    assertThat(cont.getVolumeMounts().size(), is(1));
    assertThat(cont.getVolumeMounts().get(0).getName(), is("my-volume"));
    assertThat(cont.getVolumeMounts().get(0).getMountPath(), is("/my-volume"));
    assertThat(cont.getLivenessProbe().getExec().getCommand(), is(List.of("/liveness.sh")));
    assertThat(cont.getReadinessProbe().getExec().getCommand(), is(List.of("/readiness.sh")));
    // This overload never sets a startup probe.
    assertThat(cont.getStartupProbe(), is(nullValue()));
    // A null ImagePullPolicy argument resolves to the "Always" default.
    assertThat(cont.getImagePullPolicy(), is("Always"));
    assertThat(cont.getLifecycle(), is(nullValue()));
}
@Override public GroupType type() { return GroupType.SHARE; }
@Test public void testType() { ShareGroup shareGroup = createShareGroup("foo"); assertEquals(Group.GroupType.SHARE, shareGroup.type()); }
public void instanceRegistered(String serviceName, String groupName) { String key = NamingUtils.getGroupedName(serviceName, groupName); synchronized (registeredInstances) { InstanceRedoData redoData = registeredInstances.get(key); if (null != redoData) { redoData.registered(); } } }
@Test void testInstanceRegistered() { ConcurrentMap<String, InstanceRedoData> registeredInstances = getInstanceRedoDataMap(); redoService.cacheInstanceForRedo(SERVICE, GROUP, new Instance()); redoService.instanceRegistered(SERVICE, GROUP); InstanceRedoData actual = registeredInstances.entrySet().iterator().next().getValue(); assertTrue(actual.isRegistered()); }
@Override protected String doDesensitize(final String source) { return desensitizeData(source, source.length() / 2); }
@Test void doDesensitizeTest() { CharacterReplaceDataDesensitize characterReplaceDataMask = new CharacterReplaceDataDesensitize(); String ret = characterReplaceDataMask.doDesensitize("1"); Assertions.assertEquals("*", ret); String sourceData = "123456789"; String replaceText = DataDesensitizeFactory.selectDesensitize(sourceData, DataDesensitizeEnum.CHARACTER_REPLACE.getDataDesensitizeAlg()); int maskNum = 0; for (char c : replaceText.toCharArray()) { if (c == '*') { maskNum++; } } Assertions.assertEquals(sourceData.length() / 2, maskNum); }
public static String syslogFacilityToReadable(int facility) { switch (facility) { case 0: return "kernel"; case 1: return "user-level"; case 2: return "mail"; case 3: return "system daemon"; case 4: case 10: return "security/authorization"; case 5: return "syslogd"; case 6: return "line printer"; case 7: return "network news"; case 8: return "UUCP"; case 9: case 15: return "clock"; case 11: return "FTP"; case 12: return "NTP"; case 13: return "log audit"; case 14: return "log alert"; // TODO: Make user definable? case 16: return "local0"; case 17: return "local1"; case 18: return "local2"; case 19: return "local3"; case 20: return "local4"; case 21: return "local5"; case 22: return "local6"; case 23: return "local7"; } return "Unknown"; }
@Test public void testSyslogFacilityToReadable() { assertEquals("Unknown", Tools.syslogFacilityToReadable(9001)); assertEquals("kernel", Tools.syslogFacilityToReadable(0)); assertEquals("FTP", Tools.syslogFacilityToReadable(11)); assertEquals("local6", Tools.syslogFacilityToReadable(22)); }
public synchronized void update() { final Map<Thread, StackTraceElement[]> stackTraces = Thread.getAllStackTraces(); try { final Thread currentThread = Thread.currentThread(); for (final Map.Entry<Thread, StackTraceElement[]> entry : stackTraces.entrySet()) { final Thread thread = entry.getKey(); final StackTraceElement[] stackTrace = entry.getValue(); if (stackTrace.length > 0 && thread.getState() == Thread.State.RUNNABLE && thread != currentThread) { for (final StackTraceElement element : stackTrace) { if (!isPackageExcluded(element)) { addSample(element); break; } } } } } finally { limitDataSize(); } }
@Test public void test1() { final SamplingProfiler samplingProfiler = new SamplingProfiler(); assertEmptyHotspots(samplingProfiler); samplingProfiler.update(); }
public static <T extends Serializable> SerializableCoder<T> of(TypeDescriptor<T> type) { @SuppressWarnings("unchecked") Class<T> clazz = (Class<T>) type.getRawType(); return new SerializableCoder<>(clazz, type); }
@Test public void testNullEncoding() throws Exception { Coder<String> coder = SerializableCoder.of(String.class); byte[] encodedBytes = CoderUtils.encodeToByteArray(coder, null); assertNull(CoderUtils.decodeFromByteArray(coder, encodedBytes)); }
public List<MappingField> resolveAndValidateFields( List<MappingField> userFields, Map<String, String> options, NodeEngine nodeEngine ) { final InternalSerializationService serializationService = (InternalSerializationService) nodeEngine .getSerializationService(); final AbstractRelationsStorage relationsStorage = ((CalciteSqlOptimizer) nodeEngine.getSqlService().getOptimizer()) .relationsStorage(); // normalize and validate the names and external names for (MappingField field : userFields) { String name = field.name(); String externalName = field.externalName(); if (externalName == null) { if (name.equals(KEY) || name.equals(VALUE)) { externalName = name; } else { externalName = VALUE_PREFIX + name; } field.setExternalName(name); } if ((name.equals(KEY) && !externalName.equals(KEY)) || (name.equals(VALUE) && !externalName.equals(VALUE))) { throw QueryException.error("Cannot rename field: '" + name + '\''); } if (!EXT_NAME_PATTERN.matcher(externalName).matches()) { throw QueryException.error("Invalid external name: " + externalName); } } Stream<MappingField> keyFields = resolveAndValidateFields(true, userFields, options, serializationService, relationsStorage); Stream<MappingField> valueFields = resolveAndValidateFields(false, userFields, options, serializationService, relationsStorage); Map<String, MappingField> fields = Stream.concat(keyFields, valueFields) .collect(LinkedHashMap::new, (map, field) -> map.putIfAbsent(field.name(), field), Map::putAll); if (fields.isEmpty()) { throw QueryException.error("The resolved field list is empty"); } return new ArrayList<>(fields.values()); }
@Test public void when_keyOrThisNameIsUsed_then_itIsFilteredOut() { Map<String, String> options = ImmutableMap.of( OPTION_KEY_FORMAT, JAVA_FORMAT, OPTION_VALUE_FORMAT, JAVA_FORMAT ); given(resolver.resolveAndValidateFields(eq(true), eq(emptyList()), eq(options), eq(ss))) .willReturn(Stream.of( field("__key", QueryDataType.INT, "__key.name"), field("keyField", QueryDataType.INT, "__key.__keyField") )); given(resolver.resolveAndValidateFields(eq(false), eq(emptyList()), eq(options), eq(ss))) .willReturn(Stream.of( field("this", QueryDataType.VARCHAR, "this.name"), field("thisField", QueryDataType.VARCHAR, "this.thisField") )); List<MappingField> fields = resolvers.resolveAndValidateFields(emptyList(), options, nodeEngine); assertThat(fields).containsExactly( field("keyField", QueryDataType.INT, "__key.__keyField"), field("thisField", QueryDataType.VARCHAR, "this.thisField") ); }
@Override public void commit(final Xid xid, final boolean onePhase) throws XAException { try { delegate.commit(xid, onePhase); } catch (final XAException ex) { throw mapXAException(ex); } }
@Test void assertCommit() throws XAException { singleXAResource.commit(xid, true); verify(xaResource).commit(xid, true); }
void resolveSelectors(EngineDiscoveryRequest request, CucumberEngineDescriptor engineDescriptor) { Predicate<String> packageFilter = buildPackageFilter(request); resolve(request, engineDescriptor, packageFilter); filter(engineDescriptor, packageFilter); pruneTree(engineDescriptor); }
@Test void resolveRequestWithClasspathRootSelector() { Path classpathRoot = Paths.get("src/test/resources/"); DiscoverySelector resource = selectClasspathRoots(singleton(classpathRoot)).get(0); EngineDiscoveryRequest discoveryRequest = new SelectorRequest(resource); resolver.resolveSelectors(discoveryRequest, testDescriptor); assertEquals(7, testDescriptor.getChildren().size()); }
List<Token> tokenize() throws ScanException { List<Token> tokenList = new ArrayList<Token>(); StringBuilder buf = new StringBuilder(); while (pointer < patternLength) { char c = pattern.charAt(pointer); pointer++; switch (state) { case LITERAL_STATE: handleLiteralState(c, tokenList, buf); break; case START_STATE: handleStartState(c, tokenList, buf); break; case DEFAULT_VAL_STATE: handleDefaultValueState(c, tokenList, buf); default: } } // EOS switch (state) { case LITERAL_STATE: addLiteralToken(tokenList, buf); break; case DEFAULT_VAL_STATE: // trailing colon. see also LOGBACK-1140 buf.append(CoreConstants.COLON_CHAR); addLiteralToken(tokenList, buf); break; case START_STATE: // trailing $. see also LOGBACK-1149 buf.append(CoreConstants.DOLLAR); addLiteralToken(tokenList, buf); break; } return tokenList; }
@Test public void basicDefaultSeparator() throws ScanException { String input = "${a:-b}"; Tokenizer tokenizer = new Tokenizer(input); List<Token> tokenList = tokenizer.tokenize(); witnessList.add(Token.START_TOKEN); witnessList.add(new Token(Token.Type.LITERAL, "a")); witnessList.add(Token.DEFAULT_SEP_TOKEN); witnessList.add(new Token(Token.Type.LITERAL, "b")); witnessList.add(Token.CURLY_RIGHT_TOKEN); assertEquals(witnessList, tokenList); }
/**
 * Determines compatibility of this POJO serializer snapshot with an older one.
 * The decision ladder, in order: wrong snapshot kind, different POJO class, or
 * absent subclass keys/values are incompatible; incompatible field or registered
 * subclass serializers are incompatible; otherwise the result may require
 * migration, reconfiguration, or be compatible as-is.
 */
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
        TypeSerializerSnapshot<T> oldSerializerSnapshot) {
    // Only snapshots of the same kind can be compared.
    if (!(oldSerializerSnapshot instanceof PojoSerializerSnapshot)) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    PojoSerializerSnapshot<T> previousPojoSerializerSnapshot =
            (PojoSerializerSnapshot<T>) oldSerializerSnapshot;
    final Class<T> previousPojoClass =
            previousPojoSerializerSnapshot.snapshotData.getPojoClass();
    final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots =
            previousPojoSerializerSnapshot.snapshotData.getFieldSerializerSnapshots();
    final LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>> registeredSubclassSerializerSnapshots =
            previousPojoSerializerSnapshot.snapshotData
                    .getRegisteredSubclassSerializerSnapshots();
    final LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>> nonRegisteredSubclassSerializerSnapshots =
            previousPojoSerializerSnapshot.snapshotData
                    .getNonRegisteredSubclassSerializerSnapshots();
    // The POJO class itself must be identical.
    if (previousPojoClass != snapshotData.getPojoClass()) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    // Absent keys or values mean classes/snapshots could not be restored.
    if (registeredSubclassSerializerSnapshots.hasAbsentKeysOrValues()) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    if (nonRegisteredSubclassSerializerSnapshots.hasAbsentKeysOrValues()) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    final IntermediateCompatibilityResult<T> preExistingFieldSerializersCompatibility =
            getCompatibilityOfPreExistingFields(fieldSerializerSnapshots);
    if (preExistingFieldSerializersCompatibility.isIncompatible()) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    final IntermediateCompatibilityResult<T> preExistingRegistrationsCompatibility =
            getCompatibilityOfPreExistingRegisteredSubclasses(
                    registeredSubclassSerializerSnapshots);
    if (preExistingRegistrationsCompatibility.isIncompatible()) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    // Migration takes precedence over reconfiguration.
    if (newPojoSerializerIsCompatibleAfterMigration(
            preExistingFieldSerializersCompatibility,
            preExistingRegistrationsCompatibility,
            fieldSerializerSnapshots)) {
        return TypeSerializerSchemaCompatibility.compatibleAfterMigration();
    }
    if (newPojoSerializerIsCompatibleWithReconfiguredSerializer(
            preExistingFieldSerializersCompatibility,
            preExistingRegistrationsCompatibility,
            registeredSubclassSerializerSnapshots,
            nonRegisteredSubclassSerializerSnapshots)) {
        return TypeSerializerSchemaCompatibility.compatibleWithReconfiguredSerializer(
                constructReconfiguredPojoSerializer(
                        preExistingFieldSerializersCompatibility,
                        registeredSubclassSerializerSnapshots,
                        preExistingRegistrationsCompatibility,
                        nonRegisteredSubclassSerializerSnapshots));
    }
    return TypeSerializerSchemaCompatibility.compatibleAsIs();
}
/** Snapshots built from identical field lists should resolve as compatible as-is. */
@Test
void testResolveSchemaCompatibilityWithSameFields() {
    final PojoSerializerSnapshot<TestPojo> oldSnapshot =
        buildTestSnapshot(Arrays.asList(ID_FIELD, NAME_FIELD, HEIGHT_FIELD));
    final PojoSerializerSnapshot<TestPojo> newSnapshot =
        buildTestSnapshot(Arrays.asList(ID_FIELD, NAME_FIELD, HEIGHT_FIELD));
    final TypeSerializerSchemaCompatibility<TestPojo> resultCompatibility =
        newSnapshot.resolveSchemaCompatibility(oldSnapshot);
    assertThat(resultCompatibility.isCompatibleAsIs()).isTrue();
}
/**
 * Convenience overload: builds a group key with an empty third component
 * (presumably the tenant/datum id — confirm against the three-arg overload).
 */
public static String getKey(String dataId, String group) {
    return getKey(dataId, group, "");
}
/** A blank dataId must be rejected with IllegalArgumentException. */
@Test
void testGetKeyDatIdParam() {
    assertThrows(IllegalArgumentException.class, () -> {
        GroupKey.getKey("", "a");
    });
}
/** Updates an existing social client after validating existence and uniqueness. */
@Override
public void updateSocialClient(SocialClientSaveReqVO updateReqVO) {
    // Validate that the client exists.
    validateSocialClientExists(updateReqVO.getId());
    // Validate there is no duplicate (userType, socialType) combination.
    validateSocialClientUnique(updateReqVO.getId(), updateReqVO.getUserType(), updateReqVO.getSocialType());
    // Perform the update.
    SocialClientDO updateObj = BeanUtils.toBean(updateReqVO, SocialClientDO.class);
    socialClientMapper.updateById(updateObj);
}
/** Updating a non-existent client should raise SOCIAL_CLIENT_NOT_EXISTS. */
@Test
public void testUpdateSocialClient_notExists() {
    // Prepare parameters.
    SocialClientSaveReqVO reqVO = randomPojo(SocialClientSaveReqVO.class);
    // Call and assert the expected service exception.
    assertServiceException(() -> socialClientService.updateSocialClient(reqVO), SOCIAL_CLIENT_NOT_EXISTS);
}
/**
 * Retrieves ONU information from the device over NETCONF.
 * Returns null when this node is not the device master, when the target
 * identifier is invalid, or when the NETCONF call fails.
 *
 * @param target optional "ponlink[-onu]" identifier; null queries all ONUs
 * @return the raw NETCONF reply, or null on any failure
 */
@Override
public String getOnus(String target) {
    DriverHandler handler = handler();
    NetconfController controller = handler.get(NetconfController.class);
    MastershipService mastershipService = handler.get(MastershipService.class);
    DeviceId ncDeviceId = handler.data().deviceId();
    checkNotNull(controller, "Netconf controller is null");
    String reply = null;
    String[] onuId = null;
    // Only the master for this device may issue the request.
    if (!mastershipService.isLocalMaster(ncDeviceId)) {
        log.warn("Not master for {} Use {} to execute command",
                 ncDeviceId,
                 mastershipService.getMasterFor(ncDeviceId));
        return null;
    }
    // A non-null target must parse into a valid identifier array.
    if (target != null) {
        onuId = checkIdString(target);
        if (onuId == null) {
            log.error("Invalid ONU identifier {}", target);
            return null;
        }
    }
    try {
        // Build the NETCONF filter: either one PON link (optionally one ONU) or all ONUs.
        StringBuilder request = new StringBuilder();
        request.append(VOLT_NE_OPEN + VOLT_NE_NAMESPACE);
        request.append(ANGLE_RIGHT + NEW_LINE);
        if (onuId != null) {
            request.append(buildStartTag(VOLT_ONUS))
                .append(buildStartTag(ONUS_PERLINK))
                .append(buildStartTag(PONLINK_ID, false))
                .append(onuId[FIRST_PART])
                .append(buildEndTag(PONLINK_ID));
            if (onuId.length > ONE) {
                // Two-part id: narrow the filter down to a single ONU.
                request.append(buildStartTag(ONUS_LIST))
                    .append(buildStartTag(ONU_INFO))
                    .append(buildStartTag(ONU_ID, false))
                    .append(onuId[SECOND_PART])
                    .append(buildEndTag(ONU_ID))
                    .append(buildEndTag(ONU_INFO))
                    .append(buildEndTag(ONUS_LIST));
            }
            request.append(buildEndTag(ONUS_PERLINK))
                .append(buildEndTag(VOLT_ONUS));
        } else {
            // No target: query every ONU.
            request.append(buildEmptyTag(VOLT_ONUS));
        }
        request.append(VOLT_NE_CLOSE);
        reply = controller
            .getDevicesMap()
            .get(ncDeviceId)
            .getSession()
            .get(request.toString(), REPORT_ALL);
    } catch (NetconfException e) {
        log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
    }
    return reply;
}
/** Every invalid target string must make getOnus() return null. */
@Test
public void testInvalidGetOnusInput() throws Exception {
    String reply;
    String target;
    for (int i = ZERO; i < INVALID_GET_TCS.length; i++) {
        target = INVALID_GET_TCS[i];
        reply = voltConfig.getOnus(target);
        assertNull("Incorrect response for INVALID_GET_TCS", reply);
    }
}
/**
 * Computes the position's profit as a percentage of its entry price.
 * Open positions contribute zero.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    // Guard clause: an open position has no realized return yet.
    if (!position.isClosed()) {
        return series.zero();
    }
    Num entryPrice = position.getEntry().getValue();
    return position.getProfit().dividedBy(entryPrice).multipliedBy(series.hundred());
}
/** Two losing long positions: -5% (100→95) and -30% (100→70) should sum to -35. */
@Test
public void calculateWithLosingLongPositions() {
    MockBarSeries series = new MockBarSeries(numFunction, 100, 95, 100, 80, 85, 70);
    TradingRecord tradingRecord = new BaseTradingRecord(
        Trade.buyAt(0, series), Trade.sellAt(1, series),
        Trade.buyAt(2, series), Trade.sellAt(5, series));
    AnalysisCriterion profit = getCriterion();
    assertNumEquals(-5 + -30, profit.calculate(series, tradingRecord));
}
/**
 * Best-effort full OS name via native probing; on any failure falls back
 * to the JVM-reported OS family name.
 */
static String determineOperatingSystemCompleteName() {
    try {
        useAgentTmpDirIfNecessary();
        OperatingSystem os = newSystemInfo().getOperatingSystem();
        // Code name is appended in parentheses only when present.
        return String.format("%s %s%s",
            os.getFamily(),
            os.getVersionInfo().getVersion(),
            optionalFrom(os.getVersionInfo().getCodeName()).map(s -> " (" + s + ")").orElse("")
        );
    } catch (Exception e) {
        LOG.warn("Unable to determine OS platform from native, falling back to default", e);
        return new SystemEnvironment().getOperatingSystemFamilyJvmName();
    }
}
/** An already-set jna.tmpdir property must survive the native OS probe. */
@Test
public void shouldPreserveJnaTmpDirIfSet() {
    String defaultTempDir = System.getProperty("java.io.tmpdir");
    props.set("jna.tmpdir", defaultTempDir);
    SystemInfo.determineOperatingSystemCompleteName();
    assertThat(System.getProperty("jna.tmpdir")).isEqualTo(defaultTempDir);
}
/** Builds the ClientConfig using the current thread's context class loader. */
public ClientConfig build() {
    return build(Thread.currentThread().getContextClassLoader());
}
/** A YAML config file referenced via system property should be loaded by the builder. */
@Override
@Test
public void loadingThroughSystemProperty_existingFile() throws IOException {
    String yaml = "hazelcast-client:\n" + " cluster-name: foobar";
    File file = File.createTempFile("foo", ".yaml");
    file.deleteOnExit();
    PrintWriter writer = new PrintWriter(file, StandardCharsets.UTF_8);
    writer.println(yaml);
    writer.close();
    // Point the builder at the temp file via the well-known system property.
    System.setProperty("hazelcast.client.config", file.getAbsolutePath());
    YamlClientConfigBuilder configBuilder = new YamlClientConfigBuilder();
    ClientConfig config = configBuilder.build();
    assertEquals("foobar", config.getClusterName());
}
/**
 * Merges an ACL spec into an existing ACL: entries matching a spec key are
 * replaced, non-matching spec entries are appended, and mask entries are
 * recalculated for any scope the spec touched.
 *
 * @throws AclException if the resulting ACL is invalid
 */
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
    List<AclEntry> inAclSpec) throws AclException {
  ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
  ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
  List<AclEntry> foundAclSpecEntries =
      Lists.newArrayListWithCapacity(MAX_ENTRIES);
  EnumMap<AclEntryScope, AclEntry> providedMask =
      Maps.newEnumMap(AclEntryScope.class);
  EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
  EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
  for (AclEntry existingEntry: existingAcl) {
    AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
    if (aclSpecEntry != null) {
      // Spec entry replaces the existing entry with the same key.
      foundAclSpecEntries.add(aclSpecEntry);
      scopeDirty.add(aclSpecEntry.getScope());
      if (aclSpecEntry.getType() == MASK) {
        providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
        maskDirty.add(aclSpecEntry.getScope());
      } else {
        aclBuilder.add(aclSpecEntry);
      }
    } else {
      // Existing entry survives; masks are tracked separately from the builder.
      if (existingEntry.getType() == MASK) {
        providedMask.put(existingEntry.getScope(), existingEntry);
      } else {
        aclBuilder.add(existingEntry);
      }
    }
  }
  // ACL spec entries that were not replacements are new additions.
  for (AclEntry newEntry: aclSpec) {
    if (Collections.binarySearch(foundAclSpecEntries, newEntry,
        ACL_ENTRY_COMPARATOR) < 0) {
      scopeDirty.add(newEntry.getScope());
      if (newEntry.getType() == MASK) {
        providedMask.put(newEntry.getScope(), newEntry);
        maskDirty.add(newEntry.getScope());
      } else {
        aclBuilder.add(newEntry);
      }
    }
  }
  copyDefaultsIfNeeded(aclBuilder);
  calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
  return buildAndValidateAcl(aclBuilder);
}
/** Merging that pushes the default ACL past the entry limit must fail. */
@Test(expected = AclException.class)
public void testMergeAclDefaultEntriesResultTooLarge() throws AclException {
    // 1 owner + 28 named users + group + mask + other = 32 entries already.
    ImmutableList.Builder<AclEntry> aclBuilder = new ImmutableList.Builder<AclEntry>()
        .add(aclEntry(DEFAULT, USER, ALL));
    for (int i = 1; i <= 28; ++i) {
        aclBuilder.add(aclEntry(DEFAULT, USER, "user" + i, READ));
    }
    aclBuilder
        .add(aclEntry(DEFAULT, GROUP, READ))
        .add(aclEntry(DEFAULT, MASK, READ))
        .add(aclEntry(DEFAULT, OTHER, NONE));
    List<AclEntry> existing = aclBuilder.build();
    // One more named user overflows the limit.
    List<AclEntry> aclSpec = Lists.newArrayList(
        aclEntry(DEFAULT, USER, "bruce", READ));
    mergeAclEntries(existing, aclSpec);
}
/** Returns the union over everything accumulated so far. */
@Nonnull
@Override
public Sketch<IntegerSummary> getResult() {
    return unionAll();
}
/** Merging two accumulators should estimate the union of their disjoint inputs. */
@Test
public void testAccumulatorMerge() {
    // Sketch 1: keys 0..999.
    IntegerSketch input1 = new IntegerSketch(_lgK, IntegerSummary.Mode.Sum);
    IntStream.range(0, 1000).forEach(i -> input1.update(i, 1));
    CompactSketch<IntegerSummary> sketch1 = input1.compact();
    // Sketch 2: keys 1000..1999 (disjoint from sketch 1).
    IntegerSketch input2 = new IntegerSketch(_lgK, IntegerSummary.Mode.Sum);
    IntStream.range(1000, 2000).forEach(i -> input2.update(i, 1));
    CompactSketch<IntegerSummary> sketch2 = input2.compact();
    TupleIntSketchAccumulator accumulator1 = new TupleIntSketchAccumulator(_setOps, _nominalEntries, 3);
    accumulator1.apply(sketch1);
    TupleIntSketchAccumulator accumulator2 = new TupleIntSketchAccumulator(_setOps, _nominalEntries, 3);
    accumulator2.apply(sketch2);
    accumulator1.merge(accumulator2);
    Assert.assertEquals(accumulator1.getResult().getEstimate(), sketch1.getEstimate() + sketch2.getEstimate());
}
/**
 * Opens a chunked multipart upload stream for the given file.
 * Existing files are updated in place via their resource id; new files are
 * created under the parent folder first.
 *
 * @throws ChecksumException if the checksum digest algorithm is unavailable
 */
@Override
public HttpResponseOutputStream<EueWriteFeature.Chunk> write(final Path file, final TransferStatus status,
                                                             final ConnectionCallback callback) throws BackgroundException {
    String uploadUri;
    String resourceId;
    if(status.isExists()) {
        // Update the existing resource using its known id.
        resourceId = fileid.getFileId(file);
        uploadUri = EueUploadHelper.updateResource(session, resourceId, status, UploadType.CHUNKED).getUploadURI();
    }
    else {
        // Create a new resource below the parent folder first.
        final ResourceCreationResponseEntry resourceCreationResponseEntry = EueUploadHelper.createResource(session,
            fileid.getFileId(file.getParent()), file.getName(), status, UploadType.CHUNKED);
        resourceId = EueResourceIdProvider.getResourceIdFromResourceUri(resourceCreationResponseEntry.getHeaders().getLocation());
        uploadUri = resourceCreationResponseEntry.getEntity().getUploadURI();
    }
    final MultipartOutputStream proxy;
    try {
        proxy = new MultipartOutputStream(file, resourceId, uploadUri, status, callback);
    }
    catch(NoSuchAlgorithmException e) {
        throw new ChecksumException(LocaleFactory.localizedString("Checksum failure", "Error"), e);
    }
    // Buffer writes into fixed-size segments before handing them to the multipart stream.
    return new HttpResponseOutputStream<EueWriteFeature.Chunk>(new MemorySegementingOutputStream(proxy,
        new HostPreferences(session.getHost()).getInteger("eue.upload.multipart.size")),
        new EueAttributesAdapter(), status) {
        @Override
        public EueWriteFeature.Chunk getStatus() {
            return proxy.getResult();
        }
    };
}
/** Writing with unknown length (-1) and zero bytes should create an empty, findable file. */
@Test
public void testWriteZeroLength() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final EueMultipartWriteFeature feature = new EueMultipartWriteFeature(session, fileid);
    final Path container = new EueDirectoryFeature(session, fileid).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final byte[] content = RandomUtils.nextBytes(0);
    // Unknown length forces the chunked code path.
    final TransferStatus status = new TransferStatus().withLength(-1L);
    final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final HttpResponseOutputStream<EueWriteFeature.Chunk> out = feature.write(file, status, new DisabledConnectionCallback());
    final ByteArrayInputStream in = new ByteArrayInputStream(content);
    assertEquals(content.length, IOUtils.copyLarge(in, out));
    in.close();
    out.close();
    assertTrue(new DefaultFindFeature(session).find(file));
    // Read back and compare (trivially empty here).
    final byte[] compare = new byte[content.length];
    final InputStream stream = new EueReadFeature(session, fileid).read(file,
        new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(content, compare);
    new EueDeleteFeature(session, fileid).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/** Exposes the backing delegate set (package-private accessor). */
final Set<E> getDelegate() {
    return delegate;
}
/** A freshly created LazySet should start with the EmptySet delegate. */
@Test
public void requireThatInitialDelegateIsEmpty() {
    LazySet<String> set = newLazySet(new HashSet<>());
    assertEquals(LazySet.EmptySet.class, set.getDelegate().getClass());
}
@Override public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan, final boolean restoreInProgress) { try { final ExecuteResult result = EngineExecutor .create(primaryContext, serviceContext, plan.getConfig()) .execute(plan.getPlan(), restoreInProgress); return result; } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { // add the statement text to the KsqlException throw new KsqlStatementException( e.getMessage(), e.getMessage(), plan.getPlan().getStatementText(), e.getCause() ); } }
/** Quoting a lowercase source name should fail with a hint to remove the quotes. */
@Test
public void shouldShowHintWhenFailingToCreateQueryIfSelectingFromSourceNameWithoutQuotes() {
    // Given: a stream created unquoted (so its name is uppercased to BAR).
    setupKsqlEngineWithSharedRuntimeEnabled();
    KsqlEngineTestUtil.execute(
        serviceContext,
        ksqlEngine,
        "create stream bar as select * from test1;",
        ksqlConfig,
        Collections.emptyMap()
    );
    // When: selecting from the quoted lowercase name.
    final KsqlStatementException e = assertThrows(
        KsqlStatementException.class,
        () -> KsqlEngineTestUtil.execute(
            serviceContext,
            ksqlEngine,
            "select * from \"bar\";",
            ksqlConfig,
            Collections.emptyMap()
        )
    );
    // Then: the message names the uppercase source and suggests dropping quotes.
    assertThat(e, rawMessage(is(
        "Exception while preparing statement: bar does not exist.\n"
        + "Did you mean BAR? Hint: try removing double quotes from the source name.")));
    assertThat(e, statementText(is("select * from \"bar\";")));
}
/**
 * Creates and connects an FTPS client with a custom-trust TLS socket factory.
 * The command listener is registered for logging and removed again when the
 * client disconnects, to avoid leaking the listener across connections.
 */
@Override
protected FTPClient connect(final ProxyFinder proxy, final HostKeyCallback callback,
                            final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
    try {
        final CustomTrustSSLProtocolSocketFactory f = new CustomTrustSSLProtocolSocketFactory(trust, key,
            preferences.getProperty("connection.ssl.protocols.ftp").split(","));
        final LoggingProtocolCommandListener listener = new LoggingProtocolCommandListener(this);
        final FTPClient client = new FTPClient(host.getProtocol(), f, f.getSSLContext()) {
            @Override
            public void disconnect() throws IOException {
                try {
                    super.disconnect();
                }
                finally {
                    // Deregister the logging listener when the connection goes away.
                    this.removeProtocolCommandListener(listener);
                }
            }
        };
        client.addProtocolCommandListener(listener);
        this.configure(client);
        // Hostname is punycode-encoded for IDN support.
        client.connect(new PunycodeConverter().convert(host.getHostname()), host.getPort());
        client.setTcpNoDelay(false);
        return client;
    }
    catch(IOException e) {
        throw new FTPExceptionMappingService().map(e);
    }
}
/** Workdir lookup should be non-null and stable, and the session connected. */
@Test
public void testConnect() throws Exception {
    final Path path = new FTPWorkdirService(session).find();
    assertNotNull(path);
    assertEquals(path, new FTPWorkdirService(session).find());
    assertTrue(session.isConnected());
}
/**
 * Removes every given tag from the text by applying {@code removeTag} once per tag.
 * Blank or null input is returned unchanged.
 *
 * @param text         text to strip tags from (may be null/blank)
 * @param tagsToRemove tags to remove, applied in list order
 * @return the text with all listed tags removed
 */
public static String removeTags(String text, List<Tag> tagsToRemove) {
    if (StringUtils.isBlank(text)) {
        return text;
    }
    // Plain accumulation loop; the previous AtomicReference-in-forEach was an
    // unnecessary mutable-holder workaround for lambda capture rules.
    String result = text;
    for (Tag tagToRemove : tagsToRemove) {
        result = removeTag(result, tagToRemove);
    }
    return result;
}
/** Removing a tag must leave surrounding special characters and whitespace intact. */
@Test
public void removeTags_specialCharsKept() {
    String text = "<>[],-.(){}!?\n\t text";
    String testString = text + " " + TAG1.getText();
    String result = TagsHelper.removeTags(testString, singletonList(TAG1));
    assertEquals(text, result);
}
/** Looks up state for the namespace/tag pair using a null state context. */
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
    return workItemState.get(namespace, address, StateContexts.nullContext());
}
/** clear() before reads should serve combined values purely from cache. */
@Test
public void testCombiningClearBeforeRead() throws Exception {
    GroupingState<Integer, Integer> value = underTest.state(NAMESPACE, COMBINING_ADDR);
    value.clear();
    value.readLater();
    value.add(5);
    value.add(6);
    assertThat(value.read(), Matchers.equalTo(11));
    value.add(2);
    assertThat(value.read(), Matchers.equalTo(13));
    // Shouldn't need to read from windmill for this because we immediately cleared..
    Mockito.verifyZeroInteractions(mockReader);
}
/**
 * Polls Bitbucket (server and cloud) ALM settings and updates the health metric:
 * red when nothing is configured or validation fails, green otherwise.
 */
@Override
public void run() {
    try (DbSession dbSession = dbClient.openSession(false)) {
        List<AlmSettingDto> bitbucketServerDtos = dbClient.almSettingDao().selectByAlm(dbSession, ALM.BITBUCKET);
        List<AlmSettingDto> bitbucketCloudDtos = dbClient.almSettingDao().selectByAlm(dbSession, ALM.BITBUCKET_CLOUD);
        if (bitbucketServerDtos.isEmpty() && bitbucketCloudDtos.isEmpty()) {
            // No integration configured at all.
            metrics.setBitbucketStatusToRed();
            return;
        }
        try {
            validate(bitbucketServerDtos, bitbucketCloudDtos);
            metrics.setBitbucketStatusToGreen();
        } catch (RuntimeException e) {
            // NOTE(review): the failure cause is dropped here — only the metric
            // flips to red. Confirm whether logging the exception is wanted.
            metrics.setBitbucketStatusToRed();
        }
    }
}
/** With no Bitbucket config, red is set exactly once and green never. */
@Test
public void run_bitbucketIntegrationNotConfigured_setRedStatusInMetricsOnce() {
    when(almSettingsDao.selectByAlm(dbSession, ALM.BITBUCKET)).thenReturn(Collections.emptyList());
    when(almSettingsDao.selectByAlm(dbSession, ALM.BITBUCKET_CLOUD)).thenReturn(Collections.emptyList());
    underTest.run();
    verify(metrics, times(0)).setBitbucketStatusToGreen();
    verify(metrics, times(1)).setBitbucketStatusToRed();
}
/**
 * Test convenience overload: delegates with an empty map argument
 * (presumably no pre-bound expressions — confirm against the four-arg overload).
 */
@VisibleForTesting
public Supplier<PageProjection> compileProjection(
        SqlFunctionProperties sqlFunctionProperties,
        RowExpression projection,
        Optional<String> classNameSuffix) {
    return compileProjection(sqlFunctionProperties, emptyMap(), projection, classNameSuffix);
}
/** A page that throws mid-projection must not corrupt results of later pages. */
@Test
public void testFailureDoesNotCorruptFutureResults() {
    PageFunctionCompiler functionCompiler = new PageFunctionCompiler(createTestMetadataManager(), 0);
    Supplier<PageProjection> projectionSupplier = functionCompiler.compileProjection(SESSION.getSqlFunctionProperties(), ADD_10_EXPRESSION, Optional.empty());
    PageProjection projection = projectionSupplier.get();
    // process good page and verify we got the expected number of result rows
    Page goodPage = createLongBlockPage(1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
    Block goodResult = project(projection, goodPage, SelectedPositions.positionsRange(0, goodPage.getPositionCount())).get(0);
    assertEquals(goodPage.getPositionCount(), goodResult.getPositionCount());
    // addition will throw due to integer overflow
    Page badPage = createLongBlockPage(1, 0, 1, 2, 3, 4, Long.MAX_VALUE);
    try {
        project(projection, badPage, SelectedPositions.positionsRange(0, 100));
        fail("expected exception");
    } catch (PrestoException e) {
        assertEquals(e.getErrorCode(), NUMERIC_VALUE_OUT_OF_RANGE.toErrorCode());
    }
    // running the good page should still work
    // if block builder in generated code was not reset properly, we could get junk results after the failure
    goodResult = project(projection, goodPage, SelectedPositions.positionsRange(0, goodPage.getPositionCount())).get(0);
    assertEquals(goodPage.getPositionCount(), goodResult.getPositionCount());
}
/**
 * Returns the property's value parsed as a primitive double.
 *
 * @param property the property to resolve via {@code getString}
 * @return the parsed double value
 * @throws NumberFormatException if the resolved string is not a valid double
 */
public double getDouble(HazelcastProperty property) {
    // parseDouble returns a primitive directly; Double.valueOf would box
    // and immediately auto-unbox (Effective Java Item 61).
    return Double.parseDouble(getString(property));
}
/** getDouble should return the property's default numeric value within tolerance. */
@Test
public void getDouble() {
    HazelcastProperty property = new HazelcastProperty("foo", 10.1D);
    double foo = defaultProperties.getDouble(property);
    assertEquals(10.1D, foo, 0.0001);
}
/** True when the class name denotes a collection or map (treated as structured input). */
protected boolean isStructuredInput(String className) {
    return ScenarioSimulationSharedUtils.isCollectionOrMap(className);
}
/** Lists and maps count as structured input; sets and scalars do not. */
@Test
public void isStructuredInput() {
    assertThat(expressionEvaluator.isStructuredInput(List.class.getCanonicalName())).isTrue();
    assertThat(expressionEvaluator.isStructuredInput(ArrayList.class.getCanonicalName())).isTrue();
    assertThat(expressionEvaluator.isStructuredInput(LinkedList.class.getCanonicalName())).isTrue();
    assertThat(expressionEvaluator.isStructuredInput(Map.class.getCanonicalName())).isTrue();
    assertThat(expressionEvaluator.isStructuredInput(HashMap.class.getCanonicalName())).isTrue();
    assertThat(expressionEvaluator.isStructuredInput(LinkedHashMap.class.getCanonicalName())).isTrue();
    assertThat(expressionEvaluator.isStructuredInput(Set.class.getCanonicalName())).isFalse();
    assertThat(expressionEvaluator.isStructuredInput(Integer.class.getCanonicalName())).isFalse();
    assertThat(expressionEvaluator.isStructuredInput(String.class.getCanonicalName())).isFalse();
}
/** Sends the context's packet out of the given connect point unchanged. */
@Override
public void forward(NeighbourMessageContext context, ConnectPoint outPort) {
    sendTo(context.packet(), outPort);
}
/** Forwarding to an interface should emit the packet with the interface's MAC and VLAN. */
@Test
public void forwardToInterface() {
    Ethernet request = NeighbourTestUtils.createArpRequest(IP1);
    // Expected packet carries the egress interface's source MAC and VLAN.
    Ethernet forwardedRequest = request.duplicate();
    forwardedRequest.setSourceMACAddress(INTF2.mac());
    forwardedRequest.setVlanID(INTF2.vlan().toShort());
    packetService.emit(outbound(forwardedRequest, CP2));
    expectLastCall().once();
    replay(packetService);
    actions.forward(createContext(request, CP1, null), INTF2);
    verify(packetService);
}
/**
 * Validates that the property exists and holds a strictly positive integer.
 * Throws AlgorithmInitializationException for missing, non-numeric, or
 * non-positive values.
 */
public static void checkPositiveInteger(final Properties props, final String propKey, final MaskAlgorithm<?, ?> algorithm) {
    checkRequired(props, propKey, algorithm);
    final String rawValue = props.getProperty(propKey);
    final int parsedValue;
    try {
        parsedValue = Integer.parseInt(rawValue);
    } catch (final NumberFormatException ex) {
        throw new AlgorithmInitializationException(algorithm, "%s must be a valid integer number", propKey);
    }
    // Positivity check is outside the try: its exception type is never
    // NumberFormatException, so behavior is identical to the inline form.
    ShardingSpherePreconditions.checkState(parsedValue > 0,
        () -> new AlgorithmInitializationException(algorithm, "%s must be a positive integer.", propKey));
}
/** Zero is not a positive integer and must be rejected. */
@Test
void assertCheckPositiveIntegerFailedWithZero() {
    Properties props = PropertiesBuilder.build(new Property("key", "0"));
    assertThrows(AlgorithmInitializationException.class,
        () -> MaskAlgorithmPropertiesChecker.checkPositiveInteger(props, "key", mock(MaskAlgorithm.class)));
}
/** Verifies referenced document types are both declared and globally distributed. */
public void validate(Map<String, NewDocumentType> documentDefinitions,
                     Set<NewDocumentType> globallyDistributedDocuments) {
    verifyReferredDocumentsArePresent(documentDefinitions);
    verifyReferredDocumentsAreGlobal(documentDefinitions, globallyDistributedDocuments);
}
/** Referencing an undeclared document type should fail with a descriptive message. */
@Test
void throws_exception_on_unknown_document() {
    NewDocumentType unknown = new NewDocumentType(new NewDocumentType.Name("unknown"));
    NewDocumentType child = createDocumentType("child", unknown);
    Fixture fixture = new Fixture()
        .addNonGlobalDocument(child);
    try {
        validate(fixture);
        fail();
    } catch (IllegalArgumentException e) {
        assertEquals("The following document types are referenced from other documents, but are not listed in services.xml: 'unknown'",
            e.getMessage());
    }
}
/** Conditional removal: delegates the (key, value) removal to the superclass. */
@Override
public boolean remove(Object key, Object value) {
    return super.remove(key, value);
}
/** remove(key) should return the previous value and clear the mapping. */
@Test
void remove() {
    SessionContext context = new SessionContext();
    Key<String> key = SessionContext.newKey("foo");
    context.put(key, "bar");
    Truth.assertThat(context.get(key)).isEqualTo("bar");
    String val = context.remove(key);
    Truth.assertThat(context.get(key)).isNull();
    Truth.assertThat(val).isEqualTo("bar");
}
/**
 * Resolves the PinotFS used to write task output.
 * Precedence: scheme-less URI → local FS; explicitly configured output FS
 * class → plugin instance initialized with the output FS props; otherwise
 * factory lookup by URI scheme.
 */
public static PinotFS getOutputPinotFS(Map<String, String> taskConfigs, URI fileURI)
    throws Exception {
  String fileURIScheme = (fileURI == null) ? null : fileURI.getScheme();
  if (fileURIScheme == null) {
    return new LocalPinotFS();
  }
  // Try to create PinotFS using given Input FileSystem config always
  String fsClass = taskConfigs.get(BatchConfigProperties.OUTPUT_FS_CLASS);
  if (fsClass != null) {
    PinotFS pinotFS = PluginManager.get().createInstance(fsClass);
    PinotConfiguration fsProps = IngestionConfigUtils.getOutputFsProps(taskConfigs);
    pinotFS.init(fsProps);
    return pinotFS;
  }
  return PinotFSFactory.create(fileURIScheme);
}
/** An explicit output.fs.className should yield that PinotFS implementation. */
@Test
public void testGetOutputPinotFS() throws Exception {
    Map<String, String> taskConfigs = new HashMap<>();
    taskConfigs.put("output.fs.className", "org.apache.pinot.spi.filesystem.LocalPinotFS");
    URI fileURI = new URI("file:///path/to/file");
    PinotFS pinotFS = MinionTaskUtils.getOutputPinotFS(taskConfigs, fileURI);
    assertTrue(pinotFS instanceof LocalPinotFS);
}
/**
 * Builds the initial offset restriction for a Pulsar source descriptor.
 * Missing start/end offsets default to 0 and Long.MAX_VALUE respectively.
 */
@GetInitialRestriction
public OffsetRange getInitialRestriction(@Element PulsarSourceDescriptor pulsarSource) {
    final long startTimestamp =
        pulsarSource.getStartOffset() == null ? 0L : pulsarSource.getStartOffset();
    final long endTimestamp =
        pulsarSource.getEndOffset() == null ? Long.MAX_VALUE : pulsarSource.getEndOffset();
    return new OffsetRange(startTimestamp, endTimestamp);
}
/** With only a start offset set, the restriction runs from it to Long.MAX_VALUE. */
@Test
public void testInitialRestrictionWhenHasStartOffset() throws Exception {
    long expectedStartOffset = 0;
    OffsetRange result =
        dofnInstance.getInitialRestriction(
            PulsarSourceDescriptor.of(
                TOPIC, expectedStartOffset, null, null, SERVICE_URL, ADMIN_URL));
    assertEquals(new OffsetRange(expectedStartOffset, Long.MAX_VALUE), result);
}
/**
 * Reloads the dictionary on the background scheduler, logging completion
 * on the main thread; failures are logged, not rethrown.
 *
 * @return a Disposable the caller must dispose to cancel/clean up
 */
@CheckReturnValue
public static Disposable reloadDictionaryInBackground(@NonNull Dictionary dictionary) {
    return Observable.<Dictionary>create(emitter -> emitter.onNext(dictionary))
        .subscribeOn(RxSchedulers.background())
        // loadDictionary() runs on the background scheduler.
        .map(
            d -> {
                d.loadDictionary();
                return d;
            })
        .observeOn(RxSchedulers.mainThread())
        .unsubscribeOn(RxSchedulers.background())
        .subscribe(
            d -> Logger.d("DictionaryBackgroundLoader", "Reloading of %s done.", d),
            throwable ->
                Logger.e(
                    "DictionaryBackgroundLoader",
                    throwable,
                    "Reloading of %s failed with error '%s'.",
                    dictionary,
                    throwable.getMessage()));
}
/** Happy path: dictionary is loaded exactly once and never closed by the loader. */
@Test
public void testReloadHappyPath() {
    Dictionary dictionary = Mockito.mock(Dictionary.class);
    final Disposable disposable = DictionaryBackgroundLoader.reloadDictionaryInBackground(dictionary);
    TestRxSchedulers.drainAllTasks();
    final InOrder inOrder = Mockito.inOrder(dictionary);
    inOrder.verify(dictionary).loadDictionary();
    inOrder.verify(dictionary, Mockito.never()).close();
    inOrder.verifyNoMoreInteractions();
    // Disposing must not close the dictionary either.
    disposable.dispose();
    TestRxSchedulers.drainAllTasks();
    Mockito.verify(dictionary, Mockito.never()).close();
}
/**
 * Decorates the pod's main container with the task manager entrypoint,
 * start-command args, and the TM JVM memory options environment variable.
 */
@Override
public FlinkPod decorateFlinkPod(FlinkPod flinkPod) {
    final Container mainContainerWithStartCmd =
        new ContainerBuilder(flinkPod.getMainContainer())
            .withCommand(kubernetesTaskManagerParameters.getContainerEntrypoint())
            .withArgs(getTaskManagerStartCommand())
            .addToEnv(
                new EnvVarBuilder()
                    .withName(Constants.ENV_TM_JVM_MEM_OPTS)
                    .withValue(
                        kubernetesTaskManagerParameters.getJvmMemOptsEnv())
                    .build())
            .build();
    return new FlinkPod.Builder(flinkPod).withMainContainer(mainContainerWithStartCmd).build();
}
/** The decorated container must carry exactly the TM JVM mem-opts env var. */
@Test
void testTaskManagerJvmMemOptsEnv() {
    final FlinkPod resultFlinkPod = cmdTaskManagerDecorator.decorateFlinkPod(baseFlinkPod);
    assertThat(resultFlinkPod.getMainContainer().getEnv())
        .containsExactly(
            new EnvVarBuilder()
                .withName(Constants.ENV_TM_JVM_MEM_OPTS)
                .withValue(JVM_MEM_OPTS_ENV)
                .build());
}
/** Returns the local (near-cache) view of this map's cached entries. */
@Override
public Map<K, V> getCachedMap() {
    return localCacheView.getCachedMap();
}
/** NameMapper affects physical Redis keys/channels but not the logical map name. */
@Test
public void testNameMapper() throws InterruptedException {
    // Client whose mapper appends ":suffix:" to physical names.
    Config config = new Config();
    config.useSingleServer()
        .setNameMapper(new NameMapper() {
            @Override
            public String map(String name) {
                return name + ":suffix:";
            }
            @Override
            public String unmap(String name) {
                return name.replace(":suffix:", "");
            }
        })
        .setConnectionMinimumIdleSize(3)
        .setConnectionPoolSize(3)
        .setAddress(redisson.getConfig().useSingleServer().getAddress());
    RedissonClient redisson = Redisson.create(config);
    LocalCachedMapOptions<String, Integer> options = LocalCachedMapOptions.<String, Integer>name("test")
        .evictionPolicy(EvictionPolicy.LFU)
        .cacheSize(5);
    RLocalCachedMap<String, Integer> map1 = redisson.getLocalCachedMap(options);
    Map<String, Integer> cache1 = map1.getCachedMap();
    RLocalCachedMap<String, Integer> map2 = redisson.getLocalCachedMap(options);
    Map<String, Integer> cache2 = map2.getCachedMap();
    // Logical names stay unmapped.
    assertThat(map1.getName()).isEqualTo("test");
    assertThat(map2.getName()).isEqualTo("test");
    map1.put("1", 1);
    map1.put("2", 2);
    assertThat(map2.get("1")).isEqualTo(1);
    assertThat(map2.get("2")).isEqualTo(2);
    assertThat(cache1.size()).isEqualTo(2);
    assertThat(cache2.size()).isEqualTo(2);
    // Cross-instance updates invalidate the peer's local cache entries.
    map1.put("1", 3);
    map2.put("2", 4);
    Thread.sleep(50);
    assertThat(redisson.getKeys().getKeys()).containsOnly("test");
    // The invalidation topic channel uses the mapped (physical) name.
    RedisClientConfig destinationCfg = new RedisClientConfig();
    destinationCfg.setAddress(redisson.getConfig().useSingleServer().getAddress());
    RedisClient client = RedisClient.create(destinationCfg);
    RedisConnection destinationConnection = client.connect();
    List<String> channels = destinationConnection.sync(RedisCommands.PUBSUB_CHANNELS);
    assertThat(channels).contains("{test:suffix:}:topic");
    client.shutdown();
    assertThat(cache1.size()).isEqualTo(1);
    assertThat(cache2.size()).isEqualTo(1);
    redisson.shutdown();
}
/**
 * Sets a bean property by name via its discovered setter.
 * Null values are silently ignored; a missing setter or a setter failure
 * is reported as a warning rather than thrown.
 */
public void setProperty(String name, String value) {
    if (value == null) {
        return;
    }
    Method setter = aggregationAssessor.findSetterMethod(name);
    if (setter == null) {
        addWarn("No setter for property [" + name + "] in " + objClass.getName() + ".");
    } else {
        try {
            setProperty(setter, value);
        } catch (PropertySetterException ex) {
            addWarn("Failed to set property [" + name + "] to value \"" + value + "\". ", ex);
        }
    }
}
/** Enum-valued property: the string "ACCEPT" should convert to FilterReply.ACCEPT. */
@Test
public void testFilterReply() {
    // test case reproducing bug #52
    setter.setProperty("filterReply", "ACCEPT");
    assertEquals(FilterReply.ACCEPT, house.getFilterReply());
}
/**
 * Removes the entries matching an ACL spec from an existing ACL and
 * recalculates mask entries for the scopes that were touched.
 *
 * @throws AclException if the resulting ACL is invalid
 */
public static List<AclEntry> filterAclEntriesByAclSpec(
    List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException {
  ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
  ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
  EnumMap<AclEntryScope, AclEntry> providedMask =
      Maps.newEnumMap(AclEntryScope.class);
  EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
  EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
  for (AclEntry existingEntry: existingAcl) {
    if (aclSpec.containsKey(existingEntry)) {
      // Entry is removed; record its scope (and mask-ness) for recalculation.
      scopeDirty.add(existingEntry.getScope());
      if (existingEntry.getType() == MASK) {
        maskDirty.add(existingEntry.getScope());
      }
    } else {
      // Entry survives; masks are tracked separately from the builder list.
      if (existingEntry.getType() == MASK) {
        providedMask.put(existingEntry.getScope(), existingEntry);
      } else {
        aclBuilder.add(existingEntry);
      }
    }
  }
  copyDefaultsIfNeeded(aclBuilder);
  calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
  return buildAndValidateAcl(aclBuilder);
}
/** Removing the access mask while named entries remain must be rejected. */
@Test(expected=AclException.class)
public void testFilterAclEntriesByAclSpecRemoveAccessMaskRequired() throws AclException {
    List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
        .add(aclEntry(ACCESS, USER, ALL))
        .add(aclEntry(ACCESS, USER, "bruce", READ))
        .add(aclEntry(ACCESS, GROUP, READ))
        .add(aclEntry(ACCESS, MASK, ALL))
        .add(aclEntry(ACCESS, OTHER, NONE))
        .build();
    List<AclEntry> aclSpec = Lists.newArrayList(
        aclEntry(ACCESS, MASK));
    filterAclEntriesByAclSpec(existing, aclSpec);
}
/**
 * Static factory: builds a {@link SortValues} transform backed by a buffered
 * external sorter configured with the given options.
 *
 * @param sorterOptions configuration for the underlying external sorter
 * @return a new SortValues transform
 */
public static <PrimaryKeyT, SecondaryKeyT, ValueT>
    SortValues<PrimaryKeyT, SecondaryKeyT, ValueT> create(
        BufferedExternalSorter.Options sorterOptions) {
  SortValues<PrimaryKeyT, SecondaryKeyT, ValueT> transform = new SortValues<>(sorterOptions);
  return transform;
}
/**
 * SortValues with byte[] secondary keys must order values lexicographically
 * per primary key (exercises the byte-array key coder path).
 */
@Test
public void testSecondaryKeyByteOptimization() {
    PCollection<KV<String, KV<byte[], Integer>>> input =
        p.apply(
            Create.of(
                Arrays.asList(
                    KV.of("key1", KV.of("secondaryKey2".getBytes(StandardCharsets.UTF_8), 20)),
                    KV.of("key2", KV.of("secondaryKey2".getBytes(StandardCharsets.UTF_8), 200)),
                    KV.of("key1", KV.of("secondaryKey3".getBytes(StandardCharsets.UTF_8), 30)),
                    KV.of("key1", KV.of("secondaryKey1".getBytes(StandardCharsets.UTF_8), 10)),
                    KV.of("key2", KV.of("secondaryKey1".getBytes(StandardCharsets.UTF_8), 100)))));
    // Group by Key, bringing <SecondaryKey, Value> pairs for the same Key together.
    PCollection<KV<String, Iterable<KV<byte[], Integer>>>> grouped =
        input.apply(GroupByKey.create());
    // For every Key, sort the iterable of <SecondaryKey, Value> pairs by SecondaryKey.
    PCollection<KV<String, Iterable<KV<byte[], Integer>>>> groupedAndSorted =
        grouped.apply(SortValues.create(BufferedExternalSorter.options()));
    // Expected output: per key, values sorted by the byte[] secondary key.
    PAssert.that(groupedAndSorted)
        .satisfies(
            new AssertThatHasExpectedContentsForTestSecondaryKeySorting<>(
                Arrays.asList(
                    KV.of(
                        "key1",
                        Arrays.asList(
                            KV.of("secondaryKey1".getBytes(StandardCharsets.UTF_8), 10),
                            KV.of("secondaryKey2".getBytes(StandardCharsets.UTF_8), 20),
                            KV.of("secondaryKey3".getBytes(StandardCharsets.UTF_8), 30))),
                    KV.of(
                        "key2",
                        Arrays.asList(
                            KV.of("secondaryKey1".getBytes(StandardCharsets.UTF_8), 100),
                            KV.of("secondaryKey2".getBytes(StandardCharsets.UTF_8), 200))))));
    p.run();
}
/**
 * Extracts the "view" field from the plugin's JSON status-report response.
 *
 * @param responseBody JSON object expected to contain a non-blank "view" entry
 * @return the status report view
 * @throws RuntimeException if the response is empty/malformed or the view is blank
 */
String getStatusReportView(String responseBody) {
    Map<?, ?> response = DEFAULT_GSON.fromJson(responseBody, Map.class);
    // Guard against a null/absent body: Gson returns null for empty input,
    // which previously surfaced as an opaque NullPointerException here.
    String statusReportView = response == null ? null : (String) response.get("view");
    if (StringUtils.isBlank(statusReportView)) {
        throw new RuntimeException("Status Report is blank!");
    }
    return statusReportView;
}
/** The converter must surface the raw "view" value from the JSON response. */
@Test
public void shouldGetStatusReportViewFromResponseBody() {
    ElasticAgentExtensionConverterV4 converter = new ElasticAgentExtensionConverterV4();
    String template = converter.getStatusReportView("{\"view\":\"foo\"}");
    assertThat(template, is("foo"));
}
/**
 * Selects the slots that can be allocated in this round, in two passes:
 * first greedily admit pending "small" slots while the dedicated small-slot
 * budget allows, then admit queued slots in queue order until the global
 * budget is exhausted. The second pass stops at the first slot that does not
 * fit (head-of-line blocking), so later queued slots must wait.
 *
 * @param slotTracker tracker holding the current allocation state
 * @return the slots chosen for allocation, in admission order
 */
@Override
public List<LogicalSlot> peakSlotsToAllocate(SlotTracker slotTracker) {
    updateOptionsPeriodically();
    List<LogicalSlot> slotsToAllocate = Lists.newArrayList();
    // Pass 1: small slots, bounded by the small-slot budget. Admitted slots
    // are removed from the shared queue so pass 2 does not see them again.
    int curNumAllocatedSmallSlots = numAllocatedSmallSlots;
    for (SlotContext slotContext : requiringSmallSlots.values()) {
        LogicalSlot slot = slotContext.getSlot();
        if (curNumAllocatedSmallSlots + slot.getNumPhysicalSlots() > opts.v2().getTotalSmallSlots()) {
            break;
        }
        requiringQueue.remove(slotContext);
        slotsToAllocate.add(slot);
        slotContext.setAllocateAsSmallSlot();
        curNumAllocatedSmallSlots += slot.getNumPhysicalSlots();
    }
    // Pass 2: remaining queued slots against the global budget; small-slot
    // allocations are excluded from the global count here.
    int numAllocatedSlots = slotTracker.getNumAllocatedSlots() - numAllocatedSmallSlots;
    while (!requiringQueue.isEmpty()) {
        SlotContext slotContext = requiringQueue.peak();
        if (!isGlobalSlotAvailable(numAllocatedSlots, slotContext.getSlot())) {
            break; // head-of-line blocking: do not skip ahead in the queue
        }
        requiringQueue.poll();
        slotsToAllocate.add(slotContext.getSlot());
        numAllocatedSlots += slotContext.getSlot().getNumPhysicalSlots();
    }
    return slotsToAllocate;
}
/**
 * Head-of-line blocking: a large queued slot (slot2) blocked by slot1 must
 * also block later slots (slot3), even though small slots admitted via the
 * small-slot budget can still flow; everything unblocks once slot1 releases.
 */
@Test
public void testHeadLineBlocking2() {
    QueryQueueOptions opts = QueryQueueOptions.createFromEnv();
    SlotSelectionStrategyV2 strategy = new SlotSelectionStrategyV2();
    SlotTracker slotTracker = new SlotTracker(ImmutableList.of(strategy));
    LogicalSlot slot1 = generateSlot(opts.v2().getTotalSlots() / 2 + 1);
    LogicalSlot slot2 = generateSlot(opts.v2().getTotalSlots() / 2);
    LogicalSlot slot3 = generateSlot(2);
    // 1. Require and allocate slot1.
    slotTracker.requireSlot(slot1);
    assertThat(strategy.peakSlotsToAllocate(slotTracker)).containsExactly(slot1);
    slotTracker.allocateSlot(slot1);
    // 2. Require slot2.
    slotTracker.requireSlot(slot2);
    assertThat(strategy.peakSlotsToAllocate(slotTracker)).isEmpty();
    // 3. Require enough small slots to make its priority lower.
    {
        List<LogicalSlot> smallSlots = IntStream.range(0, 10)
                .mapToObj(i -> generateSlot(2))
                .collect(Collectors.toList());
        smallSlots.forEach(slotTracker::requireSlot);
        for (int numPeakedSmallSlots = 0; numPeakedSmallSlots < 10; ) {
            List<LogicalSlot> peakSlots = strategy.peakSlotsToAllocate(slotTracker);
            numPeakedSmallSlots += peakSlots.size();
            peakSlots.forEach(slotTracker::allocateSlot);
            peakSlots.forEach(slot -> assertThat(slotTracker.releaseSlot(slot.getSlotId())).isSameAs(slot));
        }
    }
    // Try peak the only rest slot2, but it is blocked by slot1.
    assertThat(strategy.peakSlotsToAllocate(slotTracker)).isEmpty();
    // 4. slot3 cannot be peaked because it is blocked by slot2.
    for (int i = 0; i < 10; i++) {
        slotTracker.requireSlot(slot3);
        assertThat(strategy.peakSlotsToAllocate(slotTracker)).isEmpty();
        assertThat(slotTracker.releaseSlot(slot3.getSlotId())).isSameAs(slot3);
    }
    slotTracker.requireSlot(slot3);
    assertThat(strategy.peakSlotsToAllocate(slotTracker)).isEmpty();
    // 5. slot2 and slot3 can be peaked after releasing slot1.
    slotTracker.releaseSlot(slot1.getSlotId());
    assertThat(strategy.peakSlotsToAllocate(slotTracker)).containsExactly(slot2, slot3);
}
/**
 * Rejects any document type whose name collides with a reserved name.
 *
 * @param documentDefinitions document types keyed by name
 * @throws IllegalArgumentException if one or more names are reserved
 */
public void validate(Map<String, NewDocumentType> documentDefinitions) {
    List<String> conflicting = documentDefinitions.keySet()
            .stream()
            .filter(this::isReservedName)
            .toList();
    if (conflicting.isEmpty()) {
        return;
    }
    throw new IllegalArgumentException(makeReservedNameMessage(conflicting));
}
/**
 * Reserved-name detection must be case-insensitive: mixed-case variants of
 * reserved keywords are still rejected, and the error message lists them in
 * map iteration order (TreeMap here for a deterministic order) with their
 * original casing.
 */
@Test
void validation_is_case_insensitive() {
    ReservedDocumentTypeNameValidator validator = new ReservedDocumentTypeNameValidator();
    Map<String, NewDocumentType> orderedDocTypes =
        new TreeMap<>(asDocTypeMapping(List.of("NULL", "True", "anD")));
    try {
        validator.validate(orderedDocTypes);
        fail(); // must not accept reserved names
    } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().startsWith("The following document types conflict with reserved keyword names: "
            + "'NULL', 'True', 'anD'."));
    }
}
/**
 * Hash code derived from the pool class name plus a concatenation of every
 * local property key and value.
 *
 * NOTE(review): keys and values are concatenated without a separator, so
 * distinct maps can collide (e.g. {"ab"->"c"} vs {"a"->"bc"}), and the hash
 * depends on the map's iteration order — confirm getAllLocalProperties()
 * returns a stable, equals()-consistent order before relying on this.
 */
@Override
public int hashCode() {
    StringBuilder stringBuilder = new StringBuilder();
    for (Entry<String, Object> entry : getAllLocalProperties().entrySet()) {
        stringBuilder.append(entry.getKey()).append(entry.getValue());
    }
    return Objects.hashCode(poolClassName, stringBuilder.toString());
}
/** Two property sets built from identical inputs must hash identically. */
@Test
void assertSameHashCode() {
    DataSourcePoolProperties first =
            new DataSourcePoolProperties(MockedDataSource.class.getName(), createUserProperties("root"));
    DataSourcePoolProperties second =
            new DataSourcePoolProperties(MockedDataSource.class.getName(), createUserProperties("root"));
    assertThat(first.hashCode(), is(second.hashCode()));
}
/**
 * Extracts the availability-zone component from a host name.
 *
 * @param hostName the host name to inspect
 * @return capture group 2 of ZONE_RE when the whole name matches, else null
 */
public static String extractZoneFromHostName(String hostName) {
    Matcher matcher = ZONE_RE.matcher(hostName);
    return matcher.matches() ? matcher.group(2) : null;
}
/** Zone extraction must work with and without a leading host label. */
@Test
public void testExtractZoneFromHostName() throws Exception {
    String zone = "us-east-1c";
    assertThat(ResolverUtils.extractZoneFromHostName(zone + ".myservice.net"), is(equalTo(zone)));
    assertThat(ResolverUtils.extractZoneFromHostName("txt." + zone + ".myservice.net"), is(equalTo(zone)));
}
/**
 * Fluent setter for the fixed-width field lengths.
 *
 * NOTE(review): the array reference is stored as-is (no defensive copy), so
 * later mutation by the caller is visible to this format — confirm that
 * aliasing is intended.
 *
 * @param fieldLengths width of each fixed field, in order
 * @return this instance, for chaining
 */
public UniVocityFixedDataFormat setFieldLengths(int[] fieldLengths) {
    this.fieldLengths = fieldLengths;
    return this;
}
/** skipEmptyLines must propagate to both the writer and the parser settings. */
@Test
public void shouldConfigureSkipEmptyLines() {
    UniVocityFixedDataFormat dataFormat = new UniVocityFixedDataFormat();
    dataFormat.setFieldLengths(new int[] { 1, 2, 3 });
    dataFormat.setSkipEmptyLines(true);
    assertTrue(dataFormat.getSkipEmptyLines());
    assertTrue(dataFormat.createAndConfigureWriterSettings().getSkipEmptyLines());
    assertTrue(dataFormat.createAndConfigureParserSettings().getSkipEmptyLines());
}
/**
 * Looks up a localized message by key.
 *
 * @param key the resource-bundle key
 * @return the localized string, or "!key!" when the key is not found
 */
public static String getString( String key ) {
    try {
        return getBundle().getString( key );
    } catch ( MissingResourceException e ) {
        // Make missing keys visible in the UI instead of failing outright.
        return "!" + key + "!";
    }
}
/**
 * Exercises every getString overload (0-4 parameters): known keys resolve,
 * unknown keys come back as "!key!", and a null key yields "!null!".
 */
@Test
public void testGetString() throws Exception {
    // These tests are meant for the en_US locale (or equivalent)
    assertEquals( "Database Connection", Messages.getString( "DatabaseDialog.Shell.title" ) );
    assertEquals( "!Not.A.Message!", Messages.getString( "Not.A.Message" ) );
    // 1 param
    assertEquals( "MyParam: JDBC options help", Messages.getString( "DatabaseDialog.JDBCOptions.Tab", "MyParam" ) );
    assertEquals( "!Not.A.Message!", Messages.getString( "Not.A.Message", "Unused1" ) );
    assertEquals( "!null!", Messages.getString( null, "Unused1" ) );
    // 2 params
    assertEquals( "MyParam: JDBC options help",
        Messages.getString( "DatabaseDialog.JDBCOptions.Tab", "MyParam", "Unused" ) );
    assertEquals( "!Not.A.Message!", Messages.getString( "Not.A.Message", "Unused1", "Unused2" ) );
    assertEquals( "!null!", Messages.getString( null, null, null ) );
    // 3 params
    assertEquals( "MyParam: JDBC options help",
        Messages.getString( "DatabaseDialog.JDBCOptions.Tab", "MyParam", "Unused2", "Unused3" ) );
    assertEquals( "!Not.A.Message!", Messages.getString( "Not.A.Message", "Unused1", "Unused2", "Unused3" ) );
    assertEquals( "!null!", Messages.getString( null, null, null, null ) );
    // 4 params
    assertEquals( "MyParam: JDBC options help",
        Messages.getString( "DatabaseDialog.JDBCOptions.Tab", "MyParam", "Unused2", "Unused3", "Unused4" ) );
    assertEquals( "!Not.A.Message!",
        Messages.getString( "Not.A.Message", "Unused1", "Unused2", "Unused3", "Unused4" ) );
    assertEquals( "!null!", Messages.getString( null, null, null, null, null ) );
}
/**
 * Maps a SQL Server column type definition to a SeaTunnel {@link Column}:
 * picks the SeaTunnel data type, a normalized source-type string, and
 * (where applicable) column length, precision and scale. Unsupported types
 * are reported via {@code CommonError.convertToSeaTunnelTypeError}.
 *
 * @param typeDefine the SQL Server column metadata
 * @return the converted column
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    PhysicalColumn.PhysicalColumnBuilder builder =
            PhysicalColumn.builder()
                    .name(typeDefine.getName())
                    .nullable(typeDefine.isNullable())
                    .defaultValue(typeDefine.getDefaultValue())
                    .comment(typeDefine.getComment());
    String sqlServerType = typeDefine.getDataType().toUpperCase();
    switch (sqlServerType) {
            // --- boolean / integral types (IDENTITY variants normalize to the base type) ---
        case SQLSERVER_BIT:
            builder.sourceType(SQLSERVER_BIT);
            builder.dataType(BasicType.BOOLEAN_TYPE);
            break;
        case SQLSERVER_TINYINT:
        case SQLSERVER_TINYINT_IDENTITY:
            // SQL Server TINYINT is unsigned (0..255), so SHORT is needed to hold it.
            builder.sourceType(SQLSERVER_TINYINT);
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case SQLSERVER_SMALLINT:
        case SQLSERVER_SMALLINT_IDENTITY:
            builder.sourceType(SQLSERVER_SMALLINT);
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case SQLSERVER_INTEGER:
        case SQLSERVER_INTEGER_IDENTITY:
        case SQLSERVER_INT:
        case SQLSERVER_INT_IDENTITY:
            builder.sourceType(SQLSERVER_INT);
            builder.dataType(BasicType.INT_TYPE);
            break;
        case SQLSERVER_BIGINT:
        case SQLSERVER_BIGINT_IDENTITY:
            builder.sourceType(SQLSERVER_BIGINT);
            builder.dataType(BasicType.LONG_TYPE);
            break;
            // --- floating point ---
        case SQLSERVER_REAL:
            builder.sourceType(SQLSERVER_REAL);
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case SQLSERVER_FLOAT:
            // FLOAT(1..24) is stored as REAL (single precision) by SQL Server.
            if (typeDefine.getPrecision() != null && typeDefine.getPrecision() <= 24) {
                builder.sourceType(SQLSERVER_REAL);
                builder.dataType(BasicType.FLOAT_TYPE);
            } else {
                builder.sourceType(SQLSERVER_FLOAT);
                builder.dataType(BasicType.DOUBLE_TYPE);
            }
            break;
            // --- exact numerics ---
        case SQLSERVER_DECIMAL:
        case SQLSERVER_NUMERIC:
            builder.sourceType(
                    String.format(
                            "%s(%s,%s)",
                            SQLSERVER_DECIMAL,
                            typeDefine.getPrecision(),
                            typeDefine.getScale()));
            builder.dataType(
                    new DecimalType(
                            typeDefine.getPrecision().intValue(), typeDefine.getScale()));
            builder.columnLength(typeDefine.getPrecision());
            builder.scale(typeDefine.getScale());
            break;
        case SQLSERVER_MONEY:
            builder.sourceType(SQLSERVER_MONEY);
            builder.dataType(
                    new DecimalType(
                            typeDefine.getPrecision().intValue(), typeDefine.getScale()));
            builder.columnLength(typeDefine.getPrecision());
            builder.scale(typeDefine.getScale());
            break;
        case SQLSERVER_SMALLMONEY:
            builder.sourceType(SQLSERVER_SMALLMONEY);
            builder.dataType(
                    new DecimalType(
                            typeDefine.getPrecision().intValue(), typeDefine.getScale()));
            builder.columnLength(typeDefine.getPrecision());
            builder.scale(typeDefine.getScale());
            break;
            // --- character types (lengths widened to a 4-byte-per-char budget) ---
        case SQLSERVER_CHAR:
            builder.sourceType(String.format("%s(%s)", SQLSERVER_CHAR, typeDefine.getLength()));
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(
                    TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
            break;
        case SQLSERVER_NCHAR:
            builder.sourceType(
                    String.format("%s(%s)", SQLSERVER_NCHAR, typeDefine.getLength()));
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(
                    TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
            break;
        case SQLSERVER_VARCHAR:
            // Length -1 means VARCHAR(MAX).
            if (typeDefine.getLength() == -1) {
                builder.sourceType(MAX_VARCHAR);
                builder.columnLength(TypeDefineUtils.doubleByteTo4ByteLength(POWER_2_31 - 1));
            } else {
                builder.sourceType(
                        String.format("%s(%s)", SQLSERVER_VARCHAR, typeDefine.getLength()));
                builder.columnLength(
                        TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
            }
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case SQLSERVER_NVARCHAR:
            // Length -1 means NVARCHAR(MAX).
            if (typeDefine.getLength() == -1) {
                builder.sourceType(MAX_NVARCHAR);
                builder.columnLength(TypeDefineUtils.doubleByteTo4ByteLength(POWER_2_31 - 1));
            } else {
                builder.sourceType(
                        String.format("%s(%s)", SQLSERVER_NVARCHAR, typeDefine.getLength()));
                builder.columnLength(
                        TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
            }
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case SQLSERVER_TEXT:
            builder.sourceType(SQLSERVER_TEXT);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_31 - 1);
            break;
        case SQLSERVER_NTEXT:
            builder.sourceType(SQLSERVER_NTEXT);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_30 - 1);
            break;
        case SQLSERVER_XML:
            builder.sourceType(SQLSERVER_XML);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_31 - 1);
            break;
        case SQLSERVER_UNIQUEIDENTIFIER:
            builder.sourceType(SQLSERVER_UNIQUEIDENTIFIER);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
            break;
        case SQLSERVER_SQLVARIANT:
            builder.sourceType(SQLSERVER_SQLVARIANT);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(typeDefine.getLength());
            break;
            // --- binary types ---
        case SQLSERVER_BINARY:
            builder.sourceType(
                    String.format("%s(%s)", SQLSERVER_BINARY, typeDefine.getLength()));
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(typeDefine.getLength());
            break;
        case SQLSERVER_VARBINARY:
            // Length -1 means VARBINARY(MAX).
            if (typeDefine.getLength() == -1) {
                builder.sourceType(MAX_VARBINARY);
                builder.columnLength(POWER_2_31 - 1);
            } else {
                builder.sourceType(
                        String.format("%s(%s)", SQLSERVER_VARBINARY, typeDefine.getLength()));
                builder.columnLength(typeDefine.getLength());
            }
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case SQLSERVER_IMAGE:
            builder.sourceType(SQLSERVER_IMAGE);
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(POWER_2_31 - 1);
            break;
        case SQLSERVER_TIMESTAMP:
            // TIMESTAMP/ROWVERSION is an 8-byte binary token, not a date-time.
            builder.sourceType(SQLSERVER_TIMESTAMP);
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(8L);
            break;
            // --- date/time types ---
        case SQLSERVER_DATE:
            builder.sourceType(SQLSERVER_DATE);
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case SQLSERVER_TIME:
            builder.sourceType(String.format("%s(%s)", SQLSERVER_TIME, typeDefine.getScale()));
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case SQLSERVER_DATETIME:
            builder.sourceType(SQLSERVER_DATETIME);
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            // DATETIME has a fixed millisecond precision.
            builder.scale(3);
            break;
        case SQLSERVER_DATETIME2:
            builder.sourceType(
                    String.format("%s(%s)", SQLSERVER_DATETIME2, typeDefine.getScale()));
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case SQLSERVER_DATETIMEOFFSET:
            builder.sourceType(
                    String.format("%s(%s)", SQLSERVER_DATETIMEOFFSET, typeDefine.getScale()));
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case SQLSERVER_SMALLDATETIME:
            builder.sourceType(SQLSERVER_SMALLDATETIME);
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                    DatabaseIdentifier.SQLSERVER, sqlServerType, typeDefine.getName());
    }
    return builder.build();
}
/** SQL Server TINYINT must be widened to SeaTunnel SHORT while keeping name and source type. */
@Test
public void testConvertTinyint() {
    BasicTypeDefine<Object> define =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("tinyint")
                    .dataType("tinyint")
                    .build();
    Column converted = SqlServerTypeConverter.INSTANCE.convert(define);
    Assertions.assertEquals(define.getName(), converted.getName());
    Assertions.assertEquals(BasicType.SHORT_TYPE, converted.getDataType());
    Assertions.assertEquals(define.getColumnType(), converted.getSourceType().toLowerCase());
}
/**
 * Asserts that the multimap under test contains the given key/value entry.
 * On failure, picks the most helpful message: near-miss entries whose
 * toString matches, entries sharing the key, entries sharing the value, or
 * a plain "not contained" failure.
 *
 * @param key   the expected entry key (may be null)
 * @param value the expected entry value (may be null)
 */
public final void containsEntry(@Nullable Object key, @Nullable Object value) {
    // TODO(kak): Can we share any of this logic w/ MapSubject.containsEntry()?
    checkNotNull(actual);
    if (!actual.containsEntry(key, value)) {
        Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value);
        ImmutableList<Map.Entry<@Nullable Object, @Nullable Object>> entryList = ImmutableList.of(entry);
        // TODO(cpovirk): If the key is present but not with the right value, we could fail using
        // something like valuesForKey(key).contains(value). Consider whether this is worthwhile.
        if (hasMatchingToStringPair(actual.entries(), entryList)) {
            // Same textual representation but different type: surface type info.
            failWithoutActual(
                fact("expected to contain entry", entry),
                fact("an instance of", objectToTypeName(entry)),
                simpleFact("but did not"),
                fact(
                    "though it did contain",
                    countDuplicatesAndAddTypeInfo(
                        retainMatchingToString(actual.entries(), /* itemsToCheck = */ entryList))),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else if (actual.containsKey(key)) {
            // Key present with other values.
            failWithoutActual(
                fact("expected to contain entry", entry),
                simpleFact("but did not"),
                fact("though it did contain values with that key", actual.asMap().get(key)),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else if (actual.containsValue(value)) {
            // Value present under other keys; collect those keys for the message.
            Set<@Nullable Object> keys = new LinkedHashSet<>();
            for (Map.Entry<?, ?> actualEntry : actual.entries()) {
                if (Objects.equal(actualEntry.getValue(), value)) {
                    keys.add(actualEntry.getKey());
                }
            }
            failWithoutActual(
                fact("expected to contain entry", entry),
                simpleFact("but did not"),
                fact("though it did contain keys with that value", keys),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else {
            // No near miss at all: plain failure.
            failWithActual("expected to contain entry", immutableEntry(key, value));
        }
    }
}
/** A wholly-absent entry fails with the plain "expected to contain entry / but was" message. */
@Test
public void failContainsEntry() {
    ImmutableMultimap<String, String> multimap = ImmutableMultimap.of("a", "A");
    expectFailureWhenTestingThat(multimap).containsEntry("b", "B");
    assertFailureKeys("expected to contain entry", "but was");
    assertFailureValue("expected to contain entry", "b=B");
    assertFailureValue("but was", "{a=[A]}");
}
/**
 * Static factory for an index with relations enabled (second constructor
 * argument fixed to {@code true}).
 *
 * @param name the index name
 * @return a new relation-enabled Index
 */
public static Index withRelations(String name) {
    return new Index(name, true);
}
/**
 * withRelations must reject null or empty index names (cases supplied by the
 * "nullOrEmpty" data provider) with an IllegalArgumentException carrying the
 * expected message.
 */
@Test
@UseDataProvider("nullOrEmpty")
public void withRelations_index_constructor_fails_with_IAE_if_index_name_is_null_or_empty(String nullOrEmpty) {
    assertThatThrownBy(() -> Index.withRelations(nullOrEmpty))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Index name can't be null nor empty");
}
/**
 * Moves the given replicas to new log directories. The assignment is grouped
 * by broker, one AlterReplicaLogDirs request is sent per broker, and each
 * replica gets its own future that is completed from the matching entry in
 * that broker's response (or failed with the broker-level error).
 *
 * @param replicaAssignment target log directory per replica
 * @param options           request options (timeout)
 * @return a result exposing one future per requested replica
 */
@Override
public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment,
                                                     final AlterReplicaLogDirsOptions options) {
    // One future per requested replica, completed from the broker responses below.
    final Map<TopicPartitionReplica, KafkaFutureImpl<Void>> futures = new HashMap<>(replicaAssignment.size());
    for (TopicPartitionReplica replica : replicaAssignment.keySet())
        futures.put(replica, new KafkaFutureImpl<>());
    // Regroup the flat replica->dir assignment into per-broker request payloads,
    // nesting by log dir, then topic, then partition.
    Map<Integer, AlterReplicaLogDirsRequestData> replicaAssignmentByBroker = new HashMap<>();
    for (Map.Entry<TopicPartitionReplica, String> entry: replicaAssignment.entrySet()) {
        TopicPartitionReplica replica = entry.getKey();
        String logDir = entry.getValue();
        int brokerId = replica.brokerId();
        AlterReplicaLogDirsRequestData value = replicaAssignmentByBroker.computeIfAbsent(brokerId,
            key -> new AlterReplicaLogDirsRequestData());
        AlterReplicaLogDir alterReplicaLogDir = value.dirs().find(logDir);
        if (alterReplicaLogDir == null) {
            alterReplicaLogDir = new AlterReplicaLogDir();
            alterReplicaLogDir.setPath(logDir);
            value.dirs().add(alterReplicaLogDir);
        }
        AlterReplicaLogDirTopic alterReplicaLogDirTopic = alterReplicaLogDir.topics().find(replica.topic());
        if (alterReplicaLogDirTopic == null) {
            alterReplicaLogDirTopic = new AlterReplicaLogDirTopic().setName(replica.topic());
            alterReplicaLogDir.topics().add(alterReplicaLogDirTopic);
        }
        alterReplicaLogDirTopic.partitions().add(replica.partition());
    }
    final long now = time.milliseconds();
    // Issue one call per broker; each call only touches that broker's futures.
    for (Map.Entry<Integer, AlterReplicaLogDirsRequestData> entry: replicaAssignmentByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final AlterReplicaLogDirsRequestData assignment = entry.getValue();
        runnable.call(new Call("alterReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()),
            new ConstantNodeIdProvider(brokerId)) {
            @Override
            public AlterReplicaLogDirsRequest.Builder createRequest(int timeoutMs) {
                return new AlterReplicaLogDirsRequest.Builder(assignment);
            }
            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse;
                for (AlterReplicaLogDirTopicResult topicResult: response.data().results()) {
                    for (AlterReplicaLogDirPartitionResult partitionResult: topicResult.partitions()) {
                        TopicPartitionReplica replica = new TopicPartitionReplica(
                            topicResult.topicName(), partitionResult.partitionIndex(), brokerId);
                        KafkaFutureImpl<Void> future = futures.get(replica);
                        if (future == null) {
                            // Response mentions a replica we never asked about; warn and skip.
                            log.warn("The partition {} in the response from broker {} is not in the request",
                                new TopicPartition(topicResult.topicName(), partitionResult.partitionIndex()),
                                brokerId);
                        } else if (partitionResult.errorCode() == Errors.NONE.code()) {
                            future.complete(null);
                        } else {
                            future.completeExceptionally(Errors.forCode(partitionResult.errorCode()).exception());
                        }
                    }
                }
                // The server should send back a response for every replica. But do a sanity check anyway.
                completeUnrealizedFutures(
                    futures.entrySet().stream().filter(entry -> entry.getKey().brokerId() == brokerId),
                    replica -> "The response from broker " + brokerId +
                        " did not contain a result for replica " + replica);
            }
            @Override
            void handleFailure(Throwable throwable) {
                // Only completes the futures of brokerId
                completeAllExceptionally(
                    futures.entrySet().stream()
                        .filter(entry -> entry.getKey().brokerId() == brokerId)
                        .map(Map.Entry::getValue),
                    throwable);
            }
        }, now);
    }
    return new AlterReplicaLogDirsResult(new HashMap<>(futures));
}
/** Both replica moves succeed when each broker answers with Errors.NONE. */
@Test
public void testAlterReplicaLogDirsSuccess() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        createAlterLogDirsResponse(env, env.cluster().nodeById(0), Errors.NONE, 0);
        createAlterLogDirsResponse(env, env.cluster().nodeById(1), Errors.NONE, 0);
        TopicPartitionReplica replica0 = new TopicPartitionReplica("topic", 0, 0);
        TopicPartitionReplica replica1 = new TopicPartitionReplica("topic", 0, 1);
        Map<TopicPartitionReplica, String> logDirs = new HashMap<>();
        logDirs.put(replica0, "/data0");
        logDirs.put(replica1, "/data1");
        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(logDirs);
        assertNull(result.values().get(replica0).get());
        assertNull(result.values().get(replica1).get());
    }
}
/**
 * Builds the endpoint provider that commits the pushed BLOB at the given
 * upload location.
 *
 * @param location the upload URL to commit against
 * @return a committer endpoint provider
 */
RegistryEndpointProvider<Void> committer(URL location) {
    return new Committer(location);
}
/** The commit endpoint must have no request body. */
@Test
public void testCommitter_getContent() {
    Object content = testBlobPusher.committer(mockUrl).getContent();
    Assert.assertNull(content);
}
/**
 * Evaluates a JavaScript snippet and wraps the result in a {@link Variable}.
 * On failure, records the failure reason on this engine and rethrows it as a
 * {@link KarateException}.
 *
 * @param js the JavaScript source to evaluate
 * @return the evaluation result wrapped as a Variable
 */
public Variable evalJs(String js) {
    try {
        return new Variable(JS.eval(js));
    } catch (Exception e) {
        KarateException wrapped = JsEngine.fromJsEvalException(js, e, null);
        setFailedReason(wrapped);
        throw wrapped;
    }
}
/**
 * Embedded expressions inside list literals: '#(foo)' substitutes the
 * variable's value (including an explicit null), while '##(foo)' removes the
 * element entirely when the variable is null.
 */
@Test
void testEmbeddedList() {
    engine.evalJs("var foo = 3");
    matchEval("[1, 2, '#(foo)']", "[1, 2, 3]");
    engine.evalJs("var foo = [3, 4]");
    matchEval("[1, 2, '#(foo)']", "[1, 2, [3, 4]]");
    engine.evalJs("var foo = null");
    matchEval("[1, 2, '#(foo)']", "[1, 2, null]");
    matchEval("[1, 2, '##(foo)']", "[1, 2]");
    matchEval("[1, '##(foo)', 3]", "[1, 3]");
    engine.evalJs("var bar = null");
    matchEval("['##(foo)', 2, '##(bar)']", "[2]");
}
/**
 * Computes the exponential back-off delay for the given attempt, capped at
 * {@code maxWait}; an overflowed (negative) product also falls back to the cap.
 *
 * @param attempt zero-based retry attempt; must not be negative
 * @return the sleep time, never above maxWait
 */
@Override
public long sleepTime(final long attempt) {
    checkArgument(attempt >= 0, "attempt must not be negative (%s)", attempt);
    final long uncapped = initialWait * Math.round(Math.pow(2, attempt));
    if (uncapped >= 0 && uncapped < maxWait) {
        return uncapped;
    }
    return maxWait;
}
/** Consecutive attempts must double the sleep time while below the cap. */
@Test
void testExponentialGrowth() {
    final ExponentialWaitStrategy strategy = new ExponentialWaitStrategy(1, 1000);
    final long ratio = strategy.sleepTime(3) / strategy.sleepTime(2);
    assertThat(ratio).isEqualTo(2L);
}
/**
 * Parses a Caffeine configuration specification string, one delimited option
 * at a time.
 *
 * @param specification the textual cache configuration
 * @return the parsed specification
 * @throws IllegalArgumentException if an option is malformed
 */
@SuppressWarnings("StringSplitter")
public static CaffeineSpec parse(String specification) {
    CaffeineSpec spec = new CaffeineSpec(specification);
    String[] options = specification.split(SPLIT_OPTIONS);
    for (String option : options) {
        String trimmed = option.trim();
        spec.parseOption(trimmed);
    }
    return spec;
}
/** Malformed key/value options must all be rejected. */
@Test
public void parse_exception() {
    String[] invalidSpecs = {"=", "==", "key=", "=value", "key=value="};
    for (String invalid : invalidSpecs) {
        assertThrows(IllegalArgumentException.class, () -> CaffeineSpec.parse(invalid));
    }
}