focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Finds all jar files under the configured search folder.
 *
 * @return the jar files discovered below {@code searchLibDir}
 * @throws KettleFileException if the directory scan fails
 */
@Override
public FileObject[] findJarFiles() throws KettleFileException {
    // Delegate to the directory-scanning overload with the configured root.
    final FileObject[] jars = findJarFiles( searchLibDir );
    return jars;
}
// Verifies that findJarFiles() ignores a directory whose name merely contains
// "jar" and ignores a non-jar file: the result must be non-null and empty.
@Test public void testFindJarFiles_DirWithJarInNameNotAddedAndTxtFileNotAdded() throws IOException, KettleFileException {
    Files.createDirectories( PATH_TO_DIR_WITH_JAR_IN_NAME );
    Files.createFile( PATH_TO_NOT_JAR_FILE );
    FileObject[] findJarFiles = plFolder.findJarFiles();
    assertNotNull( findJarFiles );
    assertEquals( 0, findJarFiles.length );
}
// Empties the queue in O(1) by zeroing the size counter only; nothing else is
// touched, so the backing storage (and any element index bookkeeping) is left
// as-is — hence "ignoring indexes".
@Override public void clearIgnoringIndexes() { size = 0; }
// After clearIgnoringIndexes() the queue must be empty, previously-inserted
// elements must be rejected (their stale index makes offer throw), and fresh
// elements must still be accepted.
@Test public void testClearIgnoringIndexes() {
    PriorityQueue<TestElement> queue = new DefaultPriorityQueue<TestElement>(TestElementComparator.INSTANCE, 0);
    assertEmptyQueue(queue);
    TestElement a = new TestElement(5);
    TestElement b = new TestElement(10);
    TestElement c = new TestElement(2);
    TestElement d = new TestElement(6);
    TestElement e = new TestElement(11);
    assertOffer(queue, a);
    assertOffer(queue, b);
    assertOffer(queue, c);
    assertOffer(queue, d);
    queue.clearIgnoringIndexes();
    assertEmptyQueue(queue);
    // Elements cannot be re-inserted but new ones can.
    try {
        queue.offer(a);
        fail();
    } catch (IllegalArgumentException t) {
        // expected
    }
    assertOffer(queue, e);
    assertSame(e, queue.peek());
}
// Queues an outbound send on the channel for the given destination.
// If the channel is already closing, only records a failed send so the caller
// is notified via `disconnected` on the next poll. On any setSend failure the
// channel is marked FAILED_SEND and closed without notification; the exception
// is rethrown unless it is a CancelledKeyException (expected during close).
public void send(NetworkSend send) {
    String connectionId = send.destinationId();
    KafkaChannel channel = openOrClosingChannelOrFail(connectionId);
    if (closingChannels.containsKey(connectionId)) {
        // ensure notification via `disconnected`, leave channel in the state in which closing was triggered
        this.failedSends.add(connectionId);
    } else {
        try {
            channel.setSend(send);
        } catch (Exception e) {
            // update the state for consistency, the channel will be discarded after `close`
            channel.state(ChannelState.FAILED_SEND);
            // ensure notification via `disconnected` when `failedSends` are processed in the next poll
            this.failedSends.add(connectionId);
            close(channel, CloseMode.DISCARD_NO_NOTIFY);
            if (!(e instanceof CancelledKeyException)) {
                log.error("Unexpected exception during send, closing connection {} and rethrowing exception.", connectionId, e);
                throw e;
            }
        }
    }
}
// Sending to a connection id that was never established must fail fast with
// IllegalStateException (presumably from openOrClosingChannelOrFail — confirm).
@Test public void testSendWithoutConnecting() {
    assertThrows(IllegalStateException.class, () -> selector.send(createSend("0", "test")));
}
/**
 * Describes this trigger as the base expression followed by one
 * {@code .plusDelayOf(...)} segment per registered timestamp mapper,
 * in registration order.
 */
@Override
public String toString() {
    final StringBuilder text = new StringBuilder("AfterProcessingTime.pastFirstElementInPane()");
    for (final SerializableFunction<Instant, Instant> mapper : timestampMappers) {
        text.append(".plusDelayOf(").append(mapper).append(")");
    }
    return text.toString();
}
// With no delay mappers registered, toString() is just the base description.
@Test public void testToString() {
    TriggerStateMachine trigger = AfterProcessingTimeStateMachine.pastFirstElementInPane();
    assertEquals("AfterProcessingTime.pastFirstElementInPane()", trigger.toString());
}
// Converts a Graylog Message into a GELF message.
// Timestamp: taken from the message's timestamp field when it is a DateTime,
// otherwise the current UTC time; converted to fractional seconds for GELF.
// Level and full_message are only set when present on the source message.
protected GelfMessage toGELFMessage(final Message message) {
    final DateTime timestamp;
    final Object fieldTimeStamp = message.getField(Message.FIELD_TIMESTAMP);
    if (fieldTimeStamp instanceof DateTime) {
        timestamp = (DateTime) fieldTimeStamp;
    } else {
        timestamp = Tools.nowUTC();
    }
    final GelfMessageLevel messageLevel = extractLevel(message.getField(Message.FIELD_LEVEL));
    final String fullMessage = (String) message.getField(Message.FIELD_FULL_MESSAGE);
    final String forwarder = GelfOutput.class.getCanonicalName();
    final GelfMessageBuilder builder = new GelfMessageBuilder(message.getMessage(), message.getSource())
            .timestamp(timestamp.getMillis() / 1000.0d)
            .additionalField("_forwarder", forwarder)
            .additionalFields(message.getFields());
    if (messageLevel != null) {
        builder.level(messageLevel);
    }
    if (fullMessage != null) {
        builder.fullMessage(fullMessage);
    }
    return builder.build();
}
// A numeric level supplied as the string "6" must map to GelfMessageLevel.INFO.
@Test public void testToGELFMessageWithValidStringLevel() throws Exception {
    final GelfTransport transport = mock(GelfTransport.class);
    final GelfOutput gelfOutput = new GelfOutput(transport);
    final DateTime now = DateTime.now(DateTimeZone.UTC);
    final Message message = messageFactory.createMessage("Test", "Source", now);
    message.addField("level", "6");
    final GelfMessage gelfMessage = gelfOutput.toGELFMessage(message);
    assertEquals(GelfMessageLevel.INFO, gelfMessage.getLevel());
}
// Instantiates the CopyFilter implementation named by the
// CONF_LABEL_FILTERS_CLASS config key via its Configuration constructor;
// falls back to the default filter when no class is configured.
// Any reflection failure is logged and rethrown as a RuntimeException.
public static CopyFilter getCopyFilter(Configuration conf) {
    String filtersClassName = conf.get(DistCpConstants.CONF_LABEL_FILTERS_CLASS);
    if (filtersClassName != null) {
        try {
            Class<? extends CopyFilter> filtersClass = conf
                    .getClassByName(filtersClassName)
                    .asSubclass(CopyFilter.class);
            // normalize to the resolved class name for error reporting
            filtersClassName = filtersClass.getName();
            Constructor<? extends CopyFilter> constructor = filtersClass
                    .getDeclaredConstructor(Configuration.class);
            return constructor.newInstance(conf);
        } catch (Exception e) {
            LOG.error(DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG + filtersClassName, e);
            throw new RuntimeException(DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG + filtersClassName, e);
        }
    } else {
        return getDefaultCopyFilter(conf);
    }
}
// With only a filters FILE configured (no filters CLASS), the default path is
// taken, which is expected to yield a RegexCopyFilter — confirm against
// getDefaultCopyFilter's behavior.
@Test public void testGetCopyFilterRegexCopyFilter() {
    Configuration configuration = new Configuration(false);
    configuration.set(DistCpConstants.CONF_LABEL_FILTERS_FILE, "random");
    CopyFilter copyFilter = CopyFilter.getCopyFilter(configuration);
    assertTrue("copyFilter should be instance of RegexCopyFilter", copyFilter instanceof RegexCopyFilter);
}
// Builds the Thrift descriptor for this file table: location, columns, the
// serde/input-format derived from the "format" property, and "#"-joined Hive
// column types ("," for names).
@Override public TTableDescriptor toThrift(List<DescriptorTable.ReferencedPartitionInfo> partitions) {
    TFileTable tFileTable = new TFileTable();
    tFileTable.setLocation(getTableLocation());
    List<TColumn> tColumns = Lists.newArrayList();
    for (Column column : getBaseSchema()) {
        tColumns.add(column.toThrift());
    }
    tFileTable.setColumns(tColumns);
    TTableDescriptor tTableDescriptor = new TTableDescriptor(id, TTableType.FILE_TABLE, fullSchema.size(), 0, "", "");
    tTableDescriptor.setFileTable(tFileTable);
    HiveStorageFormat storageFormat = HiveStorageFormat.get(fileProperties.get(JSON_KEY_FORMAT));
    tFileTable.setSerde_lib(storageFormat.getSerde());
    tFileTable.setInput_format(storageFormat.getInputFormat());
    String columnNames = fullSchema.stream().map(Column::getName).collect(Collectors.joining(","));
    //when create table with string type, sr will change string to varchar(65533) in parser, but hive need string.
    // we have no choice but to transfer varchar(65533) into string explicitly in external table for avro/rcfile/sequence
    String columnTypes = fullSchema.stream().map(Column::getType).map(ColumnTypeConverter::toHiveType)
            .map(type -> type.replace("varchar(65533)", "string"))
            .collect(Collectors.joining("#"));
    tFileTable.setHive_column_names(columnNames);
    tFileTable.setHive_column_types(columnTypes);
    tFileTable.setTime_zone(TimeUtils.getSessionTimeZone());
    return tTableDescriptor;
}
// Creating an external avro file table must propagate the avro input format,
// serde, and the comma/hash-joined Hive column names/types into the Thrift
// descriptor.
@Test public void testCreateTextExternalTableFormat() throws Exception {
    String createTableSql = "create external table if not exists db.file_tbl (col1 int, col2 int, col3 string) engine=file properties " +
            "(\"path\"=\"hdfs://127.0.0.1:10000/hive/\", \"format\"=\"avro\")";
    CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createTableSql, connectContext);
    com.starrocks.catalog.Table table = createTable(createTableStmt);
    Assert.assertTrue(table instanceof FileTable);
    FileTable fileTable = (FileTable) table;
    List<DescriptorTable.ReferencedPartitionInfo> partitions = new ArrayList<>();
    TTableDescriptor tTableDescriptor = fileTable.toThrift(partitions);
    Assert.assertEquals(tTableDescriptor.getFileTable().getInput_format(), HiveStorageFormat.get("avro").getInputFormat());
    Assert.assertEquals(tTableDescriptor.getFileTable().getSerde_lib(), HiveStorageFormat.get("avro").getSerde());
    Assert.assertEquals(tTableDescriptor.getFileTable().getHive_column_names(), "col1,col2,col3");
    Assert.assertEquals(tTableDescriptor.getFileTable().getHive_column_types(), "int#int#string");
}
/**
 * Pre-interception hook: when the origin register center is still in use the
 * call proceeds untouched; otherwise the intercepted method is skipped with an
 * empty result (wrapped as a Flux for the reactive discovery client).
 *
 * @param context the intercepted invocation context
 * @return the same context, possibly marked as skipped
 */
@Override
public ExecuteContext doBefore(ExecuteContext context) {
    final boolean originRegistryActive = RegisterContext.INSTANCE.isAvailable()
            && !RegisterDynamicConfig.INSTANCE.isNeedCloseOriginRegisterCenter();
    if (originRegistryActive) {
        return context;
    }
    final Object target = context.getObject();
    if (isWebfLux(target)) {
        context.skip(Flux.fromIterable(Collections.emptyList()));
    } else {
        context.skip(Collections.emptyList());
    }
    return context;
}
// Covers three scenarios: doAfter merging results when the registry is
// available (plain List and Flux variants), and doBefore skipping the call
// when the registry is unavailable.
// NOTE(review): the method name says doBefore but the first two scenarios
// exercise doAfter — intent appears to be covering both hooks.
@Test public void doBefore() throws NoSuchMethodException {
    // Normal scenarios where isAvailable is true
    RegisterContext.INSTANCE.setAvailable(true);
    REGISTER_CONFIG.setEnableSpringRegister(true);
    REGISTER_CONFIG.setOpenMigration(true);
    final ExecuteContext context = interceptor.doAfter(buildContext(client, null, originServices));
    Assert.assertTrue(context.getResult() instanceof List);
    Assert.assertEquals(((List<?>) context.getResult()).size(), originServices.size() + services.size());
    // IsWebfLux scenario where isAvailable is true
    final ExecuteContext fluxContext = interceptor.doAfter(
            buildContext(reactiveCompositeDiscoveryClient, null, Flux.fromIterable(originServices)));
    Assert.assertTrue(fluxContext.getResult() instanceof Flux);
    final List<?> block = ((Flux<?>) fluxContext.getResult()).collectList().block();
    Assert.assertNotNull(block);
    Assert.assertEquals(block.size(), originServices.size() + services.size());
    // A normal scenario where isAvailable is false
    RegisterContext.INSTANCE.setAvailable(false);
    final ExecuteContext NotAvailableContext = interceptor.doBefore(
            buildContext(client, null));
    Assert.assertTrue(NotAvailableContext.isSkip());
    REGISTER_CONFIG.setEnableSpringRegister(false);
    REGISTER_CONFIG.setOpenMigration(false);
}
// Returns the API method name (endpoint path constant) for this request.
@Override public String getMethod() { return PATH; }
// A fully-populated AnswerWebAppQuery must report its method name and pass
// validation without throwing.
@Test public void testAnswerWebAppQueryWithAllSet() {
    AnswerWebAppQuery answerWebAppQuery = AnswerWebAppQuery
            .builder()
            .webAppQueryId("123456789")
            .queryResult(InlineQueryResultArticle
                    .builder()
                    .id("MyId")
                    .title("Text")
                    .inputMessageContent(InputTextMessageContent
                            .builder()
                            .messageText("My own text")
                            .build())
                    .build())
            .build();
    assertEquals("answerWebAppQuery", answerWebAppQuery.getMethod());
    assertDoesNotThrow(answerWebAppQuery::validate);
}
/**
 * Returns a subject over the fact keys of the actual failure.
 * Fails the assertion (and returns an ignored check) when the actual value
 * is not an {@code ErrorWithFacts} produced by Truth's failure API.
 */
public IterableSubject factKeys() {
    if (actual instanceof ErrorWithFacts) {
        ErrorWithFacts error = (ErrorWithFacts) actual;
        return check("factKeys()").that(getFactKeys(error));
    }
    failWithActual(simpleFact("expected a failure thrown by Truth's failure API"));
    return ignoreCheck().that(ImmutableList.of());
}
// A single fact("foo", ...) must surface exactly the key "foo".
@Test public void factKeys() {
    assertThat(fact("foo", "the foo")).factKeys().containsExactly("foo");
}
// Accepts one element into the batching buffer.
// Backpressure: while the buffer is at capacity, flush() is called repeatedly
// (it may block) until space frees up. The element is then converted, buffered,
// and a non-blocking flush is attempted.
@Override public void write(InputT element, Context context) throws IOException, InterruptedException {
    while (bufferedRequestEntries.size() >= maxBufferedRequests) {
        flush();
    }
    addEntryToBuffer(elementConverter.apply(element, context), false);
    nonBlockingFlush();
}
// A record whose serialized size (4 bytes) exceeds maxRecordSizeInBytes (3)
// must be rejected with an IllegalArgumentException carrying both sizes.
@Test public void recordsWrittenToTheSinkMustBeSmallerOrEqualToMaxRecordSizeInBytes() {
    AsyncSinkWriterImpl sink = new AsyncSinkWriterImplBuilder()
            .context(sinkInitContext)
            .maxBatchSize(3)
            .maxBufferedRequests(11)
            .maxBatchSizeInBytes(10_000)
            .maxRecordSizeInBytes(3)
            .build();
    assertThatThrownBy(() -> sink.write("3"))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage(
                    "The request entry sent to the buffer was of size [4], when "
                            + "the maxRecordSizeInBytes was set to [3].");
}
/**
 * Dispatches {@code jibEvent}: first to the handlers registered for the
 * generic {@code JibEvent.class}, then to those registered for the event's
 * concrete class. No-op when no handlers are registered at all.
 */
public void dispatch(JibEvent jibEvent) {
    if (handlers.isEmpty()) {
        return;
    }
    handlers.get(JibEvent.class).forEach(h -> h.handle(jibEvent));
    handlers.get(jibEvent.getClass()).forEach(h -> h.handle(jibEvent));
}
// For each dispatched event the generic JibEvent handler fires first, then the
// class-specific handlers in registration order.
@Test public void testDispatch() {
    List<String> emissions = new ArrayList<>();
    EventHandlers eventHandlers = EventHandlers.builder()
            .add(TestJibEvent2.class, testJibEvent2 -> emissions.add("handled 2 first"))
            .add(TestJibEvent2.class, testJibEvent2 -> emissions.add("handled 2 second"))
            .add(TestJibEvent3.class, testJibEvent3 -> emissions.add("handled 3"))
            .add(JibEvent.class, jibEvent -> emissions.add("handled generic"))
            .build();
    TestJibEvent2 testJibEvent2 = new TestJibEvent2();
    TestJibEvent3 testJibEvent3 = new TestJibEvent3();
    eventHandlers.dispatch(testJibEvent2);
    eventHandlers.dispatch(testJibEvent3);
    Assert.assertEquals(
            Arrays.asList(
                    "handled generic", "handled 2 first", "handled 2 second", "handled generic", "handled 3"),
            emissions);
}
// True when this tag set contains the exact tag string (case-sensitive,
// as delegated to the underlying Set).
public boolean contains(String tag) { return tags.contains(tag); }
// contains() must match each member exactly and reject non-members;
// containsAll() must be asymmetric between a set and its strict subset.
@Test public void testContains() {
    Tags tags = new Tags(Set.of("a", "tag2", "3"));
    assertTrue(tags.contains("a"));
    assertTrue(tags.contains("tag2"));
    assertTrue(tags.contains("3"));
    assertFalse(tags.contains("other"));
    Tags subTags = new Tags(Set.of("a", "3"));
    assertTrue(tags.containsAll(subTags));
    assertFalse(subTags.containsAll(tags));
}
// Assembles the serde registry for one cluster:
// 1. explicitly configured serdes (validated for a non-empty, unique name),
//    each with its own per-index property resolver;
// 2. remaining built-in serdes that auto-configure successfully, registered
//    with empty selection patterns;
// 3. topic-related serdes; then resolves the default key/value serdes
//    (value falls back to SchemaRegistry, then ProtobufFile) and a fallback.
public ClusterSerdes init(Environment env, ClustersProperties clustersProperties, int clusterIndex) {
    ClustersProperties.Cluster clusterProperties = clustersProperties.getClusters().get(clusterIndex);
    log.debug("Configuring serdes for cluster {}", clusterProperties.getName());
    var globalPropertiesResolver = new PropertyResolverImpl(env);
    var clusterPropertiesResolver = new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex);
    Map<String, SerdeInstance> registeredSerdes = new LinkedHashMap<>();
    // initializing serdes from config
    if (clusterProperties.getSerde() != null) {
        for (int i = 0; i < clusterProperties.getSerde().size(); i++) {
            SerdeConfig serdeConfig = clusterProperties.getSerde().get(i);
            if (Strings.isNullOrEmpty(serdeConfig.getName())) {
                throw new ValidationException("'name' property not set for serde: " + serdeConfig);
            }
            if (registeredSerdes.containsKey(serdeConfig.getName())) {
                throw new ValidationException("Multiple serdes with same name: " + serdeConfig.getName());
            }
            var instance = createSerdeFromConfig(
                    serdeConfig,
                    new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex + ".serde." + i + ".properties"),
                    clusterPropertiesResolver,
                    globalPropertiesResolver
            );
            registeredSerdes.put(serdeConfig.getName(), instance);
        }
    }
    // initializing remaining built-in serdes with empty selection patterns
    builtInSerdeClasses.forEach((name, clazz) -> {
        if (!registeredSerdes.containsKey(name)) {
            BuiltInSerde serde = createSerdeInstance(clazz);
            if (autoConfigureSerde(serde, clusterPropertiesResolver, globalPropertiesResolver)) {
                registeredSerdes.put(name, new SerdeInstance(name, serde, null, null, null));
            }
        }
    });
    registerTopicRelatedSerde(registeredSerdes);
    return new ClusterSerdes(
            registeredSerdes,
            Optional.ofNullable(clusterProperties.getDefaultKeySerde())
                    .map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default key serde not found"))
                    .orElse(null),
            Optional.ofNullable(clusterProperties.getDefaultValueSerde())
                    .map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default value serde not found"))
                    .or(() -> Optional.ofNullable(registeredSerdes.get(SchemaRegistrySerde.name())))
                    .or(() -> Optional.ofNullable(registeredSerdes.get(ProtobufFileSerde.name())))
                    .orElse(null),
            createFallbackSerde()
    );
}
// A serde config reusing a built-in name but carrying its own properties and
// topic patterns must be treated as explicitly configured (not auto-configured)
// and keep its configured patterns.
@Test
void serdeWithBuiltInNameAndSetPropertiesAreExplicitlyConfigured() {
    ClustersProperties.SerdeConfig serdeConfig = new ClustersProperties.SerdeConfig();
    serdeConfig.setName("BuiltIn1");
    serdeConfig.setProperties(Map.of("any", "property"));
    serdeConfig.setTopicKeysPattern("keys");
    serdeConfig.setTopicValuesPattern("vals");
    var serdes = init(serdeConfig);
    SerdeInstance explicitlyConfiguredSerde = serdes.serdes.get("BuiltIn1");
    verifyExplicitlyConfigured(explicitlyConfiguredSerde);
    verifyPatternsMatch(serdeConfig, explicitlyConfiguredSerde);
}
/**
 * Creates an AvroSchema from a schema definition.
 * When both a custom reader and writer are supplied they are used directly;
 * otherwise the class loader is taken from the definition, falling back to
 * the POJO's own class loader when one is present.
 */
public static <T> AvroSchema<T> of(SchemaDefinition<T> schemaDefinition) {
    final boolean hasCustomReaderAndWriter =
            schemaDefinition.getSchemaReaderOpt().isPresent() && schemaDefinition.getSchemaWriterOpt().isPresent();
    if (hasCustomReaderAndWriter) {
        return new AvroSchema<>(schemaDefinition.getSchemaReaderOpt().get(),
                schemaDefinition.getSchemaWriterOpt().get(),
                parseSchemaInfo(schemaDefinition, SchemaType.AVRO));
    }
    ClassLoader pojoClassLoader = schemaDefinition.getClassLoader();
    if (pojoClassLoader == null && schemaDefinition.getPojo() != null) {
        pojoClassLoader = schemaDefinition.getPojo().getClassLoader();
    }
    return new AvroSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.AVRO), pojoClassLoader);
}
// Round-trip: a POJO encoded to bytes and wrapped in a Netty ByteBuf must
// decode back to an equal object.
@Test public void testDecodeByteBuf() {
    AvroSchema<Foo> avroSchema = AvroSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());
    Foo foo1 = new Foo();
    foo1.setField1("foo1");
    foo1.setField2("bar1");
    foo1.setField4(new Bar());
    foo1.setFieldUnableNull("notNull");
    Foo foo2 = new Foo();
    foo2.setField1("foo2");
    foo2.setField2("bar2");
    byte[] bytes1 = avroSchema.encode(foo1);
    ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer(bytes1.length);
    byteBuf.writeBytes(bytes1);
    Foo object1 = avroSchema.decode(byteBuf);
    Assert.assertTrue(bytes1.length > 0);
    assertEquals(object1, foo1);
}
public static boolean isBasicInfoChanged(Member actual, Member expected) { if (null == expected) { return null != actual; } if (!expected.getIp().equals(actual.getIp())) { return true; } if (expected.getPort() != actual.getPort()) { return true; } if (!expected.getAddress().equals(actual.getAddress())) { return true; } if (!expected.getState().equals(actual.getState())) { return true; } // if change if (expected.isGrpcReportEnabled() != actual.isGrpcReportEnabled()) { return true; } return isBasicInfoChangedInExtendInfo(expected, actual); }
// Changing only the IP must be reported as a basic-info change.
@Test
void testIsBasicInfoChangedForIp() {
    Member newMember = buildMember();
    newMember.setIp("1.1.1.2");
    assertTrue(MemberUtil.isBasicInfoChanged(newMember, originalMember));
}
// Evaluates the compiled expression against a row.
// Arguments passed to the generated evaluator: resolved column values, the
// default value, the processing logger, and the raw row. Any failure
// (unwrapping InvocationTargetException to its cause) is logged as a record
// processing error and the default value is returned instead of rethrowing.
public Object evaluate(
    final GenericRow row,
    final Object defaultValue,
    final ProcessingLogger logger,
    final Supplier<String> errorMsg
) {
    try {
        return expressionEvaluator.evaluate(new Object[]{ spec.resolveArguments(row), defaultValue, logger, row });
    } catch (final Exception e) {
        final Throwable cause = e instanceof InvocationTargetException ? e.getCause() : e;
        logger.error(RecordProcessingError.recordProcessingError(errorMsg.get(), cause, row));
        return defaultValue;
    }
}
// The evaluator must be invoked with the resolved column values keyed as
// var0/var1 (in spec order), plus default value, logger, and the row itself.
@Test public void shouldEvaluateExpressionWithValueColumnSpecs() throws Exception {
    // Given:
    spec.addParameter(
        ColumnName.of("foo1"),
        Integer.class,
        0
    );
    spec.addParameter(
        ColumnName.of("foo2"),
        Integer.class,
        1
    );
    compiledExpression = new CompiledExpression(
        expressionEvaluator,
        spec.build(),
        EXPRESSION_TYPE,
        expression
    );
    // When:
    final Object result = compiledExpression
        .evaluate(genericRow(123, 456), DEFAULT_VAL, processingLogger, errorMsgSupplier);
    // Then:
    assertThat(result, equalTo(RETURN_VALUE));
    final Map<String, Object> arguments = new HashMap<>();
    arguments.put("var0", 123);
    arguments.put("var1", 456);
    verify(expressionEvaluator).evaluate(new Object[]{arguments, DEFAULT_VAL, processingLogger, genericRow(123, 456)});
}
@Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { long size = request.getContentLengthLong(); if (size > maxSize || isChunked(request)) { // Size it's either unknown or too large HttpServletResponse httpResponse = (HttpServletResponse) response; httpResponse.sendError(HttpServletResponse.SC_BAD_REQUEST, "Bad Request"); } else { chain.doFilter(request, response); } }
@Test public void testDoFilterSendError() throws ServletException, IOException { MaxRequestSizeFilter maxRequestSizeFilter = new MaxRequestSizeFilter(MAX_SIZE); FilterChain mockFilterChain = Mockito.mock(FilterChain.class); // the size grater than max size HttpServletRequest spyHttpServletRequest = Mockito.spy(HttpServletRequest.class); HttpServletResponse spyHttpServletResponse = Mockito.spy(HttpServletResponse.class); Mockito.doReturn(ILLEGAL_SIZE).when(spyHttpServletRequest).getContentLengthLong(); maxRequestSizeFilter.doFilter(spyHttpServletRequest, spyHttpServletResponse, mockFilterChain); Mockito.verify(spyHttpServletResponse).sendError(HttpServletResponse.SC_BAD_REQUEST, "Bad Request"); // the request is chunked HttpServletRequest spyHttpServletRequest2 = Mockito.spy(HttpServletRequest.class); HttpServletResponse spyHttpServletResponse2 = Mockito.spy(HttpServletResponse.class); Mockito.doReturn(LEGAL_SIZE).when(spyHttpServletRequest2).getContentLengthLong(); Mockito.doReturn("chunked").when(spyHttpServletRequest2).getHeader("Transfer-Encoding"); maxRequestSizeFilter.doFilter(spyHttpServletRequest2, spyHttpServletResponse2, mockFilterChain); Mockito.verify(spyHttpServletResponse).sendError(HttpServletResponse.SC_BAD_REQUEST, "Bad Request"); }
// Blocking facade over the async lease-time update; waits for completion and
// returns whether the permit's lease was actually updated.
@Override public boolean updateLeaseTime(String permitId, long leaseTime, TimeUnit unit) {
    return get(updateLeaseTimeAsync(permitId, leaseTime, unit));
}
// updateLeaseTime must return false for unknown/expired permit ids, true for
// live ones, and the new lease must govern when the permit is released back.
@Test public void testUpdateLeaseTime() throws InterruptedException {
    RPermitExpirableSemaphore semaphore = redisson.getPermitExpirableSemaphore("test");
    semaphore.trySetPermits(1);
    // unknown permit id
    assertThat(semaphore.updateLeaseTime("1234", 1, TimeUnit.SECONDS)).isFalse();
    String id = semaphore.acquire();
    assertThat(semaphore.updateLeaseTime(id, 1, TimeUnit.SECONDS)).isTrue();
    // permit expired after its 1s lease
    Thread.sleep(1200);
    assertThat(semaphore.updateLeaseTime(id, 1, TimeUnit.SECONDS)).isFalse();
    String id2 = semaphore.tryAcquire(1, 1, TimeUnit.SECONDS);
    assertThat(semaphore.updateLeaseTime(id2, 3, TimeUnit.SECONDS)).isTrue();
    // still held under the extended 3s lease...
    Thread.sleep(2800);
    assertThat(semaphore.availablePermits()).isZero();
    // ...and released once it elapses
    Thread.sleep(500);
    assertThat(semaphore.availablePermits()).isOne();
    assertThat(semaphore.updateLeaseTime(id2, 2, TimeUnit.SECONDS)).isFalse();
}
/**
 * Maps a Hadoop compression codec class name to the equivalent Avro
 * {@link CodecFactory}. Returns null when the name has no Avro mapping;
 * wraps any lookup/creation failure in an AvroRuntimeException.
 */
public static CodecFactory fromHadoopString(String hadoopCodecClass) {
    try {
        final String avroCodec = HADOOP_AVRO_NAME_MAP.get(hadoopCodecClass);
        return avroCodec == null ? null : CodecFactory.fromString(avroCodec);
    } catch (Exception e) {
        throw new AvroRuntimeException("Unrecognized hadoop codec: " + hadoopCodecClass, e);
    }
}
// The Hadoop ZStandardCodec class name must map to the same factory type as
// Avro's native "zstandard" codec.
@Test
void hadoopCodecFactoryZstd() {
    CodecFactory hadoopZstdCodec = HadoopCodecFactory.fromHadoopString("org.apache.hadoop.io.compress.ZStandardCodec");
    CodecFactory avroZstdCodec = CodecFactory.fromString("zstandard");
    assertEquals(hadoopZstdCodec.getClass(), avroZstdCodec.getClass());
}
@Deprecated // TODO: Remove in Smack 4.5. public void setPriority(int priority) { if (priority < -128 || priority > 127) { throw new IllegalArgumentException("Priority value " + priority + " is not valid. Valid range is -128 through 127."); } setPriority((byte) priority); }
// A priority far outside the byte range must be rejected.
@Test public void setIllegalPriorityTest() {
    assertThrows(IllegalArgumentException.class, () ->
        getNewPresence().setPriority(Integer.MIN_VALUE)
    );
}
// Exposes a live entry-set view of the registered clients (mutations of the
// underlying map are visible through the returned set).
public static Set<Map.Entry<String, RpcClient>> getAllClientEntries() { return CLIENT_MAP.entrySet(); }
// An empty registry yields an empty entry set; injecting one client via
// reflection makes exactly one entry visible.
@Test
void testGetAllClientEntries() throws IllegalAccessException {
    assertTrue(RpcClientFactory.getAllClientEntries().isEmpty());
    clientMapField.set(null, Collections.singletonMap("testClient", rpcClient));
    assertEquals(1, RpcClientFactory.getAllClientEntries().size());
}
/**
 * Encodes the OSM "surface" tag onto the edge. Ways without a recognized
 * surface tag are left untouched (encoder keeps its default).
 */
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay readerWay, IntsRef relationFlags) {
    final Surface surface = Surface.find(readerWay.getTag("surface"));
    if (surface != MISSING) {
        surfaceEnc.setEnum(false, edgeId, edgeIntAccess, surface);
    }
}
// Without a surface tag the encoded value stays MISSING; with one, the parsed
// enum is stored and re-parsing a different value overwrites it.
@Test public void testSimpleTags() {
    IntsRef relFlags = new IntsRef(2);
    ReaderWay readerWay = new ReaderWay(1);
    EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
    int edgeId = 0;
    readerWay.setTag("highway", "primary");
    parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
    assertEquals(Surface.MISSING, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
    readerWay.setTag("surface", "cobblestone");
    parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
    assertEquals(Surface.COBBLESTONE, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
    assertTrue(Surface.COBBLESTONE.ordinal() > Surface.ASPHALT.ordinal());
    readerWay.setTag("surface", "wood");
    parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
    assertEquals(Surface.WOOD, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
}
// Copies user config definitions from the bundle into the definitions dir,
// translating I/O failures into IllegalArgumentException carrying the bundle
// file path for context.
public void addConfigDefinitionsFromBundle(Bundle bundle, List<Bundle> bundlesAdded) {
    try {
        checkAndCopyUserDefs(bundle, bundlesAdded);
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to add config definitions from bundle " + bundle.getFile().getAbsolutePath(), e);
    }
}
// Adding definitions from a bundle must materialize exactly one file in the
// previously-empty definitions directory.
@Test public void require_that_defs_are_added() throws IOException {
    File defDir = temporaryFolder.newFolder();
    ConfigDefinitionDir dir = new ConfigDefinitionDir(defDir);
    Bundle bundle = new Bundle(new JarFile(bundleFile), bundleFile);
    assertEquals(0, defDir.listFiles().length);
    dir.addConfigDefinitionsFromBundle(bundle, new ArrayList<>());
    assertEquals(1, defDir.listFiles().length);
}
/**
 * FEEL list contains(list, element): error on a null list; for a null element
 * defer to List.contains(null); otherwise compare with number coercion via
 * {@code itemEqualsSC}, returning true on the first semantic match.
 */
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list, @ParameterName("element") Object element) {
    if ( list == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
    }
    if (element == null) {
        return FEELFnResult.ofResult(list.contains(element));
    }
    final Object coerced = NumberEvalHelper.coerceNumber(element);
    for (Object item : list) {
        if (itemEqualsSC(coerced, NumberEvalHelper.coerceNumber(item))) {
            return FEELFnResult.ofResult(true);
        }
    }
    return FEELFnResult.ofResult(false);
}
// Lists without a null element must report false when asked for null.
@Test
void invokeNotContainsNull() {
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Collections.emptyList(), null), false);
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Collections.singletonList(1), null), false);
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Arrays.asList(1, 2), null), false);
}
// Simple accessor for the buffer's index within its subpartition.
public int getBufferIndex() { return bufferIndex; }
// The buffer index passed at construction must be returned unchanged.
@Test
void testGetBufferIndex() {
    Buffer buffer = BufferBuilderTestUtils.buildSomeBuffer(0);
    int bufferIndex = 0;
    int subpartitionId = 1;
    NettyPayload nettyPayload = NettyPayload.newBuffer(buffer, bufferIndex, subpartitionId);
    assertThat(nettyPayload.getBufferIndex()).isEqualTo(bufferIndex);
}
// Control action: fetches the statistics string for the subscribe channel
// (header override, falling back to the configured channel) and puts it on
// the message body. Errors are attached to the exchange rather than thrown,
// and the async callback is always completed synchronously (done(false)).
@InvokeOnHeader(CONTROL_ACTION_STATS)
public void performStats(final Exchange exchange, AsyncCallback callback) {
    Message message = exchange.getMessage();
    Map<String, Object> headers = message.getHeaders();
    String subscribeChannel = (String) headers.getOrDefault(CONTROL_SUBSCRIBE_CHANNEL, configuration.getSubscribeChannel());
    try {
        String stats = dynamicRouterControlService.getStatisticsForChannel(subscribeChannel);
        message.setBody(stats, String.class);
    } catch (Exception e) {
        exchange.setException(e);
    } finally {
        callback.done(false);
    }
}
// The stats string returned by the control service must be set as the message
// body exactly once.
@Test
void testPerformStatsAction() {
    String subscribeChannel = "testChannel";
    String statString = "PrioritizedFilterStatistics [id: testId, count: 1, first:12345, last: 23456]";
    Map<String, Object> headers = Map.of(
            CONTROL_ACTION_HEADER, CONTROL_ACTION_STATS,
            CONTROL_SUBSCRIBE_CHANNEL, subscribeChannel);
    when(exchange.getMessage()).thenReturn(message);
    when(message.getHeaders()).thenReturn(headers);
    when(controlService.getStatisticsForChannel(subscribeChannel)).thenReturn("[" + statString + "]");
    Mockito.doNothing().when(callback).done(false);
    producer.performStats(exchange, callback);
    Mockito.verify(message, Mockito.times(1)).setBody("[" + statString + "]", String.class);
}
// Intersects N sorted, non-overlapping range lists (inclusive [left,right]
// int pairs) into one sorted list, merging contiguous output ranges.
// Multi-pointer sweep: repeatedly align all list cursors on a common point
// (maxHead), emit the intersection of the current ranges, then advance every
// cursor whose range ends at the intersection's right edge.
// NOTE(review): size()==1 returns the input list itself (not a copy), and
// emitted pairs are fresh Pairs.intPair instances mutated in place — callers
// should not assume aliasing either way.
public static List<IntPair> intersectSortedRangeSets(List<List<IntPair>> sortedRangeSetList) {
    if (sortedRangeSetList == null || sortedRangeSetList.isEmpty()) {
        return Collections.emptyList();
    }
    if (sortedRangeSetList.size() == 1) {
        return sortedRangeSetList.get(0);
    }
    // if any list is empty return empty
    for (List<IntPair> rangeSet : sortedRangeSetList) {
        if (rangeSet.isEmpty()) {
            return Collections.emptyList();
        }
    }
    int[] currentRangeSetIndex = new int[sortedRangeSetList.size()];
    Arrays.fill(currentRangeSetIndex, 0);
    int maxHead = -1;
    int maxHeadIndex = -1;
    boolean reachedEnd = false;
    List<IntPair> result = new ArrayList<IntPair>();
    while (!reachedEnd) {
        // find max Head in the current pointers
        for (int i = 0; i < sortedRangeSetList.size(); i++) {
            int head = sortedRangeSetList.get(i).get(currentRangeSetIndex[i]).getLeft();
            if (head > maxHead) {
                maxHead = head;
                maxHeadIndex = i;
            }
        }
        // move all pointers forward such that range they point to contain maxHead
        int j = -1;
        while (j++ < sortedRangeSetList.size() - 1) {
            if (j == maxHeadIndex) {
                continue;
            }
            boolean found = false;
            while (!found && currentRangeSetIndex[j] < sortedRangeSetList.get(j).size()) {
                IntPair range = sortedRangeSetList.get(j).get(currentRangeSetIndex[j]);
                if (maxHead >= range.getLeft() && maxHead <= range.getRight()) {
                    found = true;
                    break;
                }
                if (range.getLeft() > maxHead) {
                    // this list starts beyond maxHead: raise maxHead and restart the scan
                    maxHead = range.getLeft();
                    maxHeadIndex = j;
                    j = -1;
                    break;
                }
                currentRangeSetIndex[j] = currentRangeSetIndex[j] + 1;
            }
            // new maxHead found
            if (j == -1) {
                continue;
            }
            if (!found) {
                reachedEnd = true;
                break;
            }
        }
        if (reachedEnd) {
            break;
        }
        // there is definitely some intersection possible here
        IntPair intPair = sortedRangeSetList.get(0).get(currentRangeSetIndex[0]);
        IntPair intersection = Pairs.intPair(intPair.getLeft(), intPair.getRight());
        for (int i = 1; i < sortedRangeSetList.size(); i++) {
            IntPair pair = sortedRangeSetList.get(i).get(currentRangeSetIndex[i]);
            int start = Math.max(intersection.getLeft(), pair.getLeft());
            int end = Math.min(intersection.getRight(), pair.getRight());
            intersection.setLeft(start);
            intersection.setRight(end);
        }
        if (!result.isEmpty()) {
            // if new range is contiguous merge it
            IntPair prevIntersection = result.get(result.size() - 1);
            if (intersection.getLeft() == prevIntersection.getRight() + 1) {
                prevIntersection.setRight(intersection.getRight());
            } else {
                result.add(intersection);
            }
        } else {
            result.add(intersection);
        }
        // move the pointers forward for rangesets where the currenttail == intersection.tail
        for (int i = 0; i < sortedRangeSetList.size(); i++) {
            IntPair pair = sortedRangeSetList.get(i).get(currentRangeSetIndex[i]);
            if (pair.getRight() == intersection.getRight()) {
                currentRangeSetIndex[i] = currentRangeSetIndex[i] + 1;
                if (currentRangeSetIndex[i] == sortedRangeSetList.get(i).size()) {
                    reachedEnd = true;
                    break;
                }
            }
        }
    }
    return result;
}
// {[0,4],[6,10]} ∩ {[4,7],[8,14]} = [4,4] plus [6,7] and [8,10], which are
// contiguous and therefore merged into [6,10].
@Test public void testSimple() {
    List<IntPair> rangeSet1 = Arrays.asList(Pairs.intPair(0, 4), Pairs.intPair(6, 10));
    List<IntPair> rangeSet2 = Arrays.asList(Pairs.intPair(4, 7), Pairs.intPair(8, 14));
    List<List<IntPair>> newArrayList = Arrays.asList(rangeSet1, rangeSet2);
    List<IntPair> intersectionPairs = SortedRangeIntersection.intersectSortedRangeSets(newArrayList);
    // expected (4,4) (6,10)
    Assert.assertEquals(intersectionPairs.size(), 2);
    Assert.assertEquals(intersectionPairs.get(0), Pairs.intPair(4, 4));
    Assert.assertEquals(intersectionPairs.get(1), Pairs.intPair(6, 10));
}
// Concatenates the referenced storage files into one temp file and uploads it.
// `files` may be a List of URIs or a JSON-encoded string of such a list; each
// entry is rendered, streamed into the output, and optionally followed by the
// configured separator. NOTE(review): the separator uses the platform default
// charset via getBytes() — confirm that is intended.
@SuppressWarnings("unchecked")
@Override
public Concat.Output run(RunContext runContext) throws Exception {
    File tempFile = runContext.workingDir().createTempFile(extension).toFile();
    try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) {
        List<String> finalFiles;
        if (this.files instanceof List) {
            finalFiles = (List<String>) this.files;
        } else if (this.files instanceof String) {
            final TypeReference<List<String>> reference = new TypeReference<>() {};
            finalFiles = JacksonMapper.ofJson(false).readValue(
                runContext.render((String) this.files),
                reference
            );
        } else {
            throw new Exception("Invalid `files` properties with type '" + this.files.getClass() + "'");
        }
        finalFiles.forEach(throwConsumer(s -> {
            URI from = new URI(runContext.render(s));
            try (InputStream inputStream = runContext.storage().getFile(from)) {
                IOUtils.copyLarge(inputStream, fileOutputStream);
            }
            if (separator != null) {
                IOUtils.copy(new ByteArrayInputStream(this.separator.getBytes()), fileOutputStream);
            }
        }));
    }
    return Concat.Output.builder()
        .uri(runContext.storage().putFile(tempFile))
        .build();
}
// Exercises the shared run(...) helper with the list-of-URIs variant
// (false presumably selects the List form over the JSON string — confirm).
@Test
void list() throws Exception {
    this.run(false);
}
/**
 * Builds one global ShardingSphereRule per configured rule, in the order
 * produced by {@code getRuleBuilderMap}.
 *
 * @param globalRuleConfigs global rule configurations
 * @param databases         databases keyed by name
 * @param props             configuration properties
 * @return the built rules
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public static Collection<ShardingSphereRule> buildRules(final Collection<RuleConfiguration> globalRuleConfigs,
                                                        final Map<String, ShardingSphereDatabase> databases,
                                                        final ConfigurationProperties props) {
    final Collection<ShardingSphereRule> builtRules = new LinkedList<>();
    getRuleBuilderMap(globalRuleConfigs)
            .forEach((ruleConfig, ruleBuilder) -> builtRules.add(ruleBuilder.build(ruleConfig, databases, props)));
    return builtRules;
}
// Building global rules from a fixture configuration should yield the matching
// fixture rule implementation as the first (and only) built rule.
@Test
void assertBuildRulesClassType() {
    Collection<ShardingSphereRule> shardingSphereRules = GlobalRulesBuilder
        .buildRules(Collections.singletonList(new FixtureGlobalRuleConfiguration()),
            Collections.singletonMap("logic_db", buildDatabase()), mock(ConfigurationProperties.class));
    assertTrue(shardingSphereRules.toArray()[0] instanceof FixtureGlobalRule);
}
/**
 * Converts previously unencrypted secret fields of matching input documents to
 * {@link EncryptedValue} form, then records the migration as completed.
 *
 * <p>Work is skipped entirely when the stored completion marker already matches
 * the current set of encrypted fields per input type.
 */
@Override
public void upgrade() {
    Map<String, Set<String>> encryptedFieldsByInputType = getEncryptedFieldsByInputType();
    if (getMigratedField().equals(encryptedFieldsByInputType)) {
        LOG.debug("Migration already completed.");
        return;
    }
    final MongoCollection<Document> collection = getCollection();
    // Only inspect documents whose input type has at least one encrypted field.
    final FindIterable<Document> documents = collection.find(in(FIELD_TYPE, encryptedFieldsByInputType.keySet()));
    documents.forEach(doc -> {
        @SuppressWarnings("unchecked")
        final Map<String, Object> config = new HashMap<>((Map<String, Object>) doc.getOrDefault(FIELD_CONFIGURATION, Map.of()));
        final Set<String> encryptedFields = encryptedFieldsByInputType.getOrDefault((String) doc.get(FIELD_TYPE), Set.of());
        encryptedFields.forEach(fieldName -> {
            final Object value = config.get(fieldName);
            // Assume that in case of a Map, the value is already encrypted and doesn't need conversion.
            if (config.containsKey(fieldName) && !(value instanceof Map)) {
                final EncryptedValue encryptedValue = objectMapper.convertValue(value, EncryptedValue.class);
                config.put(fieldName, dbObjectMapper.convertValue(encryptedValue, TypeReferences.MAP_STRING_OBJECT));
            }
        });
        collection.updateOne(eq(FIELD_ID, doc.getObjectId(FIELD_ID)), Updates.set(FIELD_CONFIGURATION, config));
    });
    saveMigrationCompleted(encryptedFieldsByInputType);
}
// After the migration runs, the previously plain-text secret field should be
// stored as an encrypted-value map that decrypts back to the original string.
@SuppressWarnings("unchecked")
@Test
public void migrateUnencryptedSecret() {
    migration.upgrade();
    final Document migrated = collection.find(Filters.eq(FIELD_TITLE, "unencrypted-secret")).first();
    assertThat(migrated).isNotNull().satisfies(doc ->
        assertThat((Map<String, Object>) doc.get(FIELD_CONFIGURATION)).satisfies(config -> {
            final Object sourceValue = config.get(ENCRYPTED_FIELD);
            assertThat(sourceValue).isInstanceOf(Map.class);
            final EncryptedValue encryptedValue = dbObjectMapper.convertValue(sourceValue, EncryptedValue.class);
            assertThat(encryptedValueService.decrypt(encryptedValue)).isEqualTo("X-Encrypted-Header: secret");
        })
    );
}
/**
 * Reads the attributes of a file, bucket, or in-progress multipart upload.
 *
 * <p>Buckets only get their region resolved; upload placeholders report the
 * current append offset; regular objects are resolved via object details, with
 * special handling for delete markers (HTTP 405), latest-version detection for
 * versioned objects, and a common-prefix fallback for directory placeholders.
 *
 * @throws NotfoundException if the file (or upload) does not exist
 * @throws BackgroundException on any other service failure
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if(containerService.isContainer(file)) {
        final PathAttributes attributes = new PathAttributes();
        if(log.isDebugEnabled()) {
            log.debug(String.format("Read location for bucket %s", file));
        }
        attributes.setRegion(new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(file).getIdentifier());
        return attributes;
    }
    if(file.getType().contains(Path.Type.upload)) {
        // Pending multipart upload: report the already-uploaded size as the offset.
        final Write.Append append = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl).append(file, new TransferStatus());
        if(append.append) {
            return new PathAttributes().withSize(append.offset);
        }
        throw new NotfoundException(file.getAbsolute());
    }
    try {
        PathAttributes attr;
        final Path bucket = containerService.getContainer(file);
        try {
            attr = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getVersionedObjectDetails(
                file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
        }
        catch(ServiceException e) {
            switch(e.getResponseCode()) {
                case 405:
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Mark file %s as delete marker", file));
                    }
                    // Only DELETE method is allowed for delete markers
                    attr = new PathAttributes();
                    attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
                    attr.setDuplicate(true);
                    return attr;
            }
            throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
        }
        if(StringUtils.isNotBlank(attr.getVersionId())) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Determine if %s is latest version for %s", attr.getVersionId(), file));
            }
            // Determine if latest version
            try {
                final String latest = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getObjectDetails(
                    bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))).getVersionId();
                if(null != latest) {
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Found later version %s for %s", latest, file));
                    }
                    // Duplicate if not latest version
                    attr.setDuplicate(!latest.equals(attr.getVersionId()));
                }
            }
            catch(ServiceException e) {
                // A missing latest version implies this version is a duplicate.
                final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
                if(failure instanceof NotfoundException) {
                    attr.setDuplicate(true);
                }
                else {
                    throw failure;
                }
            }
        }
        return attr;
    }
    catch(NotfoundException e) {
        if(file.isDirectory()) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Search for common prefix %s", file));
            }
            // File may be marked as placeholder but no placeholder file exists. Check for common prefix returned.
            try {
                new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
            }
            catch(ListCanceledException l) {
                // Found common prefix
                return PathAttributes.EMPTY;
            }
            catch(NotfoundException n) {
                throw e;
            }
            // Found common prefix
            return PathAttributes.EMPTY;
        }
        throw e;
    }
}
// A directory placeholder backed only by a common prefix should be findable
// while a child object exists, and report not-found once the child is deleted.
@Test
public void testFindCommonPrefix() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(container));
    final String prefix = new AlphanumericRandomStringService().random();
    final Path test = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(
        new Path(new Path(container, prefix, EnumSet.of(Path.Type.directory)),
            new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertNotNull(new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(test));
    assertNotNull(new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(new Path(container, prefix, EnumSet.of(Path.Type.directory))));
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    try {
        new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(test);
        fail();
    }
    catch(NotfoundException e) {
        // Expected
    }
    try {
        new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(new Path(container, prefix, EnumSet.of(Path.Type.directory)));
        fail();
    }
    catch(NotfoundException e) {
        // Expected
    }
}
public boolean initWithCommittedOffsetsIfNeeded(Timer timer) { final Set<TopicPartition> initializingPartitions = subscriptions.initializingPartitions(); final Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(initializingPartitions, timer); // "offsets" will be null if the offset fetch requests did not receive responses within the given timeout if (offsets == null) return false; refreshCommittedOffsets(offsets, this.metadata, this.subscriptions); return true; }
// An UNSTABLE_OFFSET_COMMIT response must leave the partition initializing;
// a subsequent successful fetch then installs the committed position (100).
@Test
public void testRefreshOffsetWithPendingTransactions() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    subscriptions.assignFromUser(singleton(t1p));
    client.prepareResponse(offsetFetchResponse(t1p, Errors.UNSTABLE_OFFSET_COMMIT, "", -1L));
    client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L));
    assertEquals(Collections.singleton(t1p), subscriptions.initializingPartitions());
    // First attempt hits the unstable-commit error and must not assign a position.
    coordinator.initWithCommittedOffsetsIfNeeded(time.timer(0L));
    assertEquals(Collections.singleton(t1p), subscriptions.initializingPartitions());
    coordinator.initWithCommittedOffsetsIfNeeded(time.timer(0L));
    assertEquals(Collections.emptySet(), subscriptions.initializingPartitions());
    assertTrue(subscriptions.hasAllFetchPositions());
    assertEquals(100L, subscriptions.position(t1p).offset);
}
@Override public Object invoke(MethodInvocation methodInvocation) throws Throwable { // 入栈 DataPermission dataPermission = this.findAnnotation(methodInvocation); if (dataPermission != null) { DataPermissionContextHolder.add(dataPermission); } try { // 执行逻辑 return methodInvocation.proceed(); } finally { // 出栈 if (dataPermission != null) { DataPermissionContextHolder.remove(); } } }
@Test // 在 Method 上有 @DataPermission 注解 public void testInvoke_method() throws Throwable { // 参数 mockMethodInvocation(TestMethod.class); // 调用 Object result = interceptor.invoke(methodInvocation); // 断言 assertEquals("method", result); assertEquals(1, interceptor.getDataPermissionCache().size()); assertFalse(CollUtil.getFirst(interceptor.getDataPermissionCache().values()).enable()); }
@VisibleForTesting // TODO(aksingh737,jzacsh) stop exposing this to unit tests public long importPhotos(Collection<PhotoModel> photos, GPhotosUpload gPhotosUpload) throws Exception { return gPhotosUpload.uploadItemsViaBatching(photos, this::importPhotoBatch); }
// A hash mismatch during upload should propagate as UploadErrorException,
// leave the photo uncached, and record an error detail mentioning the mismatch.
@Test
public void importOnePhotoWithHashMismatch() throws Exception {
    PhotoModel photoModel = new PhotoModel(
        PHOTO_TITLE, IMG_URI, PHOTO_DESCRIPTION, JPEG_MEDIA_TYPE, "oldPhotoID1", OLD_ALBUM_ID, false, SHA1);
    Mockito.when(googlePhotosInterface.uploadMediaContent(any(), eq(SHA1)))
        .thenThrow(new UploadErrorException("Hash mismatch will be thrown", new Throwable()));
    BatchMediaItemResponse batchMediaItemResponse = new BatchMediaItemResponse(
        new NewMediaItemResult[]{});
    Mockito.when(googlePhotosInterface.createPhotos(any(NewMediaItemUpload.class)))
        .thenReturn(batchMediaItemResponse);
    // No photo imported and will return a hash mismatch error for investigation.
    assertThrows(UploadErrorException.class, () ->
        googlePhotosImporter.importPhotos(Lists.newArrayList(photoModel),
            new GPhotosUpload(
                UUID.randomUUID(),
                executor,
                Mockito.mock(TokensAndUrlAuthData.class))));
    String failedDataId = String.format("%s-%s", OLD_ALBUM_ID, "oldPhotoID1");
    assertFalse(executor.isKeyCached(failedDataId));
    ErrorDetail errorDetail = executor.getErrors().iterator().next();
    assertEquals(failedDataId, errorDetail.id());
    assertThat(
        errorDetail.exception(), CoreMatchers.containsString("Hash mismatch"));
}
/**
 * Builds the shared-lock name for a resource, of the form
 * {@code lock::<namespace>::<kind>::<name>}.
 */
String getLockName(String namespace, String name) {
    return String.join("::", "lock", namespace, kind(), name);
}
@Test
/*
 * Verifies that the lock is released by a call to `releaseLockAndTimer`.
 * The call is made through a chain of futures ending with `eventually` after a normal/successful execution of the `Callable`
 */
void testWithLockCallableSuccessfulReleasesLock(VertxTestContext context) {
    var resourceOperator = new DefaultWatchableStatusedResourceOperator<>(vertx, null, "TestResource");
    @SuppressWarnings({ "unchecked", "rawtypes" })
    var target = new DefaultOperator(vertx, "Test", resourceOperator,
        new MicrometerMetricsProvider(BackendRegistries.getDefaultNow()), null);
    Reconciliation reconciliation = new Reconciliation("test", "TestResource", "my-namespace", "my-resource");
    String lockName = target.getLockName(reconciliation);
    Checkpoint callableSucceeded = context.checkpoint();
    Checkpoint lockObtained = context.checkpoint();
    @SuppressWarnings("unchecked")
    Future<String> result = target.withLockTest(reconciliation, () -> Future.succeededFuture("OK"));
    Promise<Void> successHandlerCalled = Promise.promise();
    result.onComplete(context.succeeding(v -> context.verify(() -> {
        assertThat(v, is("OK"));
        successHandlerCalled.complete();
        callableSucceeded.flag();
    })));
    // Re-acquiring the same lock proves the operator released it after success.
    successHandlerCalled.future()
        .compose(v -> vertx.sharedData().getLockWithTimeout(lockName, 10000L))
        .onComplete(context.succeeding(lock -> context.verify(() -> {
            assertThat(lock, instanceOf(Lock.class));
            lock.release();
            lockObtained.flag();
        })));
}
/** Whether the client connection is secure; defaults to {@code false} when the property is unset. */
@Override
public boolean isSecure() {
    return clientConfig.getPropertyAsBoolean(IClientConfigKey.Keys.IsSecure, false);
}
// Setting IsSecure on the client config should override the false default.
@Test
void testIsSecureOverride() {
    clientConfig.set(IClientConfigKey.Keys.IsSecure, true);
    assertTrue(connectionPoolConfig.isSecure());
}
/**
 * Parses a subtask index from its string form.
 *
 * <p>Zero is a valid subtask index, so the check (and the error message)
 * require a non-negative value, not a strictly positive one.
 *
 * @param value the string to parse
 * @return the parsed, non-negative subtask index
 * @throws ConversionException if the value is negative
 * @throws NumberFormatException if the value is not a parseable integer
 */
@Override
protected Integer convertFromString(final String value) throws ConversionException {
    final int subtaskIndex = Integer.parseInt(value);
    if (subtaskIndex >= 0) {
        return subtaskIndex;
    } else {
        // Fixed: the previous message claimed "positive" although 0 is accepted.
        throw new ConversionException("subtaskindex must be non-negative, was: " + subtaskIndex);
    }
}
// The largest int value should round-trip through the string converter.
@Test
void testConversionFromString() throws Exception {
    assertThat(subtaskIndexPathParameter.convertFromString("2147483647"))
        .isEqualTo(Integer.MAX_VALUE);
}
/**
 * Injects plugin defaults into the flow, logging (rather than propagating) any
 * failure against the given execution and returning the flow unchanged in that
 * case.
 *
 * @param flow flow to enrich with defaults
 * @param execution execution used to scope failure log entries
 * @return the flow with defaults injected, or the original flow on failure
 */
public Flow injectDefaults(Flow flow, Execution execution) {
    try {
        return this.injectDefaults(flow);
    } catch (Exception e) {
        // Best-effort: surface the failure in the execution's logs instead of failing the call.
        RunContextLogger
            .logEntries(
                Execution.loggingEventFromException(e),
                LogEntry.of(execution)
            )
            .forEach(logQueue::emitAsync);
        return flow;
    }
}
// Prefix-matched defaults (package-prefix "io.kestra.core.services.") should
// apply to the task, while explicitly-set task values take precedence over
// type-exact defaults.
@Test
public void prefix() {
    DefaultTester task = DefaultTester.builder()
        .id("test")
        .type(DefaultTester.class.getName())
        .set(666)
        .build();
    Flow flow = Flow.builder()
        .triggers(List.of(
            DefaultTriggerTester.builder()
                .id("trigger")
                .type(DefaultTriggerTester.class.getName())
                .conditions(List.of(ExpressionCondition.builder()
                    .type(ExpressionCondition.class.getName())
                    .build())
                )
                .build()
        ))
        .tasks(Collections.singletonList(task))
        .pluginDefaults(List.of(
            new PluginDefault(DefaultTester.class.getName(), false, ImmutableMap.of(
                "set", 789
            )),
            new PluginDefault("io.kestra.core.services.", false, ImmutableMap.of(
                "value", 2,
                "set", 456,
                "arrays", Collections.singletonList(1)
            )),
            new PluginDefault("io.kestra.core.services2.", false, ImmutableMap.of(
                "value", 3
            ))
        ))
        .build();
    Flow injected = pluginDefaultService.injectDefaults(flow);
    // Explicit task value wins over both defaults; prefix default fills the gap.
    assertThat(((DefaultTester) injected.getTasks().getFirst()).getSet(), is(666));
    assertThat(((DefaultTester) injected.getTasks().getFirst()).getValue(), is(2));
}
/**
 * Decides whether a refresh should be attempted: the very first call always
 * succeeds, and subsequent calls succeed only once the refresh period has
 * elapsed since the last successful attempt.
 */
@Override
public boolean attempt() {
    // Within the refresh window (and not the first call): suppress the attempt.
    if (!mFirstAttempt && (CommonUtils.getCurrentMs() - mLastAttempTimeMs) <= mRefreshPeriodMs) {
        return false;
    }
    mLastAttempTimeMs = CommonUtils.getCurrentMs();
    mFirstAttempt = false;
    return true;
}
// attempt() succeeds on the first call, is suppressed within the refresh
// window, and succeeds again once the timeout (plus slack) has elapsed.
@Test
public void timeout() {
    final long timeoutMs = 500;
    final long slackMs = 200;
    TimeoutRefresh timeoutRefresh = new TimeoutRefresh(timeoutMs);
    // First check, should attempt
    assertTrue(timeoutRefresh.attempt());
    // Second check, should not attempt before refresh timeout
    assertFalse(timeoutRefresh.attempt());
    CommonUtils.sleepMs(timeoutMs);
    CommonUtils.sleepMs(slackMs);
    assertTrue(timeoutRefresh.attempt());
    assertFalse(timeoutRefresh.attempt());
}
/** Returns the configured order value (may be {@code null} if never set). */
public Integer getOrder() {
    return order;
}
// The default order should be the highest precedence defined by Spring's Ordered.
@Test
public void testDefaultOrder() {
    Assert.assertEquals(Ordered.HIGHEST_PRECEDENCE, properties.getOrder().intValue());
}
/**
 * Looks up a single key in the materialized state store on the given
 * partition, optionally bounded by a consistency {@link Position}.
 *
 * @return a result with either one row (key present) or an empty iterator
 *     (key absent), each carrying the store's position
 * @throws NotUpToBoundException if the store has not reached the position bound
 * @throws MaterializationException on query failure or any unexpected error
 */
@Override
public KsMaterializedQueryResult<Row> get(
    final GenericKey key,
    final int partition,
    final Optional<Position> position
) {
    try {
        final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);
        StateQueryRequest<ValueAndTimestamp<GenericRow>> request =
            inStore(stateStore.getStateStoreName())
                .withQuery(query)
                .withPartitions(ImmutableSet.of(partition));
        if (position.isPresent()) {
            request = request.withPositionBound(PositionBound.at(position.get()));
        }
        final StateQueryResult<ValueAndTimestamp<GenericRow>> result =
            stateStore.getKafkaStreams().query(request);
        final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
            result.getPartitionResults().get(partition);
        // Some of these failures are retriable, and in the future, we may want to retry
        // locally before throwing.
        if (queryResult.isFailure()) {
            throw failedQueryException(queryResult);
        } else if (queryResult.getResult() == null) {
            // Key not present: empty iterator, but still report the store position.
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                Collections.emptyIterator(), queryResult.getPosition());
        } else {
            final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
                    .iterator(),
                queryResult.getPosition());
        }
    } catch (final NotUpToBoundException | MaterializationException e) {
        // Already the right exception types — rethrow untouched.
        throw e;
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}
// A null query result for the key should surface as an empty row iterator.
@Test
public void shouldReturnEmptyIfKeyNotPresent() {
    // Given:
    when(kafkaStreams.query(any())).thenReturn(getRowResult(null));
    // When:
    final Iterator<Row> result = table.get(A_KEY, PARTITION).rowIterator;
    // Then:
    assertThat(result, is(Collections.emptyIterator()));
}
/**
 * Resolves this node's rack id from the configured topology label and writes it
 * to the rack-id init file.
 *
 * @return {@code true} if the rack id was found and written, {@code false} if
 *     the node lacks the topology label or the write fails
 */
public boolean writeRack() {
    Map<String, String> nodeLabels = client.nodes().withName(config.getNodeName()).get().getMetadata().getLabels();
    LOGGER.info("NodeLabels = {}", nodeLabels);
    String rackId = nodeLabels.get(config.getRackTopologyKey());
    LOGGER.info("Rack: {} = {}", config.getRackTopologyKey(), rackId);
    if (rackId == null) {
        LOGGER.error("Node {} doesn't have the label {} for getting the rackid",
            config.getNodeName(), config.getRackTopologyKey());
        return false;
    }
    return write(FILE_RACK_ID, rackId);
}
// Pointing the init folder at a non-existent path should make the rack write
// fail (the writer hits an IOException and returns false).
@Test
public void testWriteRackFailsWhenInitFolderDoesNotExist() {
    // specify a not existing folder for emulating IOException in the rack writer
    Map<String, String> envVars = new HashMap<>(ENV_VARS);
    envVars.put(InitWriterConfig.INIT_FOLDER.key(), "/no-folder");
    InitWriterConfig config = InitWriterConfig.fromMap(envVars);
    KubernetesClient client = mockKubernetesClient(config.getNodeName(), LABELS, ADDRESSES);
    InitWriter writer = new InitWriter(client, config);
    assertThat(writer.writeRack(), is(false));
}
/** Not supported by this adapter; always throws {@link MethodNotAvailableException}. */
@Override
@MethodNotAvailable
public LocalMapStats getLocalMapStats() {
    throw new MethodNotAvailableException();
}
// The adapter advertises getLocalMapStats() as unavailable, so calling it
// must throw MethodNotAvailableException.
@Test(expected = MethodNotAvailableException.class)
public void testGetLocalMapStats() {
    adapter.getLocalMapStats();
}
/** Returns the shared {@link Gson} instance (lazily held via the holder idiom). */
public static Gson instance() {
    return SingletonHolder.INSTANCE;
}
// Charset deserialization should accept both canonical names and aliases
// (e.g. "ascii" for US-ASCII).
@Test
void deserializesCharsets() {
    assertEquals(StandardCharsets.UTF_8, Serialization.instance().fromJson("UTF-8", Charset.class));
    assertEquals(StandardCharsets.US_ASCII, Serialization.instance().fromJson("ascii", Charset.class));
}
/**
 * Loads the given artifact with no exclusions.
 *
 * @param artifact artifact coordinates to resolve
 * @return the resolved files
 * @throws RepositoryException if resolution fails
 */
public List<File> load(String artifact) throws RepositoryException {
    return load(artifact, new LinkedList<>());
}
// Exercises artifact resolution end-to-end: plain load, load with exclusions,
// loads from dynamically added repositories, and failure on an invalid artifact.
@Test
void testLoad() throws Exception {
    // basic load
    resolver.load("com.databricks:spark-csv_2.10:1.3.0", testCopyPath);
    assertEquals(4, testCopyPath.list().length);
    FileUtils.cleanDirectory(testCopyPath);
    // load with exclusions parameter
    resolver.load("com.databricks:spark-csv_2.10:1.3.0",
        Collections.singletonList("org.scala-lang:scala-library"), testCopyPath);
    assertEquals(3, testCopyPath.list().length);
    FileUtils.cleanDirectory(testCopyPath);
    // load from added http repository
    resolver.addRepo("httpmvn", "http://insecure.repo1.maven.org/maven2/", false);
    resolver.load("com.databricks:spark-csv_2.10:1.3.0", testCopyPath);
    assertEquals(4, testCopyPath.list().length);
    FileUtils.cleanDirectory(testCopyPath);
    resolver.delRepo("httpmvn");
    // load from added repository
    resolver.addRepo("sonatype", "https://oss.sonatype.org/content/repositories/ksoap2-android-releases/", false);
    resolver.load("com.google.code.ksoap2-android:ksoap2-jsoup:3.6.3", testCopyPath);
    assertEquals(10, testCopyPath.list().length);
    // load invalid artifact
    assertThrows(RepositoryException.class, () -> {
        resolver.delRepo("sonatype");
        resolver.load("com.agimatec:agimatec-validation:0.12.0", testCopyPath);
    });
}
/** This factory bean always produces a singleton. */
@Override
public boolean isSingleton() {
    return true;
}
// The factory bean contract requires isSingleton() to be true unconditionally.
@Test
public final void infinispanEmbeddedCacheManagerFactoryBeanShouldDeclareItselfToOnlyProduceSingletons() {
    final InfinispanEmbeddedCacheManagerFactoryBean objectUnderTest = new InfinispanEmbeddedCacheManagerFactoryBean();
    assertTrue("isSingleton() should always return true. However, it returned false",
        objectUnderTest.isSingleton());
}
/**
 * Generates the Java source map for the given tree-model compilation DTO,
 * wrapping any failure in a {@link KiePMMLException}.
 */
@Override
public Map<String, String> getSourcesMap(final CompilationDTO<TreeModel> compilationDTO) {
    logger.trace("getKiePMMLModelWithSources {} {} {} {}",
        compilationDTO.getPackageName(),
        compilationDTO.getFields(),
        compilationDTO.getModel(),
        compilationDTO.getPmmlContext());
    try {
        return KiePMMLTreeModelFactory.getKiePMMLTreeModelSourcesMap(TreeCompilationDTO.fromCompilationDTO(compilationDTO));
    } catch (Exception e) {
        throw new KiePMMLException(e);
    }
}
// The generated sources map must be non-empty and compile into classes that
// are all Serializable.
@Test
void getKiePMMLModelWithSources() {
    TreeModel treeModel = (TreeModel) pmml.getModels().get(0);
    final CommonCompilationDTO<TreeModel> compilationDTO =
        CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, treeModel,
            new PMMLCompilationContextMock(), SOURCE_1);
    final KiePMMLModelWithSources retrieved = PROVIDER.getKiePMMLModelWithSources(compilationDTO);
    assertThat(retrieved).isNotNull();
    final Map<String, String> sourcesMap = retrieved.getSourcesMap();
    assertThat(sourcesMap).isNotNull();
    assertThat(sourcesMap).isNotEmpty();
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    try {
        final Map<String, Class<?>> compiled = KieMemoryCompiler.compile(sourcesMap, classLoader);
        for (Class<?> clazz : compiled.values()) {
            assertThat(clazz).isInstanceOf(Serializable.class);
        }
    } catch (Throwable t) {
        fail(t.getMessage());
    }
}
/**
 * Returns the recycled partition with the given id, or {@code null} when no
 * such partition is currently in the recycle bin.
 */
public synchronized Partition getPartition(long partitionId) {
    RecyclePartitionInfo recycled = idToPartition.get(partitionId);
    return recycled == null ? null : recycled.getPartition();
}
// Recycled partitions should be retrievable by id, and the bin should report
// their range, data property, replication number and in-memory flag.
@Test
public void testGetPartition() throws Exception {
    CatalogRecycleBin bin = new CatalogRecycleBin();
    List<Column> columns = Lists.newArrayList(new Column("k1", ScalarType.createVarcharType(10)));
    Range<PartitionKey> range =
        Range.range(PartitionKey.createPartitionKey(Lists.newArrayList(new PartitionValue("1")), columns),
            BoundType.CLOSED,
            PartitionKey.createPartitionKey(Lists.newArrayList(new PartitionValue("3")), columns),
            BoundType.CLOSED);
    DataProperty dataProperty = new DataProperty(TStorageMedium.HDD);
    Partition partition = new Partition(1L, "pt", new MaterializedIndex(), null);
    bin.recyclePartition(new RecycleRangePartitionInfo(11L, 22L, partition, range,
        dataProperty, (short) 1, false, null));
    Partition partition2 = new Partition(2L, "pt", new MaterializedIndex(), null);
    bin.recyclePartition(new RecycleRangePartitionInfo(11L, 22L, partition2, range,
        dataProperty, (short) 1, false, null));
    Partition recycledPart = bin.getPartition(1L);
    Assert.assertNotNull(recycledPart);
    recycledPart = bin.getPartition(2L);
    Assert.assertEquals(2L, recycledPart.getId());
    Assert.assertEquals(range, bin.getPartitionRange(2L));
    Assert.assertEquals(dataProperty, bin.getPartitionDataProperty(2L));
    Assert.assertEquals((short) 1, bin.getPartitionReplicationNum(2L));
    Assert.assertFalse(bin.getPartitionIsInMemory(2L));
}
/**
 * Formats a byte count as a bits-per-second rate label (bytes * 8), clipped at
 * 100 G units (presumably 100 Gbps — see {@code clipG}).
 */
public static ValueLabel formatClippedBitRate(long bytes) {
    return new ValueLabel(bytes * 8, BITS_UNIT).perSec().clipG(100.0);
}
// 8 bytes should render as "64 bps" without triggering the 100 G clip.
@Test
public void formatClippedBitsSmall() {
    vl = TopoUtils.formatClippedBitRate(8);
    assertEquals(AM_WL, "64 bps", vl.toString());
    assertFalse(AM_CL, vl.clipped());
}
/**
 * REST endpoint deleting a queue by id; the service call raises on failure,
 * which the {@code @ApiException} mapping converts to an error result.
 */
@Operation(summary = "deleteQueueById", description = "DELETE_QUEUE_NOTES")
@Parameters({
    @Parameter(name = "id", description = "QUEUE_ID", required = true, schema = @Schema(implementation = int.class, example = "100"))
})
@DeleteMapping(value = "/{id}")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_QUEUE_BY_ID_ERROR)
public Result<Boolean> deleteQueueById(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                       @PathVariable(value = "id") int id) throws Exception {
    queueService.deleteQueueById(loginUser, id);
    return Result.success(true);
}
// Deleting a non-existent queue id should return HTTP 200 with a
// QUEUE_NOT_EXIST business error code in the result body.
@Test
public void testDeleteQueueById() throws Exception {
    MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
    paramsMap.add("id", "64");
    MvcResult mvcResult = mockMvc.perform(delete("/queues/{id}", 64)
            .header(SESSION_ID, sessionId)
            .params(paramsMap))
        .andExpect(status().isOk())
        .andExpect(content().contentType(MediaType.APPLICATION_JSON))
        .andReturn();
    Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
    Assertions.assertNotNull(result);
    Assertions.assertEquals(Status.QUEUE_NOT_EXIST.getCode(), result.getCode().intValue());
    logger.info("delete queue return result:{}", mvcResult.getResponse().getContentAsString());
}
/**
 * Loads the Flink configuration for a YARN ApplicationMaster, overlaying
 * environment-derived settings: JM/REST addresses from the NodeManager host,
 * random REST/web ports, deprecated YARN env-prefix substitution, Kerberos
 * keytab/principal (when both are present), and YARN-local tmp directories.
 *
 * @param workingDirectory working directory to load the base configuration from
 * @param dynamicParameters dynamic overrides merged into the configuration
 * @param env process environment (YARN/NM variables)
 * @return the assembled configuration
 * @throws IllegalStateException if the NodeManager host variable is not set
 */
public static Configuration loadConfiguration(
        String workingDirectory, Configuration dynamicParameters, Map<String, String> env) {
    final Configuration configuration =
        GlobalConfiguration.loadConfiguration(workingDirectory, dynamicParameters);
    final String keytabPrincipal = env.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
    final String hostname = env.get(ApplicationConstants.Environment.NM_HOST.key());
    Preconditions.checkState(
        hostname != null,
        "ApplicationMaster hostname variable %s not set",
        ApplicationConstants.Environment.NM_HOST.key());
    configuration.set(JobManagerOptions.ADDRESS, hostname);
    configuration.set(RestOptions.ADDRESS, hostname);
    configuration.set(RestOptions.BIND_ADDRESS, hostname);
    // if a web monitor shall be started, set the port to random binding
    if (configuration.get(WebOptions.PORT, 0) >= 0) {
        configuration.set(WebOptions.PORT, 0);
    }
    if (!configuration.contains(RestOptions.BIND_PORT)) {
        // set the REST port to 0 to select it randomly
        configuration.set(RestOptions.BIND_PORT, "0");
    }
    // if the user has set the deprecated YARN-specific config keys, we add the
    // corresponding generic config keys instead. that way, later code needs not
    // deal with deprecated config keys
    BootstrapTools.substituteDeprecatedConfigPrefix(
        configuration,
        ConfigConstants.YARN_APPLICATION_MASTER_ENV_PREFIX,
        ResourceManagerOptions.CONTAINERIZED_MASTER_ENV_PREFIX);
    BootstrapTools.substituteDeprecatedConfigPrefix(
        configuration,
        ConfigConstants.YARN_TASK_MANAGER_ENV_PREFIX,
        ResourceManagerOptions.CONTAINERIZED_TASK_MANAGER_ENV_PREFIX);
    final String keytabPath =
        Utils.resolveKeytabPath(workingDirectory, env.get(YarnConfigKeys.LOCAL_KEYTAB_PATH));
    // Kerberos settings are only applied when both keytab and principal resolve.
    if (keytabPath != null && keytabPrincipal != null) {
        configuration.set(SecurityOptions.KERBEROS_LOGIN_KEYTAB, keytabPath);
        configuration.set(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, keytabPrincipal);
    }
    final String localDirs = env.get(ApplicationConstants.Environment.LOCAL_DIRS.key());
    BootstrapTools.updateTmpDirectoriesInConfiguration(configuration, localDirs);
    return configuration;
}
// When the advertised keytab path does not resolve to a real file, neither the
// keytab nor the principal should be written into the configuration.
@Test
void testParsingKerberosEnvWithMissingKeytab() throws IOException {
    final Configuration initialConfiguration = new Configuration();
    Map<String, String> env = new HashMap<>();
    env.put(YarnConfigKeys.LOCAL_KEYTAB_PATH, "/hopefully/doesnt/exist");
    env.put(YarnConfigKeys.KEYTAB_PRINCIPAL, "starlord");
    Configuration configuration = loadConfiguration(initialConfiguration, env);
    // both keytab and principal should be null
    assertThat(configuration.get(SecurityOptions.KERBEROS_LOGIN_KEYTAB)).isNull();
    assertThat(configuration.get(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL)).isNull();
}
/**
 * Fetches a page of audit logs for the given tenant and customer, filtered by
 * action types and the page link's text search and time window.
 */
@Override
public PageData<AuditLog> findAuditLogsByTenantIdAndCustomerId(UUID tenantId, CustomerId customerId,
                                                               List<ActionType> actionTypes, TimePageLink pageLink) {
    return DaoUtil.toPageData(
        auditLogRepository
            .findAuditLogsByTenantIdAndCustomerId(
                tenantId,
                customerId.getId(),
                pageLink.getTextSearch(),
                pageLink.getStartTime(),
                pageLink.getEndTime(),
                actionTypes,
                DaoUtil.toPageable(pageLink)));
}
// Filtering by tenant, customer and ADDED action type should return the
// expected 15 pre-seeded audit logs.
@Test
public void testFindAuditLogsByTenantIdAndCustomerId() {
    List<AuditLog> foundedAuditLogs = auditLogDao.findAuditLogsByTenantIdAndCustomerId(
        tenantId, customerId1, List.of(ActionType.ADDED), new TimePageLink(20)).getData();
    checkFoundedAuditLogsList(foundedAuditLogs, 15);
}
/**
 * Validates a reservation update request: the reservation must exist in the
 * reservation system and its new definition must be valid for the plan.
 *
 * @return the plan the reservation belongs to
 * @throws YarnException if the reservation id is missing/unknown or the
 *     definition is invalid
 */
public Plan validateReservationUpdateRequest(
    ReservationSystem reservationSystem, ReservationUpdateRequest request)
    throws YarnException {
    ReservationId reservationId = request.getReservationId();
    Plan plan = validateReservation(reservationSystem, reservationId,
        AuditConstants.UPDATE_RESERVATION_REQUEST);
    validateReservationDefinition(reservationId,
        request.getReservationDefinition(), plan,
        AuditConstants.UPDATE_RESERVATION_REQUEST);
    return plan;
}
// An update request without a reservation id must be rejected with a
// "Missing reservation id" YarnException before any plan is resolved.
@Test
public void testUpdateReservationNoID() {
    ReservationUpdateRequest request = new ReservationUpdateRequestPBImpl();
    Plan plan = null;
    try {
        plan = rrValidator.validateReservationUpdateRequest(rSystem, request);
        Assert.fail();
    } catch (YarnException e) {
        Assert.assertNull(plan);
        String message = e.getMessage();
        Assert
            .assertTrue(message
                .startsWith("Missing reservation id. Please try again by specifying a reservation id."));
        LOG.info(message);
    }
}
/** Renders the image reference including its tag/digest qualifier. */
public String toStringWithQualifier() {
    return toString(true);
}
// Qualified rendering should default the tag to "latest", strip the implicit
// registry/library parts, and prefer a digest over a tag when both are given.
@Test
public void testToStringWithQualifier() {
    Assert.assertEquals(
        "someimage:latest", ImageReference.of(null, "someimage", null).toStringWithQualifier());
    Assert.assertEquals(
        "someimage:latest", ImageReference.of("", "someimage", "").toStringWithQualifier());
    Assert.assertEquals(
        "someotherimage:latest",
        ImageReference.of(null, "library/someotherimage", null).toStringWithQualifier());
    Assert.assertEquals(
        "someregistry/someotherimage:latest",
        ImageReference.of("someregistry", "someotherimage", null).toStringWithQualifier());
    Assert.assertEquals(
        "anotherregistry/anotherimage:sometag",
        ImageReference.of("anotherregistry", "anotherimage", "sometag").toStringWithQualifier());
    Assert.assertEquals(
        "anotherregistry/anotherimage@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
        ImageReference.of(
                "anotherregistry",
                "anotherimage",
                null,
                "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
            .toStringWithQualifier());
    Assert.assertEquals(
        "anotherregistry/anotherimage@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
        ImageReference.of(
                "anotherregistry",
                "anotherimage",
                "sometag",
                "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
            .toStringWithQualifier());
}
/**
 * Generates an Authentication Terminal (AT) certificate request signed by the
 * document type's DVCA key.
 *
 * <p>The inner self-signature always covers the certificate body; when a
 * {@code reference} subject is given, the result is additionally wrapped in a
 * request carrying an outer signature by that reference key (EACv2-style
 * re-keying). For DL documents the authorization is embedded (EACv2); for
 * other document types it is omitted (EACv1).
 *
 * @param documentType document type selecting the DVCA chain
 * @param authorization authorization to embed (DL only)
 * @param sequenceNo sequence number used to build the AT subject
 * @param reference outer-signature subject, or {@code null} for a plain
 *     (initial) certificate request
 * @return the DER-encoded certificate or certificate request
 * @throws ClientException if an AT certificate for the subject already exists
 */
public byte[] generateAtRequest(DocumentType documentType, PolymorphType authorization, String sequenceNo, String reference) {
    final Certificate dvca = getDvca(documentType);
    final String subject = getAtSubject(documentType, dvca.getSubject(), sequenceNo);
    if (repository.countBySubject(subject) != 0) {
        throw new ClientException("AT certificate of " + subject + " already present");
    }
    // Public key info: ECDSA/SHA-384 over brainpoolP320r1, key fetched or created in the HSM.
    final PublicKeyInfo keyInfo = new PublicKeyInfo();
    keyInfo.setOid(EACObjectIdentifiers.id_TA_ECDSA_SHA_384);
    keyInfo.setParams(BrainpoolP320r1.DOMAIN_PARAMS);
    keyInfo.setKey(signatureService.getOrGenerateKey(subject));
    final CvCertificate.Body body = new CvCertificate.Body();
    body.setCar(dvca.getSubject());
    body.setPublicKey(keyInfo);
    body.setChr(subject);
    if (documentType == DocumentType.DL) // use EACv2 for DL only
        body.setAuthorization(authorization);
    final CvCertificate cv = new CvCertificate();
    body.setRaw(mapper.write(body));
    cv.setBody(body);
    // Inner (self) signature over the certificate body.
    final EcSignature inner = new EcSignature(signatureService.sign(cv, subject, true));
    cv.setSignature(inner);
    if (reference == null) {
        return mapper.write(cv);
    }
    // Outer signature by the reference subject wraps the certificate in a request.
    CvCertificateRequest req = new CvCertificateRequest();
    cv.setRaw(mapper.write(cv));
    req.setCertificate(cv);
    req.setCar(reference);
    final EcSignature outer = new EcSignature(signatureService.sign(req, reference, true));
    req.setSignature(outer);
    return mapper.write(req);
}
// For a NIK document (non-DL), the generated AT request should be EACv1:
// no embedded authorization and no outer signature (reference == null).
@Test
public void shouldGenerateEACV1ATRequest() throws Exception {
    final HsmClient.KeyInfo keyInfo = new HsmClient.KeyInfo();
    keyInfo.setPublicKey(Hex.decode("04"
        + "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"
        + "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"
    ));
    Mockito.doReturn(keyInfo).when(hsmClient).keyInfo(
        Mockito.eq("AT"),
        Mockito.eq("SSSSSSSSSSSS")
    );
    final byte[] TBS = Base64.decode(
        "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"
        + "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"
        + "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"
        + "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"
        + "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"
        + "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"
        + "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS");
    Mockito.doReturn(signature("SSSSSSSSSSSS")).when(hsmClient).sign(
        Mockito.eq("AT"),
        Mockito.eq("SSSSSSSSSSSS"),
        AdditionalMatchers.aryEq(TBS),
        Mockito.eq(true)
    );
    certificateRepo.save(loadCvCertificate("SSSSSSSSSSSSSSSSSSSSSSSSSSSSS", true));
    certificateRepo.save(loadCvCertificate("SSSSSSSSSSSSSSSSSSSSSSSSSSS", false));
    final byte[] der = service.generateAtRequest(DocumentType.NIK, PolymorphType.PIP, "00001", null);
    final CvCertificate at = mapper.read(der, CvCertificate.class);
    verifyAt(at, "SSSSSSSSSSSS", "SSSSSSSSSSSS", false);
}
/**
 * Tests the given input against this matcher's compiled glob pattern.
 *
 * @param input the string to test
 * @return {@code true} if {@code input} matches the pattern
 */
public boolean matches(String input) {
    return MATCHER.matches(input, pattern);
}
/** An upper-case input matches a lower-case glob, so matching is case-insensitive. */
@Test
public void testMatchesOnExactStringLowerCase() throws Exception {
    GlobMatcher matcher = new GlobMatcher("aabbcc");
    assertTrue(matcher.matches("AABBCC"));
    assertFalse(matcher.matches("FFFF"));
}
/**
 * Tests whether a string carries no characters.
 *
 * @param s the string to inspect; may be {@code null}
 * @return {@code true} if {@code s} is {@code null} or has zero length
 */
public static boolean empty(String s) {
    // String.isEmpty() is the idiomatic (and allocation-free) form of equals("").
    return s == null || s.isEmpty();
}
/** Exercises empty() with null, the empty string, and strings of length one to three. */
@Test
public void testEmpty() {
    assertTrue(LogUtils.empty(null));
    assertTrue(LogUtils.empty(""));
    assertFalse(LogUtils.empty("f"));
    assertFalse(LogUtils.empty("fo"));
    assertFalse(LogUtils.empty("foo"));
}
/**
 * Evaluates this (inclusive) metrics expression against a content file's
 * column-level metrics.
 *
 * @param file the data/delete file whose metrics are inspected
 * @return {@code true} if the file may contain rows matching the expression
 */
public boolean eval(ContentFile<?> file) {
    // TODO: detect the case where a column is missing from the file using file's max field id.
    return new MetricsEvalVisitor().eval(file);
}
/** isNaN() must read files that may hold NaNs and skip only files provably NaN-free. */
@Test
public void testIsNaN() {
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNaN("all_nans")).eval(FILE);
    assertThat(shouldRead).as("Should read: at least one nan value in all nan column").isTrue();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNaN("some_nans")).eval(FILE);
    assertThat(shouldRead).as("Should read: at least one nan value in some nan column").isTrue();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNaN("no_nans")).eval(FILE);
    assertThat(shouldRead).as("Should skip: no-nans column contains no nan values").isFalse();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNaN("all_nulls_double")).eval(FILE);
    assertThat(shouldRead).as("Should skip: all-null column doesn't contain nan value").isFalse();
    // Without NaN statistics the evaluator must be conservative and read the file.
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNaN("no_nan_stats")).eval(FILE);
    assertThat(shouldRead)
        .as("Should read: no guarantee on if contains nan value without nan stats")
        .isTrue();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNaN("all_nans_v1_stats")).eval(FILE);
    assertThat(shouldRead).as("Should read: at least one nan value in all nan column").isTrue();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNaN("nan_and_null_only")).eval(FILE);
    assertThat(shouldRead)
        .as("Should read: at least one nan value in nan and nulls only column")
        .isTrue();
}
/** @return this bundle's deferred-activation flag */
public boolean deferredActivation() {
    return this.deferredActivation;
}
/** deferredActivation(false) on the builder must be reflected by the built bundle. */
@Test
void can_build_bundle_with_deferred_activation_disabled() {
    var bundle = createTestBundleBuilder(false).deferredActivation(false).deriveAndBuild();
    assertFalse(bundle.deferredActivation());
}
/**
 * Tracks one event by handing the input data to the processing pipeline.
 *
 * @param input event name plus properties to be recorded
 */
@Override
public void trackEvent(InputData input) {
    process(input);
}
/** End-to-end track: the callback must observe the event name plus all auto-collected device properties. */
@Test
public void trackEvent() throws JSONException {
    initSensors();
    SensorsDataAPI.sharedInstance().setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            assertEquals("UnitTestTrack", eventName);
            // OS fields differ between stock Android and HarmonyOS devices.
            String version = DeviceUtils.getHarmonyOSVersion();
            if (TextUtils.isEmpty(version)) {
                assertEquals(eventProperties.opt("$os"), "Android");
                assertEquals(eventProperties.opt("$os_version"), DeviceUtils.getOS());
            } else {
                assertEquals(eventProperties.opt("$os"), "HarmonyOS");
                assertEquals(eventProperties.opt("$os_version"), version);
            }
            assertEquals(eventProperties.opt("$lib"), "Android");
            assertEquals(eventProperties.opt("$lib_version"), SensorsDataAPI.sharedInstance().getSDKVersion());
            assertEquals(eventProperties.opt("$manufacturer"), DeviceUtils.getManufacturer());
            assertEquals(eventProperties.opt("$model"), DeviceUtils.getModel());
            assertEquals(eventProperties.opt("$brand"), DeviceUtils.getBrand());
            assertEquals(eventProperties.opt("$app_version"), AppInfoUtils.getAppVersionName(mApplication));
            int[] size = DeviceUtils.getDeviceSize(mApplication);
            assertEquals(eventProperties.opt("$screen_width"), size[0]);
            assertEquals(eventProperties.opt("$screen_height"), size[1]);
            assertEquals(eventProperties.opt("$carrier"), SensorsDataUtils.getOperator(mApplication));
            assertEquals(eventProperties.opt("$timezone_offset"), TimeUtils.getZoneOffset());
            assertEquals(eventProperties.opt("$app_id"), AppInfoUtils.getProcessName(mApplication));
            Assert.assertTrue(eventProperties.optBoolean("$is_first_day"));
            return true;
        }
    });
    InputData inputData = new InputData();
    inputData.setEventName("UnitTestTrack");
    JSONObject jsonObject = new JSONObject();
    jsonObject.put("track", "track");
    inputData.setProperties(jsonObject);
    TrackEventProcessor eventProcessor = new TrackEventProcessor(SensorsDataAPI.sharedInstance().getSAContextManager());
    eventProcessor.trackEvent(inputData);
}
/**
 * Computes the strongly connected components of a directed graph via a
 * Tarjan-style depth-first search (visit indices, low-link values, and a
 * stack of vertices on the current DFS path).
 *
 * @param numVertex number of vertices; vertices are identified as 0..numVertex-1
 * @param outEdges adjacency lists: outEdges.get(v) holds the successors of v
 * @return the set of SCCs, each represented as a set of vertex ids
 */
static Set<Set<Integer>> computeStronglyConnectedComponents(
        final int numVertex, final List<List<Integer>> outEdges) {
    final Set<Set<Integer>> stronglyConnectedComponents = new HashSet<>();
    // a vertex will be added into this stack when it is visited for the first time
    final Deque<Integer> visitingStack = new ArrayDeque<>(numVertex);
    final boolean[] onVisitingStack = new boolean[numVertex];
    // stores the order that a vertex is visited for the first time, -1 indicates it is not
    // visited yet
    final int[] vertexIndices = new int[numVertex];
    Arrays.fill(vertexIndices, -1);
    final AtomicInteger indexCounter = new AtomicInteger(0);
    final int[] vertexLowLinks = new int[numVertex];
    // Restart the DFS from every not-yet-visited vertex so disconnected graphs are covered.
    for (int vertex = 0; vertex < numVertex; vertex++) {
        if (!isVisited(vertex, vertexIndices)) {
            dfs(
                vertex,
                outEdges,
                vertexIndices,
                vertexLowLinks,
                visitingStack,
                onVisitingStack,
                indexCounter,
                stronglyConnectedComponents);
        }
    }
    return stronglyConnectedComponents;
}
/** A graph with no edges decomposes into one singleton SCC per vertex. */
@Test
void testWithNoEdge() {
    final List<List<Integer>> edges =
        Arrays.asList(
            Collections.emptyList(),
            Collections.emptyList(),
            Collections.emptyList(),
            Collections.emptyList(),
            Collections.emptyList());
    final Set<Set<Integer>> result = computeStronglyConnectedComponents(5, edges);
    final Set<Set<Integer>> expected = new HashSet<>();
    expected.add(Collections.singleton(0));
    expected.add(Collections.singleton(1));
    expected.add(Collections.singleton(2));
    expected.add(Collections.singleton(3));
    expected.add(Collections.singleton(4));
    assertThat(result).isEqualTo(expected);
}
/**
 * Applies the given monitoring-info updates to the step's metrics container,
 * then invokes the single-argument overload for that step.
 *
 * @param stepName name identifying the step's metrics container
 * @param monitoringInfos updates to fold into the container
 */
public void updateMetrics(String stepName, List<MonitoringInfo> monitoringInfos) {
    getMetricsContainer(stepName).update(monitoringInfos);
    updateMetrics(stepName);
}
/** A USER_SUM_INT64 monitoring info must materialize as a meter whose rate is count/60. */
@Test
void testMeterMonitoringInfoUpdate() {
    String namespace =
        "[\"key\", \"value\", \"MetricGroupType.key\", \"MetricGroupType.value\", \"60\"]";
    MonitoringInfo userMonitoringInfo =
        new SimpleMonitoringInfoBuilder()
            .setUrn(MonitoringInfoConstants.Urns.USER_SUM_INT64)
            .setLabel(MonitoringInfoConstants.Labels.NAMESPACE, namespace)
            .setLabel(MonitoringInfoConstants.Labels.NAME, "myMeter")
            .setLabel(MonitoringInfoConstants.Labels.PTRANSFORM, "anyPTransform")
            .setInt64SumValue(111)
            .build();
    assertThat(metricGroup.get("myMeter")).isNull();
    container.updateMetrics("step", ImmutableList.of(userMonitoringInfo));
    MeterView userMeter = (MeterView) metricGroup.get("myMeter");
    userMeter.update();
    assertThat(userMeter.getCount()).isEqualTo(111L);
    assertThat(userMeter.getRate()).isEqualTo(1.85); // 111 div 60 = 1.85
}
/**
 * Builds the numactl bind arguments for the container's NUMA allocation.
 *
 * @param container container about to start
 * @return a single ADD_NUMA_PARAMS operation, or {@code null} when no NUMA
 *         allocation was made for the container
 * @throws ResourceHandlerException propagated from the allocator
 */
@Override
public List<PrivilegedOperation> preStart(Container container)
    throws ResourceHandlerException {
  NumaResourceAllocation numaAllocation =
      numaResourceAllocator.allocateNumaNodes(container);
  // No NUMA nodes assigned: nothing to do for this container.
  if (numaAllocation == null) {
    return null;
  }
  ArrayList<String> args = new ArrayList<>();
  args.add(numaCtlCmd);
  args.add("--interleave=" + String.join(",", numaAllocation.getMemNodes()));
  args.add("--cpunodebind=" + String.join(",", numaAllocation.getCpuNodes()));
  List<PrivilegedOperation> ret = new ArrayList<>();
  ret.add(new PrivilegedOperation(OperationType.ADD_NUMA_PARAMS, args));
  return ret;
}
/** Round-robin CPU/memory node assignment, node spanning, and exhaustion (null) behavior. */
@Test
public void testAllocateNumaCpusResource() throws ResourceHandlerException {
    // allocates node 0 for memory and cpu
    testAllocateNumaResource("container_1481156246874_0001_01_000001",
        Resource.newInstance(2048, 2), "0", "0");
    // allocates node 1 for memory and cpu since allocator uses round
    // robin assignment
    testAllocateNumaResource("container_1481156246874_0001_01_000002",
        Resource.newInstance(2048, 2), "1", "1");
    // allocates node 0,1 for cpus since there is are no sufficient cpus
    // available in any one node
    testAllocateNumaResource("container_1481156246874_0001_01_000003",
        Resource.newInstance(2048, 3), "0", "0,1");
    // returns null since there are no sufficient resources available for the
    // request
    when(mockContainer.getContainerId()).thenReturn(
        ContainerId.fromString("container_1481156246874_0001_01_000004"));
    when(mockContainer.getResource()).thenReturn(Resource.newInstance(2048, 2));
    assertNull(numaResourceHandler.preStart(mockContainer));
    // allocates node 1 for memory and cpu
    testAllocateNumaResource("container_1481156246874_0001_01_000005",
        Resource.newInstance(2048, 1), "1", "1");
}
public ProcessContinuation run( PartitionRecord partitionRecord, RestrictionTracker<StreamProgress, StreamProgress> tracker, OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver, ManualWatermarkEstimator<Instant> watermarkEstimator) throws IOException { BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator = new BytesThroughputEstimator<>(sizeEstimator, Instant.now()); // Lock the partition if (tracker.currentRestriction().isEmpty()) { boolean lockedPartition = metadataTableDao.lockAndRecordPartition(partitionRecord); // Clean up NewPartition on the first run regardless of locking result. If locking fails it // means this partition is being streamed, then cleaning up NewPartitions avoids lingering // NewPartitions. for (NewPartition newPartition : partitionRecord.getParentPartitions()) { metadataTableDao.deleteNewPartition(newPartition); } if (!lockedPartition) { LOG.info( "RCSP {} : Could not acquire lock with uid: {}, because this is a " + "duplicate and another worker is working on this partition already.", formatByteStringRange(partitionRecord.getPartition()), partitionRecord.getUuid()); StreamProgress streamProgress = new StreamProgress(); streamProgress.setFailToLock(true); metrics.decPartitionStreamCount(); tracker.tryClaim(streamProgress); return ProcessContinuation.stop(); } } else if (tracker.currentRestriction().getCloseStream() == null && !metadataTableDao.doHoldLock( partitionRecord.getPartition(), partitionRecord.getUuid())) { // We only verify the lock if we are not holding CloseStream because if this is a retry of // CloseStream we might have already cleaned up the lock in a previous attempt. // Failed correctness check on this worker holds the lock on this partition. This shouldn't // fail because there's a restriction tracker which means this worker has already acquired the // lock and once it has acquired the lock it shouldn't fail the lock check. LOG.warn( "RCSP {} : Subsequent run that doesn't hold the lock {}. 
This is not unexpected and " + "should probably be reviewed.", formatByteStringRange(partitionRecord.getPartition()), partitionRecord.getUuid()); StreamProgress streamProgress = new StreamProgress(); streamProgress.setFailToLock(true); metrics.decPartitionStreamCount(); tracker.tryClaim(streamProgress); return ProcessContinuation.stop(); } // Process CloseStream if it exists CloseStream closeStream = tracker.currentRestriction().getCloseStream(); if (closeStream != null) { LOG.debug("RCSP: Processing CloseStream"); metrics.decPartitionStreamCount(); if (closeStream.getStatus().getCode() == Status.Code.OK) { // We need to update watermark here. We're terminating this stream because we have reached // endTime. Instant.now is greater or equal to endTime. The goal here is // DNP will need to know this stream has passed the endTime so DNP can eventually terminate. Instant terminatingWatermark = Instant.ofEpochMilli(Long.MAX_VALUE); Instant endTime = partitionRecord.getEndTime(); if (endTime != null) { terminatingWatermark = endTime; } watermarkEstimator.setWatermark(terminatingWatermark); metadataTableDao.updateWatermark( partitionRecord.getPartition(), watermarkEstimator.currentWatermark(), null); LOG.info( "RCSP {}: Reached end time, terminating...", formatByteStringRange(partitionRecord.getPartition())); return ProcessContinuation.stop(); } if (closeStream.getStatus().getCode() != Status.Code.OUT_OF_RANGE) { LOG.error( "RCSP {}: Reached unexpected terminal state: {}", formatByteStringRange(partitionRecord.getPartition()), closeStream.getStatus()); return ProcessContinuation.stop(); } // Release the lock only if the uuid matches. In normal operation this doesn't change // anything. However, it's possible for this RCSP to crash while processing CloseStream but // after the side effects of writing the new partitions to the metadata table. New partitions // can be created while this RCSP restarts from the previous checkpoint and processes the // CloseStream again. 
In certain race scenarios the child partitions may merge back to this // partition, but as a new RCSP. The new partition (same as this partition) would write the // exact same content to the metadata table but with a different uuid. We don't want to // accidentally delete the StreamPartition because it now belongs to the new RCSP. // If the uuid is the same (meaning this race scenario did not take place) we release the lock // and mark the StreamPartition to be deleted, so we can delete it after we have written the // NewPartitions. metadataTableDao.releaseStreamPartitionLockForDeletion( partitionRecord.getPartition(), partitionRecord.getUuid()); // The partitions in the continuation tokens must cover the same key space as this partition. // If there's only 1 token, then the token's partition is equals to this partition. // If there are more than 1 tokens, then the tokens form a continuous row range equals to this // partition. List<ByteStringRange> childPartitions = new ArrayList<>(); List<ByteStringRange> tokenPartitions = new ArrayList<>(); // Check if NewPartitions field exists, if not we default to using just the // ChangeStreamContinuationTokens. 
boolean useNewPartitionsField = closeStream.getNewPartitions().size() == closeStream.getChangeStreamContinuationTokens().size(); for (int i = 0; i < closeStream.getChangeStreamContinuationTokens().size(); i++) { ByteStringRange childPartition; if (useNewPartitionsField) { childPartition = closeStream.getNewPartitions().get(i); } else { childPartition = closeStream.getChangeStreamContinuationTokens().get(i).getPartition(); } childPartitions.add(childPartition); ChangeStreamContinuationToken token = getTokenWithCorrectPartition( partitionRecord.getPartition(), closeStream.getChangeStreamContinuationTokens().get(i)); tokenPartitions.add(token.getPartition()); metadataTableDao.writeNewPartition( new NewPartition( childPartition, Collections.singletonList(token), watermarkEstimator.getState())); } LOG.info( "RCSP {}: Split/Merge into {}", formatByteStringRange(partitionRecord.getPartition()), partitionsToString(childPartitions)); if (!coverSameKeySpace(tokenPartitions, partitionRecord.getPartition())) { LOG.warn( "RCSP {}: CloseStream has tokens {} that don't cover the entire keyspace", formatByteStringRange(partitionRecord.getPartition()), partitionsToString(tokenPartitions)); } // Perform the real cleanup. This step is no op if the race mentioned above occurs (splits and // merges results back to this partition again) because when we register the "new" partition, // we unset the deletion bit. metadataTableDao.deleteStreamPartitionRow(partitionRecord.getPartition()); return ProcessContinuation.stop(); } // Update the metadata table with the watermark metadataTableDao.updateWatermark( partitionRecord.getPartition(), watermarkEstimator.getState(), tracker.currentRestriction().getCurrentToken()); // Start to stream the partition. 
ServerStream<ChangeStreamRecord> stream = null; try { stream = changeStreamDao.readChangeStreamPartition( partitionRecord, tracker.currentRestriction(), partitionRecord.getEndTime(), heartbeatDuration); for (ChangeStreamRecord record : stream) { Optional<ProcessContinuation> result = changeStreamAction.run( partitionRecord, record, tracker, receiver, watermarkEstimator, throughputEstimator); // changeStreamAction will usually return Optional.empty() except for when a checkpoint // (either runner or pipeline initiated) is required. if (result.isPresent()) { return result.get(); } } } catch (Exception e) { throw e; } finally { if (stream != null) { stream.cancel(); } } return ProcessContinuation.resume(); }
/** On a non-first run with a held lock, the action must not re-lock nor tryClaim, only stream. */
@Test
public void testLockingRowNotNeededAfterFirstRun() throws IOException {
    when(metadataTableDao.lockAndRecordPartition(partitionRecord)).thenReturn(false);
    // After the first run, we don't try to lock but we check if the lock owner is the same.
    when(restriction.isEmpty()).thenReturn(false);
    when(metadataTableDao.doHoldLock(partition, uuid)).thenReturn(true);
    final ServerStream<ChangeStreamRecord> responses = mock(ServerStream.class);
    final Iterator<ChangeStreamRecord> responseIterator = mock(Iterator.class);
    when(responses.iterator()).thenReturn(responseIterator);
    Heartbeat mockHeartBeat = Mockito.mock(Heartbeat.class);
    when(responseIterator.next()).thenReturn(mockHeartBeat);
    when(responseIterator.hasNext()).thenReturn(true);
    when(changeStreamDao.readChangeStreamPartition(any(), any(), any(), any()))
        .thenReturn(responses);
    when(changeStreamAction.run(any(), any(), any(), any(), any(), any()))
        .thenReturn(Optional.of(DoFn.ProcessContinuation.stop()));
    final DoFn.ProcessContinuation result =
        action.run(partitionRecord, tracker, receiver, watermarkEstimator);
    assertEquals(DoFn.ProcessContinuation.stop(), result);
    // Verify that on successful lock, we don't tryClaim on the tracker.
    verify(tracker, never()).tryClaim(any());
    verify(changeStreamAction).run(any(), any(), any(), any(), any(), any());
}
/**
 * Loads a tenant package and validates that it exists and is enabled.
 * <p>
 * Throws the service exception TENANT_PACKAGE_NOT_EXISTS when the id is
 * unknown, and TENANT_PACKAGE_DISABLE (with the package name) when the
 * package is disabled.
 *
 * @param id tenant package id
 * @return the existing, enabled package
 */
@Override
public TenantPackageDO validTenantPackage(Long id) {
    TenantPackageDO tenantPackage = tenantPackageMapper.selectById(id);
    if (tenantPackage == null) {
        throw exception(TENANT_PACKAGE_NOT_EXISTS);
    }
    if (tenantPackage.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) {
        throw exception(TENANT_PACKAGE_DISABLE, tenantPackage.getName());
    }
    return tenantPackage;
}
/** Validating a disabled tenant package must raise TENANT_PACKAGE_DISABLE with the package name. */
@Test
public void testValidTenantPackage_disable() {
    // mock data
    TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class,
        o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()));
    tenantPackageMapper.insert(dbTenantPackage);// @Sql: insert an existing row first
    // invoke and assert the expected service exception
    assertServiceException(() -> tenantPackageService.validTenantPackage(dbTenantPackage.getId()),
        TENANT_PACKAGE_DISABLE, dbTenantPackage.getName());
}
/**
 * Resolves the effective context path, repairing it into a normalized form.
 *
 * @param contextPath explicitly configured context path, possibly empty
 * @param appName application name used as the fallback path
 * @return the repaired context path (or repaired app name when none is set)
 */
public static String buildContextPath(final String contextPath, final String appName) {
    // Fall back to the application name when no explicit context path is configured.
    final String rawPath;
    if (StringUtils.isEmpty(contextPath)) {
        rawPath = appName;
    } else {
        rawPath = contextPath;
    }
    return UriUtils.repairData(rawPath);
}
/** A non-empty context path wins; an empty one falls back to the app name. */
@Test
public void testBuildContextPath() {
    final String url = "/contextPath/service";
    final String appName = "/app";
    assertEquals(url, ContextPathUtils.buildContextPath(url, appName));
    assertEquals(appName, ContextPathUtils.buildContextPath("", appName));
}
/**
 * Looks up the native lookup table matching the given content-pack entity.
 *
 * @param entity content-pack entity; only {@code EntityV1} is supported
 * @param parameters parameter values referenced by the entity
 * @return the existing native entity, or empty when none matches
 * @throws IllegalArgumentException for any entity version other than V1
 */
@Override
public Optional<NativeEntity<LookupTableDto>> findExisting(Entity entity, Map<String, ValueReference> parameters) {
    // Guard clause: reject anything that is not a V1 entity up front.
    if (!(entity instanceof EntityV1)) {
        throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
    }
    return findExisting((EntityV1) entity, parameters);
}
/** An entity matching a fixture lookup table by name must resolve to that native entity with all fields intact. */
@Test
@MongoDBFixtures("LookupTableFacadeTest.json")
public void findExisting() {
    final Entity entity = EntityV1.builder()
        .id(ModelId.of("1"))
        .type(ModelTypes.LOOKUP_TABLE_V1)
        .data(objectMapper.convertValue(LookupTableEntity.create(
            ValueReference.of(DefaultEntityScope.NAME),
            ValueReference.of("http-dsv-no-cache"),
            ValueReference.of("HTTP DSV without Cache"),
            ValueReference.of("HTTP DSV without Cache"),
            ValueReference.of("cache-id"),
            ValueReference.of("data-adapter-id"),
            ValueReference.of("Default single value"),
            ValueReference.of(LookupDefaultValue.Type.STRING),
            ValueReference.of("Default multi value"),
            ValueReference.of(LookupDefaultValue.Type.OBJECT)), JsonNode.class))
        .build();
    final NativeEntity<LookupTableDto> existingEntity =
        facade.findExisting(entity, Collections.emptyMap()).orElseThrow(AssertionError::new);
    assertThat(existingEntity.descriptor().id()).isEqualTo(ModelId.of("5adf24dd4b900a0fdb4e530d"));
    assertThat(existingEntity.descriptor().type()).isEqualTo(ModelTypes.LOOKUP_TABLE_V1);
    assertThat(existingEntity.entity().name()).isEqualTo("http-dsv-no-cache");
    assertThat(existingEntity.entity().title()).isEqualTo("HTTP DSV without Cache");
    assertThat(existingEntity.entity().description()).isEqualTo("HTTP DSV without Cache");
    assertThat(existingEntity.entity().dataAdapterId()).isEqualTo("5adf24a04b900a0fdb4e52c8");
    assertThat(existingEntity.entity().cacheId()).isEqualTo("5adf24b24b900a0fdb4e52dd");
    assertThat(existingEntity.entity().defaultSingleValue()).isEqualTo("Default single value");
    assertThat(existingEntity.entity().defaultSingleValueType()).isEqualTo(LookupDefaultValue.Type.STRING);
    assertThat(existingEntity.entity().defaultMultiValue()).isEqualTo("Default multi value");
    assertThat(existingEntity.entity().defaultMultiValueType()).isEqualTo(LookupDefaultValue.Type.OBJECT);
}
/**
 * Renders this URL as a string: protocol, host, non-default port, encoded
 * path, query string, and encoded fragment — each part emitted only when
 * present. Default ports for the protocol are omitted.
 */
@Override
public String toString() {
    StringBuilder b = new StringBuilder();
    if (StringUtils.isNotBlank(protocol)) {
        b.append(protocol);
        b.append("://");
    }
    if (StringUtils.isNotBlank(host)) {
        b.append(host);
    }
    // Only render an explicit port when it is set and not the protocol's default.
    if (!isPortDefault() && port != -1) {
        b.append(':');
        b.append(port);
    }
    if (StringUtils.isNotBlank(path)) {
        // If no scheme/host/port, leave the path as is
        if (b.length() > 0 && !path.startsWith("/")) {
            b.append('/');
        }
        b.append(encodePath(path));
    }
    if (queryString != null && !queryString.isEmpty()) {
        b.append(queryString.toString());
    }
    if (fragment != null) {
        b.append("#");
        b.append(encodePath(fragment));
    }
    return b.toString();
}
/** The default HTTPS port (443) must be dropped from the canonical string form. */
@Test
public void testHttpsProtocolDefaultPort() {
    s = "https://www.example.com:443/blah";
    t = "https://www.example.com/blah";
    assertEquals(t, new HttpURL(s).toString());
}
/**
 * Creates a table with the given column families, delegating to the overload
 * with a default {@code Duration} of one hour (presumably the column-family
 * max-age/TTL — confirm against the overload's contract).
 *
 * @param tableId id of the table to create
 * @param columnFamilies column family names for the new table
 * @throws BigtableResourceManagerException propagated from the delegate
 */
public synchronized void createTable(String tableId, Iterable<String> columnFamilies)
    throws BigtableResourceManagerException {
    createTable(tableId, columnFamilies, Duration.ofHours(1));
}
/** Creating a table without any column family must be rejected with IllegalArgumentException. */
@Test
public void testCreateTableShouldThrowErrorWhenNoColumnFamilyGiven() {
    assertThrows(
        IllegalArgumentException.class,
        () -> testManager.createTable(TABLE_ID, new ArrayList<>()));
}
/**
 * Starts periodic reporting with the initial delay equal to the period.
 *
 * @param period interval between reports
 * @param unit time unit of {@code period}
 */
public void start(long period, TimeUnit unit) {
    start(period, period, unit);
}
/** Starting an already started reporter a second time must fail. */
@Test(expected = IllegalArgumentException.class)
public void shouldDisallowToStartReportingMultiple() throws Exception {
    reporter.start(200, TimeUnit.MILLISECONDS);
    reporter.start(200, TimeUnit.MILLISECONDS);
}
/**
 * Creates a lossless image XObject from a {@link BufferedImage}.
 * <p>
 * Gray images use a dedicated gray encoder. Otherwise the predictor-based
 * encoder is tried first; for small RGB images the classic encoding is also
 * produced and the smaller of the two is kept. If the predictor encoder
 * yields nothing, the image is exported as 8-bit sRGB (which may lose color
 * information).
 *
 * @param document document the image object belongs to
 * @param image source image
 * @return the encoded image XObject
 * @throws IOException on encoding failure
 */
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
        throws IOException {
    if (isGrayImage(image)) {
        return createFromGrayImage(image, document);
    }

    // We try to encode the image with predictor
    if (USE_PREDICTOR_ENCODER) {
        PDImageXObject pdImageXObject = new PredictorEncoder(document, image).encode();
        if (pdImageXObject != null) {
            if (pdImageXObject.getColorSpace() == PDDeviceRGB.INSTANCE &&
                pdImageXObject.getBitsPerComponent() < 16 &&
                image.getWidth() * image.getHeight() <= 50 * 50) {
                // also create classic compressed image, compare sizes
                PDImageXObject pdImageXObjectClassic = createFromRGBImage(image, document);
                if (pdImageXObjectClassic.getCOSObject().getLength() <
                        pdImageXObject.getCOSObject().getLength()) {
                    pdImageXObject.getCOSObject().close();
                    return pdImageXObjectClassic;
                } else {
                    pdImageXObjectClassic.getCOSObject().close();
                }
            }
            return pdImageXObject;
        }
    }

    // Fallback: We export the image as 8-bit sRGB and might lose color information
    return createFromRGBImage(image, document);
}
/** A 16-bit RGBA PNG must round-trip losslessly, with its alpha channel preserved as a soft mask. */
@Test
void testCreateLosslessFrom16BitPNG() throws IOException {
    PDDocument document = new PDDocument();
    BufferedImage image = ImageIO.read(new File("target/imgs", "PDFBOX-4184-16bit.png"));
    assertEquals(64, image.getColorModel().getPixelSize());
    assertEquals(Transparency.TRANSLUCENT, image.getColorModel().getTransparency());
    assertEquals(4, image.getRaster().getNumDataElements());
    assertEquals(java.awt.image.DataBuffer.TYPE_USHORT, image.getRaster().getDataBuffer().getDataType());
    PDImageXObject ximage = LosslessFactory.createFromImage(document, image);
    int w = image.getWidth();
    int h = image.getHeight();
    validate(ximage, 16, w, h, "png", PDDeviceRGB.INSTANCE.getName());
    checkIdent(image, ximage.getImage());
    checkIdentRGB(image, ximage.getOpaqueImage(null, 1));
    checkIdentRaw(image, ximage);
    assertNotNull(ximage.getSoftMask());
    validate(ximage.getSoftMask(), 16, w, h, "png", PDDeviceGray.INSTANCE.getName());
    assertEquals(35, colorCount(ximage.getSoftMask().getImage()));
    doWritePDF(document, ximage, TESTRESULTSDIR, "png16bit.pdf");
}
/**
 * Writes the value (with its timestamp) into whichever store backs this wrapper.
 *
 * @return {@code PUT_RETURN_CODE_IS_LATEST} for a timestamped store, or the
 *         versioned store's own return code
 * @throws IllegalStateException if neither store was configured
 */
public long put(final K key, final V value, final long timestamp) {
    if (timestampedStore != null) {
        timestampedStore.put(key, ValueAndTimestamp.make(value, timestamp));
        return PUT_RETURN_CODE_IS_LATEST;
    }
    if (versionedStore != null) {
        return versionedStore.put(key, value, timestamp);
    }
    throw new IllegalStateException("KeyValueStoreWrapper must be initialized with either timestamped or versioned store");
}
/** Putting a null value to a versioned store must return the store's own return code. */
@Test
public void shouldPutNullToVersionedStore() {
    givenWrapperWithVersionedStore();
    when(versionedStore.put(KEY, null, VALUE_AND_TIMESTAMP.timestamp())).thenReturn(12L);
    final long putReturnCode = wrapper.put(KEY, null, VALUE_AND_TIMESTAMP.timestamp());
    assertThat(putReturnCode, equalTo(12L));
}
/**
 * Lists integrations: one row per integration with name, phase, kit and
 * readiness. With {@code --name} only the names are printed; otherwise an
 * ASCII table is rendered. Always returns 0.
 */
public Integer doCall() throws Exception {
    List<Row> rows = new ArrayList<>();
    List<Integration> integrations = client(Integration.class).list().getItems();
    integrations
        .forEach(integration -> {
            Row row = new Row();
            row.name = integration.getMetadata().getName();
            // Default to "not ready" until a Ready=True condition is found.
            row.ready = "0/1";
            if (integration.getStatus() != null) {
                row.phase = integration.getStatus().getPhase();
                if (integration.getStatus().getConditions() != null) {
                    row.ready = integration.getStatus().getConditions().stream().filter(c -> c.getType().equals("Ready"))
                            .anyMatch(c -> c.getStatus().equals("True")) ? "1/1" : "0/1";
                }
                row.kit = integration.getStatus().getIntegrationKit() != null
                        ? integration.getStatus().getIntegrationKit().getName() : "";
            } else {
                row.phase = "Unknown";
            }
            rows.add(row);
        });
    if (!rows.isEmpty()) {
        if (name) {
            // --name mode: print bare names only.
            rows.forEach(r -> printer().println(r.name));
        } else {
            printer().println(AsciiTable.getTable(AsciiTable.NO_BORDERS, rows, Arrays.asList(
                    new Column().header("NAME").dataAlign(HorizontalAlign.LEFT)
                            .maxWidth(40, OverflowBehaviour.ELLIPSIS_RIGHT)
                            .with(r -> r.name),
                    new Column().header("PHASE").headerAlign(HorizontalAlign.LEFT)
                            .with(r -> r.phase),
                    new Column().header("KIT").headerAlign(HorizontalAlign.LEFT).with(r -> r.kit),
                    new Column().header("READY").dataAlign(HorizontalAlign.CENTER).with(r -> r.ready))));
        }
    }
    return 0;
}
/** An integration with a Ready=False condition must list as 0/1 in its build phase. */
@Test
public void shouldListPendingIntegration() throws Exception {
    Integration integration = createIntegration("building");
    IntegrationStatus status = new IntegrationStatus();
    status.setPhase("Building Kit");
    status.setConditions(new ArrayList<>());
    Conditions readyCondition = new Conditions();
    readyCondition.setType("Ready");
    readyCondition.setStatus("False");
    status.getConditions().add(readyCondition);
    integration.setStatus(status);
    kubernetesClient.resources(Integration.class).resource(integration).create();
    createCommand().doCall();
    List<String> output = printer.getLines();
    Assertions.assertEquals("NAME      PHASE         KIT  READY", output.get(0));
    Assertions.assertEquals("building  Building Kit        0/1", output.get(1));
}
/**
 * Builds the MySQL paging query for listing config-info md5 rows: an inner
 * id-only SELECT with LIMIT, joined back to the full row — a common MySQL
 * deep-paging optimization that avoids scanning wide rows past the offset.
 * <p>
 * NOTE(review): start row and page size are concatenated directly into the
 * SQL; this assumes they are integers supplied by the paging context —
 * confirm upstream validation.
 *
 * @param context paging context carrying start row and page size
 * @return the SQL with an empty parameter list
 */
@Override
public MapperResult listGroupKeyMd5ByPageFetchRows(MapperContext context) {
    String sql =
            "SELECT t.id,data_id,group_id,tenant_id,app_name,md5,type,gmt_modified,encrypted_data_key FROM "
                    + "( SELECT id FROM config_info ORDER BY id LIMIT " + context.getStartRow() + ","
                    + context.getPageSize() + " ) g, config_info t WHERE g.id = t.id";
    return new MapperResult(sql, Collections.emptyList());
}
/** The generated paging SQL must inline the context's start row and page size, with no bind parameters. */
@Test
void testListGroupKeyMd5ByPageFetchRows() {
    MapperResult mapperResult = configInfoMapperByMySql.listGroupKeyMd5ByPageFetchRows(context);
    assertEquals(mapperResult.getSql(),
        "SELECT t.id,data_id,group_id,tenant_id,app_name,md5,type,gmt_modified,encrypted_data_key FROM "
            + "( SELECT id FROM config_info ORDER BY id LIMIT 0,5 ) g, config_info t WHERE g.id = t.id");
    assertArrayEquals(mapperResult.getParamList().toArray(), emptyObjs);
}
/**
 * Parses the command line arguments into this instance.
 * <p>
 * {@code --help} prints usage and exits with {@code EXIT_OKAY};
 * {@code --version} prints the version read from {@code version.properties}
 * and exits with {@code EXIT_OKAY}. Parse or I/O failures print the error and
 * usage, then exit with {@code EXIT_ERROR}.
 *
 * @param args raw command line arguments
 * @return this instance, populated from the arguments
 */
public Arguments parse(String[] args) {
    JCommander jCommander = new JCommander(this);
    jCommander.setProgramName("jsonschema2pojo");

    try {
        jCommander.parse(args);

        if (this.showHelp) {
            jCommander.usage();
            exit(EXIT_OKAY);
        } else if (printVersion) {
            Properties properties = new Properties();
            // try-with-resources: the resource stream was previously leaked, and a
            // missing resource caused an NPE instead of a clear error message.
            try (java.io.InputStream in = getClass().getResourceAsStream("version.properties")) {
                if (in == null) {
                    throw new IOException("version.properties not found on classpath");
                }
                properties.load(in);
            }
            jCommander.getConsole().println(jCommander.getProgramName() + " version " + properties.getProperty("version"));
            exit(EXIT_OKAY);
        }
    } catch (IOException | ParameterException e) {
        System.err.println(e.getMessage());
        jCommander.usage();
        exit(EXIT_ERROR);
    }

    return this;
}
/** --version must print the version banner and exit successfully. */
@Test
public void requestingVersionCausesVersion() {
    ArgsForTest args = (ArgsForTest) new ArgsForTest().parse(new String[] { "--version" });
    assertThat(args.didExit(), is(true));
    assertThat(new String(systemOutCapture.toByteArray(), StandardCharsets.UTF_8).matches("(?s)jsonschema2pojo version \\d.*"), is(true));
}
/**
 * Pages over all widget types of a tenant, translating the filter object into
 * the repository's flag parameters (deprecated filter on/off + value, widget
 * type list presence) before delegating.
 *
 * @param widgetTypeFilter tenant id plus search/deprecation/type filters
 * @param pageLink paging and text-search settings
 * @return one page of matching widget type infos
 */
@Override
public PageData<WidgetTypeInfo> findAllTenantWidgetTypesByTenantId(WidgetTypeFilter widgetTypeFilter, PageLink pageLink) {
    // ALL means "do not filter by deprecation"; otherwise filter for deprecated or not.
    boolean deprecatedFilterEnabled = !DeprecatedFilter.ALL.equals(widgetTypeFilter.getDeprecatedFilter());
    boolean deprecatedFilterBool = DeprecatedFilter.DEPRECATED.equals(widgetTypeFilter.getDeprecatedFilter());
    boolean widgetTypesEmpty = widgetTypeFilter.getWidgetTypes() == null || widgetTypeFilter.getWidgetTypes().isEmpty();
    return DaoUtil.toPageData(
            widgetTypeInfoRepository
                    .findAllTenantWidgetTypesByTenantId(
                            widgetTypeFilter.getTenantId().getId(),
                            NULL_UUID,
                            pageLink.getTextSearch(),
                            widgetTypeFilter.isFullSearch(),
                            deprecatedFilterEnabled,
                            deprecatedFilterBool,
                            widgetTypesEmpty,
                            widgetTypeFilter.getWidgetTypes() == null ? Collections.emptyList() : widgetTypeFilter.getWidgetTypes(),
                            widgetTypeFilter.isScadaFirst(),
                            DaoUtil.toPageable(pageLink, WidgetTypeInfoEntity.SEARCH_COLUMNS_MAP)));
}
/** Full-text tag search: matching search terms find the tagged widget type, non-matching ones find nothing. */
@Test
public void testTagsSearchInFindAllTenantWidgetTypesByTenantId() {
    for (var entry : SHOULD_FIND_SEARCH_TO_TAGS_MAP.entrySet()) {
        String searchText = entry.getKey();
        String[] tags = entry.getValue();
        WidgetTypeDetails savedWidgetType = createAndSaveWidgetType(TenantId.SYS_TENANT_ID, WIDGET_TYPE_COUNT + 1, tags);
        PageData<WidgetTypeInfo> widgetTypes = widgetTypeDao.findAllTenantWidgetTypesByTenantId(
                WidgetTypeFilter.builder()
                        .tenantId(TenantId.SYS_TENANT_ID)
                        .fullSearch(true)
                        .deprecatedFilter(DeprecatedFilter.ALL)
                        .widgetTypes(null).build(), new PageLink(10, 0, searchText)
        );
        assertThat(widgetTypes.getData()).hasSize(1);
        assertThat(widgetTypes.getData().get(0).getId()).isEqualTo(savedWidgetType.getId());
        // Clean up so the next iteration starts from a known state.
        widgetTypeDao.removeById(TenantId.SYS_TENANT_ID, savedWidgetType.getUuidId());
    }
    for (var entry : SHOULDNT_FIND_SEARCH_TO_TAGS_MAP.entrySet()) {
        String searchText = entry.getKey();
        String[] tags = entry.getValue();
        WidgetTypeDetails savedWidgetType = createAndSaveWidgetType(TenantId.SYS_TENANT_ID, WIDGET_TYPE_COUNT + 1, tags);
        PageData<WidgetTypeInfo> widgetTypes = widgetTypeDao.findAllTenantWidgetTypesByTenantId(
                WidgetTypeFilter.builder()
                        .tenantId(TenantId.SYS_TENANT_ID)
                        .fullSearch(true)
                        .deprecatedFilter(DeprecatedFilter.ALL)
                        .widgetTypes(null).build(), new PageLink(10, 0, searchText)
        );
        assertThat(widgetTypes.getData()).hasSize(0);
        widgetTypeDao.removeById(TenantId.SYS_TENANT_ID, savedWidgetType.getUuidId());
    }
}
/**
 * Stops watching the given key, closing and discarding its cached watcher
 * if one exists. No-op when the key is not being watched.
 *
 * @param key the watched path to remove
 */
@Override
public void unwatch(final String key) {
    // Single remove() call instead of containsKey()+remove(): avoids the
    // check-then-act race on a concurrently modified map and a double lookup.
    final TreeCache cache = cacheMap.remove(key);
    if (cache != null) {
        cache.close();
    }
}
/** After unwatch, no cache entry for the key must remain in the internal map. */
@Test
void testUnwatch() throws NoSuchFieldException, IllegalAccessException {
    zookeeperDiscoveryServiceUnderTest.unwatch("/key");
    // Reflectively inspect the private cacheMap to confirm the entry is gone.
    Field cacheField = zookeeperDiscoveryServiceUnderTest.getClass().getDeclaredField("cacheMap");
    cacheField.setAccessible(true);
    Map<String, TreeCache> cacheMap = (Map<String, TreeCache>) cacheField.get(zookeeperDiscoveryServiceUnderTest);
    Assertions.assertNull(cacheMap.get("/key"));
}
/**
 * Creates an argument that holds only a SQL type; the remaining constructor
 * fields are left {@code null}.
 *
 * @param type the SQL type of the argument
 * @return a new type-only argument
 */
public static SqlArgument of(final SqlType type) {
    return new SqlArgument(type, null, null);
}
// An argument constructed from a plain SQL type carries no lambda, so asking
// for the lambda must fail with an explanatory message.
@Test
public void shouldThrowWhenTypePresentWhenGettingLambda() {
    final SqlArgument argument = SqlArgument.of(SqlTypes.STRING, null);
    final Exception e = assertThrows(
        RuntimeException.class,
        argument::getSqlLambdaOrThrow
    );
    assertThat(e.getMessage(), containsString("Was expecting lambda as a function argument"));
}
/**
 * Classifies the given throwable. When this classifier's predicate matches,
 * the mapped exception is handed to the consumer and the error is reported
 * as non-fatal (false). Otherwise the decision is delegated to the chained
 * classifier; with no chain, an unmatched error is fatal (true).
 *
 * @param err               the throwable to classify
 * @param throwableConsumer receives the mapped exception when matched
 * @return false when matched (handled), true when no classifier matched
 */
public boolean isFatal(Throwable err, Consumer<Exception> throwableConsumer) {
    // Guard clause: an unmatched error falls through to the chain.
    if (!validator.test(err)) {
        return chainedClassifier == null || chainedClassifier.isFatal(err, throwableConsumer);
    }
    // Matched: deliver the translated exception and signal "not fatal".
    throwableConsumer.accept(throwableMapper.apply(err));
    return false;
}
// Triggers a NullPointerException; the strategy only recognizes
// ArithmeticException, so the consumer must never fire and the reference
// must remain null.
@Test
public void noExceptionIsThrownIfTheExceptionDoesNotMatchTheOneExpected() {
    AtomicReference<Exception> caughtException = new AtomicReference<>();
    try {
        // nullReference is null, so toString() throws NPE.
        System.out.print(nullReference.toString());
    } catch (Exception e) {
        ARITHMETIC_EXCEPTION_STRATEGY.isFatal(e, caughtException::set);
    }
    assertThat(caughtException.get()).isNull();
}
/**
 * Searches the queue offset for the given timestamp, qualifying the queue
 * with this producer's namespace before delegating to the underlying
 * producer implementation.
 *
 * @param mq        the message queue to search
 * @param timestamp the timestamp (milliseconds) to locate
 * @return the offset found by the delegate
 * @throws MQClientException if the delegate lookup fails
 */
@Override
public long searchOffset(MessageQueue mq, long timestamp) throws MQClientException {
    return this.defaultMQProducerImpl.searchOffset(queueWithNamespace(mq), timestamp);
}
// Verifies searchOffset() delegates cleanly: with a mocked impl injected
// (and no stubbing), the call returns the mock default of 0L.
@Test
public void assertSearchOffset() throws MQClientException, NoSuchFieldException, IllegalAccessException {
    setDefaultMQProducerImpl(); // inject the mocked impl via reflection
    MessageQueue mq = mock(MessageQueue.class);
    long result = producer.searchOffset(mq, System.currentTimeMillis());
    assertEquals(0L, result);
}
// Decides whether chat from the named player may be filtered.
// A player is exempt only when a relationship holds AND the matching config
// flag is off: e.g. friends are spared unless filterFriends() is enabled.
// The local player's own messages are never filtered.
boolean canFilterPlayer(String playerName) {
    boolean isMessageFromSelf = playerName.equals(client.getLocalPlayer().getName());
    return !isMessageFromSelf &&
        (config.filterFriends() || !client.isFriended(playerName, false)) &&
        (config.filterFriendsChat() || !isFriendsChatMember(playerName)) &&
        (config.filterClanChat() || !isClanChatMember(playerName));
}
// A friends-chat member must not be filterable when filterFriendsChat is off.
@Test
public void testMessageFromFriendsChatIsNotFiltered() {
    when(friendsChatManager.findByName("B0aty")).thenReturn(mock(FriendsChatMember.class));
    when(chatFilterConfig.filterFriendsChat()).thenReturn(false);
    assertFalse(chatFilterPlugin.canFilterPlayer("B0aty"));
}
/**
 * Sets the current farming-contract crop, stores it (presumably for
 * persistence across sessions — confirm against setStoredContract), and
 * refreshes the derived contract state.
 *
 * @param contract the contracted produce, or null to clear the contract
 */
public void setContract(@Nullable Produce contract) {
    this.contract = contract;
    setStoredContract(contract);
    handleContractState();
}
// With a cabbage contract, an onion patch that is harvestable and a cabbage
// patch that is dead: the contract stays IN_PROGRESS and the reported crop
// state is DEAD (the dead cabbage patch is the one relevant to the contract).
@Test
public void cabbageContractOnionHarvestableAndCabbageDead() {
    final long unixNow = Instant.now().getEpochSecond();
    // Get the two allotment patches
    final FarmingPatch patch1 = farmingGuildPatches.get(Varbits.FARMING_4773);
    final FarmingPatch patch2 = farmingGuildPatches.get(Varbits.FARMING_4774);
    assertNotNull(patch1);
    assertNotNull(patch2);
    // Specify the two allotment patches
    when(farmingTracker.predictPatch(patch1))
        .thenReturn(new PatchPrediction(Produce.ONION, CropState.HARVESTABLE, unixNow, 3, 3));
    when(farmingTracker.predictPatch(patch2))
        .thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.DEAD, 0, 2, 3));
    farmingContractManager.setContract(Produce.CABBAGE);
    assertEquals(SummaryState.IN_PROGRESS, farmingContractManager.getSummary());
    assertEquals(CropState.DEAD, farmingContractManager.getContractCropState());
}
/**
 * Resolves serialization metadata for a key or value sample, dispatching on
 * its runtime representation (serialized Data, Portable variants, compact
 * generic records, compact-serializable objects, JSON, or plain Java).
 * Returns null when no metadata can be derived (JSON values, or any failure
 * during resolution — the catch-all below deliberately swallows errors so an
 * unresolvable sample degrades to "no metadata" rather than failing).
 *
 * @param ss     serialization service used to inspect the sample
 * @param target the sample key or value
 * @param key    true when resolving the key side, false for the value side
 * @return resolved metadata, or null when unavailable
 */
@Nullable
@SuppressWarnings("checkstyle:returncount")
static Metadata resolve(InternalSerializationService ss, Object target, boolean key) {
    try {
        if (target instanceof Data) {
            // Already-serialized form: branch on the encoded type.
            Data data = (Data) target;
            if (data.isPortable()) {
                ClassDefinition classDefinition = ss.getPortableContext().lookupClassDefinition(data);
                return resolvePortable(classDefinition, key);
            } else if (data.isCompact()) {
                return resolveCompact(ss.extractSchemaFromData(data), key);
            } else if (data.isJson()) {
                // JSON carries no class metadata.
                return null;
            } else {
                // Fall back to deserializing and reflecting on the Java class.
                return resolveJava(ss.toObject(data).getClass(), key);
            }
        } else if (target instanceof VersionedPortable) {
            // Versioned portables supply their own class version.
            VersionedPortable portable = (VersionedPortable) target;
            ClassDefinition classDefinition = ss.getPortableContext()
                    .lookupClassDefinition(portable.getFactoryId(), portable.getClassId(), portable.getClassVersion());
            return resolvePortable(classDefinition, key);
        } else if (target instanceof Portable) {
            // Plain portables default to class version 0.
            Portable portable = (Portable) target;
            ClassDefinition classDefinition = ss.getPortableContext()
                    .lookupClassDefinition(portable.getFactoryId(), portable.getClassId(), 0);
            return resolvePortable(classDefinition, key);
        } else if (target instanceof PortableGenericRecord) {
            return resolvePortable(((PortableGenericRecord) target).getClassDefinition(), key);
        } else if (target instanceof CompactGenericRecord) {
            return resolveCompact(((CompactGenericRecord) target).getSchema(), key);
        } else if (ss.isCompactSerializable(target)) {
            Schema schema = ss.extractSchemaFromObject(target);
            return resolveCompact(schema, key);
        } else if (target instanceof HazelcastJsonValue) {
            return null;
        } else {
            return resolveJava(target.getClass(), key);
        }
    } catch (Exception e) {
        // Best-effort resolution: any failure means "no metadata".
        return null;
    }
}
// A VersionedPortable sample must resolve to portable-format metadata with
// its factory id, class id and explicit class version — both when passed as
// an object and when passed pre-serialized as Data.
@Test
public void test_versionedPortable() {
    ClassDefinition classDefinition = new ClassDefinitionBuilder(PORTABLE_FACTORY_ID, PORTABLE_CLASS_ID, PORTABLE_CLASS_VERSION).build();
    InternalSerializationService ss = new DefaultSerializationServiceBuilder().addClassDefinition(classDefinition).build();
    // Resolve from the live object.
    Metadata metadata = SampleMetadataResolver.resolve(ss, new VersionedPortableClass(), key);
    assertThat(metadata.options()).containsExactly(
        entry(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT, PORTABLE_FORMAT),
        entry(key ? OPTION_KEY_FACTORY_ID : OPTION_VALUE_FACTORY_ID, String.valueOf(PORTABLE_FACTORY_ID)),
        entry(key ? OPTION_KEY_CLASS_ID : OPTION_VALUE_CLASS_ID, String.valueOf(PORTABLE_CLASS_ID)),
        entry(key ? OPTION_KEY_CLASS_VERSION : OPTION_VALUE_CLASS_VERSION, String.valueOf(PORTABLE_CLASS_VERSION))
    );
    // Resolve from the serialized Data form; the result must be identical.
    metadata = SampleMetadataResolver.resolve(ss, ss.toData(new VersionedPortableClass()), key);
    assertThat(metadata.options()).containsExactly(
        entry(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT, PORTABLE_FORMAT),
        entry(key ? OPTION_KEY_FACTORY_ID : OPTION_VALUE_FACTORY_ID, String.valueOf(PORTABLE_FACTORY_ID)),
        entry(key ? OPTION_KEY_CLASS_ID : OPTION_VALUE_CLASS_ID, String.valueOf(PORTABLE_CLASS_ID)),
        entry(key ? OPTION_KEY_CLASS_VERSION : OPTION_VALUE_CLASS_VERSION, String.valueOf(PORTABLE_CLASS_VERSION))
    );
}
/**
 * Analyzes the statement, delegating to {@link #analyze(Statement, boolean)}
 * with {@code false} for the second flag (its meaning is not visible here —
 * confirm against the overload's declaration).
 *
 * @param statement the statement to analyze
 * @return the analysis produced by the delegate
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
// LIKE must analyze cleanly for both VARCHAR and CHAR operands.
@Test
public void testLike() {
    analyze("SELECT '1' LIKE '1'");
    analyze("SELECT CAST('1' as CHAR(1)) LIKE '1'");
}
/**
 * Returns the end of the given day. Delegates to
 * {@link #endOfDay(LocalDateTime, boolean)} with truncation disabled, so the
 * result keeps full nanosecond precision (23:59:59.999999999).
 *
 * @param time the time whose day end is wanted
 * @return the last instant of that day
 */
public static LocalDateTime endOfDay(LocalDateTime time) {
    return endOfDay(time, false);
}
// End-of-day without truncation keeps nanosecond precision; with truncation
// it drops the sub-second part.
@Test
public void endOfDayTest() {
    final LocalDateTime localDateTime = LocalDateTimeUtil.parse("2020-01-23T12:23:56");
    LocalDateTime endOfDay = LocalDateTimeUtil.endOfDay(localDateTime);
    assertEquals("2020-01-23T23:59:59.999999999", endOfDay.toString());
    endOfDay = LocalDateTimeUtil.endOfDay(localDateTime, true);
    assertEquals("2020-01-23T23:59:59", endOfDay.toString());
}
/**
 * Normalizes an ASM type sort: arrays collapse to OBJECT, and the sub-int
 * primitives (BOOLEAN, CHAR, BYTE, SHORT — the sorts strictly between VOID
 * and INT) collapse to INT. Every other sort is returned unchanged.
 *
 * @param sort an ASM {@code Type} sort constant
 * @return the normalized sort
 */
public static int getNormalizedSort(int sort) {
    if (sort == Type.ARRAY) {
        return Type.OBJECT;
    }
    // Sub-int primitives widen to INT; everything else passes through.
    return (sort > 0 && sort < Type.INT) ? Type.INT : sort;
}
// Exhaustive check of sort normalization: arrays map to OBJECT, sub-int
// primitives map to INT, and all other sorts are unchanged.
@Test
void testGetNormalizedSort() {
    assertEquals(Type.METHOD, Types.getNormalizedSort(Type.METHOD));
    assertEquals(Type.OBJECT, Types.getNormalizedSort(Type.OBJECT));
    assertEquals(Type.OBJECT, Types.getNormalizedSort(Type.ARRAY));
    assertEquals(Type.DOUBLE, Types.getNormalizedSort(Type.DOUBLE));
    assertEquals(Type.LONG, Types.getNormalizedSort(Type.LONG));
    assertEquals(Type.FLOAT, Types.getNormalizedSort(Type.FLOAT));
    assertEquals(Type.INT, Types.getNormalizedSort(Type.INT));
    assertEquals(Type.INT, Types.getNormalizedSort(Type.SHORT));
    assertEquals(Type.INT, Types.getNormalizedSort(Type.BYTE));
    assertEquals(Type.INT, Types.getNormalizedSort(Type.CHAR));
    assertEquals(Type.INT, Types.getNormalizedSort(Type.BOOLEAN));
    assertEquals(Type.VOID, Types.getNormalizedSort(Type.VOID));
}
/**
 * Parses a plugin descriptor XML stream, unpacking the jar location,
 * extraction directory and bundled flag from the given file details before
 * delegating to the main overload.
 *
 * @param pluginXml             the descriptor XML content
 * @param bundleOrPluginJarFile details of the plugin/bundle jar on disk
 * @return the parsed bundle descriptor
 */
public static GoPluginBundleDescriptor parseXML(InputStream pluginXml, BundleOrPluginFileDetails bundleOrPluginJarFile) throws IOException, JAXBException, XMLStreamException, SAXException {
    return parseXML(pluginXml, bundleOrPluginJarFile.file().getAbsolutePath(), bundleOrPluginJarFile.extractionLocation(), bundleOrPluginJarFile.isBundledPlugin());
}
// XSD validation must reject a descriptor missing the mandatory 'id'
// attribute, and the JAXB error must name the missing attribute.
@Test
void shouldPerformPluginXsdValidationAndFailWhenIDIsNotPresent() throws IOException {
    try (InputStream pluginXml = IOUtils.toInputStream("<go-plugin version=\"1\"></go-plugin>", StandardCharsets.UTF_8)) {
        JAXBException e = assertThrows(JAXBException.class, () -> GoPluginDescriptorParser.parseXML(pluginXml, "/tmp/", new File("/tmp/"), true));
        assertTrue(e.getCause().getMessage().contains("Attribute 'id' must appear on element 'go-plugin'"),
            format("Message not correct: [%s]", e.getCause().getMessage()));
    }
}
/**
 * Returns the resources assigned for the given resource type, or an empty
 * list when the type has no assignment recorded.
 *
 * @param resourceType the resource type to look up
 * @return the assigned resources; never null
 */
public List<Serializable> getAssignedResources(String resourceType) {
    final AssignedResources assigned = assignedResourcesMap.get(resourceType);
    // Missing type -> shared immutable empty list instead of null.
    return assigned == null ? Collections.emptyList() : assigned.getAssignedResources();
}
// Round-trip check: serializing AssignedResources and deserializing the
// bytes must reproduce the original resource list.
@Test
public void testAssignedResourcesCanDeserializePreviouslySerializedValues() {
    try {
        byte[] serializedString = toBytes(testResources.getAssignedResources());
        ResourceMappings.AssignedResources deserialized = ResourceMappings.AssignedResources.fromBytes(serializedString);
        Assert.assertEquals(testResources.getAssignedResources(), deserialized.getAssignedResources());
    } catch (IOException e) {
        // Serialization failure is a test failure, not an environmental skip.
        e.printStackTrace();
        Assert.fail(String.format("Deserialization of test AssignedResources " + "failed with %s", e.getMessage()));
    }
}
/**
 * Lists all database names from the Hive metastore via RPC. The call is
 * wrapped in a tracing Timer scope; try-with-resources guarantees the scope
 * is closed (and recorded) even when the RPC throws.
 *
 * @return all database names reported by the metastore
 */
public List<String> getAllDatabaseNames() {
    try (Timer ignored = Tracers.watchScope(EXTERNAL, "HMS.getAllDatabases")) {
        return callRPC("getAllDatabases", "Failed to getAllDatabases", new Object[0]);
    }
}
// A thrift URI with an out-of-range port must surface an "Invalid port"
// error when the client attempts its first metastore call.
@Test
public void testGetHiveClient() {
    HiveConf hiveConf = new HiveConf();
    // 90303 exceeds the valid TCP port range (max 65535).
    hiveConf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), "thrift://127.0.0.1:90303");
    HiveMetaClient client = new HiveMetaClient(hiveConf);
    try {
        client.getAllDatabaseNames();
    } catch (Exception e) {
        Assert.assertTrue(e.getMessage().contains("Invalid port 90303"));
    }
}
/**
 * Returns the textual IP of the socket address, or null when the address is
 * unresolved (no underlying InetAddress). IPv6 addresses are round-tripped
 * through the raw bytes to drop the scope id.
 *
 * @param socketAddress the address to render
 * @return the host IP string, or null for unresolved addresses
 */
@VisibleForTesting
@Nullable
static String getHostAddress(InetSocketAddress socketAddress) {
    InetAddress address = socketAddress.getAddress();
    if (address instanceof Inet6Address) {
        // Strip the scope from the address since some other classes choke on it.
        // TODO(carl-mastrangelo): Consider adding this back in once issues like
        // https://github.com/google/guava/issues/2587 are fixed.
        try {
            // Rebuilding from the raw 16 bytes loses the scope id by design.
            return InetAddress.getByAddress(address.getAddress()).getHostAddress();
        } catch (UnknownHostException e) {
            // Cannot happen for a 16-byte input; surface as unchecked anyway.
            throw new RuntimeException(e);
        }
    } else if (address instanceof Inet4Address) {
        return address.getHostAddress();
    } else {
        // Unresolved socket addresses have a null InetAddress.
        assert address == null;
        return null;
    }
}
@Test void failsOnUnresolved() { InetSocketAddress address = InetSocketAddress.createUnresolved("localhost", 8080); String addressString = SourceAddressChannelHandler.getHostAddress(address); assertNull(null, addressString); }
/**
 * Applies the given ACL action with the provided entries to the path,
 * delegating unchanged to the wrapped file system.
 *
 * @param path    the path whose ACL is modified
 * @param action  the ACL action to perform
 * @param entries the ACL entries for the action
 * @param options options for the set-ACL RPC
 */
@Override
public void setAcl(AlluxioURI path, SetAclAction action, List<AclEntry> entries, SetAclPOptions options)
    throws FileDoesNotExistException, IOException, AlluxioException {
    mDelegatedFileSystem.setAcl(path, action, entries, options);
}
// The delegating file system must forward setAcl() to the wrapped file
// system with all arguments unchanged.
@Test
public void setAcl() throws Exception {
    FileSystem fileSystem = new DelegatingFileSystem(mMockFileSystem);
    AlluxioURI alluxioPath = new AlluxioURI("/t");
    List<AclEntry> entries = Arrays.asList(AclEntry.fromCliString("user:nameduser:rwx"));
    SetAclPOptions setAclPOptions = SetAclPOptions.newBuilder()
        .setCommonOptions(FileSystemMasterCommonPOptions.newBuilder().setTtl(5L).build())
        .setRecursive(true)
        .build();
    fileSystem.setAcl(alluxioPath, SetAclAction.MODIFY, entries, setAclPOptions);
    Mockito.verify(mMockFileSystem, atLeastOnce())
        .setAcl(eq(alluxioPath), eq(SetAclAction.MODIFY), eq(entries), eq(setAclPOptions));
}
/**
 * Appends the event to the tail of the accumulator queue.
 *
 * @param event the coordinator event to enqueue
 * @throws RejectedExecutionException if the accumulator rejects the event
 */
@Override
public void enqueueLast(CoordinatorEvent event) throws RejectedExecutionException {
    accumulator.addLast(event);
}
// End-to-end check of the event-processor metrics: with a single-threaded
// processor and a 500ms delaying accumulator, a blocking event and a second
// queued event must produce the expected queue-time, processing-time and
// idle-time recordings (all times driven by MockTime, so the test is
// deterministic).
@Test
public void testMetrics() throws Exception {
    CoordinatorRuntimeMetrics mockRuntimeMetrics = mock(CoordinatorRuntimeMetrics.class);
    Time mockTime = new MockTime();
    AtomicInteger numEventsExecuted = new AtomicInteger(0);
    // Special event which blocks until the latch is released.
    FutureEvent<Integer> blockingEvent = new FutureEvent<>(
        new TopicPartition("foo", 0),
        () -> {
            mockTime.sleep(4000L);
            return numEventsExecuted.incrementAndGet();
        },
        true,
        mockTime.milliseconds()
    );
    try (MultiThreadedEventProcessor eventProcessor = new MultiThreadedEventProcessor(
        new LogContext(),
        "event-processor-",
        1, // Use a single thread to block event in the processor.
        mockTime,
        mockRuntimeMetrics,
        new DelayEventAccumulator(mockTime, 500L)
    )) {
        // Enqueue the blocking event.
        eventProcessor.enqueueLast(blockingEvent);
        // Ensure that the blocking event is executed.
        waitForCondition(() -> numEventsExecuted.get() > 0, "Blocking event not executed.");
        // Enqueue the other event.
        FutureEvent<Integer> otherEvent = new FutureEvent<>(
            new TopicPartition("foo", 0),
            () -> {
                mockTime.sleep(5000L);
                return numEventsExecuted.incrementAndGet();
            },
            false,
            mockTime.milliseconds()
        );
        eventProcessor.enqueueLast(otherEvent);
        // Pass the time.
        mockTime.sleep(3000L);
        // Events should not be completed.
        assertFalse(otherEvent.future.isDone());
        // Release the blocking event to unblock the thread.
        blockingEvent.release();
        // The blocking event should be completed.
        blockingEvent.future.get(DEFAULT_MAX_WAIT_MS, TimeUnit.SECONDS);
        assertTrue(blockingEvent.future.isDone());
        assertFalse(blockingEvent.future.isCompletedExceptionally());
        // The other event should also be completed.
        otherEvent.future.get(DEFAULT_MAX_WAIT_MS, TimeUnit.SECONDS);
        assertTrue(otherEvent.future.isDone());
        assertFalse(otherEvent.future.isCompletedExceptionally());
        assertEquals(2, numEventsExecuted.get());
        // e1 poll time = 500
        // e1 processing time = 4000
        // e2 enqueue time = 3000
        // e2 poll time = 500
        // e2 processing time = 5000
        // e1 poll time
        verify(mockRuntimeMetrics, times(1)).recordEventQueueTime(500L);
        // e1 processing time + e2 enqueue time
        verify(mockRuntimeMetrics, times(1)).recordEventQueueProcessingTime(7000L);
        // Second event (e2)
        // e1, e2 poll time
        verify(mockRuntimeMetrics, times(2)).recordThreadIdleTime(500L);
        // event queue time = e2 enqueue time + e2 poll time
        verify(mockRuntimeMetrics, times(1)).recordEventQueueTime(3500L);
    }
}
@Override public final boolean offer(int ordinal, @Nonnull Object item) { if (ordinal == -1) { return offerInternal(allEdges, item); } else { if (ordinal == bucketCount()) { // ordinal beyond bucketCount will add to snapshot queue, which we don't allow through this method throw new IllegalArgumentException("Illegal edge ordinal: " + ordinal); } singleEdge[0] = ordinal; return offerInternal(singleEdge, item); } }
// After a failed offer to ordinal 0, re-offering the item via the all-edges
// overload (a different target) must fail — delegated to the shared helper.
@Test
public void when_offerFailsAndOfferedToDifferentOrdinal_then_fail_3() {
    do_when_offerToDifferentOrdinal_then_fail(e -> outbox.offer(0, e), e -> outbox.offer(e));
}
/**
 * Builds the CI configuration by probing Jenkins environment variables in
 * priority order: the GitHub PR builder's ghprbActualCommit first, then
 * GIT_COMMIT (with a PR-specific SHA override when CHANGE_ID is set), and
 * finally SVN_COMMIT as the last resort.
 */
@Override
public CiConfiguration loadConfiguration() {
    // https://wiki.jenkins-ci.org/display/JENKINS/GitHub+pull+request+builder+plugin#GitHubpullrequestbuilderplugin-EnvironmentVariables
    // https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project
    String revision = system.envVariable("ghprbActualCommit");
    if (StringUtils.isNotBlank(revision)) {
        return new CiConfigurationImpl(revision, getName());
    }
    revision = system.envVariable("GIT_COMMIT");
    if (StringUtils.isNotBlank(revision)) {
        // CHANGE_ID marks a pull-request build; prefer the PR head SHA read
        // from the local git clone when it can be determined.
        if (StringUtils.isNotBlank(system.envVariable("CHANGE_ID"))) {
            String jenkinsGitPrSha1 = getJenkinsGitPrSha1();
            if (StringUtils.isNotBlank(jenkinsGitPrSha1)) {
                return new CiConfigurationImpl(jenkinsGitPrSha1, getName());
            }
        }
        return new CiConfigurationImpl(revision, getName());
    }
    // Fallback for Subversion builds (may be blank).
    revision = system.envVariable("SVN_COMMIT");
    return new CiConfigurationImpl(revision, getName());
}
// With the branch plugin's CHANGE_ID set, the revision must be taken from
// the local git clone's PR head SHA, not from the GIT_COMMIT env variable.
@Test
public void loadConfiguration_of_git_repo_with_branch_plugin() throws IOException {
    // prepare fake git clone
    Path baseDir = temp.newFolder().toPath();
    File unzip = ZipUtils.unzip(this.getClass().getResourceAsStream("gitrepo.zip"), baseDir.toFile());
    when(project.getBaseDir()).thenReturn(unzip.toPath().resolve("gitrepo"));
    setEnvVariable("CHANGE_ID", "3");
    setEnvVariable("GIT_BRANCH", "PR-3");
    // this will be ignored
    setEnvVariable("GIT_COMMIT", "abd12fc");
    assertThat(underTest.loadConfiguration().getScmRevision()).hasValue("e6013986eff4f0ce0a85f5d070070e7fdabead48");
}
/**
 * Filters the given logic table names down to those configured as sharding
 * tables, preserving the input order.
 *
 * @param logicTableNames candidate logic table names
 * @return the subset that are sharding tables; never null
 */
public Collection<String> getShardingLogicTableNames(final Collection<String> logicTableNames) {
    Collection<String> shardingTables = new LinkedList<>();
    for (String tableName : logicTableNames) {
        // Skip non-sharding tables (e.g. broadcast tables).
        if (!isShardingTable(tableName)) {
            continue;
        }
        shardingTables.add(tableName);
    }
    return shardingTables;
}
// Only the sharding table must survive the filter; the broadcast table is
// dropped.
@Test
void assertGetShardingLogicTableNames() {
    ShardingRule actual = createMaximumShardingRule();
    assertThat(actual.getShardingLogicTableNames(Arrays.asList("LOGIC_TABLE", "BROADCAST_TABLE")), is(Collections.singletonList("LOGIC_TABLE")));
}
/**
 * Converts serialized JSON bytes from the given topic into Connect data.
 * A null payload (tombstone) maps to {@code SchemaAndValue.NULL}. With
 * schemas enabled the bytes must be an envelope of exactly a "schema" and a
 * "payload" field; with schemas disabled the raw JSON is wrapped in a
 * synthetic envelope with a null schema before conversion.
 *
 * @param topic the topic the bytes were read from
 * @param value the serialized JSON, or null for a tombstone
 * @return the converted schema and value
 * @throws DataException on deserialization failure or envelope mismatch
 */
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
    JsonNode jsonValue;
    // This handles a tombstone message
    if (value == null) {
        return SchemaAndValue.NULL;
    }
    try {
        jsonValue = deserializer.deserialize(topic, value);
    } catch (SerializationException e) {
        throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
    }
    // With schemas enabled, enforce the strict two-field envelope shape.
    if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
        throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
            " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
    // The deserialized data should either be an envelope object containing the schema and the payload or the schema
    // was stripped during serialization and we need to fill in an all-encompassing schema.
    if (!config.schemasEnabled()) {
        ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
        envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
        envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
        jsonValue = envelope;
    }
    Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    return new SchemaAndValue(
        schema,
        convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
    );
}
// Envelope payloads with a null schema must convert to schemaless Connect
// values: null, booleans, widest integer/float types, strings, lists and
// maps.
@Test
public void nullSchemaPrimitiveToConnect() {
    SchemaAndValue converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": null }".getBytes());
    assertEquals(SchemaAndValue.NULL, converted);
    converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": true }".getBytes());
    assertEquals(new SchemaAndValue(null, true), converted);
    // Integers: Connect has more data types, and JSON unfortunately mixes all number types. We try to preserve
    // info as best we can, so we always use the largest integer and floating point numbers we can and have Jackson
    // determine if it's an integer or not
    converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": 12 }".getBytes());
    assertEquals(new SchemaAndValue(null, 12L), converted);
    converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": 12.24 }".getBytes());
    assertEquals(new SchemaAndValue(null, 12.24), converted);
    converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": \"a string\" }".getBytes());
    assertEquals(new SchemaAndValue(null, "a string"), converted);
    converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": [1, \"2\", 3] }".getBytes());
    assertEquals(new SchemaAndValue(null, Arrays.asList(1L, "2", 3L)), converted);
    converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": { \"field1\": 1, \"field2\": 2} }".getBytes());
    Map<String, Long> obj = new HashMap<>();
    obj.put("field1", 1L);
    obj.put("field2", 2L);
    assertEquals(new SchemaAndValue(null, obj), converted);
}
/**
 * Static factory: wraps the given Retry in a transformer that applies it to
 * a reactive stream.
 *
 * @param retry the retry policy to apply
 * @param <T>   the stream element type
 * @return a new transformer backed by {@code retry}
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    return new RetryTransformer<>(retry);
}
// When every attempt produces the retryable result, the Single retries up
// to maxAttempts (3 calls total) and then completes with the last result
// rather than erroring.
@Test
public void retryOnResultFailAfterMaxAttemptsUsingSingle() throws InterruptedException {
    RetryConfig config = RetryConfig.<String>custom()
        .retryOnResult("retry"::equals)
        .waitDuration(Duration.ofMillis(50))
        .maxAttempts(3).build();
    Retry retry = Retry.of("testName", config);
    given(helloWorldService.returnHelloWorld())
        .willReturn("retry");
    Single.fromCallable(helloWorldService::returnHelloWorld)
        .compose(RetryTransformer.of(retry))
        .test()
        .await()
        .assertValue("retry")
        .assertComplete()
        .assertSubscribed();
    // Exactly maxAttempts invocations of the underlying service.
    then(helloWorldService).should(times(3)).returnHelloWorld();
}
/**
 * Writes a signed int as a variable-length encoded value: the minimal
 * little-endian byte sequence from which the value can be recovered by
 * sign extension, preceded by a header combining the value type with the
 * byte count minus one (presumably the dex encoded-value size convention —
 * confirm against writeEncodedValueHeader).
 *
 * @param valueType the encoded value type for the header
 * @param value     the signed value to encode
 * @throws IOException if the underlying write fails
 */
public void writeEncodedInt(int valueType, int value) throws IOException {
    int index = 0;
    if (value >= 0) {
        // Emit low-order bytes while the remainder needs more than 7 bits:
        // stopping at <= 0x7f keeps the top bit clear so sign extension
        // reproduces a non-negative value.
        while (value > 0x7f) {
            tempBuf[index++] = (byte)value;
            value >>= 8;
        }
    } else {
        // Negative mirror image: stop once the remainder fits in a byte
        // whose top bit is set, so sign extension reproduces the negative.
        while (value < -0x80) {
            tempBuf[index++] = (byte)value;
            value >>= 8;
        }
    }
    tempBuf[index++] = (byte)value;
    writeEncodedValueHeader(valueType, index-1);
    write(tempBuf, 0, index);
}
// Exercises the variable-length signed-int encoding across the interesting
// boundaries: each helper call pairs an input value with its expected
// little-endian byte sequence (1-4 bytes, minimal under sign extension).
@Test
public void testWriteEncodedInt() throws IOException {
    testWriteEncodedIntHelper(0x00, 0x00);
    testWriteEncodedIntHelper(0x40, 0x40);
    testWriteEncodedIntHelper(0x7f, 0x7f);
    // 0xff needs a second 0x00 byte so it doesn't sign-extend negative.
    testWriteEncodedIntHelper(0xff, 0xff, 0x00);
    testWriteEncodedIntHelper(0xffff80, 0x80, 0xff, 0xff, 0x00);
    // Negative values sign-extend, so -128 fits in a single byte.
    testWriteEncodedIntHelper(0xffffff80, 0x80);
    testWriteEncodedIntHelper(0xffffffff, 0xff);
    testWriteEncodedIntHelper(0x100, 0x00, 0x01);
    testWriteEncodedIntHelper(0x7fff, 0xff, 0x7f);
    testWriteEncodedIntHelper(0x8000, 0x00, 0x80, 0x00);
    testWriteEncodedIntHelper(0xffff8000, 0x00, 0x80);
    testWriteEncodedIntHelper(0x10000, 0x00, 0x00, 0x01);
    testWriteEncodedIntHelper(0x10203, 0x03, 0x02, 0x01);
    testWriteEncodedIntHelper(0x810203, 0x03, 0x02, 0x81, 0x00);
    testWriteEncodedIntHelper(0xff810203, 0x03, 0x02, 0x81);
    testWriteEncodedIntHelper(0x1000000, 0x00, 0x00, 0x00, 0x01);
    testWriteEncodedIntHelper(0x1020304, 0x04, 0x03, 0x02, 0x01);
    testWriteEncodedIntHelper(0x7fffffff, 0xff, 0xff, 0xff, 0x7f);
    testWriteEncodedIntHelper(0x80000000, 0x00, 0x00, 0x00, 0x80);
    testWriteEncodedIntHelper(0x80000001, 0x01, 0x00, 0x00, 0x80);
}
/**
 * Removes the entry for the given composite key, delegating to the parent
 * implementation.
 *
 * @param key1 first key component; must not equal the unassigned sentinel
 *             (asserted, since that value marks empty slots internally)
 * @param key2 second key component
 * @return whether an entry was removed
 */
@Override
public boolean remove(long key1, long key2) {
    assert key1 != unassignedSentinel : "remove() called with key1 == nullKey1 (" + unassignedSentinel + ')';
    return super.remove0(key1, key2);
}
// Calling remove() on a disposed structure must trip an internal assertion
// (the test requires -ea, hence @RequireAssertEnabled).
@Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testRemove_whenDisposed() {
    hsa.dispose();
    hsa.remove(1, 1);
}