focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Converts a generic key/value map into a protobuf {@link Message} of the same
 * concrete type as {@code defaultInstance}.
 *
 * @param inputData       map of field names to values; must not be null
 * @param defaultInstance prototype message whose type determines the output; must not be null
 * @return a new message populated from {@code inputData}
 */
public static Message toProto(final Map<?, ?> inputData, final Message defaultInstance) {
    // Fail fast on null arguments before touching the descriptor.
    ObjectHelper.notNull(inputData, "inputData");
    ObjectHelper.notNull(defaultInstance, "defaultInstance");
    // Build a fresh builder of the prototype's type and fill it from the map.
    final Builder messageBuilder = defaultInstance.newBuilderForType();
    final Descriptor messageDescriptor = defaultInstance.getDescriptorForType();
    return convertMapToMessage(messageDescriptor, messageBuilder, inputData);
}
// Verifies ProtobufConverter.toProto maps scalars/strings, a nested message (address),
// repeated messages (phone; enum supplied both as name "MOBILE" and as ordinal 2 -> WORK)
// and a repeated string field (nicknames) into an AddressBookProtos.Person.
@Test public void testIfCorrectlyParseMap() { final Map<String, Object> phoneNumber = new HashMap<>(); phoneNumber.put("number", "011122233"); phoneNumber.put("type", "MOBILE"); final Map<String, Object> phoneNumber2 = new HashMap<>(); phoneNumber2.put("number", "5542454"); phoneNumber2.put("type", 2); final Map<String, Object> address = new HashMap<>(); address.put("street", "awesome street"); address.put("street_number", 12); address.put("is_valid", false); final Map<String, Object> input = new HashMap<>(); input.put("name", "Martin"); input.put("id", 1234); input.put("phone", Arrays.asList(phoneNumber, phoneNumber2)); input.put("email", "some@some.com"); input.put("nicknames", Arrays.asList("awesome1", "awesome2")); input.put("address", address); final AddressBookProtos.Person message = (AddressBookProtos.Person) ProtobufConverter.toProto(input, AddressBookProtos.Person.getDefaultInstance()); // assert primitives types and strings assertEquals("Martin", message.getName()); assertEquals(1234, message.getId()); assertEquals("some@some.com", message.getEmail()); // assert nested message assertEquals("awesome street", message.getAddress().getStreet()); assertEquals(12, message.getAddress().getStreetNumber()); assertFalse(message.getAddress().getIsValid()); // assert repeated messages assertEquals("011122233", message.getPhone(0).getNumber()); assertEquals("MOBILE", message.getPhone(0).getType().name()); assertEquals("5542454", message.getPhone(1).getNumber()); assertEquals("WORK", message.getPhone(1).getType().name()); assertEquals("awesome1", message.getNicknames(0)); assertEquals("awesome2", message.getNicknames(1)); }
// Resolves a CatalogTable for the given SQL query against the default connection URL,
// mapping column types via MySqlTypeMapper.
// NOTE(review): the Connection obtained here is never closed in this method — confirm
// whether getConnection caches/pools connections or this is a resource leak.
@Override public CatalogTable getTable(String sqlQuery) throws SQLException { Connection defaultConnection = getConnection(defaultUrl); return CatalogUtils.getCatalogTable( defaultConnection, sqlQuery, new MySqlTypeMapper(typeConverter)); }
// Ordered integration test (@Order(1)): loads catalog tables from Postgres, MySQL and
// SQL Server into fields, presumably consumed by later ordered tests; no assertions here.
@Test @Order(1) void getTable() { postgresCatalogTable = postgresCatalog.getTable( TablePath.of("liulitest", "public", "pg_types_table_no_array")); mySqlCatalogTable = mySqlCatalog.getTable(TablePath.of("liuliTest", "AllTypeCol")); sqlServerCatalogTable = sqlServerCatalog.getTable(TablePath.of("TestDB", "dbo", "AllDataTest")); }
// Builds a batch source that reads entries of the named cache from a remote Hazelcast
// cluster identified by clientConfig; delegates to readRemoteCacheP for the processor.
@Nonnull public static <K, V> BatchSource<Entry<K, V>> remoteCache( @Nonnull String cacheName, @Nonnull ClientConfig clientConfig ) { return batchFromProcessor("remoteCacheSource(" + cacheName + ')', ProcessorMetaSupplier.of(readRemoteCacheP(cacheName, clientConfig))); }
// Round-trip: populate the remote cache, read it back via Sources.remoteCache, and compare
// as bags (order-insensitive). Expected keys are String.valueOf(i) — presumably how
// putToCache keys the entries; confirm against that helper.
@Test public void remoteCache() { // Given List<Integer> input = sequence(itemCount); putToCache(remoteHz.getCacheManager().getCache(srcName), input); // When BatchSource<Entry<Object, Object>> source = Sources.remoteCache(srcName, clientConfig); // Then p.readFrom(source).writeTo(sink); execute(); List<Entry<String, Integer>> expected = input.stream() .map(i -> entry(String.valueOf(i), i)) .collect(toList()); assertEquals(toBag(expected), sinkToBag()); }
// Returns the distribution mean, precomputed and stored in the `mean` field.
@Override public double mean() { return mean; }
// Weibull(shape k=1.5, scale λ=1.0): mean = λ·Γ(1 + 1/k), asserted within 1E-7.
// NOTE(review): the instance.rand() result is discarded — presumably only exercising
// the sampler; remove or assert on it if unintended.
@Test public void testMean() { System.out.println("mean"); WeibullDistribution instance = new WeibullDistribution(1.5, 1.0); instance.rand(); assertEquals(Gamma.gamma(1+1/1.5), instance.mean(), 1E-7); }
/**
 * Reads up to {@code len} characters from the wrapped reader into {@code cbuf}
 * and runs the non-XML character filterer over exactly the characters read.
 *
 * @return the number of characters read, or -1 at end of stream
 * @throws IOException if the underlying reader fails
 */
@Override
public int read(char[] cbuf, int off, int len) throws IOException {
    final int charsRead = in.read(cbuf, off, len);
    if (charsRead <= 0) {
        // End of stream (-1) or zero chars read: nothing to filter.
        return charsRead;
    }
    // Filter only the region actually filled by this read.
    nonXmlCharFilterer.filter(cbuf, off, charsRead);
    return charsRead;
}
// Stubs the wrapped reader to supply 3 chars ('a','b','c') at offset 3, then verifies
// read() returns 3, the filterer is invoked over exactly those 3 chars, and the rest
// of the buffer stays zeroed.
@Test public void testRead() throws IOException { char[] buffer = new char[10]; when(readerMock.read(same(buffer), eq(3), eq(5))).thenAnswer(new Answer<Integer>() { public Integer answer(InvocationOnMock invocation) throws Throwable { try (ConstantReader reader = new ConstantReader(new char[] { 'a', 'b', 'c' })) { Object[] args = invocation.getArguments(); return reader.read((char[]) args[0], (Integer) args[1], (Integer) args[2]); } } }); int result = nonXmlFilterReader.read(buffer, 3, 5); verify(readerMock).read(same(buffer), eq(3), eq(5)); verify(nonXmlCharFiltererMock).filter(same(buffer), eq(3), eq(3)); assertEquals(3, result, "Unexpected number of chars read"); assertArrayEquals(new char[] { 0, 0, 0, 'a', 'b', 'c', 0, 0, 0, 0 }, buffer, "Wrong buffer contents"); }
/**
 * Resolves the transaction-isolation variable value for the given scope.
 * Global scope yields the variable's default; session scope yields the session's
 * current isolation level, falling back to REPEATABLE_READ when none is set.
 */
@Override
public String get(final Scope scope, final ConnectionSession connectionSession, final MySQLSystemVariable variable) {
    if (Scope.GLOBAL == scope) {
        return variable.getDefaultValue();
    }
    // Session scope: use the session's isolation level, defaulting to REPEATABLE_READ.
    return connectionSession.getIsolationLevel().orElse(TransactionIsolationLevel.REPEATABLE_READ).getIsolationLevel();
}
// Session scope: both TRANSACTION_ISOLATION and TX_ISOLATION default to "REPEATABLE-READ"
// when the session has no isolation level, and reflect "READ-COMMITTED" once set.
@Test void assertGetSessionValue() { ConnectionSession connectionSession = new ConnectionSession(TypedSPILoader.getService(DatabaseType.class, "MySQL"), new DefaultAttributeMap()); assertThat(new TransactionIsolationValueProvider().get(Scope.SESSION, connectionSession, MySQLSystemVariable.TRANSACTION_ISOLATION), is("REPEATABLE-READ")); assertThat(new TransactionIsolationValueProvider().get(Scope.SESSION, connectionSession, MySQLSystemVariable.TX_ISOLATION), is("REPEATABLE-READ")); connectionSession.setIsolationLevel(TransactionIsolationLevel.READ_COMMITTED); assertThat(new TransactionIsolationValueProvider().get(Scope.SESSION, connectionSession, MySQLSystemVariable.TRANSACTION_ISOLATION), is("READ-COMMITTED")); assertThat(new TransactionIsolationValueProvider().get(Scope.SESSION, connectionSession, MySQLSystemVariable.TX_ISOLATION), is("READ-COMMITTED")); }
// Decides whether an upload is denied, by precedence: deny-suffix hit -> true;
// allow-suffix hit -> false; deny-mediatype hit -> true; allow-mediatype hit -> false.
// When an allow list exists but does not match, the running default flips to deny (true);
// a non-matching deny list resets it to allow (false). The final default is returned.
// The file suffix is the text after the last '.', lower-cased with Locale.ROOT.
// NOTE(review): mediaType.toString() will NPE if mediaType is null and a media-type
// list is configured — confirm callers always pass a non-null MediaType.
public boolean denied(String name, MediaType mediaType) { String suffix = (name.contains(".") ? name.substring(name.lastIndexOf(".") + 1) : "").toLowerCase(Locale.ROOT); boolean defaultDeny = false; if (CollectionUtils.isNotEmpty(denyFiles)) { if (denyFiles.contains(suffix)) { return true; } defaultDeny = false; } if (CollectionUtils.isNotEmpty(allowFiles)) { if (allowFiles.contains(suffix)) { return false; } defaultDeny = true; } if (CollectionUtils.isNotEmpty(denyMediaType)) { if (denyMediaType.contains(mediaType.toString())) { return true; } defaultDeny = false; } if (CollectionUtils.isNotEmpty(allowMediaType)) { if (allowMediaType.contains(mediaType.toString())) { return false; } defaultDeny = true; } return defaultDeny; }
// With only a deny-media-type list configured: a non-matching media type falls through
// to the default (allow), while application/json is denied.
@Test public void testDenyWithDenyMediaType(){ FileUploadProperties uploadProperties=new FileUploadProperties(); uploadProperties.setDenyMediaType(new HashSet<>(Arrays.asList("application/json"))); assertFalse(uploadProperties.denied("test.xls", MediaType.ALL)); assertTrue(uploadProperties.denied("test.exe", MediaType.APPLICATION_JSON)); }
// Asserts that the sampler response body is well-formed XML. Empty body -> null result;
// SAX/IO failures during parse mark the assertion as error+failure with the exception
// message; a missing thread-local XMLReader is reported as an error with a hint to the log.
@Override public AssertionResult getResult(SampleResult response) { // no error as default AssertionResult result = new AssertionResult(getName()); String resultData = response.getResponseDataAsString(); if (resultData.length() == 0) { return result.setResultForNull(); } result.setFailure(false); XMLReader builder = XML_READER.get(); if(builder != null) { try { builder.setErrorHandler(new LogErrorHandler()); builder.parse(new InputSource(new StringReader(resultData))); } catch (SAXException | IOException e) { result.setError(true); result.setFailure(true); result.setFailureMessage(e.getMessage()); } } else { result.setError(true); result.setFailureMessage("Cannot initialize XMLReader in element:"+getName()+", check jmeter.log file"); } return result; }
// Malformed XML response must yield failure + error with a non-null failure message.
@Test public void testInvalidXML() throws Exception { sampleResult.setResponseData(INVALID_XML, null); result = assertion.getResult(sampleResult); assertTrue(result.isFailure()); assertTrue(result.isError()); assertNotNull(result.getFailureMessage()); }
@Override public Map<String, Set<Integer>> assignBrokerSetsForUnresolvedBrokers(ClusterModel clusterModel, Map<String, Set<Integer>> existingBrokerSetMapping) throws BrokerSetResolutionException { // Sanity check to check if all brokers in data store do not match all brokers in cluster model Set<Broker> allMappedBrokers = existingBrokerSetMapping.values() .stream() .flatMap(Collection::stream) .map(clusterModel::broker) .filter(Objects::nonNull) .collect(Collectors.toSet()); Set<Broker> extraBrokersInClusterModel = new HashSet<>(clusterModel.brokers()); extraBrokersInClusterModel.removeAll(allMappedBrokers); boolean extraClusterModelBrokersHaveReplicas = extraBrokersInClusterModel.stream().anyMatch(broker -> !broker.replicas().isEmpty()); // The broker list in data store may not be atomically updated when brokers are added to the cluster // In this case we can ignore the brokers if they have no replicas placed if (!allMappedBrokers.equals(clusterModel.brokers()) && extraClusterModelBrokersHaveReplicas) { throw new BrokerSetResolutionException( String.format("All Brokers from data store %s do not match brokers in cluster model %s.", allMappedBrokers, clusterModel.brokers())); } Set<Integer> unmappedEmptyBrokerIds = extraBrokersInClusterModel.stream() .filter(broker -> broker.replicas().isEmpty()) .map(Broker::id) .collect(Collectors.toSet()); if (!unmappedEmptyBrokerIds.isEmpty()) { existingBrokerSetMapping.computeIfAbsent(UNMAPPED_BROKER_SET_ID, k -> new HashSet<>(unmappedEmptyBrokerIds)) .addAll(unmappedEmptyBrokerIds); } return existingBrokerSetMapping; }
// All brokers already mapped -> the no-op policy returns the mapping unchanged.
// NOTE(review): JUnit's assertEquals signature is (expected, actual) — arguments here look
// swapped. Also passes immutable Map.of maps, so this relies on the no-op policy never
// mutating its inputs.
@Test public void testBrokerSetAssignment() { Map<Integer, String> brokers = Map.of(0, "", 1, "", 2, "", 3, "", 4, "", 5, ""); Map<String, Set<Integer>> mappedBrokers = Map.of("bs1", Set.of(0, 1, 2, 3, 4), "bs2", Set.of(5)); Map<String, Set<Integer>> mappedBrokersAfterAssignment = NO_OP_BROKER_SET_ASSIGNMENT_POLICY.assignBrokerSetsForUnresolvedBrokers(brokers, mappedBrokers); assertNotNull(mappedBrokersAfterAssignment); assertEquals(mappedBrokersAfterAssignment, mappedBrokers); }
// Returns the last file that can hold waitToWroteSize more bytes, optionally allocating a
// new one. Under the read lock it inspects the current last file; if that file cannot fit
// the write it is sealed by padding to its end. Allocation happens under the write lock,
// re-checking the file count to detect a concurrent allocation (then retries the loop).
// NOTE(review): if takeEmptyFile throws, the error is logged and the method falls through
// to return null (when createIfNecessary) — confirm callers handle a null return.
public AbstractFile getLastFile(final long logIndex, final int waitToWroteSize, final boolean createIfNecessary) { AbstractFile lastFile = null; while (true) { int fileCount = 0; this.readLock.lock(); try { if (!this.files.isEmpty()) { fileCount = this.files.size(); final AbstractFile file = this.files.get(fileCount - 1); if (waitToWroteSize <= 0 || !file.reachesFileEndBy(waitToWroteSize)) { lastFile = file; } else if (file.reachesFileEndBy(waitToWroteSize)) { // Reach file end , need to fill blank bytes in file end file.fillEmptyBytesInFileEnd(); } } } finally { this.readLock.unlock(); } // Try to get a new file if (lastFile == null && createIfNecessary) { this.writeLock.lock(); try { if (this.files.size() != fileCount) { // That means already create a new file , just continue and try again continue; } lastFile = this.allocateService.takeEmptyFile(); if (lastFile != null) { final long newFileOffset = (long) this.files.size() * (long) this.fileSize; lastFile.setFileFromOffset(newFileOffset); this.files.add(lastFile); this.swapOutFilesIfNecessary(); return lastFile; } else { continue; } } catch (final Exception e) { LOG.error("Error on create new abstract file , current logIndex:{}", logIndex); } finally { this.writeLock.unlock(); } } return lastFile; } }
// Appends 10 index entries so the first IndexFile reaches its end; each getLastFile call
// must return an IndexFile (uses bare `assert`, so requires -ea to actually check).
@Test public void writeDataToFirstFile() { // Append 10 index to first file , and come to the file end (size:130) { for (int i = 0; i < 10; i++) { final AbstractFile lastFile = this.fileManager.getLastFile(i, 10, true); assert (lastFile instanceof IndexFile); final IndexFile indexFile = (IndexFile) lastFile; indexFile.appendIndex(i, i, segmentIndex); } } }
/**
 * Formats the event through the converter chain; an un-started layout must not
 * touch its converters, so it yields the empty string instead.
 */
public String doLayout(ILoggingEvent event) {
    return isStarted() ? writeLoopOnConverters(event) : CoreConstants.EMPTY_STRING;
}
// %prefix must prepend "key=" labels to level/logger/MDC converters while leaving the
// trailing %message untouched; MDC entry is cleaned up in finally to avoid test leakage.
@Test public void prefixConverterWithMDC() { String mdcKey = "boo"; String mdcVal = "moo"; String pattern = "%prefix(%level %logger %X{" + mdcKey + "}) %message"; pl.setPattern(pattern); pl.start(); logbackMDCAdapter.put(mdcKey, mdcVal); try { String val = pl.doLayout(makeLoggingEvent("hello", null)); assertEquals("level=" + "INFO logger=" + logger.getName() + " " + mdcKey + "=" + mdcVal + " hello", val); } finally { MDC.remove(mdcKey); } }
// Computes the on-the-wire size of a consumer record from its serialized key/value sizes,
// topic name and headers, delegating to recordSizeInBytes.
public static long consumerRecordSizeInBytes(final ConsumerRecord<byte[], byte[]> record) { return recordSizeInBytes( record.serializedKeySize(), record.serializedValueSize(), record.topic(), record.headers() ); }
// Builds a record with known key/value/header fixtures and checks the computed size
// against the precalculated SIZE_IN_BYTES constant.
@Test public void shouldComputeSizeInBytesForConsumerRecord() { final ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>( TOPIC, 1, 0L, 0L, TimestampType.CREATE_TIME, KEY_BYTES, VALUE_BYTES, KEY, VALUE, HEADERS, Optional.empty() ); assertThat(consumerRecordSizeInBytes(record), equalTo(SIZE_IN_BYTES)); }
// Delegates to the wrapped source's getMethod().
@Override public String method() { return source.getMethod(); }
// Pure delegation check: method() must forward to source.getMethod().
@Test public void call_method() { underTest.method(); verify(source).getMethod(); }
// Consumes and formats messages until maxMessages is reached (-1 means unlimited).
// WakeupException signals an orderly shutdown; any other receive error terminates the loop.
// Formatting/ack errors either REJECT the message (rejectMessageOnError) or propagate.
// NOTE(review): messageCount is presumably a static counter — it carries state across
// calls, so repeated invocations in one JVM resume from the previous count; confirm intent.
static void process(int maxMessages, MessageFormatter formatter, ConsumerWrapper consumer, PrintStream output, boolean rejectMessageOnError, AcknowledgeType acknowledgeType) { while (messageCount < maxMessages || maxMessages == -1) { ConsumerRecord<byte[], byte[]> msg; try { msg = consumer.receive(); } catch (WakeupException we) { LOG.trace("Caught WakeupException because consumer is shutdown, ignore and terminate."); // Consumer will be closed return; } catch (Throwable t) { LOG.error("Error processing message, terminating consumer process: ", t); // Consumer will be closed return; } messageCount += 1; try { formatter.writeTo(new ConsumerRecord<>(msg.topic(), msg.partition(), msg.offset(), msg.timestamp(), msg.timestampType(), 0, 0, msg.key(), msg.value(), msg.headers(), Optional.empty()), output); consumer.acknowledge(msg, acknowledgeType); } catch (Throwable t) { if (rejectMessageOnError) { LOG.error("Error processing message, rejecting this message: ", t); consumer.acknowledge(msg, AcknowledgeType.REJECT); } else { // Consumer will be closed throw t; } } if (checkErr(output)) { // Consumer will be closed return; } } }
// The processing loop must stop after exactly messageLimit receive/format cycles when the
// consumer always yields a record.
@Test public void shouldLimitReadsToMaxMessageLimit() { ConsoleShareConsumer.ConsumerWrapper consumer = mock(ConsoleShareConsumer.ConsumerWrapper.class); MessageFormatter formatter = mock(MessageFormatter.class); ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>("foo", 1, 1, new byte[0], new byte[0]); int messageLimit = 10; when(consumer.receive()).thenReturn(record); ConsoleShareConsumer.process(messageLimit, formatter, consumer, System.out, true, AcknowledgeType.ACCEPT); verify(consumer, times(messageLimit)).receive(); verify(formatter, times(messageLimit)).writeTo(any(), any()); consumer.cleanup(); }
// Creates a DAVClient for this host using the proxy-aware HTTP client configuration.
// The URL deliberately omits the username (withUsername(false)).
// NOTE(review): the key and cancel parameters are unused here — presumably only relevant
// for SSH-based protocols in sibling implementations.
@Override protected DAVClient connect(final ProxyFinder proxy, final HostKeyCallback key, final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException { final HttpClientBuilder configuration = this.getConfiguration(proxy, prompt); return new DAVClient(new HostUrlProvider().withUsername(false).get(host), configuration); }
// (@Ignore'd) Connecting to an unsecured WebDAV host must surface the "Unsecured WebDAV
// (HTTP) connection" warning with the host-specific suppression preference key.
@Test @Ignore public void testRedirectHttpsAlert() throws Exception { final Host host = new Host(new DAVProtocol(), "svn.cyberduck.io"); final AtomicBoolean warning = new AtomicBoolean(); final DAVSession session = new DAVSession(host, new DefaultX509TrustManager(), new KeychainX509KeyManager(new DisabledCertificateIdentityCallback(), host, new DisabledCertificateStore())) { }; final LoginConnectionService c = new LoginConnectionService( new DisabledLoginCallback() { @Override public void warn(final Host bookmark, final String title, final String message, final String continueButton, final String disconnectButton, final String preference) { assertEquals("Unsecured WebDAV (HTTP) connection", title); assertEquals("connection.unsecure.svn.cyberduck.io", preference); warning.set(true); } }, new DisabledHostKeyCallback(), new DisabledPasswordStore(), new DisabledProgressListener() ); c.connect(session, new DisabledCancelCallback()); assertTrue(warning.get()); session.close(); }
/**
 * Concatenates two lists, preserving order and duplicates. A null input is treated
 * as empty; only when BOTH inputs are null is the result null.
 *
 * @param left  first array of values (may be null)
 * @param right second array of values (may be null)
 * @return left followed by right, or null when both are null
 */
@Udf
public <T> List<T> concat(
    @UdfParameter(description = "First array of values") final List<T> left,
    @UdfParameter(description = "Second array of values") final List<T> right) {
  if (left == null && right == null) {
    return null;
  }
  // Presize to the combined length to avoid growth reallocations.
  final int combinedSize = (left == null ? 0 : left.size()) + (right == null ? 0 : right.size());
  final List<T> joined = new ArrayList<>(combinedSize);
  if (left != null) {
    joined.addAll(left);
  }
  if (right != null) {
    joined.addAll(right);
  }
  return joined;
}
// concat must preserve order and keep duplicates both within and across the inputs.
@Test public void shouldReturnDuplicateValues() { final List<String> input1 = Arrays.asList("foo", "foo", "bar"); final List<String> input2 = Arrays.asList("baz", "foo"); final List<String> result = udf.concat(input1, input2); assertThat(result, is(Arrays.asList("foo", "foo", "bar", "baz", "foo"))); }
// Maps the dialect id to its 32-bit-integer-capable SQL column type; Oracle has no INTEGER
// so NUMBER(38,0) is used. Unknown dialects fail fast with IllegalArgumentException.
@Override public String generateSqlType(Dialect dialect) { return switch (dialect.getId()) { case PostgreSql.ID, H2.ID -> "INTEGER"; case MsSql.ID -> "INT"; case Oracle.ID -> "NUMBER(38,0)"; default -> throw new IllegalArgumentException("Unsupported dialect id " + dialect.getId()); }; }
// An unrecognized dialect id must raise IllegalArgumentException with the exact message.
// (Test-method name contains the typo "thows"; renaming would be safe but is left as-is.)
@Test public void generateSqlType_thows_IAE_for_unknown_dialect() { Dialect dialect = mock(Dialect.class); when(dialect.getId()).thenReturn("AAA"); assertThatThrownBy(() -> underTest.generateSqlType(dialect)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Unsupported dialect id AAA"); }
/**
 * Returns a shuffled copy of {@code list}; the input list is never modified.
 * Lists of size 0 or 1 have a single permutation, so the shuffle is skipped.
 *
 * <p>Generalized: the previous {@code T extends EurekaEndpoint} bound was unnecessary —
 * nothing here touches endpoint-specific API — so the method now accepts any element
 * type. Existing callers are unaffected (widening a type bound is source-compatible).
 *
 * @param list source list; not modified
 * @return a new list containing the same elements in randomized order
 */
public static <T> List<T> randomize(List<T> list) {
    List<T> randomList = new ArrayList<>(list);
    if (randomList.size() < 2) {
        return randomList;
    }
    // ThreadLocalRandom avoids contention on a shared Random under concurrency.
    Collections.shuffle(randomList, ThreadLocalRandom.current());
    return randomList;
}
// Statistical test: shuffling a 100-element pool should almost never return the original
// order; retried up to 100 times to avoid a flaky failure on the rare identity permutation.
@Test public void testRandomizeProperlyRandomizesList() throws Exception { boolean success = false; for (int i = 0; i < 100; i++) { List<AwsEndpoint> firstList = SampleCluster.UsEast1a.builder().withServerPool(100).build(); List<AwsEndpoint> secondList = ResolverUtils.randomize(firstList); try { assertThat(firstList, is(not(equalTo(secondList)))); success = true; break; }catch(AssertionError e) { } } if(!success) { throw new AssertionError("ResolverUtils::randomize returned the same list 100 times, this is more than likely a bug."); } }
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks, final Map<TaskId, Set<TopicPartition>> standbyTasks) { log.info("Handle new assignment with:\n" + "\tNew active tasks: {}\n" + "\tNew standby tasks: {}\n" + "\tExisting active tasks: {}\n" + "\tExisting standby tasks: {}", activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds()); topologyMetadata.addSubscribedTopicsFromAssignment( activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()), logPrefix ); final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks); final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks); final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>(); final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id)); final Set<TaskId> tasksToLock = tasks.allTaskIds().stream() .filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x)) .collect(Collectors.toSet()); maybeLockTasks(tasksToLock); // first put aside those unrecognized tasks because of unknown named-topologies tasks.clearPendingTasksToCreate(); tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate)); tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate)); // first rectify all existing tasks: // 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them // 2. for tasks that have changed active/standby status, just recycle and skip re-creating them // 3. 
otherwise, close them since they are no longer owned final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>(); if (stateUpdater == null) { handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean); } else { handleTasksWithStateUpdater( activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean, failedTasks ); failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater()); } final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean); maybeUnlockTasks(tasksToLock); failedTasks.putAll(taskCloseExceptions); maybeThrowTaskExceptions(failedTasks); createNewTasks(activeTasksToCreate, standbyTasksToCreate); }
// A standby task living in the state updater must NOT be removed for an input-partition
// change; the assignment proceeds with no task creation for it.
@Test public void shouldNeverUpdateInputPartitionsOfStandbyTaskInStateUpdater() { final StandbyTask standbyTaskToUpdateInputPartitions = standbyTask(taskId02, taskId02ChangelogPartitions) .inState(State.RUNNING) .withInputPartitions(taskId02Partitions).build(); final TasksRegistry tasks = mock(TasksRegistry.class); final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true); when(stateUpdater.getTasks()).thenReturn(mkSet(standbyTaskToUpdateInputPartitions)); taskManager.handleAssignment( Collections.emptyMap(), mkMap(mkEntry(standbyTaskToUpdateInputPartitions.id(), taskId03Partitions)) ); verify(stateUpdater, never()).remove(standbyTaskToUpdateInputPartitions.id()); verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap()); verify(standbyTaskCreator).createTasks(Collections.emptyMap()); }
// Executes the scenario in a fresh stateless KIE session with a coverage listener attached,
// returning the listener and the rules available for the agenda group.
@Override public Map<String, Object> run() { StatelessKieSession statelessKieSession = kieContainer.newStatelessKieSession(sessionName); CoverageAgendaListener coverageAgendaListener = new CoverageAgendaListener(); statelessKieSession.execute(generateCommands(coverageAgendaListener)); Map<String, Object> toReturn = new HashMap<>(); toReturn.put(COVERAGE_LISTENER, coverageAgendaListener); toReturn.put(RULES_AVAILABLE, getAvailableRules(statelessKieSession.getKieBase(), agendaGroup)); return toReturn; }
// run() must create the session by the configured name exactly once and return a map
// containing both the coverage listener and the available-rules entries.
@Test public void testBuilder() { when(kieContainerMock.newStatelessKieSession(anyString())).thenReturn(statelessKieSessionMock); when(statelessKieSessionMock.getKieBase()).thenReturn(kieBaseMock); when(kieBaseMock.getKiePackages()).thenReturn(List.of()); String sessionName = "sessionName"; RuleStatelessScenarioExecutableBuilder builder = new RuleStatelessScenarioExecutableBuilder(kieContainerMock, sessionName); Map<String, Object> result = builder.run(); verify(kieContainerMock, times(1)).newStatelessKieSession(eq(sessionName)); assertThat(result).containsKeys(RuleScenarioExecutableBuilder.COVERAGE_LISTENER, RuleScenarioExecutableBuilder.RULES_AVAILABLE); }
// Rewrites CREATE / CREATE ... AS SELECT statements (all others pass through unchanged).
// KsqlStatementException propagates untouched; other KsqlException is re-wrapped with the
// masked statement text. NOTE(review): the wrapper passes e.getCause() rather than e as
// the cause — the original exception's message survives only via buildErrorMessage(e);
// confirm losing e itself from the cause chain is intended.
@SuppressWarnings("unchecked") @Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement ) { if (!(statement.getStatement() instanceof CreateSource) && !(statement.getStatement() instanceof CreateAsSelect)) { return statement; } try { if (statement.getStatement() instanceof CreateSource) { final ConfiguredStatement<CreateSource> createStatement = (ConfiguredStatement<CreateSource>) statement; return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement); } else { final ConfiguredStatement<CreateAsSelect> createStatement = (ConfiguredStatement<CreateAsSelect>) statement; return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse( createStatement); } } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { throw new KsqlStatementException( ErrorMessageUtil.buildErrorMessage(e), statement.getMaskedStatementText(), e.getCause()); } }
// A non-CREATE statement must be returned as the same instance (no copy, no rewrite).
@Test public void shouldReturnStatementUnchangedIfNotCreateStatement() { // Given: final ConfiguredStatement<?> prepared = ConfiguredStatement .of(PreparedStatement.of("sql", statement), SessionConfig.of(new KsqlConfig(ImmutableMap.of()), ImmutableMap.of()) ); // When: final ConfiguredStatement<?> result = injector.inject(prepared); // Then: assertThat(result, is(sameInstance(prepared))); }
// Parses one audit-log line into an AuditReplayCommand. The timestamp becomes an offset
// from startTimestamp, converted to absolute via relativeToAbsolute. "(options=" inside
// rename commands is temporarily rewritten to "(options:" so the message can be split on
// '=' per tab-separated field, then restored in the cmd field. The UGI is split on space
// to drop auth/proxy suffixes. Malformed lines raise IOException with the offending input.
@Override public AuditReplayCommand parse(Text inputLine, Function<Long, Long> relativeToAbsolute) throws IOException { Matcher m = logLineParseRegex.matcher(inputLine.toString()); if (!m.find()) { throw new IOException( "Unable to find valid message pattern from audit log line: `" + inputLine + "` using regex `" + logLineParseRegex + "`"); } long relativeTimestamp; try { relativeTimestamp = dateFormat.parse(m.group("timestamp")).getTime() - startTimestamp; } catch (ParseException p) { throw new IOException( "Exception while parsing timestamp from audit log line: `" + inputLine + "`", p); } // Sanitize the = in the rename options field into a : so we can split on = String auditMessageSanitized = m.group("message").replace("(options=", "(options:"); Map<String, String> parameterMap = new HashMap<String, String>(); String[] auditMessageSanitizedList = auditMessageSanitized.split("\t"); for (String auditMessage : auditMessageSanitizedList) { String[] splitMessage = auditMessage.split("=", 2); try { parameterMap.put(splitMessage[0], splitMessage[1]); } catch (ArrayIndexOutOfBoundsException e) { throw new IOException( "Exception while parsing a message from audit log line: `" + inputLine + "`", e); } } return new AuditReplayCommand(relativeToAbsolute.apply(relativeTimestamp), // Split the UGI on space to remove the auth and proxy portions of it SPACE_SPLITTER.split(parameterMap.get("ugi")).iterator().next(), parameterMap.get("cmd").replace("(options:", "(options="), parameterMap.get("src"), parameterMap.get("dst"), parameterMap.get("ip")); }
// Parses a line stamped 13:00 on the epoch day and expects a relative time of 13h minus
// START_TIMESTAMP. NOTE(review): the expected value assumes dateFormat interprets the
// string in a fixed (presumably UTC) zone — confirm the parser's timezone is pinned.
@Test public void testParseDefaultDateFormat() throws Exception { Text in = getAuditString("1970-01-01 13:00:00,000", "ignored", "ignored", "ignored", "ignored"); AuditReplayCommand expected = new AuditReplayCommand( 13 * 60 * 60 * 1000 - START_TIMESTAMP, "ignored", "ignored", "ignored", "ignored", "0.0.0.0"); assertEquals(expected, parser.parse(in, Function.identity())); }
// Decodes an SPDY header block that may arrive fragmented. First fragment: decode directly
// and stash any undecoded remainder in `cumulation`. Later fragments: append to the
// cumulation and decode from there; fully-consumed cumulation is released, otherwise
// already-read bytes are discarded to bound memory.
@Override void decode(ByteBufAllocator alloc, ByteBuf headerBlock, SpdyHeadersFrame frame) throws Exception { ObjectUtil.checkNotNull(headerBlock, "headerBlock"); ObjectUtil.checkNotNull(frame, "frame"); if (cumulation == null) { decodeHeaderBlock(headerBlock, frame); if (headerBlock.isReadable()) { cumulation = alloc.buffer(headerBlock.readableBytes()); cumulation.writeBytes(headerBlock); } } else { cumulation.writeBytes(headerBlock); decodeHeaderBlock(cumulation, frame); if (cumulation.isReadable()) { cumulation.discardReadBytes(); } else { releaseBuffer(); } } }
// A negative name/value-pair count must mark the frame invalid, consume the block fully,
// and produce no headers.
@Test public void testNegativeNameValuePairs() throws Exception { ByteBuf headerBlock = Unpooled.buffer(4); headerBlock.writeInt(-1); decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); assertFalse(headerBlock.isReadable()); assertTrue(frame.isInvalid()); assertEquals(0, frame.headers().names().size()); headerBlock.release(); }
/**
 * Health of the database connection: GREEN when reachable, the shared RED_HEALTH
 * instance otherwise.
 */
@Override
public Health check() {
    return isConnectedToDB() ? Health.GREEN : RED_HEALTH;
}
// isAlive() == 1 means the DB answered -> health must be exactly Health.GREEN (no cause).
@Test public void status_is_GREEN_without_cause_if_isAlive_returns_1() { when(isAliveMapper.isAlive()).thenReturn(1); Health health = underTest.check(); assertThat(health).isEqualTo(Health.GREEN); }
// OOM resolution loop: keep killing containers while the memory cgroup reports under_oom.
// If nothing can be killed while still under OOM, fail loudly (stray processes outside
// container PGIDs, or processes in the root YARN cgroup). ResourceHandlerException while
// reading cgroup state is treated as expected at shutdown and only logged.
// NOTE(review): if the kernel updates under_oom with delay, this may kill more containers
// than strictly necessary (acknowledged in the original comment).
@Override public void run() { try { // We kill containers until the kernel reports the OOM situation resolved // Note: If the kernel has a delay this may kill more than necessary while (true) { String status = cgroups.getCGroupParam( CGroupsHandler.CGroupController.MEMORY, "", CGROUP_PARAM_MEMORY_OOM_CONTROL); if (!status.contains(CGroupsHandler.UNDER_OOM)) { break; } boolean containerKilled = killContainer(); if (!containerKilled) { // This can happen, if SIGKILL did not clean up // non-PGID or containers or containers launched by other users // or if a process was put to the root YARN cgroup. throw new YarnRuntimeException( "Could not find any containers but CGroups " + "reserved for containers ran out of memory. " + "I am giving up"); } } } catch (ResourceHandlerException ex) { LOG.warn("Could not fetch OOM status. " + "This is expected at shutdown. Exiting.", ex); } }
// Simulates an OOM where the first kill attempt leaves a surviving pid (1234 -> "")
// and the second container has no cgroup.procs file yet; the handler must issue exactly
// one KILL signal, targeting pid 1235 of container c1, then stop once under_oom clears.
@Test public void testKillOpportunisticContainerWithKillFailuresUponOOM() throws Exception { ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<>(); Container c1 = createContainer(1, false, 1L, true); containers.put(c1.getContainerId(), c1); Container c2 = createContainer(2, false, 2L, true); containers.put(c2.getContainerId(), c2); ContainerExecutor ex = createContainerExecutor(containers); Context context = mock(Context.class); when(context.getContainers()).thenReturn(containers); when(context.getContainerExecutor()).thenReturn(ex); CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); when(cGroupsHandler.getCGroupParam( CGroupsHandler.CGroupController.MEMORY, "", CGROUP_PARAM_MEMORY_OOM_CONTROL)) .thenReturn("under_oom 1").thenReturn("under_oom 0"); when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, c1.getContainerId().toString(), CGROUP_PROCS_FILE)) .thenReturn("1234").thenReturn(""); when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) .thenReturn(getMB(9)); when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) .thenReturn(getMB(9)); // c2 process has not started, hence no cgroup.procs file yet when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, c2.getContainerId().toString(), CGROUP_PROCS_FILE)) .thenThrow( new ResourceHandlerException(CGROUP_PROCS_FILE + " not found")); DefaultOOMHandler handler = new DefaultOOMHandler(context, false) { @Override protected CGroupsHandler getCGroupsHandler() { return cGroupsHandler; } }; handler.run(); verify(ex, times(1)).signalContainer( new ContainerSignalContext.Builder() .setPid("1235") .setContainer(c1) .setSignal(ContainerExecutor.Signal.KILL) .build() ); verify(ex, times(1)).signalContainer(any()); }
// Null-safe: strips every character in EXCLUDED_CHARACTERS from the value, then unwraps
// surrounding backticks via tryGetRealContentInBackticks.
public static String getExactlyValue(final String value) { return null == value ? null : tryGetRealContentInBackticks(CharMatcher.anyOf(EXCLUDED_CHARACTERS).removeFrom(value)); }
// Null input must pass through as null, not throw.
@Test void assertGetExactlyValueUsingNull() { assertNull(SQLUtils.getExactlyValue(null)); }
// Convenience overload delegating to start(true); the boolean's meaning is defined by the
// start(boolean) overload (not visible here) — presumably a guard/validation flag.
@Override public void start() { start(true); }
// While a long-running job is PROCESSING, the server must keep refreshing its updatedAt
// heartbeat; sampled three times at ~1s intervals, each within 500ms of "now".
@Test void testHeartbeatsAreSentForJobsInProcessingState() { backgroundJobServer.start(); final JobId jobId = BackgroundJob.enqueue(() -> testService.doWorkThatTakesLong(4)); await().pollInterval(150, MILLISECONDS).pollDelay(1, SECONDS).atMost(2, SECONDS).untilAsserted(() -> assertThat(storageProvider.getJobById(jobId)).hasUpdatedAtCloseTo(now(), within(500, ChronoUnit.MILLIS))); await().pollInterval(150, MILLISECONDS).pollDelay(1, SECONDS).atMost(2, SECONDS).untilAsserted(() -> assertThat(storageProvider.getJobById(jobId)).hasUpdatedAtCloseTo(now(), within(500, ChronoUnit.MILLIS))); await().pollInterval(150, MILLISECONDS).pollDelay(1, SECONDS).atMost(2, SECONDS).untilAsserted(() -> assertThat(storageProvider.getJobById(jobId)).hasUpdatedAtCloseTo(now(), within(500, ChronoUnit.MILLIS))); }
// Hash delegates to the entries map — presumably consistent with an equals() that
// compares entries; NPEs if entries is null (construction must guarantee non-null).
@Override public int hashCode() { return entries.hashCode(); }
// Two ConfigEntry instances built from identical (mostly null) arguments must be equal and
// hash alike. NOTE(review): this exercises ConfigEntry's own equals/hashCode, not the
// entries-map-delegating hashCode shown above — confirm it belongs with that focal method.
@Test public void testHashCodeAndEqualsWithNull() { ConfigEntry ce0 = new ConfigEntry("abc", null, null, false, false, null, null, null); ConfigEntry ce1 = new ConfigEntry("abc", null, null, false, false, null, null, null); assertEquals(ce0, ce1); assertEquals(ce0.hashCode(), ce1.hashCode()); }
// Identity by default; a protected hook so subclasses can scale or clamp timer timeouts.
protected int adjustTimeout(final int timeoutMs) { return timeoutMs; }
// Timing-based: with a 100ms timeout, ~10 timer fires are expected in 1s; the ±3 delta
// absorbs scheduling jitter. NOTE(review): wall-clock sleeps make this inherently flaky
// on loaded CI hosts.
@Test public void testAdjustTimeout() throws Exception { this.timer.nextTimeout = 100; this.timer.start(); Thread.sleep(1000); assertEquals(10, this.timer.counter.get(), 3); }
// Exchanges a refresh token for a new access token. Order of operations matters:
// 1) reject blank/unknown/expired-cleared refresh token values;
// 2) verify the requesting client owns the token (revoking it on mismatch);
// 3) enforce the client's allowRefresh / clearAccessTokensOnRefresh policies;
// 4) reject expired refresh tokens (removing them);
// 5) scope handling: a requested scope must be a subset of the refresh token's
//    stored scopes (minus reserved system scopes) — up-scoping throws
//    InvalidScopeException; an empty request inherits the stored scopes;
// 6) mint the access token, reusing or rotating the refresh token per client
//    config, enhance, persist, and return it.
// NOTE(review): access tokens are cleared (step 3) before the expiry check (step 4),
// so a client with clearAccessTokensOnRefresh loses tokens even when the refresh
// ultimately fails as expired — confirm this is intended.
@Override @Transactional(value="defaultTransactionManager") public OAuth2AccessTokenEntity refreshAccessToken(String refreshTokenValue, TokenRequest authRequest) throws AuthenticationException { if (Strings.isNullOrEmpty(refreshTokenValue)) { // throw an invalid token exception if there's no refresh token value at all throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue); } OAuth2RefreshTokenEntity refreshToken = clearExpiredRefreshToken(tokenRepository.getRefreshTokenByValue(refreshTokenValue)); if (refreshToken == null) { // throw an invalid token exception if we couldn't find the token throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue); } ClientDetailsEntity client = refreshToken.getClient(); AuthenticationHolderEntity authHolder = refreshToken.getAuthenticationHolder(); // make sure that the client requesting the token is the one who owns the refresh token ClientDetailsEntity requestingClient = clientDetailsService.loadClientByClientId(authRequest.getClientId()); if (!client.getClientId().equals(requestingClient.getClientId())) { tokenRepository.removeRefreshToken(refreshToken); throw new InvalidClientException("Client does not own the presented refresh token"); } //Make sure this client allows access token refreshing if (!client.isAllowRefresh()) { throw new InvalidClientException("Client does not allow refreshing access token!"); } // clear out any access tokens if (client.isClearAccessTokensOnRefresh()) { tokenRepository.clearAccessTokensForRefreshToken(refreshToken); } if (refreshToken.isExpired()) { tokenRepository.removeRefreshToken(refreshToken); throw new InvalidTokenException("Expired refresh token: " + refreshTokenValue); } OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity(); // get the stored scopes from the authentication holder's authorization request; these are the scopes associated with the refresh token Set<String> refreshScopesRequested = new 
HashSet<>(refreshToken.getAuthenticationHolder().getAuthentication().getOAuth2Request().getScope()); Set<SystemScope> refreshScopes = scopeService.fromStrings(refreshScopesRequested); // remove any of the special system scopes refreshScopes = scopeService.removeReservedScopes(refreshScopes); Set<String> scopeRequested = authRequest.getScope() == null ? new HashSet<String>() : new HashSet<>(authRequest.getScope()); Set<SystemScope> scope = scopeService.fromStrings(scopeRequested); // remove any of the special system scopes scope = scopeService.removeReservedScopes(scope); if (scope != null && !scope.isEmpty()) { // ensure a proper subset of scopes if (refreshScopes != null && refreshScopes.containsAll(scope)) { // set the scope of the new access token if requested token.setScope(scopeService.toStrings(scope)); } else { String errorMsg = "Up-scoping is not allowed."; logger.error(errorMsg); throw new InvalidScopeException(errorMsg); } } else { // otherwise inherit the scope of the refresh token (if it's there -- this can return a null scope set) token.setScope(scopeService.toStrings(refreshScopes)); } token.setClient(client); if (client.getAccessTokenValiditySeconds() != null) { Date expiration = new Date(System.currentTimeMillis() + (client.getAccessTokenValiditySeconds() * 1000L)); token.setExpiration(expiration); } if (client.isReuseRefreshToken()) { // if the client re-uses refresh tokens, do that token.setRefreshToken(refreshToken); } else { // otherwise, make a new refresh token OAuth2RefreshTokenEntity newRefresh = createRefreshToken(client, authHolder); token.setRefreshToken(newRefresh); // clean up the old refresh token tokenRepository.removeRefreshToken(refreshToken); } token.setAuthenticationHolder(authHolder); tokenEnhancer.enhance(token, authHolder.getAuthentication()); tokenRepository.saveAccessToken(token); return token; }
// Requesting a scope set that is not a subset of the refresh token's stored
// scopes ("address"/"phone" are extra) must be rejected as up-scoping.
@Test(expected = InvalidScopeException.class) public void refreshAccessToken_requestingMixedScope() { Set<String> mixedScope = newHashSet("openid", "profile", "address", "phone"); // no email or offline_access tokenRequest.setScope(mixedScope); service.refreshAccessToken(refreshTokenValue, tokenRequest); }
// Canonicalizes a value's string form for entity data: integers are normalized via
// BigInteger, "simple" doubles via Double.toString; anything else (including
// non-parsable strings) is returned verbatim. Null maps to "".
// The leading-"0" guard preserves strings like "0101..." (e.g. identifiers with
// leading zeros) that BigInteger would otherwise strip.
// NOTE(review): a negative number with leading zeros ("-0101") bypasses the guard
// and would lose its zeros — confirm such inputs cannot occur here.
static String convertValue(Object value) { if (value != null) { String strVal = value.toString(); // check number if (NumberUtils.isParsable(strVal)) { if (strVal.startsWith("0") && !strVal.startsWith("0.")) { return strVal; } try { BigInteger longVal = new BigInteger(strVal); return longVal.toString(); } catch (NumberFormatException ignored) { } try { double dblVal = Double.parseDouble(strVal); String doubleAsString = Double.toString(dblVal); if (!Double.isInfinite(dblVal) && isSimpleDouble(doubleAsString)) { return doubleAsString; } } catch (NumberFormatException ignored) { } } return strVal; } else { return ""; } }
// Exercises the canonicalization: plain integers pass through, values with
// suffix letters / leading zeros / huge digit counts / embedded dots must NOT
// be collapsed into Double scientific notation.
@Test public void testConvertValue() { assertThat(EntityDataAdapter.convertValue("500")).isEqualTo("500"); assertThat(EntityDataAdapter.convertValue("500D")).isEqualTo("500D"); //do not convert to Double !!! assertThat(EntityDataAdapter.convertValue("0101010521130565")).isEqualTo("0101010521130565"); //do not convert to Double !!! assertThat(EntityDataAdapter.convertValue("89010303310033979663")).isEqualTo("89010303310033979663"); //do not convert to Double !!! assertThat(EntityDataAdapter.convertValue("89914009129080723322")).isEqualTo("89914009129080723322"); assertThat(EntityDataAdapter.convertValue("899140091AAAA29080723322")).isEqualTo("899140091AAAA29080723322"); assertThat(EntityDataAdapter.convertValue("899140091.29080723322")).isEqualTo("899140091.29080723322"); }
// Double flavor of quantile_at_value: maps the double to its sortable-long
// representation and delegates to the bigint implementation. Nullable return
// (null when the value is outside the digest's min/max — per the bigint impl).
@ScalarFunction("quantile_at_value") @Description("Given an input x between min/max values of qdigest, find which quantile is represented by that value") @SqlType(StandardTypes.DOUBLE) @SqlNullable public static Double quantileAtValueDouble(@SqlType("qdigest(double)") Slice input, @SqlType(StandardTypes.DOUBLE) double value) { return quantileAtValueBigint(input, doubleToSortableLong(value)); }
// Digest of 0..9: 5.6 sits at quantile 0.6; out-of-range values and NaN yield null.
@Test public void testQuantileAtValueDouble() { QuantileDigest qdigest = new QuantileDigest(1); ImmutableList.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9).stream() .mapToLong(FloatingPointBitsConverterUtil::doubleToSortableLong) .forEach(qdigest::add); functionAssertions.assertFunction(format("quantile_at_value(CAST(X'%s' AS qdigest(double)), 5.6)", toHexString(qdigest)), DOUBLE, 0.6); functionAssertions.assertFunction(format("quantile_at_value(CAST(X'%s' AS qdigest(double)), -1.23)", toHexString(qdigest)), DOUBLE, null); functionAssertions.assertFunction(format("quantile_at_value(CAST(X'%s' AS qdigest(double)), 12.3)", toHexString(qdigest)), DOUBLE, null); functionAssertions.assertFunction(format("quantile_at_value(CAST(X'%s' AS qdigest(double)), nan())", toHexString(qdigest)), DOUBLE, null); }
/**
 * Equality is based on the {@code value} and {@code extensions} fields.
 * Uses strict class comparison (getClass), so subclasses are never equal
 * to a plain LinkParameter.
 */
@Override
public boolean equals(java.lang.Object o) {
    if (o == this) {
        return true;
    }
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    final LinkParameter other = (LinkParameter) o;
    return Objects.equals(value, other.value)
            && Objects.equals(extensions, other.extensions);
}
// equals contract: not equal to null or a foreign type; reflexive; two empty
// instances are equal.
@Test public void testEquals() { LinkParameter linkParameter = new LinkParameter(); Assert.assertFalse(linkParameter.equals(null)); Assert.assertFalse(linkParameter.equals(new String())); Assert.assertTrue(linkParameter.equals(linkParameter)); Assert.assertTrue(linkParameter.equals(new LinkParameter())); }
// Convenience overload: enters the named resource as an outbound (OUT) entry
// with count 1 and no call args (OBJECTS0 is the shared empty-args constant).
// @throws BlockException when the resource is blocked by a flow/degrade rule
public static Entry entry(String name) throws BlockException { return Env.sph.entry(name, EntryType.OUT, 1, OBJECTS0); }
// Full-argument method entry: verifies the entry type round-trips and that
// exit accepts the same count/args used on entry.
@Test public void testMethodEntryAll() throws BlockException, NoSuchMethodException, SecurityException { final String arg0 = "foo"; final String arg1 = "baz"; Method method = SphUTest.class.getMethod("testMethodEntryNormal"); Entry e = SphU.entry(method, EntryType.IN, 2, arg0, arg1); assertSame(e.resourceWrapper.getEntryType(), EntryType.IN); e.exit(2, arg0, arg1); }
// Single-element add, delegating to the batch add(Collection) overload.
// Returns true iff the element was newly added (batch add reports the count of
// additions — confirm against that overload's contract).
@Override public boolean add(T object) { return add(Arrays.asList(object)) > 0; }
// Adding to a bloom filter whose size/fpp were never initialized must fail fast.
@Test public void testNotInitializedOnAdd() { Assertions.assertThrows(RedisException.class, () -> { RBloomFilter<String> filter = redisson.getBloomFilter("filter"); filter.add("123"); }); }
// Asserts the subject is null by routing through the standard equality check
// (so failure messages and equals() handling stay consistent with isEqualTo).
public void isNull() { standardIsEqualTo(null); }
// A subject whose equals(null) throws must still produce a test failure
// (not propagate the exception) when asserted to be null.
@Test public void isNullBadEqualsImplementation() { expectFailure.whenTesting().that(new ThrowsOnEqualsNull()).isNull(); }
public static void unzip(String zipFilename, String destDirname) throws IOException{ final Path destDir = Paths.get(destDirname); //if the destination doesn't exist, create it if(Files.notExists(destDir)){ if(logger.isDebugEnabled()) logger.debug(destDir + " does not exist. Creating..."); Files.createDirectories(destDir); } try (FileSystem zipFileSystem = createZipFileSystem(zipFilename, false)){ final Path root = zipFileSystem.getPath("/"); //walk the zip file tree and copy files to the destination Files.walkFileTree(root, new SimpleFileVisitor<Path>(){ @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { final Path destFile = Paths.get(destDir.toString(), file.toString()); if(logger.isDebugEnabled()) logger.debug("Extracting file %s to %s", file, destFile); Files.copy(file, destFile, StandardCopyOption.REPLACE_EXISTING); return FileVisitResult.CONTINUE; } @Override public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { final Path dirToCreate = Paths.get(destDir.toString(), dir.toString()); if(Files.notExists(dirToCreate)){ if(logger.isDebugEnabled()) logger.debug("Creating directory %s", dirToCreate); Files.createDirectory(dirToCreate); } return FileVisitResult.CONTINUE; } }); } }
// Smoke test: extracts a bundled rest.zip resource into the temp directory.
// The replace("/C:/","C:\\") works around the leading-slash form of
// URL.getPath() on Windows.
@Test public void testUnzip() throws IOException { URL url = Thread.currentThread().getContextClassLoader().getResource("rest.zip"); NioUtils.unzip(url.getPath().toString().replace("/C:/","C:\\"), NioUtils.getTempDir()); }
// Translates each OffsetSpec into its raw offset-query timestamp, then drives
// a per-partition ListOffsets request via the admin API driver, honoring the
// options' timeout.
@Override public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options) { AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future = ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet()); Map<TopicPartition, Long> offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue()))); ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext); invokeDriver(handler, future, options.timeoutMs); return new ListOffsetsResult(future.all()); }
// With broker API versions capped at v6, a non-MAX_TIMESTAMP latest-offset query
// should be served immediately (downgraded) and return the mocked offset/epoch.
@Test public void testListOffsetsNonMaxTimestampDowngradedImmediately() throws Exception { Node node = new Node(0, "localhost", 8120); List<Node> nodes = Collections.singletonList(node); List<PartitionInfo> pInfos = new ArrayList<>(); pInfos.add(new PartitionInfo("foo", 0, node, new Node[]{node}, new Node[]{node})); final Cluster cluster = new Cluster( "mockClusterId", nodes, pInfos, Collections.emptySet(), Collections.emptySet(), node); final TopicPartition tp0 = new TopicPartition("foo", 0); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, AdminClientConfig.RETRIES_CONFIG, "2")) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create( ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 6)); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) .setTopics(singletonList(t0)); // listoffsets response from broker 0 env.kafkaClient().prepareResponse( request -> request instanceof ListOffsetsRequest, new ListOffsetsResponse(responseData)); ListOffsetsResult result = env.adminClient().listOffsets( Collections.singletonMap(tp0, OffsetSpec.latest())); ListOffsetsResultInfo tp0Offset = result.partitionResult(tp0).get(); assertEquals(123L, tp0Offset.offset()); assertEquals(321, tp0Offset.leaderEpoch().get().intValue()); assertEquals(-1L, tp0Offset.timestamp()); } }
// Delegates to the wrapped Appendable, converting the checked IOException into
// an unchecked RuntimeException so callers needn't declare it.
// NOTE(review): returns out.append(...)'s result, i.e. the wrapped Appendable,
// not this wrapper — confirm callers don't chain on the return value expecting
// the wrapper's exception translation.
@Override public Appendable append(CharSequence csq) { try { return out.append(csq); } catch (IOException e) { throw new RuntimeException(e); } }
// Both append overloads (full sequence and sub-range) must land in the
// underlying byte sink in order.
@Test void append() { out.append("Hello"); out.append("Hello World", 5, 11); out.close(); assertThat(bytes, bytes(equalTo("Hello World"))); }
// Reports whether the runner has completed; the flag is set elsewhere
// (presumably at the end of run() — confirm in this class).
public boolean isFinished() { return finished; }
// isFinished() is false before run() and true after a successful run()
// against a fully mocked job hierarchy.
@Test public void testIsFinished() throws Exception { assertFalse( jobRunner.isFinished() ); when( mockJob.isStopped() ).thenReturn( false ); when( mockJob.getParentJob() ).thenReturn( parentJob ); when( parentJob.isStopped() ).thenReturn( false ); when( mockJob.execute( Mockito.anyInt(), Mockito.any( Result.class ) ) ).thenReturn( mockResult ); jobRunner.run(); assertTrue( jobRunner.isFinished() ); }
@Override public double getStdDev() { // two-pass algorithm for variance, avoids numeric overflow if (values.length <= 1) { return 0; } final double mean = getMean(); double sum = 0; for (long value : values) { final double diff = value - mean; sum += diff * diff; } final double variance = sum / (values.length - 1); return Math.sqrt(variance); }
// A single-sample snapshot has no spread: stddev must be exactly zero.
@Test public void calculatesAStdDevOfZeroForASingletonSnapshot() throws Exception { final Snapshot singleItemSnapshot = new UniformSnapshot(new long[]{ 1 }); assertThat(singleItemSnapshot.getStdDev()) .isZero(); }
/**
 * Builds a serializer for the given topic/target by resolving the latest schema
 * for the derived subject from the schema registry and dispatching on its type
 * (PROTOBUF / AVRO / JSON).
 *
 * Fix: the IllegalStateException thrown when the schema id cannot be resolved
 * previously read "Schema found for id ..." — inverted wording for the
 * not-found path; corrected to "No schema found ...".
 *
 * @throws ValidationException   when no schema is registered for the subject
 * @throws IllegalStateException when the schema id cannot be resolved or the
 *                               registry reports an unknown schema type
 */
@Override
public Serializer serializer(String topic, Target type) {
  String subject = schemaSubject(topic, type);
  // Latest schema metadata for the subject (id + type).
  SchemaMetadata meta = getSchemaBySubject(subject)
      .orElseThrow(() -> new ValidationException(
          String.format("No schema for subject '%s' found", subject)));
  // Resolve the parsed schema by id; metadata without a resolvable schema is an
  // internal inconsistency, hence IllegalStateException.
  ParsedSchema schema = getSchemaById(meta.getId())
      .orElseThrow(() -> new IllegalStateException(
          String.format("No schema found for id %s, subject '%s'", meta.getId(), subject)));
  SchemaType schemaType = SchemaType.fromString(meta.getSchemaType())
      .orElseThrow(() -> new IllegalStateException("Unknown schema type: " + meta.getSchemaType()));
  return switch (schemaType) {
    case PROTOBUF -> input ->
        serializeProto(schemaRegistryClient, topic, type, (ProtobufSchema) schema, meta.getId(), input);
    case AVRO -> input ->
        serializeAvro((AvroSchema) schema, meta.getId(), input);
    case JSON -> input ->
        serializeJson((JsonSchema) schema, meta.getId(), input);
  };
}
// A JSON string payload serialized against a registered Avro schema must equal
// the Confluent wire format: magic byte + schema id + Avro-encoded body.
@Test void serializeTreatsInputAsJsonAvroSchemaPayload() throws RestClientException, IOException { AvroSchema schema = new AvroSchema( "{" + " \"type\": \"record\"," + " \"name\": \"TestAvroRecord1\"," + " \"fields\": [" + " {" + " \"name\": \"field1\"," + " \"type\": \"string\"" + " }," + " {" + " \"name\": \"field2\"," + " \"type\": \"int\"" + " }" + " ]" + "}" ); String jsonValue = "{ \"field1\":\"testStr\", \"field2\": 123 }"; String topic = "test"; int schemaId = registryClient.register(topic + "-value", schema); byte[] serialized = serde.serializer(topic, Serde.Target.VALUE).serialize(jsonValue); byte[] expected = toBytesWithMagicByteAndSchemaId(schemaId, jsonValue, schema); assertThat(serialized).isEqualTo(expected); }
// Accessor for the largeLocks flag set at construction; semantics are defined
// where the field is assigned (not visible here).
public boolean largeLocks() { return largeLocks; }
// Verifies lock lifetime with large locks: a probe subcontext registers the
// acquisition, a later non-probe subcontext still sees the lock held, and the
// unlock callback only fires when the root context closes.
@Test public void testLargeLocks() { var mutable = new Object() { boolean locked = true; }; Runnable unlock = () -> mutable.locked = false; try (OrchestratorContext rootContext = OrchestratorContext.createContextForMultiAppOp(new ManualClock())) { try (OrchestratorContext probeContext = rootContext.createSubcontextForSingleAppOp(true)) { assertFalse(probeContext.hasLock(application)); assertTrue(probeContext.registerLockAcquisition(application, unlock)); assertTrue(probeContext.hasLock(application)); assertTrue(mutable.locked); } try (OrchestratorContext nonProbeContext = rootContext.createSubcontextForSingleAppOp(false)) { assertTrue(nonProbeContext.hasLock(application)); assertTrue(mutable.locked); } assertTrue(mutable.locked); } assertFalse(mutable.locked); }
// Groups elements by the key extractor, mapping each element to the value
// extractor's result. Delegates to the four-arg overload with the boolean flag
// false — confirm that flag's meaning (likely parallel/ordering) against the
// overload's declaration.
public static <E, K, V> Map<K, List<V>> groupKeyValue(Collection<E> collection, Function<E, K> key, Function<E, V> value) { return groupKeyValue(collection, key, value, false); }
// Null and empty collections map to an empty result; populated input groups
// class ids per term id, preserving duplicates within a group's list order.
@Test public void testGroupKeyValue() { Map<Long, List<Long>> map = CollStreamUtil.groupKeyValue(null, Student::getTermId, Student::getClassId); assertEquals(map, Collections.EMPTY_MAP); List<Student> list = new ArrayList<>(); map = CollStreamUtil.groupKeyValue(list, Student::getTermId, Student::getClassId); assertEquals(map, Collections.EMPTY_MAP); list.add(new Student(1, 1, 1, "张三")); list.add(new Student(1, 2, 1, "李四")); list.add(new Student(2, 2, 1, "王五")); map = CollStreamUtil.groupKeyValue(list, Student::getTermId, Student::getClassId); Map<Long, List<Long>> compare = new HashMap<>(); compare.put(1L, Arrays.asList(1L, 2L)); compare.put(2L, Collections.singletonList(2L)); assertEquals(compare, map); }
/**
 * Merges two number-of-distinct-values estimates by keeping the larger one
 * (a conservative upper bound when combining column statistics).
 *
 * @param oldValue previously recorded NDV estimate
 * @param newValue incoming NDV estimate
 * @return the larger of the two estimates
 */
public long mergeNumDVs(long oldValue, long newValue) {
    return (oldValue >= newValue) ? oldValue : newValue;
}
// Merge is symmetric and picks the maximum of the two estimates.
@Test public void testMergeNumDVs() { assertEquals(3, MERGER.mergeNumDVs(1, 3)); assertEquals(3, MERGER.mergeNumDVs(3, 1)); }
// Simple accessor for the name assigned at construction.
@Override public String name() { return name; }
// Covers the HMS namespace-ownership matrix: setting an individual or group
// owner over the default, switching owner types in both directions, and the
// failure modes — owner without owner-type (and vice versa) must be rejected
// together, and an unknown owner-type surfaces the enum-parse error verbatim.
@Test public void testSetNamespaceOwnership() throws TException { setNamespaceOwnershipAndVerify( "set_individual_ownership_on_default_owner", ImmutableMap.of(), ImmutableMap.of( HiveCatalog.HMS_DB_OWNER, "some_individual_owner", HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.USER.name()), System.getProperty("user.name"), PrincipalType.USER, "some_individual_owner", PrincipalType.USER); setNamespaceOwnershipAndVerify( "set_group_ownership_on_default_owner", ImmutableMap.of(), ImmutableMap.of( HiveCatalog.HMS_DB_OWNER, "some_group_owner", HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.GROUP.name()), System.getProperty("user.name"), PrincipalType.USER, "some_group_owner", PrincipalType.GROUP); setNamespaceOwnershipAndVerify( "change_individual_to_group_ownership", ImmutableMap.of(HiveCatalog.HMS_DB_OWNER, "some_owner"), ImmutableMap.of( HiveCatalog.HMS_DB_OWNER, "some_group_owner", HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.GROUP.name()), "some_owner", PrincipalType.USER, "some_group_owner", PrincipalType.GROUP); setNamespaceOwnershipAndVerify( "change_group_to_individual_ownership", ImmutableMap.of( HiveCatalog.HMS_DB_OWNER, "some_group_owner", HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.GROUP.name()), ImmutableMap.of( HiveCatalog.HMS_DB_OWNER, "some_individual_owner", HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.USER.name()), "some_group_owner", PrincipalType.GROUP, "some_individual_owner", PrincipalType.USER); assertThatThrownBy( () -> setNamespaceOwnershipAndVerify( "set_owner_without_setting_owner_type", ImmutableMap.of(), ImmutableMap.of(HiveCatalog.HMS_DB_OWNER, "some_individual_owner"), System.getProperty("user.name"), PrincipalType.USER, "no_post_setting_expectation_due_to_exception_thrown", null)) .isInstanceOf(IllegalArgumentException.class) .hasMessage( String.format( "Setting %s and %s has to be performed together or not at all", HiveCatalog.HMS_DB_OWNER_TYPE, HiveCatalog.HMS_DB_OWNER)); assertThatThrownBy( () -> setNamespaceOwnershipAndVerify( 
"set_owner_type_without_setting_owner", ImmutableMap.of(HiveCatalog.HMS_DB_OWNER, "some_owner"), ImmutableMap.of(HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.GROUP.name()), "some_owner", PrincipalType.USER, "no_post_setting_expectation_due_to_exception_thrown", null)) .isInstanceOf(IllegalArgumentException.class) .hasMessage( String.format( "Setting %s and %s has to be performed together or not at all", HiveCatalog.HMS_DB_OWNER_TYPE, HiveCatalog.HMS_DB_OWNER)); assertThatThrownBy( () -> setNamespaceOwnershipAndVerify( "set_invalid_owner_type", ImmutableMap.of(), ImmutableMap.of( HiveCatalog.HMS_DB_OWNER, "iceberg", HiveCatalog.HMS_DB_OWNER_TYPE, "invalidOwnerType"), System.getProperty("user.name"), PrincipalType.USER, "no_post_setting_expectation_due_to_exception_thrown", null)) .isInstanceOf(IllegalArgumentException.class) .hasMessage( "No enum constant org.apache.hadoop.hive.metastore.api.PrincipalType.invalidOwnerType"); }
@Override public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) { if (ruleFactory.getGenerationConfig().isIncludeJsr303Annotations() && (node.has("minItems") || node.has("maxItems")) && isApplicableType(field)) { final Class<? extends Annotation> sizeClass = ruleFactory.getGenerationConfig().isUseJakartaValidation() ? Size.class : javax.validation.constraints.Size.class; JAnnotationUse annotation = field.annotate(sizeClass); if (node.has("minItems")) { annotation.param("min", node.get("minItems").asInt()); } if (node.has("maxItems")) { annotation.param("max", node.get("maxItems").asInt()); } } return field; }
// Only minItems present: @Size(min=…) is applied (for applicable types) and
// the "max" parameter is never set.
@Test public void testMinLength() { when(config.isIncludeJsr303Annotations()).thenReturn(true); final int minValue = new Random().nextInt(); when(subNode.asInt()).thenReturn(minValue); when(node.get("minItems")).thenReturn(subNode); when(fieldVar.annotate(sizeClass)).thenReturn(annotation); when(node.has("minItems")).thenReturn(true); when(fieldVar.type().boxify().fullName()).thenReturn(fieldClass.getTypeName()); JFieldVar result = rule.apply("node", node, null, fieldVar, null); assertSame(fieldVar, result); verify(fieldVar, times(isApplicable ? 1 : 0)).annotate(sizeClass); verify(annotation, times(isApplicable ? 1 : 0)).param("min", minValue); verify(annotation, never()).param(eq("max"), anyString()); }
// Varargs form of containsExactly. A null varargs array (i.e. a literal null
// argument) is treated as a single null element. The boolean passed along
// flags the "single Iterable argument" case so the failure message can warn
// that the caller probably meant containsExactlyElementsIn.
@CanIgnoreReturnValue public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) { List<@Nullable Object> expected = (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs); return containsExactlyElementsIn( expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable); }
// Single-element iterable: success on match, and on mismatch the failure keys
// use the specialized "iterable.onlyElement()" phrasing.
@Test public void iterableContainsExactlySingleElement() { assertThat(asList(1)).containsExactly(1); expectFailureWhenTestingThat(asList(1)).containsExactly(2); assertFailureKeys("value of", "expected", "but was"); assertFailureValue("value of", "iterable.onlyElement()"); }
// Static factory for the builder; name is the slot sharing group identifier.
public static Builder newBuilder(String name) { return new Builder(name); }
// Partially specified resources (zero task-heap with nonzero CPU/off-heap)
// must be rejected at build() time.
@Test void testBuildSlotSharingGroupWithIllegalConfig() { assertThatThrownBy( () -> SlotSharingGroup.newBuilder("ssg") .setCpuCores(1) .setTaskHeapMemory(MemorySize.ZERO) .setTaskOffHeapMemoryMB(10) .build()) .isInstanceOf(IllegalArgumentException.class); }
/**
 * Adds the given topics to the consumer configuration.
 * Rejects a null/empty list and any blank entry; each accepted name is
 * trimmed before being appended to the configured topic set.
 */
@Override
public ConsumerBuilder<T> topics(List<String> topicNames) {
    checkArgument(topicNames != null && !topicNames.isEmpty(),
            "Passed in topicNames list should not be null or empty.");
    // Validate every entry before mutating the configuration.
    for (String topicName : topicNames) {
        checkArgument(StringUtils.isNotBlank(topicName), "topicNames cannot have blank topic");
    }
    List<String> trimmedNames =
            topicNames.stream().map(StringUtils::trim).collect(Collectors.toList());
    conf.getTopicNames().addAll(trimmedNames);
    return this;
}
// An empty topic list must be rejected with IllegalArgumentException.
@Test(expectedExceptions = IllegalArgumentException.class) public void testConsumerBuilderImplWhenTopicNamesIsEmpty() { consumerBuilderImpl.topics(Arrays.asList()); }
// Resolves an Action's URL for display: null url name means "hidden";
// an absolute URI is returned as-is; a leading "/" is context-path relative;
// anything else is relative to the current item's URL. Unparseable URL names
// are logged and suppressed (returns null) rather than thrown.
public static @CheckForNull String getActionUrl(String itUrl, Action action) { String urlName = action.getUrlName(); if (urlName == null) return null; // Should not be displayed try { if (new URI(urlName).isAbsolute()) { return urlName; } } catch (URISyntaxException x) { Logger.getLogger(Functions.class.getName()).log(Level.WARNING, "Failed to parse URL for {0}: {1}", new Object[] {action, x}); return null; } if (urlName.startsWith("/")) return joinPath(Stapler.getCurrentRequest().getContextPath(), urlName); else // relative URL name return joinPath(Stapler.getCurrentRequest().getContextPath() + '/' + itUrl, urlName); }
// Absolute URIs with an authority component (any scheme) must pass through
// untouched, regardless of the item URL.
@Test public void testGetActionUrl_absoluteUriWithAuthority() { String[] uris = { "http://example.com/foo/bar", "https://example.com/foo/bar", "ftp://example.com/foo/bar", "svn+ssh://nobody@example.com/foo/bar", }; for (String uri : uris) { String result = Functions.getActionUrl(null, createMockAction(uri)); assertEquals(uri, result); } }
// Returns the member's trade histories (as seller or buyer per isSeller) after
// verifying the authenticated user (authId) is authorized for memberId.
public List<TradeHistoryResponse> findTradeHistories(final Long memberId, final Long authId, final boolean isSeller) { Member member = findMember(authId); member.validateAuth(memberId); return tradeHistoryRepository.findHistories(memberId, isSeller); }
// Fetching trade histories as a buyer returns the persisted history with the
// buyer's display name resolved. (Method name is Korean: "queries trade history".)
@Test void 거래_내역을_조회한다() { // given TradeHistory savedHistory = tradeHistoryRepository.save( new TradeHistory( 1L, 2L, 1L, 10000, 10, List.of() ) ); Member saved = memberRepository.save(MemberFixture.일반_유저_생성()); // when List<TradeHistoryResponse> buyerHistories = memberService.findTradeHistories(saved.getId(), saved.getId(), false); // then assertSoftly(softly -> { softly.assertThat(buyerHistories).hasSize(1); softly.assertThat(buyerHistories.get(0).buyerName()).isEqualTo("buyer"); }); }
// Static factory for the AutoValue-generated builder of SplunkEventWriter.
static Builder newBuilder() { return new AutoValue_SplunkEventWriter.Builder(); }
// With batch size 1, each SplunkEvent becomes its own HTTP POST; on an
// all-200 mock server the error PCollection is empty and the server receives
// exactly one request per event.
@Test @Category(NeedsRunner.class) public void successfulSplunkWriteSingleBatchTest() { // Create server expectation for success. mockServerListening(200); int testPort = mockServerRule.getPort(); List<KV<Integer, SplunkEvent>> testEvents = ImmutableList.of( KV.of( 123, SplunkEvent.newBuilder() .withEvent("test-event-1") .withHost("test-host-1") .withIndex("test-index-1") .withSource("test-source-1") .withSourceType("test-source-type-1") .withTime(12345L) .create()), KV.of( 123, SplunkEvent.newBuilder() .withEvent("test-event-2") .withHost("test-host-2") .withIndex("test-index-2") .withSource("test-source-2") .withSourceType("test-source-type-2") .withTime(12345L) .create())); PCollection<SplunkWriteError> actual = pipeline .apply("Create Input data", Create.of(testEvents)) .apply( "SplunkEventWriter", ParDo.of( SplunkEventWriter.newBuilder() .withUrl(Joiner.on(':').join("http://localhost", testPort)) .withInputBatchCount( StaticValueProvider.of(1)) // Test one request per SplunkEvent .withToken("test-token") .build())); // All successful responses. PAssert.that(actual).empty(); pipeline.run(); // Server received exactly the expected number of POST requests. mockServerClient.verify( HttpRequest.request(EXPECTED_PATH), VerificationTimes.exactly(testEvents.size())); }
/**
 * Builds an OkHttp request builder for the given URL, form data and HTTP method.
 * GET/HEAD encode the form into the query string; PUT/DELETE/POST (default)
 * send it as a form body against the bare URL.
 */
public static Request.Builder buildRequestBuilder(final String url, final Map<String, ?> form, final HTTPMethod method) {
    final Request.Builder builder = new Request.Builder();
    switch (method) {
        case GET:
            builder.url(buildHttpUrl(url, form)).get();
            break;
        case HEAD:
            builder.url(buildHttpUrl(url, form)).head();
            break;
        case PUT:
            builder.url(buildHttpUrl(url)).put(buildFormBody(form));
            break;
        case DELETE:
            builder.url(buildHttpUrl(url)).delete(buildFormBody(form));
            break;
        default:
            // Any other method (notably POST) falls through to a form POST.
            builder.url(buildHttpUrl(url)).post(buildFormBody(form));
            break;
    }
    return builder;
}
// PUT requests carry a form body, use the PUT method, and keep the bare URL
// (no query string from the form).
@Test public void buildRequestBuilderForPUTTest() { Request.Builder builder = HttpUtils.buildRequestBuilder(TEST_URL, formMap, HttpUtils.HTTPMethod.PUT); Assert.assertNotNull(builder); Assert.assertNotNull(builder.build().body()); Assert.assertEquals(builder.build().method(), HttpUtils.HTTPMethod.PUT.value()); Assert.assertEquals(builder.build().url().toString(), TEST_URL); }
// Looks up a physical partition by id. Fast path: the id->partition index.
// Fallback (index miss): linear scan over temp partitions first, then regular
// partitions, checking each sub-partition id. Returns null when not found.
public PhysicalPartition getPhysicalPartition(long physicalPartitionId) { Long partitionId = physicalPartitionIdToPartitionId.get(physicalPartitionId); if (partitionId == null) { for (Partition partition : tempPartitions.getAllPartitions()) { for (PhysicalPartition subPartition : partition.getSubPartitions()) { if (subPartition.getId() == physicalPartitionId) { return subPartition; } } } for (Partition partition : idToPartition.values()) { for (PhysicalPartition subPartition : partition.getSubPartitions()) { if (subPartition.getId() == physicalPartitionId) { return subPartition; } } } } else { Partition partition = getPartition(partitionId); if (partition != null) { return partition.getSubPartition(physicalPartitionId); } } return null; }
// Looking up a non-existent physical partition name returns null (no throw)
// for every table in the test database.
@Test public void testGetPhysicalPartitionByName() { Database db = UnitTestUtil.createDb(1, 2, 3, 4, 5, 6, 7, KeysType.AGG_KEYS); List<Table> tables = db.getTables(); for (Table table : tables) { OlapTable olapTable = (OlapTable) table; PhysicalPartition partition = olapTable.getPhysicalPartition("not_existed_name"); Assert.assertNull(partition); } }
// Queries the Azure Instance Metadata Service and extracts
// compute.platformFaultDomain as the failure domain. Missing compute node,
// missing/non-textual fault-domain field, or unparseable payload all raise
// RuntimeException with the offending payload for diagnosis.
@VisibleForTesting @Override public String getFailureDomain() { final String responsePayload = getAzureInstanceMetadata(); // For a sample response payload, // check https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service?tabs=linux try { final JsonNode jsonNode = JsonUtils.stringToJsonNode(responsePayload); final JsonNode computeNode = jsonNode.path(COMPUTE); if (computeNode.isMissingNode()) { throw new RuntimeException( "[AzureEnvironmentProvider]: Compute node is missing in the payload. Cannot retrieve failure domain " + "information"); } final JsonNode platformFailureDomainNode = computeNode.path(PLATFORM_FAULT_DOMAIN); if (platformFailureDomainNode.isMissingNode() || !platformFailureDomainNode.isTextual()) { throw new RuntimeException("[AzureEnvironmentProvider]: Json node platformFaultDomain is missing or is invalid." + " No failure domain information retrieved for given server instance"); } return platformFailureDomainNode.textValue(); } catch (IOException ex) { throw new RuntimeException(String.format( "[AzureEnvironmentProvider]: Errors when parsing response payload from Azure Instance Metadata Service: %s", responsePayload), ex); } }
// A canned IMDS response resource yields fault domain "36" with exactly one
// HTTP round trip and no extra client interactions.
@Test public void testFailureDomainRetrieval() throws IOException { mockUtil(); when(_mockHttpEntity.getContent()).thenReturn(getClass().getClassLoader().getResourceAsStream(IMDS_RESPONSE_FILE)); String failureDomain = _azureEnvironmentProviderWithParams.getFailureDomain(); Assert.assertEquals(failureDomain, "36"); verify(_mockHttpClient, times(1)).execute(any(HttpGet.class)); verify(_mockHttpResponse, times(1)).getCode(); verify(_mockHttpResponse, times(1)).getEntity(); verifyNoMoreInteractions(_mockHttpClient, _mockHttpResponse); }
// Renders a ThreadInfo like java.lang.management.ThreadInfo.toString(), but
// with a configurable stack depth (maxDepth) instead of the JDK's fixed cap:
// header (name/id/state/lock/owner/flags), up to maxDepth frames with the
// blocking/waiting lock shown on the top frame and locked monitors at their
// frame depth, a "..." marker when truncated, then any locked synchronizers.
@VisibleForTesting protected static String stringifyThreadInfo( java.lang.management.ThreadInfo threadInfo, int maxDepth) { StringBuilder sb = new StringBuilder( "\"" + threadInfo.getThreadName() + "\"" + " Id=" + threadInfo.getThreadId() + " " + threadInfo.getThreadState()); if (threadInfo.getLockName() != null) { sb.append(" on " + threadInfo.getLockName()); } if (threadInfo.getLockOwnerName() != null) { sb.append( " owned by \"" + threadInfo.getLockOwnerName() + "\" Id=" + threadInfo.getLockOwnerId()); } if (threadInfo.isSuspended()) { sb.append(" (suspended)"); } if (threadInfo.isInNative()) { sb.append(" (in native)"); } sb.append('\n'); int i = 0; StackTraceElement[] stackTraceElements = threadInfo.getStackTrace(); for (; i < stackTraceElements.length && i < maxDepth; i++) { StackTraceElement ste = stackTraceElements[i]; sb.append("\tat " + ste.toString()); sb.append('\n'); if (i == 0 && threadInfo.getLockInfo() != null) { Thread.State ts = threadInfo.getThreadState(); switch (ts) { case BLOCKED: sb.append("\t- blocked on " + threadInfo.getLockInfo()); sb.append('\n'); break; case WAITING: case TIMED_WAITING: sb.append("\t- waiting on " + threadInfo.getLockInfo()); sb.append('\n'); break; default: } } for (MonitorInfo mi : threadInfo.getLockedMonitors()) { if (mi.getLockedStackDepth() == i) { sb.append("\t- locked " + mi); sb.append('\n'); } } } if (i < threadInfo.getStackTrace().length) { sb.append("\t..."); sb.append('\n'); } LockInfo[] locks = threadInfo.getLockedSynchronizers(); if (locks.length > 0) { sb.append("\n\tNumber of locked synchronizers = " + locks.length); sb.append('\n'); for (LockInfo li : locks) { sb.append("\t- " + li); sb.append('\n'); } } sb.append('\n'); return sb.toString(); }
// Cross-checks the custom renderer against ThreadInfo.toString() for the
// current thread, skipping the first (header) line since JDK 11+ adds
// daemon/priority details there.
@Test void testComparedWithDefaultJDKImplemetation() { ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean(); ThreadInfo threadInfo = threadMxBean.getThreadInfo(Thread.currentThread().getId(), Integer.MAX_VALUE); // JDK11 has increased the output info of threadInfo.daemon and threadInfo.priority compared // to JDK8, hence only compare the output of stacktrace content for compatibility. String[] threadInfoLines = threadInfo.toString().split("\n"); String[] expected = Arrays.copyOfRange(threadInfoLines, 1, threadInfoLines.length); String stringifyThreadInfo = ThreadDumpInfo.stringifyThreadInfo(threadInfo, 8); String[] stringifyThreadInfoLines = stringifyThreadInfo.split("\n"); String[] stringified = Arrays.copyOfRange(stringifyThreadInfoLines, 1, stringifyThreadInfoLines.length); assertThat(stringified).isEqualTo(expected); }
/**
 * Returns the maximum accumulated so far.
 */
@Override
public Double getLocalValue() {
    return max;
}
/**
 * A freshly constructed accumulator must report the identity value for max,
 * i.e. negative infinity.
 */
@Test
void testGet() {
    final DoubleMaximum accumulator = new DoubleMaximum();
    assertThat(accumulator.getLocalValue()).isCloseTo(Double.NEGATIVE_INFINITY, within(0.0));
}
/**
 * Sends the given payload to an arbitrary Camel endpoint by lazily bridging it
 * through a dedicated reactive-streams route, reusing the bridge for repeated URIs.
 */
@Override
public Publisher<Exchange> to(String uri, Object data) {
    final String streamName = requestedUriToStream.computeIfAbsent(uri, camelUri -> {
        try {
            final String generatedName = context.getUuidGenerator().generateUuid();
            // Bridge: reactive-streams:<uuid> -> the requested endpoint.
            RouteBuilder.addRoutes(context, rb -> rb.from("reactive-streams:" + generatedName).to(camelUri));
            return generatedName;
        } catch (Exception e) {
            throw new IllegalStateException("Unable to create requested reactive stream from direct URI: " + uri, e);
        }
    });
    return toStream(streamName, data);
}
/**
 * The function returned by {@code crs.to("bean:hello", String.class)} must route each
 * emitted item through the bean endpoint and yield the expected greeting.
 */
@Test
public void testToFunction() throws Exception {
    context.start();

    AtomicInteger counter = new AtomicInteger();
    CountDownLatch completion = new CountDownLatch(1);

    Function<Object, Publisher<String>> toHello = crs.to("bean:hello", String.class);
    Flowable.just(1, 2, 3)
            .flatMap(toHello::apply)
            .doOnNext(greeting -> assertEquals("Hello " + counter.incrementAndGet(), greeting))
            .doOnNext(greeting -> completion.countDown())
            .subscribe();

    assertTrue(completion.await(2, TimeUnit.SECONDS));
}
/**
 * Evaluates the compiled term against the given row.
 */
@VisibleForTesting
Object evaluate(final GenericRow row) {
    final TermEvaluationContext evaluationContext = new TermEvaluationContext(row);
    return term.getValue(evaluationContext);
}
@Test public void shouldEvaluateCastToDouble() { // Given: final Expression cast1 = new Cast( new LongLiteral(10L), new Type(SqlPrimitiveType.of("DOUBLE")) ); final Expression cast2 = new Cast( new StringLiteral("1234.5"), new Type(SqlPrimitiveType.of("DOUBLE")) ); final Expression cast3 = new Cast( new IntegerLiteral(12), new Type(SqlPrimitiveType.of("DOUBLE")) ); final Expression cast4 = new Cast( new DecimalLiteral(new BigDecimal("4567.5")), new Type(SqlPrimitiveType.of("DOUBLE")) ); // When: InterpretedExpression interpreter1 = interpreter(cast1); InterpretedExpression interpreter2 = interpreter(cast2); InterpretedExpression interpreter3 = interpreter(cast3); InterpretedExpression interpreter4 = interpreter(cast4); // Then: assertThat(interpreter1.evaluate(ROW), is(10d)); assertThat(interpreter2.evaluate(ROW), is(1234.5d)); assertThat(interpreter3.evaluate(ROW), is(12d)); assertThat(interpreter4.evaluate(ROW), is(4567.5d)); }
/**
 * Converts a BTC amount to a whole number of satoshis.
 *
 * @param coins amount in BTC
 * @return the equivalent number of satoshis
 * @throws ArithmeticException if the amount has more precision than a satoshi can
 *     represent, or overflows a long
 */
public static long btcToSatoshi(BigDecimal coins) throws ArithmeticException {
    // Shift the decimal point to express the value in the smallest unit, then
    // convert exactly; longValueExact throws if precision would be lost.
    final BigDecimal satoshis = coins.movePointRight(SMALLEST_UNIT_EXPONENT);
    return satoshis.longValueExact();
}
/**
 * A value with more fractional digits than the smallest unit allows (9 here,
 * presumably one beyond SMALLEST_UNIT_EXPONENT = 8 — confirm against the constant)
 * must fail the exact conversion with an ArithmeticException.
 */
@Test(expected = ArithmeticException.class)
public void testBtcToSatoshi_tooPrecise2() {
    btcToSatoshi(new BigDecimal("92233720368.547758079")); // More than SMALLEST_UNIT_EXPONENT precision
}
/**
 * Counts the mail templates that belong to the given mail account.
 *
 * @param accountId id of the mail account
 * @return number of templates referencing that account
 */
@Override
public long getMailTemplateCountByAccountId(Long accountId) {
    // Straight delegation to the mapper; no extra filtering here.
    return mailTemplateMapper.selectCountByAccountId(accountId);
}
@Test public void testCountByAccountId() { // mock 数据 MailTemplateDO dbMailTemplate = randomPojo(MailTemplateDO.class); mailTemplateMapper.insert(dbMailTemplate); // 测试 accountId 不匹配 mailTemplateMapper.insert(cloneIgnoreId(dbMailTemplate, o -> o.setAccountId(2L))); // 准备参数 Long accountId = dbMailTemplate.getAccountId(); // 调用 long count = mailTemplateService.getMailTemplateCountByAccountId(accountId); // 断言 assertEquals(1, count); }
/**
 * Extracts identification evidence (vendor, product, version, description, license)
 * from a JAR's manifest and attaches it to the dependency.
 *
 * <p>Main attributes are mined with per-key heuristics and per-key confidence levels;
 * per-entry attributes are then mined with a smaller set of keys; finally a
 * Specification-Version is only used as version evidence when no
 * Implementation-Version was present.
 *
 * @param dependency the dependency being analyzed; evidence is added to it
 * @param classInformation class names extracted from the JAR, used to cross-check values
 * @return true if any evidence was collected from the manifest
 * @throws IOException if the JAR file cannot be read
 */
protected boolean parseManifest(Dependency dependency, List<ClassNameInformation> classInformation) throws IOException {
    boolean foundSomething = false;
    try (JarFile jar = new JarFile(dependency.getActualFilePath(), false)) {
        final Manifest manifest = jar.getManifest();
        if (manifest == null) {
            // Source/javadoc companion JARs routinely lack a manifest; only log for others.
            if (!dependency.getFileName().toLowerCase().endsWith("-sources.jar")
                    && !dependency.getFileName().toLowerCase().endsWith("-javadoc.jar")
                    && !dependency.getFileName().toLowerCase().endsWith("-src.jar")
                    && !dependency.getFileName().toLowerCase().endsWith("-doc.jar")) {
                LOGGER.debug("Jar file '{}' does not contain a manifest.", dependency.getFileName());
            }
            return false;
        }
        String source = "Manifest";
        String specificationVersion = null;
        boolean hasImplementationVersion = false;
        Attributes atts = manifest.getMainAttributes();
        for (Entry<Object, Object> entry : atts.entrySet()) {
            String key = entry.getKey().toString();
            String value = atts.getValue(key);
            // Strip HTML markup from values that contain it.
            if (HTML_DETECTION_PATTERN.matcher(value).find()) {
                value = Jsoup.parse(value).text();
            }
            // Drop the "git@github.com:" / "git@gitlab.com:" prefix (both 15 chars).
            if (value.startsWith("git@github.com:") || value.startsWith("git@gitlab.com:")) {
                value = value.substring(15);
            }
            if (IGNORE_VALUES.contains(value)) {
                continue;
            } else if (key.equalsIgnoreCase(Attributes.Name.IMPLEMENTATION_TITLE.toString())) {
                foundSomething = true;
                dependency.addEvidence(EvidenceType.PRODUCT, source, key, value, Confidence.HIGH);
                addMatchingValues(classInformation, value, dependency, EvidenceType.PRODUCT);
            } else if (key.equalsIgnoreCase(Attributes.Name.IMPLEMENTATION_VERSION.toString())) {
                hasImplementationVersion = true;
                foundSomething = true;
                dependency.addEvidence(EvidenceType.VERSION, source, key, value, Confidence.HIGH);
            } else if ("specification-version".equalsIgnoreCase(key)) {
                // Remember for later: only used if no Implementation-Version exists.
                specificationVersion = value;
            } else if (key.equalsIgnoreCase(Attributes.Name.IMPLEMENTATION_VENDOR.toString())) {
                foundSomething = true;
                dependency.addEvidence(EvidenceType.VENDOR, source, key, value, Confidence.HIGH);
                addMatchingValues(classInformation, value, dependency, EvidenceType.VENDOR);
            } else if (key.equalsIgnoreCase(IMPLEMENTATION_VENDOR_ID)) {
                foundSomething = true;
                dependency.addEvidence(EvidenceType.VENDOR, source, key, value, Confidence.MEDIUM);
                addMatchingValues(classInformation, value, dependency, EvidenceType.VENDOR);
            } else if (key.equalsIgnoreCase(BUNDLE_DESCRIPTION)) {
                // Skip Sonatype's boilerplate description.
                if (!value.startsWith("Sonatype helps open source projects")) {
                    foundSomething = true;
                    addDescription(dependency, value, "manifest", key);
                    addMatchingValues(classInformation, value, dependency, EvidenceType.PRODUCT);
                }
            } else if (key.equalsIgnoreCase(BUNDLE_NAME)) {
                foundSomething = true;
                dependency.addEvidence(EvidenceType.PRODUCT, source, key, value, Confidence.MEDIUM);
                addMatchingValues(classInformation, value, dependency, EvidenceType.PRODUCT);
                // //the following caused false positives.
                // } else if (key.equalsIgnoreCase(BUNDLE_VENDOR)) {
            } else if (key.equalsIgnoreCase(BUNDLE_VERSION)) {
                foundSomething = true;
                dependency.addEvidence(EvidenceType.VERSION, source, key, value, Confidence.HIGH);
            } else if (key.equalsIgnoreCase(Attributes.Name.MAIN_CLASS.toString())) {
                //noinspection UnnecessaryContinue
                continue;
                //skipping main class as if this has important information to add it will be added during class name analysis...
            } else if ("implementation-url".equalsIgnoreCase(key)
                    && value != null
                    && value.startsWith("https://projects.spring.io/spring-boot/#/spring-boot-starter-parent/parent/")) {
                continue;
            } else {
                // Fallback heuristics on the lowercased key, after filtering out keys
                // known to be noise (JDK info, classpaths, SCM coordinates, packages).
                key = key.toLowerCase();
                if (!IGNORE_KEYS.contains(key)
                        && !key.endsWith("jdk")
                        && !key.contains("lastmodified")
                        && !key.endsWith("package")
                        && !key.endsWith("classpath")
                        && !key.endsWith("class-path")
                        && !key.endsWith("-scm") //todo change this to a regex?
                        && !key.startsWith("scm-")
                        && !value.trim().startsWith("scm:")
                        && !isImportPackage(key, value)
                        && !isPackage(key, value)) {
                    foundSomething = true;
                    if (key.contains("version")) {
                        if (!key.contains("specification")) {
                            dependency.addEvidence(EvidenceType.VERSION, source, key, value, Confidence.MEDIUM);
                        }
                    } else if ("build-id".equals(key)) {
                        // Trim trailing "(...)" / "[...]" build decorations.
                        int pos = value.indexOf('(');
                        if (pos > 0) {
                            value = value.substring(0, pos - 1);
                        }
                        pos = value.indexOf('[');
                        if (pos > 0) {
                            value = value.substring(0, pos - 1);
                        }
                        dependency.addEvidence(EvidenceType.VERSION, source, key, value, Confidence.MEDIUM);
                    } else if (key.contains("title")) {
                        dependency.addEvidence(EvidenceType.PRODUCT, source, key, value, Confidence.MEDIUM);
                        addMatchingValues(classInformation, value, dependency, EvidenceType.PRODUCT);
                    } else if (key.contains("vendor")) {
                        if (key.contains("specification")) {
                            dependency.addEvidence(EvidenceType.VENDOR, source, key, value, Confidence.LOW);
                        } else {
                            dependency.addEvidence(EvidenceType.VENDOR, source, key, value, Confidence.MEDIUM);
                            addMatchingValues(classInformation, value, dependency, EvidenceType.VENDOR);
                        }
                    } else if (key.contains("name")) {
                        dependency.addEvidence(EvidenceType.PRODUCT, source, key, value, Confidence.MEDIUM);
                        dependency.addEvidence(EvidenceType.VENDOR, source, key, value, Confidence.MEDIUM);
                        addMatchingValues(classInformation, value, dependency, EvidenceType.VENDOR);
                        addMatchingValues(classInformation, value, dependency, EvidenceType.PRODUCT);
                    } else if (key.contains("license")) {
                        addLicense(dependency, value);
                    } else if (key.contains("description")) {
                        if (!value.startsWith("Sonatype helps open source projects")) {
                            final String trimmedDescription = addDescription(dependency, value, "manifest", key);
                            addMatchingValues(classInformation, trimmedDescription, dependency, EvidenceType.VENDOR);
                            addMatchingValues(classInformation, trimmedDescription, dependency, EvidenceType.PRODUCT);
                        }
                    } else {
                        // Unknown key: record as low-confidence product/vendor evidence and
                        // mine numeric tokens as low-confidence version evidence.
                        dependency.addEvidence(EvidenceType.PRODUCT, source, key, value,
                                Confidence.LOW);
                        dependency.addEvidence(EvidenceType.VENDOR, source, key, value, Confidence.LOW);
                        addMatchingValues(classInformation, value, dependency, EvidenceType.VERSION);
                        addMatchingValues(classInformation, value, dependency, EvidenceType.PRODUCT);
                        if (value.matches(".*\\d.*")) {
                            final StringTokenizer tokenizer = new StringTokenizer(value, " ");
                            while (tokenizer.hasMoreElements()) {
                                final String s = tokenizer.nextToken();
                                if (s.matches("^[0-9.]+$")) {
                                    dependency.addEvidence(EvidenceType.VERSION, source, key, s, Confidence.LOW);
                                }
                            }
                        }
                    }
                }
            }
        }
        // Second pass: per-entry manifest sections, attributed to "manifest: <entry>".
        for (Map.Entry<String, Attributes> item : manifest.getEntries().entrySet()) {
            final String name = item.getKey();
            source = "manifest: " + name;
            atts = item.getValue();
            for (Entry<Object, Object> entry : atts.entrySet()) {
                final String key = entry.getKey().toString();
                final String value = atts.getValue(key);
                if (key.equalsIgnoreCase(Attributes.Name.IMPLEMENTATION_TITLE.toString())) {
                    foundSomething = true;
                    dependency.addEvidence(EvidenceType.PRODUCT, source, key, value, Confidence.MEDIUM);
                    addMatchingValues(classInformation, value, dependency, EvidenceType.PRODUCT);
                } else if (key.equalsIgnoreCase(Attributes.Name.IMPLEMENTATION_VERSION.toString())) {
                    foundSomething = true;
                    dependency.addEvidence(EvidenceType.VERSION, source, key, value, Confidence.MEDIUM);
                } else if (key.equalsIgnoreCase(Attributes.Name.IMPLEMENTATION_VENDOR.toString())) {
                    foundSomething = true;
                    dependency.addEvidence(EvidenceType.VENDOR, source, key, value, Confidence.MEDIUM);
                    addMatchingValues(classInformation, value, dependency, EvidenceType.VENDOR);
                } else if (key.equalsIgnoreCase(Attributes.Name.SPECIFICATION_TITLE.toString())) {
                    foundSomething = true;
                    dependency.addEvidence(EvidenceType.PRODUCT, source, key, value, Confidence.MEDIUM);
                    addMatchingValues(classInformation, value, dependency, EvidenceType.PRODUCT);
                }
            }
        }
        // Specification-Version only counts when there was no Implementation-Version.
        if (specificationVersion != null && !hasImplementationVersion) {
            foundSomething = true;
            dependency.addEvidence(EvidenceType.VERSION,
                    source, "specification-version", specificationVersion, Confidence.HIGH);
        }
    }
    return foundSomething;
}
/**
 * Parsing the manifest of a real JAR must yield per-entry vendor evidence attributed
 * to the entry name ("manifest: org/apache/xalan/").
 */
@Test
public void testParseManifest() throws Exception {
    final File jarFile = BaseTest.getResourceAsFile(this, "xalan-2.7.0.jar");
    final Dependency dependency = new Dependency(jarFile);
    final JarAnalyzer analyzer = new JarAnalyzer();
    final List<JarAnalyzer.ClassNameInformation> classNames = new ArrayList<>();

    analyzer.parseManifest(dependency, classNames);

    assertTrue(dependency.getEvidence(EvidenceType.VENDOR).toString().contains("manifest: org/apache/xalan/"));
}
/**
 * Derives a display name for the workload: an explicit {@code workload.name} wins;
 * otherwise the configured format template is filled in with the workload's settings.
 */
String from(Workload workload) {
    if (workload.name != null) {
        return workload.name;
    }
    // A rate at or above the cap is displayed as "max-rate" instead of a number.
    final String producerRateDisplay = (workload.producerRate >= MAX_PRODUCER_RATE)
            ? "max-rate"
            : countToDisplaySize(workload.producerRate);

    final Map<String, Object> substitutions = new HashMap<>();
    substitutions.put("topics", countToDisplaySize(workload.topics));
    substitutions.put("partitionsPerTopic", countToDisplaySize(workload.partitionsPerTopic));
    substitutions.put("messageSize", countToDisplaySize(workload.messageSize));
    substitutions.put("subscriptionsPerTopic", countToDisplaySize(workload.subscriptionsPerTopic));
    substitutions.put("producersPerTopic", countToDisplaySize(workload.producersPerTopic));
    substitutions.put("consumerPerSubscription", countToDisplaySize(workload.consumerPerSubscription));
    substitutions.put("producerRate", producerRateDisplay);
    substitutions.put("keyDistributor", workload.keyDistributor);
    substitutions.put("payloadFile", workload.payloadFile);
    substitutions.put("useRandomizedPayloads", workload.useRandomizedPayloads);
    substitutions.put("randomBytesRatio", workload.randomBytesRatio);
    substitutions.put("randomizedPayloadPoolSize", countToDisplaySize(workload.randomizedPayloadPoolSize));
    substitutions.put("consumerBacklogSizeGB", countToDisplaySize(workload.consumerBacklogSizeGB));
    substitutions.put("testDurationMinutes", workload.testDurationMinutes);
    substitutions.put("warmupDurationMinutes", workload.warmupDurationMinutes);
    return StrSubstitutor.replace(format, substitutions, "${", "}");
}
/**
 * A workload without an explicit name must be rendered through the format template
 * with display-sized counts.
 */
@Test
void from() {
    Workload workload = new Workload();
    workload.producerRate = 1000000;
    workload.topics = 1456;
    workload.partitionsPerTopic = 2123;
    workload.messageSize = 617890;
    workload.producersPerTopic = 45;
    workload.consumerPerSubscription = 541;

    String generatedName = new WorkloadNameFormat(nameFormat).from(workload);

    assertThat(generatedName).isEqualTo("1k-topics-2k-partitions-617kb-45p-541c-1m");
}
/**
 * Adds a sink node writing to a fixed topic, delegating to the
 * {@code TopicNameExtractor}-based overload with a static extractor.
 *
 * @param name sink node name; must not be null
 * @param topic destination topic; must not be null
 * @param keySerializer optional key serializer (null = default)
 * @param valSerializer optional value serializer (null = default)
 * @param partitioner optional custom partitioner
 * @param predecessorNames parent node names; must be non-null and non-empty
 * @throws TopologyException if no predecessor is given
 */
public final <K, V> void addSink(final String name,
                                 final String topic,
                                 final Serializer<K> keySerializer,
                                 final Serializer<V> valSerializer,
                                 final StreamPartitioner<? super K, ? super V> partitioner,
                                 final String... predecessorNames) {
    Objects.requireNonNull(name, "name must not be null");
    Objects.requireNonNull(topic, "topic must not be null");
    Objects.requireNonNull(predecessorNames, "predecessor names must not be null");
    if (predecessorNames.length == 0) {
        throw new TopologyException("Sink " + name + " must have at least one parent");
    }
    addSink(name, new StaticTopicNameExtractor<>(topic), keySerializer, valSerializer, partitioner, predecessorNames);
    // Track the static sink topic and invalidate the cached node groups.
    nodeToSinkTopic.put(name, topic);
    nodeGroups = null;
}
/**
 * A sink must not list itself as its own parent; the builder is expected to reject the
 * self-referencing predecessor with a TopologyException.
 */
@Test
public void testAddSinkWithSelfParent() {
    assertThrows(TopologyException.class, () -> builder.addSink("sink", "topic-2", null, null, null, "sink"));
}
/**
 * Starts a remote actor system bound to the wildcard bind address, delegating to the
 * full overload with the default Flink actor-system name and the fork-join executor
 * configuration derived from the given Flink configuration.
 *
 * @param configuration Flink configuration used to derive executor settings
 * @param externalAddress address advertised to remote peers
 * @param externalPortRange candidate external port range
 * @param logger logger used during startup
 * @return the started actor system
 * @throws Exception if the actor system cannot be started
 */
@VisibleForTesting
public static ActorSystem startRemoteActorSystem(
        Configuration configuration,
        String externalAddress,
        String externalPortRange,
        Logger logger)
        throws Exception {
    return startRemoteActorSystem(
            configuration,
            PekkoUtils.getFlinkActorSystemName(),
            externalAddress,
            externalPortRange,
            NetUtils.getWildcardIPAddress(),
            Optional.empty(),
            logger,
            PekkoUtils.getForkJoinExecutorConfig(
                    getForkJoinExecutorConfiguration(configuration)),
            null);
}
/**
 * Ten threads, released simultaneously by a barrier, each create a remote actor
 * system; all creations must succeed without interfering with one another, and all
 * systems are terminated afterwards.
 */
@Test
void testConcurrentActorSystemCreation() throws Exception {
    final int concurrentCreations = 10;
    final ExecutorService executorService = Executors.newFixedThreadPool(concurrentCreations);
    // Barrier ensures all creations start at the same instant.
    final CyclicBarrier cyclicBarrier = new CyclicBarrier(concurrentCreations);
    try {
        final List<CompletableFuture<Void>> actorSystemFutures =
                IntStream.range(0, concurrentCreations)
                        .mapToObj(
                                ignored ->
                                        CompletableFuture.supplyAsync(
                                                CheckedSupplier.unchecked(
                                                        () -> {
                                                            cyclicBarrier.await();
                                                            // Port 0 = pick any free port.
                                                            return ActorSystemBootstrapTools
                                                                    .startRemoteActorSystem(
                                                                            new Configuration(),
                                                                            "localhost",
                                                                            "0",
                                                                            LOG);
                                                        }),
                                                executorService))
                        .map(
                                // terminate ActorSystems
                                actorSystemFuture ->
                                        actorSystemFuture.thenCompose(
                                                PekkoUtils::terminateActorSystem))
                        .collect(Collectors.toList());
        // Propagates the first failure, if any.
        FutureUtils.completeAll(actorSystemFutures).get();
    } finally {
        ExecutorUtils.gracefulShutdown(10000L, TimeUnit.MILLISECONDS, executorService);
    }
}
/**
 * Wraps this adapter so that JSON null and Java null are handled transparently:
 * null values are written as JSON null, and JSON null is read as Java null, without
 * ever invoking this adapter's own read/write.
 */
public final TypeAdapter<T> nullSafe() {
    final TypeAdapter<T> delegate = this;
    return new TypeAdapter<T>() {
        @Override
        public void write(JsonWriter out, T value) throws IOException {
            if (value == null) {
                out.nullValue();
                return;
            }
            delegate.write(out, value);
        }

        @Override
        public T read(JsonReader reader) throws IOException {
            if (reader.peek() != JsonToken.NULL) {
                return delegate.read(reader);
            }
            reader.nextNull();
            return null;
        }
    };
}
/**
 * The adapter returned by nullSafe() must short-circuit nulls on both the write and
 * read paths; the wrapped delegate (which always throws) must never be invoked.
 */
@Test
public void testNullSafe() throws IOException {
    TypeAdapter<String> adapter =
            new TypeAdapter<String>() {
                @Override
                public void write(JsonWriter out, String value) {
                    throw new AssertionError("unexpected call");
                }

                @Override
                public String read(JsonReader in) {
                    throw new AssertionError("unexpected call");
                }
            }.nullSafe();
    assertThat(adapter.toJson(null)).isEqualTo("null");
    assertThat(adapter.fromJson("null")).isNull();
}
/**
 * Translates an ORC {@code SearchArgument} into an Iceberg filter {@link Expression}.
 *
 * @param sarg the search argument pushed down by the query engine
 * @return the equivalent Iceberg expression tree
 */
public static Expression generateFilterExpression(SearchArgument sarg) {
    return translate(sarg.getExpression(), sarg.getLeaves());
}
/**
 * An ORC TIMESTAMP predicate leaf must translate into an Iceberg equality predicate on
 * the equivalent microsecond timestamp value.
 */
@Test
public void testTimestampType() {
    Literal<Long> timestampLiteral = Literal.of("2012-10-02T05:16:17.123456").to(Types.TimestampType.withoutZone());
    long timestampMicros = timestampLiteral.value();
    // Build the java.sql.Timestamp form that the ORC SearchArgument API expects.
    Timestamp ts = Timestamp.valueOf(DateTimeUtil.timestampFromMicros(timestampMicros));
    SearchArgument.Builder builder = SearchArgumentFactory.newBuilder();
    SearchArgument arg = builder.startAnd().equals("timestamp", PredicateLeaf.Type.TIMESTAMP, ts).end().build();
    UnboundPredicate expected = Expressions.equal("timestamp", timestampMicros);
    UnboundPredicate actual = (UnboundPredicate) HiveIcebergFilterFactory.generateFilterExpression(arg);
    assertPredicatesMatch(expected, actual);
}
/**
 * Collects the union of all option names declared by the MirrorMaker connector
 * ConfigDefs (checkpoint, source and heartbeat connectors).
 */
Set<String> allConfigNames() {
    final Set<String> names = new HashSet<>();
    names.addAll(MirrorCheckpointConfig.CONNECTOR_CONFIG_DEF.names());
    names.addAll(MirrorSourceConfig.CONNECTOR_CONFIG_DEF.names());
    names.addAll(MirrorHeartbeatConfig.CONNECTOR_CONFIG_DEF.names());
    return names;
}
/**
 * allConfigNames must include options contributed by the source, checkpoint and
 * heartbeat connector ConfigDefs.
 */
@Test
public void testAllConfigNames() {
    MirrorMakerConfig config = new MirrorMakerConfig(makeProps("clusters", "a, b"));

    Set<String> configNames = config.allConfigNames();

    assertTrue(configNames.contains("topics"));
    assertTrue(configNames.contains("groups"));
    assertTrue(configNames.contains("emit.heartbeats.enabled"));
}
/**
 * Builds the container's local-resource map for a runC container: resolves the image
 * manifest (defaulting the image when none was requested), adds the image config and
 * every layer as local resources, and records them on the container as a
 * {@link RuncRuntimeObject}.
 *
 * @param container the container being launched
 * @return the container's local resources, augmented with runC layer/config entries
 * @throws IOException if the image manifest or its resources cannot be resolved
 */
public Map<String, LocalResource> getLocalResources(
    Container container) throws IOException {
    Map<String, LocalResource> containerLocalRsrc =
        container.getLaunchContext().getLocalResources();
    long layerCount = 0;
    Map<String, String> environment =
        container.getLaunchContext().getEnvironment();
    String imageName = environment.get(ENV_RUNC_CONTAINER_IMAGE);
    // Fall back to the configured default image and record it in the environment.
    if (imageName == null || imageName.isEmpty()) {
        environment.put(ENV_RUNC_CONTAINER_IMAGE, defaultRuncImage);
        imageName = defaultRuncImage;
    }
    ImageManifest manifest = imageTagToManifestPlugin.getManifestFromImageTag(imageName);
    LocalResource config = manifestToResourcesPlugin.getConfigResource(manifest);
    List<LocalResource> layers = manifestToResourcesPlugin.getLayerResources(manifest);
    RuncRuntimeObject runcRuntimeObject = new RuncRuntimeObject(config, layers);
    container.setContainerRuntimeData(runcRuntimeObject);
    // Insert each layer under a unique "runc-layer<N>" key. Note: the counter only
    // advances on a key collision, so each successful insert leaves layerCount
    // pointing at an occupied key; the next layer first collides, then moves on.
    for (LocalResource localRsrc : layers) {
        while (containerLocalRsrc.putIfAbsent("runc-layer" + Long.toString(layerCount),
            localRsrc) != null) {
            layerCount++;
        }
    }
    // Same collision-avoiding insert for the single image config resource.
    while (containerLocalRsrc.putIfAbsent("runc-config" + Long.toString(layerCount),
        config) != null) {
        layerCount++;
    }
    return containerLocalRsrc;
}
/**
 * getLocalResources must record the resolved image config and layers on the container
 * as a RuncRuntimeObject.
 */
@Test
public void testGetLocalResources() throws Exception {
    RuncContainerRuntime runcRuntime =
        new MockRuncContainerRuntime(mockExecutor, mockCGroupsHandler);
    runcRuntime.initialize(conf, nmContext);
    runcRuntime.getLocalResources(container);

    RuncRuntimeObject runtimeObject = captureRuncRuntimeObject(1);
    LocalResource actualConfig = runtimeObject.getConfig();
    List<LocalResource> actualLayers = runtimeObject.getOCILayers();

    Assert.assertEquals(config, actualConfig);
    Assert.assertEquals(layers, actualLayers);
}
@Udf(description = "Returns the sign of an INT value, denoted by 1, 0 or -1.")
public Integer sign(
    @UdfParameter(
        value = "value",
        description = "The value to get the sign of."
    ) final Integer value
) {
    // SQL null semantics: sign(NULL) is NULL.
    if (value == null) {
        return null;
    }
    return Integer.signum(value);
}
/**
 * Every sign() overload must propagate a null input as a null result.
 * The casts select the Integer/Long/Double overloads explicitly.
 */
@Test
public void shouldHandleNull() {
    assertThat(udf.sign((Integer) null), is(nullValue()));
    assertThat(udf.sign((Long) null), is(nullValue()));
    assertThat(udf.sign((Double) null), is(nullValue()));
}
/**
 * Determines whether the given class exposes a generated ProjectionMask nested class,
 * caching the answer per {@link JClass}.
 *
 * <p>If the class cannot be loaded (e.g. an external model not on the classpath), the
 * check falls back to whether the template will be generated from a source PDL.
 *
 * @param definedClass the codegen class handle to check (cache key)
 * @param templateSpec the template spec used for the source-generation fallback
 * @return true if a ProjectionMask API is (or will be) available for the class
 */
boolean hasProjectionMaskApi(JClass definedClass, ClassTemplateSpec templateSpec) {
    return _hasProjectionMaskCache.computeIfAbsent(definedClass, (jClass) -> {
        try {
            final Class<?> clazz = _classLoader.loadClass(jClass.fullName());
            // Look for a nested class named like the generated ProjectionMask.
            return Arrays.stream(clazz.getClasses()).anyMatch(
                c -> c.getSimpleName().equals(JavaDataTemplateGenerator.PROJECTION_MASK_CLASSNAME));
        } catch (ClassNotFoundException e) {
            // Ignore, and check if the class will be generated from a source PDL
        }
        return isGeneratedFromSource(templateSpec);
    });
}
/**
 * When the class cannot be loaded (external model) and it is not generated from
 * source, hasProjectionMaskApi must return false — and the negative answer must be
 * cached so the class loader is consulted only once.
 */
@Test
public void testHasProjectionMaskApiExternal() throws Exception {
    ProjectionMaskApiChecker projectionMaskApiChecker = new ProjectionMaskApiChecker(
        _templateSpecGenerator, _sourceFiles, _mockClassLoader);
    Mockito.when(_nestedTypeSource.getAbsolutePath()).thenReturn("models.jar:/AuditStamp.pdl");
    Mockito.when(_nestedType.fullName()).thenReturn("com.linkedin.common.AuditStamp");
    // Simulate the external class being absent from the classpath.
    Mockito.when(_mockClassLoader.loadClass("com.linkedin.common.AuditStamp")).thenThrow(
        new ClassNotFoundException());
    Assert.assertFalse(projectionMaskApiChecker.hasProjectionMaskApi(_nestedType, _templateSpec));
    Mockito.verify(_mockClassLoader, Mockito.times(1)).loadClass(Mockito.anyString());
    Mockito.verify(_nestedType, Mockito.times(1)).fullName();
    Mockito.verify(_nestedTypeSource, Mockito.times(1)).getAbsolutePath();
    // Check caching
    Assert.assertFalse(projectionMaskApiChecker.hasProjectionMaskApi(_nestedType, _templateSpec));
    Mockito.verifyNoMoreInteractions(_mockClassLoader);
}
/**
 * Applies the configured regex replacement to the value.
 *
 * <p>Returns null when the pattern does not match at all. The reported start/end
 * offsets refer to the first capture group of the initial match, or -1 when the
 * pattern declares no groups.
 */
public Result runExtractor(String value) {
    final Matcher matcher = pattern.matcher(value);
    if (!matcher.find()) {
        return null;
    }
    // Capture group offsets are taken before the replacement resets the matcher.
    final boolean hasGroup = matcher.groupCount() > 0;
    final int start = hasGroup ? matcher.start(1) : -1;
    final int end = hasGroup ? matcher.end(1) : -1;
    final String replaced;
    try {
        replaced = replaceAll ? matcher.replaceAll(replacement) : matcher.replaceFirst(replacement);
    } catch (Exception e) {
        throw new RuntimeException("Error while trying to replace string", e);
    }
    return new Result(replaced, start, end);
}
/**
 * The replacement string references capture group $2 while the regex defines only one
 * group; the extractor must surface the resulting failure as a RuntimeException.
 */
@Test(expected = RuntimeException.class)
public void testReplacementWithTooManyPlaceholders() throws Exception {
    final Message message = messageFactory.createMessage("Foobar 123", "source", Tools.nowUTC());
    final RegexReplaceExtractor extractor = new RegexReplaceExtractor(
        metricRegistry,
        "id",
        "title",
        0L,
        Extractor.CursorStrategy.COPY,
        "message",
        "message",
        ImmutableMap.<String, Object>of("regex", "Foobar (\\d+)", "replacement", "$1 $2"),
        "user",
        Collections.<Converter>emptyList(),
        Extractor.ConditionType.NONE,
        null);
    extractor.runExtractor(message);
}
/**
 * @return the name of the plugin this handler serves (the WebSocket plugin)
 */
@Override
public String pluginNamed() {
    return PluginEnum.WEB_SOCKET.getName();
}
/**
 * The handler must report the WebSocket plugin name.
 */
@Test
public void testPluginNamed() {
    // assertEquals takes (expected, actual); the original call had the arguments
    // reversed, which produces misleading failure messages.
    Assertions.assertEquals("websocket", webSocketPluginDataHandler.pluginNamed());
}
/**
 * @return the simple name of this type variable, e.g. {@code "T"}
 */
@Override
@PublicAPI(usage = ACCESS)
public String getName() {
    return name;
}
/**
 * The name of an imported (unbounded) type parameter must match its declared name.
 */
@Test
public void type_variable_name() {
    @SuppressWarnings("unused")
    class ClassWithUnboundTypeParameter<SOME_NAME> {
    }

    JavaTypeVariable<JavaClass> typeVariable =
            new ClassFileImporter().importClass(ClassWithUnboundTypeParameter.class).getTypeParameters().get(0);

    assertThat(typeVariable.getName()).isEqualTo("SOME_NAME");
}
/**
 * Creates a time-windowed view of this grouped stream using the given window
 * definition; the grouping serdes and builder state are carried over unchanged.
 *
 * @param windows the window specification
 * @return a {@link TimeWindowedKStream} over this grouped stream
 */
@Override
public <W extends Window> TimeWindowedKStream<K, V> windowedBy(final Windows<W> windows) {
    return new TimeWindowedKStreamImpl<>(
        windows,
        builder,
        subTopologySourceNodes,
        name,
        keySerde,
        valueSerde,
        aggregateBuilder,
        graphNode
    );
}
/**
 * windowedBy must reject a null SessionWindows argument with a NullPointerException.
 */
@Test
public void shouldNotAcceptNullSessionWindowsWhenAggregatingSessionWindows() {
    assertThrows(NullPointerException.class, () -> groupedStream.windowedBy((SessionWindows) null));
}
/**
 * Merges {@code paramsToMerge} into {@code params} in place.
 *
 * <p>Literal MAP params are merged recursively, propagating the parent's mode through
 * the merge context; literal STRING_MAP params are shallow-merged (incoming entries
 * overwrite); all other params are merged as whole values via
 * {@code buildMergedParamDefinition}.
 *
 * <p>NOTE(review): keys present in both maps appear twice in the concatenated key
 * stream (no dedup), so their merge runs twice; the second pass appears idempotent —
 * verify.
 *
 * @param params base parameter definitions, mutated to hold the merge result
 * @param paramsToMerge definitions to merge in; null is a no-op
 * @param context merge context controlling mode handling
 */
public static void mergeParams(
    Map<String, ParamDefinition> params,
    Map<String, ParamDefinition> paramsToMerge,
    MergeContext context) {
    if (paramsToMerge == null) {
        return;
    }
    Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
        .forEach(
            name -> {
                ParamDefinition paramToMerge = paramsToMerge.get(name);
                if (paramToMerge == null) {
                    return;
                }
                if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
                    // Literal maps merge recursively, carrying the parent mode down.
                    Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
                    Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
                    mergeParams(
                        baseMap,
                        toMergeMap,
                        MergeContext.copyWithParentMode(
                            context, params.getOrDefault(name, paramToMerge).getMode()));
                    params.put(
                        name,
                        buildMergedParamDefinition(
                            name, paramToMerge, params.get(name), context, baseMap));
                } else if (paramToMerge.getType() == ParamType.STRING_MAP && paramToMerge.isLiteral()) {
                    // Literal string maps are shallow-merged: incoming entries overwrite.
                    Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
                    Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
                    baseMap.putAll(toMergeMap);
                    params.put(
                        name,
                        buildMergedParamDefinition(
                            name, paramToMerge, params.get(name), context, baseMap));
                } else {
                    // Everything else merges as an opaque value.
                    params.put(
                        name,
                        buildMergedParamDefinition(
                            name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
                }
            });
}
/**
 * Merging a MUTABLE param over an OPTIONAL one must adopt the merged value and mode,
 * while the internal mode from the base definition is retained until cleanup.
 */
@Test
public void testMergeOverwriteModes() throws JsonProcessingException {
    Map<String, ParamDefinition> allParams = parseParamDefMap(
        "{'tomerge': {'type': 'STRING','value': 'hello', 'internal_mode': 'OPTIONAL'}}");
    Map<String, ParamDefinition> paramsToMerge =
        parseParamDefMap("{'tomerge': {'type': 'STRING', 'value': 'goodbye', 'mode': 'MUTABLE'}}");
    ParamsMergeHelper.mergeParams(allParams, paramsToMerge, restartContext);
    assertEquals(1, allParams.size());
    assertEquals("goodbye", allParams.get("tomerge").asStringParamDef().getValue());
    assertEquals(ParamMode.MUTABLE, allParams.get("tomerge").asStringParamDef().getMode());
    // Should keep internal mode until cleanup
    assertEquals(
        InternalParamMode.OPTIONAL, allParams.get("tomerge").asStringParamDef().getInternalMode());
}
/**
 * Builds a JDisc {@link HttpRequest} from the active Jetty servlet request, copies
 * connection metadata (TLS certificate chain, local port, SSL session) into the
 * request context, and memoizes the result as a servlet-request attribute.
 *
 * @throws RequestException (via createBadQueryException) when the query string
 *     contains invalid UTF-8
 */
public static HttpRequest newJDiscRequest(CurrentContainer container, HttpServletRequest servletRequest) {
    try {
        var jettyRequest = (Request) servletRequest;
        var jdiscHttpReq = HttpRequest.newServerRequest(
                container,
                getUri(servletRequest),
                getMethod(servletRequest),
                HttpRequest.Version.fromString(servletRequest.getProtocol()),
                new InetSocketAddress(servletRequest.getRemoteAddr(), servletRequest.getRemotePort()),
                getConnection(jettyRequest).getCreatedTimeStamp(),
                jettyRequest.getTimeStamp());
        // Expose TLS/connection details to downstream handlers via the context map.
        jdiscHttpReq.context().put(RequestUtils.JDISC_REQUEST_X509CERT, getCertChain(servletRequest));
        jdiscHttpReq.context().put(RequestUtils.JDICS_REQUEST_PORT, servletRequest.getLocalPort());
        SSLSession sslSession = (SSLSession) servletRequest.getAttribute(RequestUtils.JETTY_REQUEST_SSLSESSION);
        jdiscHttpReq.context().put(RequestUtils.JDISC_REQUEST_SSLSESSION, sslSession);
        // Cache the JDisc request on the servlet request so later lookups reuse it.
        servletRequest.setAttribute(HttpRequest.class.getName(), jdiscHttpReq);
        return jdiscHttpReq;
    } catch (Utf8Appendable.NotUtf8Exception e) {
        // Malformed UTF-8 in the query string surfaces here; translate to a 400.
        throw createBadQueryException(e);
    }
}
/**
 * An illegal query string must make request construction fail with a RequestException
 * carrying BAD_REQUEST rather than succeed.
 */
@Test
void testIllegalQuery() {
    try {
        HttpRequestFactory.newJDiscRequest(
                new MockContainer(),
                createMockRequest("http", "example.com", "/search", "query=\"contains_quotes\""));
        fail("Above statement should throw");
    } catch (RequestException e) {
        assertThat(e.getResponseStatus(), is(Response.Status.BAD_REQUEST));
    }
}
/**
 * Attempts to serve a sharding route from the route cache, falling back to the origin
 * router and caching the computed result only when it routes to exactly one shard.
 *
 * <p>Returns {@link Optional#empty()} when the SQL is too long to cache, is judged not
 * cacheable, or references a parameter index beyond the supplied parameter list —
 * callers then route through the normal (uncached) path.
 */
public Optional<RouteContext> loadRouteContext(final OriginSQLRouter originSQLRouter, final QueryContext queryContext,
                                               final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database,
                                               final ShardingCache shardingCache, final ConfigurationProperties props,
                                               final ConnectionContext connectionContext) {
    // SQL longer than the configured limit is never cached.
    if (queryContext.getSql().length() > shardingCache.getConfiguration().getAllowedMaxSqlLength()) {
        return Optional.empty();
    }
    ShardingRouteCacheableCheckResult cacheableCheckResult = shardingCache.getRouteCacheableChecker().check(database, queryContext);
    if (!cacheableCheckResult.isProbablyCacheable()) {
        return Optional.empty();
    }
    // Collect the parameter values that participate in sharding conditions; a marker
    // index beyond the actual parameter list makes the query non-cacheable.
    List<Object> shardingConditionParams = new ArrayList<>(cacheableCheckResult.getShardingConditionParameterMarkerIndexes().size());
    for (int each : cacheableCheckResult.getShardingConditionParameterMarkerIndexes()) {
        if (each >= queryContext.getParameters().size()) {
            return Optional.empty();
        }
        shardingConditionParams.add(queryContext.getParameters().get(each));
    }
    Optional<RouteContext> cachedResult = shardingCache.getRouteCache().get(new ShardingRouteCacheKey(queryContext.getSql(), shardingConditionParams))
            .flatMap(ShardingRouteCacheValue::getCachedRouteContext);
    // Cache miss: compute the route with the origin router.
    RouteContext result = cachedResult.orElseGet(
            () -> originSQLRouter.createRouteContext(queryContext, globalRuleMetaData, database, shardingCache.getShardingRule(), props, connectionContext));
    // Only freshly computed, single-shard routes are written back to the cache.
    if (!cachedResult.isPresent() && hitOneShardOnly(result)) {
        shardingCache.getRouteCache().put(new ShardingRouteCacheKey(queryContext.getSql(), shardingConditionParams), new ShardingRouteCacheValue(result));
    }
    return Optional.of(result);
}
/**
 * A query that routes to multiple data nodes must still return the computed route
 * context, but must NOT be written into the route cache (only single-shard routes
 * are cached).
 */
@Test
void assertCreateRouteContextWithQueryRoutedToMultiDataNodes() {
    QueryContext queryContext = new QueryContext(sqlStatementContext, "select * from t", Collections.emptyList(), new HintValueContext(),
            mockConnectionContext(), mock(ShardingSphereMetaData.class));
    when(shardingCache.getConfiguration()).thenReturn(new ShardingCacheConfiguration(100, null));
    when(shardingCache.getRouteCacheableChecker()).thenReturn(mock(ShardingRouteCacheableChecker.class));
    when(shardingCache.getRouteCacheableChecker().check(null, queryContext)).thenReturn(new ShardingRouteCacheableCheckResult(true, Collections.emptyList()));
    when(shardingCache.getRouteCache()).thenReturn(mock(ShardingRouteCache.class));
    // Expected route spans two tablets on the same data source -> multi-node route.
    RouteContext expected = new RouteContext();
    expected.getRouteUnits().add(new RouteUnit(new RouteMapper("ds_0", "ds_0"), Arrays.asList(new RouteMapper("t", "t_0"), new RouteMapper("t", "t_1"))));
    expected.getOriginalDataNodes().add(Collections.singletonList(new DataNode("ds_0", "t_0")));
    OriginSQLRouter router = (unused, globalRuleMetaData, database, rule, props, connectionContext) -> expected;
    RuleMetaData globalRuleMetaData = mock(RuleMetaData.class);
    Optional<RouteContext> actual = new CachedShardingSQLRouter().loadRouteContext(router, queryContext, globalRuleMetaData, null, shardingCache, null, null);
    assertTrue(actual.isPresent());
    assertThat(actual.get(), is(expected));
    // Multi-node route: the cache must never be populated.
    verify(shardingCache.getRouteCache(), never()).put(any(ShardingRouteCacheKey.class), any(ShardingRouteCacheValue.class));
}
/**
 * Assigns the restored operator states to the execution job vertices of this job.
 *
 * <p>Three passes: (1) collect the operator states per vertex — resolving user-defined
 * operator IDs where state exists for them, creating empty placeholder states for
 * unmatched operators — and build a {@link TaskStateAssignment} per vertex;
 * (2) repartition state where needed; (3) write the assignments back to the
 * execution vertices.
 */
public void assignStates() {
    checkStateMappingCompleteness(allowNonRestoredState, operatorStates, tasks);
    Map<OperatorID, OperatorState> localOperators = new HashMap<>(operatorStates);
    // find the states of all operators belonging to this task and compute additional
    // information in first pass
    for (ExecutionJobVertex executionJobVertex : tasks) {
        List<OperatorIDPair> operatorIDPairs = executionJobVertex.getOperatorIDs();
        Map<OperatorID, OperatorState> operatorStates =
                CollectionUtil.newHashMapWithExpectedSize(operatorIDPairs.size());
        for (OperatorIDPair operatorIDPair : operatorIDPairs) {
            // Prefer the user-defined operator ID when state exists under it;
            // otherwise fall back to the generated ID.
            OperatorID operatorID =
                    operatorIDPair
                            .getUserDefinedOperatorID()
                            .filter(localOperators::containsKey)
                            .orElse(operatorIDPair.getGeneratedOperatorID());
            OperatorState operatorState = localOperators.remove(operatorID);
            if (operatorState == null) {
                // No restored state: create an empty placeholder carrying the
                // vertex's current parallelism settings.
                operatorState =
                        new OperatorState(
                                operatorID,
                                executionJobVertex.getParallelism(),
                                executionJobVertex.getMaxParallelism());
            }
            // Keyed by the generated ID regardless of which ID matched.
            operatorStates.put(operatorIDPair.getGeneratedOperatorID(), operatorState);
        }
        final TaskStateAssignment stateAssignment =
                new TaskStateAssignment(
                        executionJobVertex, operatorStates, consumerAssignment, vertexAssignments);
        vertexAssignments.put(executionJobVertex, stateAssignment);
        for (final IntermediateResult producedDataSet : executionJobVertex.getInputs()) {
            consumerAssignment.put(producedDataSet.getId(), stateAssignment);
        }
    }
    // repartition state
    for (TaskStateAssignment stateAssignment : vertexAssignments.values()) {
        if (stateAssignment.hasNonFinishedState
                // FLINK-31963: We need to run repartitioning for stateless operators that have
                // upstream output or downstream input states.
                || stateAssignment.hasUpstreamOutputStates()
                || stateAssignment.hasDownstreamInputStates()) {
            assignAttemptState(stateAssignment);
        }
    }
    // actually assign the state
    for (TaskStateAssignment stateAssignment : vertexAssignments.values()) {
        // If upstream has output states or downstream has input states, even the empty task
        // state should be assigned for the current task in order to notify this task that the
        // old states will send to it which likely should be filtered.
        if (stateAssignment.hasNonFinishedState
                || stateAssignment.isFullyFinished
                || stateAssignment.hasUpstreamOutputStates()
                || stateAssignment.hasDownstreamInputStates()) {
            assignTaskStateToExecutionJobVertices(stateAssignment);
        }
    }
}
@Test
void assigningStatesShouldWorkWithUserDefinedOperatorIdsAsWell() {
    // One operator carrying both a generated and a user-defined OperatorID;
    // the restored state is keyed by the user-defined id only.
    int numSubTasks = 1;
    OperatorID operatorId = new OperatorID();
    OperatorID userDefinedOperatorId = new OperatorID();
    List<OperatorID> operatorIds = singletonList(userDefinedOperatorId);

    ExecutionJobVertex executionJobVertex =
            buildExecutionJobVertex(operatorId, userDefinedOperatorId, 1);
    Map<OperatorID, OperatorState> states = buildOperatorStates(operatorIds, numSubTasks);

    new StateAssignmentOperation(0, Collections.singleton(executionJobVertex), states, false)
            .assignStates();

    // State registered under the user-defined id must end up on the vertex,
    // addressable via the generated id.
    assertThat(getAssignedState(executionJobVertex, operatorId, 0))
            .isEqualTo(states.get(userDefinedOperatorId).getState(0));
}
/**
 * Initializes the pending task by building the Spark ETL job configuration.
 *
 * @throws LoadException if the configuration cannot be created (e.g. the database is gone)
 */
@Override
public void init() throws LoadException {
    createEtlJobConf();
}
// When the database referenced by the load job no longer exists, init() must
// fail with a LoadException.
@Test(expected = LoadException.class)
public void testNoDb(@Injectable SparkLoadJob sparkLoadJob,
                     @Injectable SparkResource resource,
                     @Injectable BrokerDesc brokerDesc,
                     @Mocked GlobalStateMgr globalStateMgr) throws LoadException {
    long dbId = 0L;
    new Expectations() {
        {
            // Simulate a missing database.
            globalStateMgr.getDb(dbId);
            result = null;
        }
    };
    SparkLoadPendingTask task = new SparkLoadPendingTask(sparkLoadJob, null, resource, brokerDesc);
    task.init();
}
/**
 * Masks the password part of a URI path's userinfo with {@code xxxxxx} so the path can
 * be logged safely. A {@code null} input is returned unchanged.
 */
public static String sanitizePath(String path) {
    if (path == null) {
        return null;
    }
    // Groups 1 and 3 preserve the surrounding text; only the password group is replaced.
    return PATH_USERINFO_PASSWORD.matcher(path).replaceFirst("$1xxxxxx$3");
}
@Test
public void testSanitizePathWithoutSensitiveInfoIsUnchanged() {
    // No userinfo/password present, so sanitizePath must return the input untouched.
    String path = "myhost:8080/mypath";
    assertEquals(path, URISupport.sanitizePath(path));
}
/**
 * Formats the given amount in Chinese numerals.
 * Delegates to the three-argument overload with the third flag fixed to {@code false}
 * (presumably the money/colloquial-form toggle — confirm against the overload's javadoc).
 *
 * @param amount           the number to format
 * @param isUseTraditional whether to use traditional (financial) characters
 */
public static String format(double amount, boolean isUseTraditional) {
    return format(amount, isUseTraditional, false);
}
@Test
public void dotTest() {
    // The fractional part is read out digit by digit after the decimal-point character.
    final String format = NumberChineseFormatter.format(new BigDecimal("3.1415926"), false, false);
    assertEquals("三点一四一五九二六", format);
}
public static String compareMd5ResultString(List<String> changedGroupKeys) throws IOException { if (null == changedGroupKeys) { return ""; } StringBuilder sb = new StringBuilder(); for (String groupKey : changedGroupKeys) { String[] dataIdGroupId = GroupKey2.parseKey(groupKey); sb.append(dataIdGroupId[0]); sb.append(WORD_SEPARATOR); sb.append(dataIdGroupId[1]); // if have tenant, then set it if (dataIdGroupId.length == 3) { if (StringUtils.isNotBlank(dataIdGroupId[2])) { sb.append(WORD_SEPARATOR); sb.append(dataIdGroupId[2]); } } sb.append(LINE_SEPARATOR); } // To encode WORD_SEPARATOR and LINE_SEPARATOR invisible characters, encoded value is %02 and %01 return URLEncoder.encode(sb.toString(), "UTF-8"); }
@Test
void testCompareMd5ResultString() throws IOException {
    final MockedStatic<GroupKey2> groupKey2MockedStatic = Mockito.mockStatic(GroupKey2.class);
    try {
        List<String> changedGroupKeys = new ArrayList<>();
        changedGroupKeys.add("test");
        String[] arr = new String[3];
        arr[0] = "test0";
        arr[1] = "test1";
        arr[2] = "test2";
        when(GroupKey2.parseKey(anyString())).thenReturn(arr);
        // BUGFIX: the IOException was previously caught and only printed, which let the
        // test pass silently without ever asserting; let it propagate and fail the test.
        String actualValue = MD5Util.compareMd5ResultString(changedGroupKeys);
        // Separators are URL-encoded: WORD_SEPARATOR -> %02, LINE_SEPARATOR -> %01.
        assertEquals("test0%02test1%02test2%01", actualValue);
    } finally {
        // Always deregister the static mock so later tests are unaffected, even on failure.
        groupKey2MockedStatic.close();
    }
}
/**
 * Reports whether the currently used config server is healthy, as seen by the transport agent.
 */
public boolean isHealthServer() {
    return agent.isHealthServer();
}
@Test
void testIsHealthServer() throws NacosException, NoSuchFieldException, IllegalAccessException {
    // ClientWorker.isHealthServer() must mirror the transport client's health state;
    // the mocked client is injected into the private "agent" field via reflection.
    Properties prop = new Properties();
    ConfigFilterChainManager filter = new ConfigFilterChainManager(new Properties());
    ServerListManager agent = Mockito.mock(ServerListManager.class);

    final NacosClientProperties nacosClientProperties = NacosClientProperties.PROTOTYPE.derive(prop);
    ClientWorker clientWorker = new ClientWorker(filter, agent, nacosClientProperties);
    ClientWorker.ConfigRpcTransportClient client = Mockito.mock(ClientWorker.ConfigRpcTransportClient.class);
    Mockito.when(client.isHealthServer()).thenReturn(Boolean.TRUE);

    Field declaredField = ClientWorker.class.getDeclaredField("agent");
    declaredField.setAccessible(true);
    declaredField.set(clientWorker, client);

    assertTrue(clientWorker.isHealthServer());

    // Flipping the mock must flip the delegated result as well.
    Mockito.when(client.isHealthServer()).thenReturn(Boolean.FALSE);
    assertFalse(clientWorker.isHealthServer());
}
static int run(File buildResult, Path root) throws IOException { // parse included dependencies from build output final Map<String, Set<Dependency>> modulesWithBundledDependencies = combineAndFilterFlinkDependencies( ShadeParser.parseShadeOutput(buildResult.toPath()), DependencyParser.parseDependencyCopyOutput(buildResult.toPath())); final Set<String> deployedModules = DeployParser.parseDeployOutput(buildResult); LOG.info( "Extracted " + deployedModules.size() + " modules that were deployed and " + modulesWithBundledDependencies.keySet().size() + " modules which bundle dependencies with a total of " + modulesWithBundledDependencies.values().size() + " dependencies"); // find modules producing a shaded-jar List<Path> noticeFiles = findNoticeFiles(root); LOG.info("Found {} NOTICE files to check", noticeFiles.size()); final Map<String, Optional<NoticeContents>> moduleToNotice = noticeFiles.stream() .collect( Collectors.toMap( NoticeFileChecker::getModuleFromNoticeFile, noticeFile -> { try { return NoticeParser.parseNoticeFile(noticeFile); } catch (IOException e) { // some machine issue throw new RuntimeException(e); } })); return run(modulesWithBundledDependencies, deployedModules, moduleToNotice); }
@Test
void testRunRejectsMissingNotice() throws IOException {
    // A deployed module that bundles a dependency but has no NOTICE contents must be
    // rejected (exit code 1).
    final String moduleName = "test";
    final Dependency bundledDependency = Dependency.create("a", "b", "c", null);

    final Map<String, Set<Dependency>> bundleDependencies = new HashMap<>();
    bundleDependencies.put(moduleName, Collections.singleton(bundledDependency));
    final Set<String> deployedModules = Collections.singleton(moduleName);
    final Optional<NoticeContents> missingNotice = Optional.empty();

    assertThat(
                    NoticeFileChecker.run(
                            bundleDependencies,
                            deployedModules,
                            Collections.singletonMap(moduleName, missingNotice)))
            .isEqualTo(1);
}
/**
 * Encodes a {@link TraceContext} into the wire format carried by a
 * {@link TraceTransferBean}: fields joined by CONTENT_SPLITOR, records terminated by
 * FIELD_SPLITOR. The field order per trace type is the protocol — do not reorder.
 * Also collects the message ids and user keys into the bean's trans-key set.
 * Returns {@code null} for a {@code null} context.
 */
public static TraceTransferBean encoderFromContextBean(TraceContext ctx) {
    if (ctx == null) {
        return null;
    }
    //build message trace of the transferring entity content bean
    TraceTransferBean transferBean = new TraceTransferBean();
    StringBuilder sb = new StringBuilder(256);
    switch (ctx.getTraceType()) {
        case Pub: {
            TraceBean bean = ctx.getTraceBeans().get(0);
            //append the content of context and traceBean to transferBean's TransData
            sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)
                .append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR)
                .append(ctx.getRegionId()).append(TraceConstants.CONTENT_SPLITOR)
                .append(ctx.getGroupName()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getTopic()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getTags()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getKeys()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getStoreHost()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getBodyLength()).append(TraceConstants.CONTENT_SPLITOR)
                .append(ctx.getCostTime()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getMsgType().ordinal()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getOffsetMsgId()).append(TraceConstants.CONTENT_SPLITOR)
                .append(ctx.isSuccess()).append(TraceConstants.FIELD_SPLITOR);
        }
        break;
        case SubBefore: {
            // One record per consumed message.
            for (TraceBean bean : ctx.getTraceBeans()) {
                sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(ctx.getRegionId()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(ctx.getGroupName()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(ctx.getRequestId()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(bean.getRetryTimes()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(bean.getKeys()).append(TraceConstants.FIELD_SPLITOR);
            }
        }
        break;
        case SubAfter: {
            for (TraceBean bean : ctx.getTraceBeans()) {
                sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(ctx.getRequestId()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(ctx.getCostTime()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(ctx.isSuccess()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(bean.getKeys()).append(TraceConstants.CONTENT_SPLITOR)
                    .append(ctx.getContextCode()).append(TraceConstants.CONTENT_SPLITOR);
                // Timestamp and group name are only appended outside the CLOUD channel.
                if (!ctx.getAccessChannel().equals(AccessChannel.CLOUD)) {
                    sb.append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR);
                    sb.append(ctx.getGroupName());
                }
                sb.append(TraceConstants.FIELD_SPLITOR);
            }
        }
        break;
        case EndTransaction: {
            TraceBean bean = ctx.getTraceBeans().get(0);
            sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)
                .append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR)
                .append(ctx.getRegionId()).append(TraceConstants.CONTENT_SPLITOR)
                .append(ctx.getGroupName()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getTopic()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getTags()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getKeys()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getStoreHost()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getMsgType().ordinal()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getTransactionId()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.getTransactionState().name()).append(TraceConstants.CONTENT_SPLITOR)
                .append(bean.isFromTransactionCheck()).append(TraceConstants.FIELD_SPLITOR);
        }
        break;
        default:
    }
    transferBean.setTransData(sb.toString());
    // Collect routing keys: every message id plus each user key (keys are a
    // KEY_SEPARATOR-joined string on the bean).
    for (TraceBean bean : ctx.getTraceBeans()) {
        transferBean.getTransKey().add(bean.getMsgId());
        if (bean.getKeys() != null
            && bean.getKeys().length() > 0) {
            String[] keys = bean.getKeys().split(MessageConst.KEY_SEPARATOR);
            transferBean.getTransKey().addAll(Arrays.asList(keys));
        }
    }
    return transferBean;
}
@Test
public void testEncoderFromContextBean() {
    // A fully populated Pub context must serialize to the expected fixture string, and
    // the trans-key set must contain the message id plus the single user key.
    TraceContext context = new TraceContext();
    context.setTraceType(TraceType.Pub);
    context.setGroupName("PID-test");
    context.setRegionId("DefaultRegion");
    context.setCostTime(245);
    context.setSuccess(true);
    context.setTimeStamp(time);
    TraceBean traceBean = new TraceBean();
    traceBean.setTopic("topic-test");
    traceBean.setKeys("Keys");
    traceBean.setTags("Tags");
    traceBean.setMsgId("AC1415116D1418B4AAC217FE1B4E0000");
    traceBean.setOffsetMsgId("0A9A002600002A9F0000000000002329");
    traceBean.setStoreHost("127.0.0.1:10911");
    traceBean.setStoreTime(time);
    traceBean.setMsgType(MessageType.Normal_Msg);
    traceBean.setBodyLength(26);
    List<TraceBean> traceBeans = new ArrayList<>();
    traceBeans.add(traceBean);
    context.setTraceBeans(traceBeans);
    TraceTransferBean traceTransferBean = TraceDataEncoder.encoderFromContextBean(context);
    Assert.assertEquals(traceTransferBean.getTransData(), traceData);
    Assert.assertEquals(traceTransferBean.getTransKey().size(), 2);
}
/**
 * Builds a canvas spanning this plot's bounds, adds the plot to it, and — when this
 * plot has a name — uses that name as the canvas title.
 */
public Canvas canvas() {
    final Canvas plot = new Canvas(getLowerBound(), getUpperBound());
    plot.add(this);
    if (name != null) {
        plot.setTitle(name);
    }
    return plot;
}
@Test
public void testQQ() throws Exception {
    // NOTE(review): smoke test only — it opens a window and has no assertions, so it
    // merely verifies that building and displaying the Q-Q plot does not throw.
    System.out.println("QQ");
    var gauss = new GaussianDistribution(0.0, 1.0);
    var data = DoubleStream.generate(gauss::rand).limit(1000).toArray();
    QQPlot.of(data).canvas().window();
}
/**
 * Quotes the given identifier as a SingleStore single-quoted string literal, escaping
 * embedded backslashes and single quotes with a backslash.
 */
public static String escapeString(String identifier) {
    final StringBuilder quoted = new StringBuilder(identifier.length() + 2);
    quoted.append('\'');
    for (int i = 0; i < identifier.length(); i++) {
        final char c = identifier.charAt(i);
        // Both metacharacters are escaped the same way; all other chars pass through.
        if (c == '\\' || c == '\'') {
            quoted.append('\\');
        }
        quoted.append(c);
    }
    return quoted.append('\'').toString();
}
@Test
public void testEscapeStringWithSpecialCharacters() {
    // Quotes and backslashes are each escaped with a backslash; result is single-quoted.
    assertEquals(
        "'a\\'\\'sdasd\\' \\\\asd\\' \\\\ad\\''",
        SingleStoreUtil.escapeString("a''sdasd' \\asd' \\ad'"));
}
/**
 * Servlet entry point: ignores requests for a foreign context path (Jetty mode only),
 * optionally logs the service name at debug level, then delegates to the plugin's
 * request handler.
 */
@Override
protected void service(HttpServletRequest req, HttpServletResponse resp) throws IOException {
    final boolean foreignContext =
        isJettyMode() && !req.getContextPath().endsWith(getContextPath());
    if (foreignContext) {
        return;
    }
    if (log.isDebug()) {
        logDebug(getService());
    }
    handleRequest(new CarteRequestImpl(req, resp));
}
@Test
public void testService() throws Exception {
    // Matching context path + debug logging enabled: the request must be logged and
    // forwarded to handleRequest; the captured request/response are then exercised.
    when( req.getContextPath() ).thenReturn( "/Path" );
    when( baseCartePlugin.getContextPath() ).thenReturn( "/Path" );
    when( log.isDebug() ).thenReturn( true );

    baseCartePlugin.service( req, resp );

    verify( log ).logDebug( baseCartePlugin.getService() );
    verify( baseCartePlugin ).handleRequest( carteReqCaptor.capture() );
    CarteRequestHandler.CarteRequest carteRequest = carteReqCaptor.getValue();
    testCarteRequest( carteRequest );
    testCarteResponse( carteRequest.respond( 200 ) );
}
/**
 * Checks whether the requested scopes can be approved without prompting the user:
 * first via the client's auto-approve scopes (persisting the approvals in that case),
 * then via approvals the user has already granted.
 *
 * @return true when all requested scopes are covered
 */
@Override
@Transactional
public boolean checkForPreApproval(Long userId, Integer userType, String clientId, Collection<String> requestedScopes) {
    // Step 1: compute auto-approval from the client config; if every requested scope is
    // auto-approved, return true.
    OAuth2ClientDO clientDO = oauth2ClientService.validOAuthClientFromCache(clientId);
    Assert.notNull(clientDO, "客户端不能为空"); // defensive programming
    if (CollUtil.containsAll(clientDO.getAutoApproveScopes(), requestedScopes)) {
        // gh-877 - if all scopes are auto approved, approvals still need to be added to the approval store.
        LocalDateTime expireTime = LocalDateTime.now().plusSeconds(TIMEOUT);
        for (String scope : requestedScopes) {
            saveApprove(userId, userType, clientId, scope, true, expireTime);
        }
        return true;
    }
    // Step 2: additionally count the approvals the user already granted; return true
    // when all requested scopes are covered.
    List<OAuth2ApproveDO> approveDOs = getApproveList(userId, userType, clientId);
    Set<String> scopes = convertSet(approveDOs, OAuth2ApproveDO::getScope,
            OAuth2ApproveDO::getApproved);
    // Only unexpired + approved entries are kept.
    return CollUtil.containsAll(scopes, requestedScopes);
}
@Test
public void checkForPreApproval_approve() {
    // prepare arguments
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    String clientId = randomString();
    List<String> requestedScopes = Lists.newArrayList("read");
    // mock method: the client auto-approves nothing, forcing the user-approval path
    when(oauth2ClientService.validOAuthClientFromCache(eq(clientId)))
            .thenReturn(randomPojo(OAuth2ClientDO.class).setAutoApproveScopes(null));
    // mock data: an unexpired, approved grant for the requested "read" scope
    OAuth2ApproveDO approve = randomPojo(OAuth2ApproveDO.class).setUserId(userId)
            .setUserType(userType).setClientId(clientId).setScope("read")
            .setExpiresTime(LocalDateTimeUtil.offset(LocalDateTime.now(), 1L, ChronoUnit.DAYS)).setApproved(true); // approved
    oauth2ApproveMapper.insert(approve);

    // call
    boolean success = oauth2ApproveService.checkForPreApproval(userId, userType,
            clientId, requestedScopes);
    // assert
    assertTrue(success);
}
/**
 * Creates a {@code UriTemplate} for the given template string and charset.
 * The hard-coded {@code true} flag is forwarded to the constructor — presumably an
 * encode-values toggle; confirm against the {@code UriTemplate} constructor.
 */
public static UriTemplate create(String template, Charset charset) {
    return new UriTemplate(template, true, charset);
}
@Test
void literalTemplateWithQueryString() {
    // A template with no expressions must expand to itself, keeping the "?wsdl" query.
    String template = "https://api.example.com?wsdl";
    UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8);
    String expanded = uriTemplate.expand(Collections.emptyMap());
    assertThat(expanded).isEqualToIgnoringCase("https://api.example.com?wsdl");
}
/**
 * Pushes the new accelerator onto every menu item registered for the given action.
 * {@code oldStroke} is unused; for a {@code JMenu} the accelerator is set on the first
 * child of its popup menu instead.
 */
@Override
public void acceleratorChanged(AFreeplaneAction action, KeyStroke oldStroke, KeyStroke newStroke) {
    for (Entry entry : entries.entries(action)) {
        Object item = new EntryAccessor().getComponent(entry);
        if (item instanceof JMenu) {
            item = ((JMenu) item).getPopupMenu().getComponent(0);
        }
        if (item instanceof JMenuItem) {
            ((JMenuItem) item).setAccelerator(newStroke);
        }
    }
}
@Test
public void setsKeystroke() throws Exception {
    // A menu item registered for the action must receive the new accelerator.
    final EntriesForAction entriesForAction = new EntriesForAction();
    final MenuAcceleratorChangeListener menuAcceleratorChangeListener = new MenuAcceleratorChangeListener(entriesForAction);
    final AFreeplaneAction action = mock(AFreeplaneAction.class);
    Entry actionEntry = new Entry();
    final JMenuItem menu = new JMenuItem();
    new EntryAccessor().setComponent(actionEntry, menu);
    entriesForAction.registerEntry(action, actionEntry);
    final KeyStroke keyStroke = KeyStroke.getKeyStroke(KeyEvent.VK_INSERT, 0);
    menuAcceleratorChangeListener.acceleratorChanged(action, null, keyStroke);
    assertThat(menu.getAccelerator(), equalTo(keyStroke));
}
/**
 * Executes the job on this worker thread: marks the thread occupied, restores the
 * job's MDC context, performs the job, and maps any failure to the appropriate
 * terminal handling. The thread is always marked idle and the MDC cleared afterwards.
 */
@Override
public void run() {
    try {
        backgroundJobServer.getJobSteward().notifyThreadOccupied();
        MDCMapper.loadMDCContextFromJob(job);
        performJob();
    } catch (Exception e) {
        if (isJobDeletedWhileProcessing(e)) {
            // nothing to do anymore as Job is deleted
            return;
        } else if (isJobServerStopped(e)) {
            updateJobStateToFailedAndRunJobFilters("Job processing was stopped as background job server has stopped", e);
            // Restore the interrupt flag so the pool can observe the shutdown.
            Thread.currentThread().interrupt();
        } else if (isJobNotFoundException(e)) {
            updateJobStateToFailedAndRunJobFilters("Job method not found", e);
        } else {
            // Any other failure is recorded against the job and run through the filters.
            updateJobStateToFailedAndRunJobFilters("An exception occurred during the performance of the job", e);
        }
    } finally {
        backgroundJobServer.getJobSteward().notifyThreadIdle();
        MDC.clear();
    }
}
@Test
void onFailureIllegalJobStateChangeIsThrown() throws Exception {
    Job job = anEnqueuedJob().build();
    mockBackgroundJobRunner(job, jobFromStorage -> {
        jobFromStorage.succeeded();
        // A second transition to SUCCEEDED is illegal and must surface as
        // IllegalJobStateChangeException from run().
        jobFromStorage.succeeded(); //to throw exception that will bring it to failed state
    });
    BackgroundJobPerformer backgroundJobPerformer = new BackgroundJobPerformer(backgroundJobServer, job);

    assertThatThrownBy(() -> backgroundJobPerformer.run())
            .isInstanceOf(IllegalJobStateChangeException.class);
}
/**
 * Rewrites a {@code column = constant} predicate on a generated-expression partition
 * column: the constant is substituted into the (single) partition expression, which is
 * then constant-folded so the comparison can use the partition value directly.
 * The original predicate is returned unchanged whenever any step does not apply.
 */
public static ScalarOperator convertPredicate(ScalarOperator predicate,
                                              ExpressionRangePartitionInfoV2 exprRangePartitionInfo,
                                              Map<ColumnId, Column> idToColumn) {
    // Currently only one partition column is supported
    if (exprRangePartitionInfo.getPartitionExprsSize() != 1) {
        return predicate;
    }
    Expr firstPartitionExpr = exprRangePartitionInfo.getPartitionExprs(idToColumn).get(0);
    // Clone so the substitution below does not mutate the table's partition metadata.
    Expr predicateExpr = firstPartitionExpr.clone();
    // only support binary predicate
    if (predicate instanceof BinaryPredicateOperator
            && predicate.getChild(0) instanceof ColumnRefOperator
            && predicate.getChild(1) instanceof ConstantOperator) {
        List<ScalarOperator> argument = predicate.getChildren();
        ColumnRefOperator columnRef = (ColumnRefOperator) argument.get(0);
        ConstantOperator constant = (ConstantOperator) argument.get(1);
        // Substitute the constant for the column inside the partition expression.
        boolean success = rewritePredicate(predicateExpr, columnRef, constant);
        if (!success) {
            return predicate;
        }
        ScalarOperator translate = SqlToScalarOperatorTranslator.translate(predicateExpr);
        CallOperator callOperator = AnalyzerUtils.getCallOperator(translate);
        if (callOperator == null) {
            return predicate;
        }
        // Constant-fold the call; only a constant result may replace the original child.
        ScalarOperator evaluation = ScalarOperatorEvaluator.INSTANCE.evaluation(callOperator);
        if (!(evaluation instanceof ConstantOperator)) {
            return predicate;
        }
        // Clone before mutation so callers holding the original predicate are unaffected.
        predicate = predicate.clone();
        ConstantOperator result = (ConstantOperator) evaluation;
        Optional<ConstantOperator> castResult = result.castTo(predicateExpr.getType());
        if (!castResult.isPresent()) {
            return predicate;
        }
        result = castResult.get();
        predicate.setChild(1, result);
    }
    return predicate;
}
@Test
public void testConvertPredicate() {
    // bill_code = 'JT2921712368984' on a table partitioned by an expression over
    // bill_code; after conversion the constant child must hold the folded numeric value.
    List<ScalarOperator> argument = Lists.newArrayList();
    ColumnRefOperator columnRefOperator = new ColumnRefOperator(1, Type.VARCHAR, "bill_code", false);
    ConstantOperator constantOperator = new ConstantOperator("JT2921712368984", Type.VARCHAR);
    argument.add(columnRefOperator);
    argument.add(constantOperator);
    ScalarOperator predicate = new BinaryPredicateOperator(BinaryType.EQ, argument);
    Table table = GlobalStateMgr.getCurrentState().getDb("test").getTable("bill_detail");
    ExpressionRangePartitionInfoV2 partitionInfo =
            (ExpressionRangePartitionInfoV2) ((OlapTable) table).getPartitionInfo();
    ScalarOperator afterConvert = ColumnFilterConverter.convertPredicate(predicate, partitionInfo, table.getIdToColumn());
    Assert.assertEquals(2921712368984L, ((ConstantOperator) afterConvert.getChild(1)).getValue());
}
/**
 * Generates one HTML table per {@link ConfigGroup} declared on the options class, plus a
 * default table (group {@code null}) for options not claimed by any group. Groups that
 * match no options are omitted. When the class has no {@link ConfigGroups} annotation,
 * a single default table containing all documented options is returned.
 */
@VisibleForTesting
static List<Tuple2<ConfigGroup, String>> generateTablesForClass(
        Class<?> optionsClass, Collection<OptionWithMetaInfo> optionWithMetaInfos) {
    ConfigGroups configGroups = optionsClass.getAnnotation(ConfigGroups.class);
    List<OptionWithMetaInfo> allOptions = selectOptionsToDocument(optionWithMetaInfos);

    if (allOptions.isEmpty()) {
        return Collections.emptyList();
    }

    List<Tuple2<ConfigGroup, String>> tables;
    if (configGroups != null) {
        // +1 slot for the default (ungrouped) table.
        tables = new ArrayList<>(configGroups.groups().length + 1);
        // The tree assigns each option to a group — presumably by longest matching
        // key prefix (see the corresponding tests); confirm in Tree.
        Tree tree = new Tree(configGroups.groups(), allOptions);

        for (ConfigGroup group : configGroups.groups()) {
            List<OptionWithMetaInfo> configOptions = tree.findConfigOptions(group);
            if (!configOptions.isEmpty()) {
                sortOptions(configOptions);
                tables.add(Tuple2.of(group, toHtmlTable(configOptions)));
            }
        }
        List<OptionWithMetaInfo> configOptions = tree.getDefaultOptions();
        if (!configOptions.isEmpty()) {
            sortOptions(configOptions);
            // A null group marks the default table.
            tables.add(Tuple2.of(null, toHtmlTable(configOptions)));
        }
    } else {
        sortOptions(allOptions);
        tables = Collections.singletonList(Tuple2.of(null, toHtmlTable(allOptions)));
    }
    return tables;
}
@Test
void testLongestPrefixMatching() {
    // Options must land in the group with the longest matching key prefix; unmatched
    // options fall into the default table.
    final List<Tuple2<ConfigGroup, String>> tables =
            ConfigOptionsDocGenerator.generateTablesForClass(
                    TestConfigPrefix.class,
                    ConfigurationOptionLocator.extractConfigOptions(TestConfigPrefix.class));

    assertThat(tables).hasSize(3);
    final Map<String, String> tablesConverted = new HashMap<>(tables.size());
    for (final Tuple2<ConfigGroup, String> table : tables) {
        tablesConverted.put(table.f0 != null ? table.f0.name() : "default", table.f1);
    }

    assertThat(tablesConverted.get("group1"))
            .contains("a.b.option", "a.b.c.option", "a.b.c.e.option")
            .doesNotContain("a.b.c.d.option");
    assertThat(tablesConverted.get("group2")).contains("a.b.c.d.option");
    assertThat(tablesConverted.get("default")).contains("a.option", "a.c.b.option");
}
/**
 * Attempts to pull the base image from each configured mirror of its registry, in
 * configuration order. Pull failures from a mirror are logged at debug level and the
 * next mirror is tried; mirrors configured for other registries are skipped.
 * Returns {@link Optional#empty()} when no mirror succeeds.
 */
@VisibleForTesting
Optional<ImagesAndRegistryClient> tryMirrors(
    BuildContext buildContext, ProgressEventDispatcher.Factory progressDispatcherFactory)
    throws LayerCountMismatchException, BadContainerConfigurationFormatException {
    EventHandlers eventHandlers = buildContext.getEventHandlers();

    Collection<Map.Entry<String, String>> mirrorEntries =
        buildContext.getRegistryMirrors().entries();
    try (ProgressEventDispatcher progressDispatcher1 =
            progressDispatcherFactory.create("trying mirrors", mirrorEntries.size());
        TimerEventDispatcher ignored1 = new TimerEventDispatcher(eventHandlers, "trying mirrors")) {
        for (Map.Entry<String, String> entry : mirrorEntries) {
            String registry = entry.getKey();
            String mirror = entry.getValue();
            eventHandlers.dispatch(LogEvent.debug("mirror config: " + registry + " --> " + mirror));
            if (!buildContext.getBaseImageConfiguration().getImageRegistry().equals(registry)) {
                // Mirror belongs to a different registry; count its progress unit and skip.
                progressDispatcher1.dispatchProgress(1);
                continue;
            }

            eventHandlers.dispatch(LogEvent.info("trying mirror " + mirror + " for the base image"));
            try (ProgressEventDispatcher progressDispatcher2 =
                progressDispatcher1.newChildProducer().create("trying mirror " + mirror, 2)) {
                RegistryClient registryClient =
                    buildContext.newBaseImageRegistryClientFactory(mirror).newRegistryClient();
                List<Image> images = pullPublicImages(registryClient, progressDispatcher2);
                eventHandlers.dispatch(LogEvent.info("pulled manifest from mirror " + mirror));
                // First successful mirror wins.
                return Optional.of(new ImagesAndRegistryClient(images, registryClient));
            } catch (IOException | RegistryException ex) {
                // Ignore errors from this mirror and continue.
                eventHandlers.dispatch(
                    LogEvent.debug(
                        "failed to get manifest from mirror " + mirror + ": " + ex.getMessage()));
            }
        }
        return Optional.empty();
    }
}
@Test
public void testTryMirrors_multipleMirrors()
    throws LayerCountMismatchException, BadContainerConfigurationFormatException, IOException,
        RegistryException, InvalidImageReferenceException {
    // First mirror (quay.io) fails with "not found"; the second (gcr.io) must be tried
    // and its client returned, with the corresponding log events in order.
    Mockito.when(imageConfiguration.getImage()).thenReturn(ImageReference.parse("registry/repo"));
    Mockito.when(imageConfiguration.getImageRegistry()).thenReturn("registry");
    Mockito.when(buildContext.getRegistryMirrors())
        .thenReturn(ImmutableListMultimap.of("registry", "quay.io", "registry", "gcr.io"));
    Mockito.when(buildContext.newBaseImageRegistryClientFactory("quay.io"))
        .thenReturn(registryClientFactory);
    Mockito.when(registryClient.pullManifest(Mockito.any()))
        .thenThrow(new RegistryException("not found"));
    Mockito.when(containerConfig.getPlatforms())
        .thenReturn(ImmutableSet.of(new Platform("amd64", "linux")));

    RegistryClient.Factory gcrRegistryClientFactory =
        setUpWorkingRegistryClientFactoryWithV22ManifestTemplate();
    Mockito.when(buildContext.newBaseImageRegistryClientFactory("gcr.io"))
        .thenReturn(gcrRegistryClientFactory);

    Optional<ImagesAndRegistryClient> result =
        pullBaseImageStep.tryMirrors(buildContext, progressDispatcherFactory);
    Assert.assertTrue(result.isPresent());
    Assert.assertEquals(gcrRegistryClientFactory.newRegistryClient(), result.get().registryClient);

    InOrder inOrder = Mockito.inOrder(eventHandlers);
    inOrder
        .verify(eventHandlers)
        .dispatch(LogEvent.info("trying mirror quay.io for the base image"));
    inOrder
        .verify(eventHandlers)
        .dispatch(LogEvent.debug("failed to get manifest from mirror quay.io: not found"));
    inOrder
        .verify(eventHandlers)
        .dispatch(LogEvent.info("trying mirror gcr.io for the base image"));
    inOrder.verify(eventHandlers).dispatch(LogEvent.info("pulled manifest from mirror gcr.io"));
}
/**
 * Picks the slots that may be allocated now. Pending small slots are admitted first,
 * up to the small-slot budget; remaining requests are then admitted from the queue head
 * while global capacity allows, stopping at the first request that does not fit
 * (head-of-line order).
 */
@Override
public List<LogicalSlot> peakSlotsToAllocate(SlotTracker slotTracker) {
    updateOptionsPeriodically();

    List<LogicalSlot> slotsToAllocate = Lists.newArrayList();

    // Phase 1: admit pending small slots within the small-slot budget.
    int curNumAllocatedSmallSlots = numAllocatedSmallSlots;
    for (SlotContext slotContext : requiringSmallSlots.values()) {
        LogicalSlot slot = slotContext.getSlot();
        if (curNumAllocatedSmallSlots + slot.getNumPhysicalSlots() > opts.v2().getTotalSmallSlots()) {
            break;
        }
        requiringQueue.remove(slotContext);
        slotsToAllocate.add(slot);
        slotContext.setAllocateAsSmallSlot();
        curNumAllocatedSmallSlots += slot.getNumPhysicalSlots();
    }

    // Phase 2: admit regular slots from the queue head while global capacity allows.
    // Small-slot allocations are excluded from the global count.
    int numAllocatedSlots = slotTracker.getNumAllocatedSlots() - numAllocatedSmallSlots;
    while (!requiringQueue.isEmpty()) {
        SlotContext slotContext = requiringQueue.peak();
        if (!isGlobalSlotAvailable(numAllocatedSlots, slotContext.getSlot())) {
            // Head-of-line blocking: later (smaller) requests must wait too.
            break;
        }
        requiringQueue.poll();
        slotsToAllocate.add(slotContext.getSlot());
        numAllocatedSlots += slotContext.getSlot().getNumPhysicalSlots();
    }

    return slotsToAllocate;
}
@Test
public void testHeadLineBlocking1() {
    // Verifies head-of-line blocking: a large pending request at the queue head blocks
    // later requests even when they would individually fit.
    QueryQueueOptions opts = QueryQueueOptions.createFromEnv();
    SlotSelectionStrategyV2 strategy = new SlotSelectionStrategyV2();
    SlotTracker slotTracker = new SlotTracker(ImmutableList.of(strategy));

    LogicalSlot slot1 = generateSlot(opts.v2().getTotalSlots() / 2 + 1);
    LogicalSlot slot2 = generateSlot(opts.v2().getTotalSlots() / 2);
    LogicalSlot slot3 = generateSlot(2);

    // 1. Require and allocate slot1.
    slotTracker.requireSlot(slot1);
    assertThat(strategy.peakSlotsToAllocate(slotTracker)).containsExactly(slot1);
    slotTracker.allocateSlot(slot1);

    // 2. Require slot2.
    slotTracker.requireSlot(slot2);
    assertThat(strategy.peakSlotsToAllocate(slotTracker)).isEmpty();

    // 3. Require enough small slots to make its priority lower.
    {
        List<LogicalSlot> smallSlots = IntStream.range(0, 10)
                .mapToObj(i -> generateSlot(2))
                .collect(Collectors.toList());
        smallSlots.forEach(slotTracker::requireSlot);
        for (int numPeakedSmallSlots = 0; numPeakedSmallSlots < 10; ) {
            List<LogicalSlot> peakSlots = strategy.peakSlotsToAllocate(slotTracker);
            numPeakedSmallSlots += peakSlots.size();
            peakSlots.forEach(slotTracker::allocateSlot);
            peakSlots.forEach(slot -> assertThat(slotTracker.releaseSlot(slot.getSlotId())).isSameAs(slot));
        }
    }
    // Try peak the only rest slot2, but it is blocked by slot1.
    assertThat(strategy.peakSlotsToAllocate(slotTracker)).isEmpty();

    // 4. slot3 cannot be peaked because it is blocked by slot2.
    slotTracker.requireSlot(slot3);
    assertThat(strategy.peakSlotsToAllocate(slotTracker)).isEmpty();

    // 5. slot3 can be peaked after releasing the pending slot2.
    assertThat(slotTracker.releaseSlot(slot2.getSlotId())).isSameAs(slot2);
    assertThat(strategy.peakSlotsToAllocate(slotTracker)).containsExactly(slot3);
    slotTracker.allocateSlot(slot3);
    assertThat(slotTracker.releaseSlot(slot3.getSlotId())).isSameAs(slot3);
}
/**
 * Processes each given resource in order by delegating to the single-resource handler
 * with the shared compilation context.
 */
@Override
public void processResource(EfestoCompilationContext context, EfestoResource... toProcess) {
    for (int i = 0; i < toProcess.length; i++) {
        processResourceWithContext(toProcess[i], context);
    }
}
@Test
void processResources() {
    EfestoCompilationContext context =
            EfestoCompilationContextUtils.buildWithParentClassLoader(CompilationManager.class.getClassLoader());
    // Instantiate every managed resource type plus one redirect output, then process
    // them all in a single call; exactly one "mock" index file must be generated.
    List<AbstractMockOutput> toProcess = new ArrayList<>();
    MANAGED_Efesto_RESOURCES.forEach(managedResource -> {
        try {
            AbstractMockOutput toAdd = managedResource.getDeclaredConstructor().newInstance();
            toProcess.add(toAdd);
        } catch (Exception e) {
            fail(e);
        }
    });
    toProcess.add(new MockEfestoRedirectOutputD());
    compilationManager.processResource(context, toProcess.toArray(new EfestoResource[0]));
    assertThat(context.getGeneratedResourcesMap()).hasSize(1);
    IndexFile indexFile = null;
    try {
        Map<String, IndexFile> indexFiles = context.createIndexFiles(TARGET_TEST_CLASSES_DIRECTORY);
        assertThat(indexFiles).hasSize(1);
        indexFile = indexFiles.get("mock");
        assertThat(indexFile).exists();
    } finally {
        // Clean up the generated index file even when an assertion fails.
        if (indexFile != null) {
            indexFile.delete();
        }
    }
}
/**
 * Resolves the data nodes of the given table, letting every registered data-node
 * builder rewrite the node collection in turn (e.g. mapping logical to actual data
 * sources). Returns the empty collection unchanged when the table has no nodes.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public Collection<DataNode> getDataNodes(final String tableName) {
    Collection<DataNode> dataNodes = getDataNodesByTableName(tableName);
    if (!dataNodes.isEmpty()) {
        for (Entry<ShardingSphereRule, DataNodeBuilder> builderEntry : dataNodeBuilders.entrySet()) {
            dataNodes = builderEntry.getValue().build(dataNodes, builderEntry.getKey());
        }
    }
    return dataNodes;
}
@Test
void assertGetDataNodesForShardingTableWithDataNodeContainedRuleWithoutDataSourceContainedRule() {
    // Two sharded t_order nodes on the readwrite data source, returned in order.
    DataNodes dataNodes = new DataNodes(mockDataNodeRules());
    Collection<DataNode> actual = dataNodes.getDataNodes("t_order");
    assertThat(actual.size(), is(2));
    Iterator<DataNode> iterator = actual.iterator();
    DataNode firstDataNode = iterator.next();
    assertThat(firstDataNode.getDataSourceName(), is("readwrite_ds"));
    assertThat(firstDataNode.getTableName(), is("t_order_0"));
    DataNode secondDataNode = iterator.next();
    assertThat(secondDataNode.getDataSourceName(), is("readwrite_ds"));
    assertThat(secondDataNode.getTableName(), is("t_order_1"));
}