focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Returns the union of two collections with multiset semantics: each distinct
 * element appears max(count-in-coll1, count-in-coll2) times in the result.
 * Null/empty inputs yield a copy of the other collection (or an empty list).
 */
public static <T> Collection<T> union(Collection<T> coll1, Collection<T> coll2) {
    // Guard clauses: trivial cases need no counting.
    if (isEmpty(coll1) && isEmpty(coll2)) {
        return new ArrayList<>();
    }
    if (isEmpty(coll1)) {
        return new ArrayList<>(coll2);
    }
    if (isEmpty(coll2)) {
        return new ArrayList<>(coll1);
    }
    // Per-element occurrence counts for both sides.
    final Map<T, Integer> counts1 = countMap(coll1);
    final Map<T, Integer> counts2 = countMap(coll2);
    // Distinct elements across both collections.
    final Set<T> distinct = newHashSet(coll2);
    distinct.addAll(coll1);
    final ArrayList<T> result = new ArrayList<>(Math.max(coll1.size(), coll2.size()));
    for (T element : distinct) {
        final int occurrences = Math.max(Convert.toInt(counts1.get(element), 0), Convert.toInt(counts2.get(element), 0));
        for (int i = 0; i < occurrences; i++) {
            result.add(element);
        }
    }
    return result;
}
// Verifies multiset-union semantics: "b" occurs twice in list1 and three times in
// list2, so the union must contain max(2, 3) = 3 occurrences of "b".
@Test public void unionTest() { final ArrayList<String> list1 = CollUtil.newArrayList("a", "b", "b", "c", "d", "x"); final ArrayList<String> list2 = CollUtil.newArrayList("a", "b", "b", "b", "c", "d"); final Collection<String> union = CollUtil.union(list1, list2); assertEquals(3, CollUtil.count(union, "b"::equals)); }
/**
 * Builds one result row per compute node. In standalone mode there is exactly one
 * local instance; in cluster mode every known cluster instance contributes a row.
 */
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowComputeNodesStatement sqlStatement, final ContextManager contextManager) {
    String modeType = contextManager.getComputeNodeInstanceContext().getModeConfiguration().getType();
    if ("Standalone".equals(modeType)) {
        return Collections.singleton(buildRow(contextManager.getComputeNodeInstanceContext().getInstance(), modeType));
    }
    return contextManager.getComputeNodeInstanceContext().getAllClusterInstances().stream()
            .map(each -> buildRow(each, modeType))
            .collect(Collectors.toList());
}
// Cluster mode: the mocked context exposes a single cluster instance, so exactly one
// row is produced; each cell (id, type, host, port, status, mode, worker-id, labels,
// version) is checked against the fixture values.
@Test void assertExecuteWithClusterMode() { ShowComputeNodesExecutor executor = new ShowComputeNodesExecutor(); ContextManager contextManager = mock(ContextManager.class); ComputeNodeInstanceContext computeNodeInstanceContext = createClusterInstanceContext(); when(contextManager.getComputeNodeInstanceContext()).thenReturn(computeNodeInstanceContext); Collection<LocalDataQueryResultRow> actual = executor.getRows(mock(ShowComputeNodesStatement.class), contextManager); assertThat(actual.size(), is(1)); LocalDataQueryResultRow row = actual.iterator().next(); assertThat(row.getCell(1), is("foo")); assertThat(row.getCell(2), is("PROXY")); assertThat(row.getCell(3), is("127.0.0.1")); assertThat(row.getCell(4), is("3309")); assertThat(row.getCell(5), is("OK")); assertThat(row.getCell(6), is("Cluster")); assertThat(row.getCell(7), is("1")); assertThat(row.getCell(8), is("")); assertThat(row.getCell(9), is("foo_version")); }
/**
 * Parses a duration string in either ISO-8601 form (e.g. "PT10S") or the simple
 * "&lt;amount&gt;&lt;unit&gt;" form (e.g. "10ms"). Any parse failure is rethrown as an
 * IllegalStateException carrying the offending input and the original cause.
 */
@Override
public Duration convert(String source) {
    try {
        if (ISO8601.matcher(source).matches()) {
            return Duration.parse(source);
        }
        Matcher matcher = SIMPLE.matcher(source);
        Assert.state(matcher.matches(), "'" + source + "' is not a valid duration");
        // group(1) = numeric amount (sign allowed), group(2) = unit suffix.
        return Duration.of(Long.parseLong(matcher.group(1)), getUnit(matcher.group(2)));
    }
    catch (Exception ex) {
        throw new IllegalStateException("'" + source + "' is not a valid duration", ex);
    }
}
// "us"/"US" parse as microseconds (case-insensitive unit); explicit +/- sign
// prefixes on the amount are honored.
@Test public void convertWhenSimpleMicrosShouldReturnDuration() { assertThat(convert("10us")).isEqualTo(Duration.ofNanos(10000)); assertThat(convert("10US")).isEqualTo(Duration.ofNanos(10000)); assertThat(convert("+10us")).isEqualTo(Duration.ofNanos(10000)); assertThat(convert("-10us")).isEqualTo(Duration.ofNanos(-10000)); }
// Returns the mount table entries at or beneath the given path.
// verifyMountTable() refreshes/validates the cached table first; the path is
// normalized before the subtree lookup (false = include descendants, per the tests).
public List<MountTable> getMounts(final String path) throws IOException { verifyMountTable(); return getTreeValues(RouterAdmin.normalizeFileSystemPath(path), false); }
// Exercises getMounts() against a pre-populated mount table at several depths,
// including read-only and multi-destination entries.
@Test
public void testGetMounts() throws IOException {
  // Check listing the mount table records at or beneath a path
  List<MountTable> records = mountTable.getMounts("/");
  assertEquals(10, records.size());
  compareRecords(records, new String[] {"/", "/tmp", "/user", "/usr/bin", "user/a", "/user/a/demo/a", "/user/a/demo/b", "/user/b/file1.txt", "readonly", "multi"});
  records = mountTable.getMounts("/user");
  assertEquals(5, records.size());
  compareRecords(records, new String[] {"/user", "/user/a/demo/a", "/user/a/demo/b", "user/a", "/user/b/file1.txt"});
  records = mountTable.getMounts("/user/a");
  assertEquals(3, records.size());
  compareRecords(records, new String[] {"/user/a/demo/a", "/user/a/demo/b", "/user/a"});
  records = mountTable.getMounts("/tmp");
  assertEquals(1, records.size());
  compareRecords(records, new String[] {"/tmp"});
  records = mountTable.getMounts("/readonly");
  assertEquals(1, records.size());
  compareRecords(records, new String[] {"/readonly"});
  // The read-only flag must be preserved by the lookup.
  assertTrue(records.get(0).isReadOnly());
  records = mountTable.getMounts("/multi");
  assertEquals(1, records.size());
  compareRecords(records, new String[] {"/multi"});
}
// Async binary search over compacted entry ids [start, end] for the first entry whose
// message id is >= position p. Completes the promise with that entry id, or with
// NEWER_THAN_COMPACTED when p is past the last compacted entry. Any cache failure
// fails the promise.
// NOTE(review): the left recursion uses (start + 1, midpoint) because the first branch
// has already ruled out the start entry — confirm before restructuring the bounds.
@VisibleForTesting static void findStartPointLoop(Position p, long start, long end, CompletableFuture<Long> promise, AsyncLoadingCache<Long, MessageIdData> cache) { long midpoint = start + ((end - start) / 2); CompletableFuture<MessageIdData> startEntry = cache.get(start); CompletableFuture<MessageIdData> middleEntry = cache.get(midpoint); CompletableFuture<MessageIdData> endEntry = cache.get(end); CompletableFuture.allOf(startEntry, middleEntry, endEntry).thenRun( () -> { if (comparePositionAndMessageId(p, startEntry.join()) <= 0) { promise.complete(start); } else if (comparePositionAndMessageId(p, middleEntry.join()) <= 0) { findStartPointLoop(p, start + 1, midpoint, promise, cache); } else if (comparePositionAndMessageId(p, endEntry.join()) <= 0) { findStartPointLoop(p, midpoint + 1, end, promise, cache); } else { promise.complete(NEWER_THAN_COMPACTED); } }).exceptionally((exception) -> { promise.completeExceptionally(exception); return null; }); }
// Measures the search recursion depth indirectly: each loop iteration issues exactly
// three cache reads (start, mid, end), so totalReads / 3 equals the iteration count.
@Test
public void testRecursionNumberOfFindStartPointLoop() {
    AtomicLong bingoMarker = new AtomicLong();
    long start = 0;
    long end = 100;
    long targetMessageId = 1;
    // Mock cache.
    AsyncLoadingCache<Long, MessageIdData> cache = Caffeine.newBuilder()
            .buildAsync(mockCacheLoader(start, end, targetMessageId, bingoMarker));
    AtomicInteger invokeCounterOfCacheGet = new AtomicInteger();
    AsyncLoadingCache<Long, MessageIdData> cacheWithCounter = spy(cache);
    doAnswer(invocation -> {
        invokeCounterOfCacheGet.incrementAndGet();
        return cache.get((Long) invocation.getArguments()[0]);
    }).when(cacheWithCounter).get(anyLong());
    // Because when "findStartPointLoop(...)" is executed, will trigger "cache.get()" three times, including
    // "cache.get(start)", "cache.get(mid)" and "cache.get(end)". Therefore, we can calculate the count of
    // executed "findStartPointLoop".
    Supplier<Integer> loopCounter = () -> invokeCounterOfCacheGet.get() / 3;
    // Do test.
    Position targetPosition = PositionFactory.create(DEFAULT_LEDGER_ID, targetMessageId);
    CompletableFuture<Long> promise = new CompletableFuture<>();
    CompactedTopicImpl.findStartPointLoop(targetPosition, start, end, promise, cacheWithCounter);
    // Do verify.
    promise.join();
    assertEquals(loopCounter.get().intValue(), 2);
}
/** Returns a diagnostic representation including the wrapped serializer. */
@Override
public String toString() {
    final StringBuilder sb = new StringBuilder("StreamSerializerAdapter{serializer=");
    sb.append(serializer);
    sb.append('}');
    return sb.toString();
}
// Smoke test: toString() must never return null.
@Test public void testString() { assertNotNull(adapter.toString()); }
// Returns a defensive, immutable snapshot of the store's load balancers so callers
// cannot mutate internal state.
@Override public Set<KubevirtLoadBalancer> loadBalancers() { return ImmutableSet.copyOf(kubevirtLoadBalancerStore.loadBalancers()); }
// createBasicLoadBalancers() seeds exactly one load balancer, which must be visible
// through the service facade.
@Test public void testGetLoadBalancers() { createBasicLoadBalancers(); assertEquals("Number of load balancers did not match", 1, target.loadBalancers().size()); }
// Closes the task's state manager under the task directory lock.
// On unclean close with EOS enabled, the whole task state directory is wiped since its
// contents may be inconsistent. The first ProcessorStateException encountered is
// rethrown after the lock is released; IOExceptions are wrapped into a
// ProcessorStateException. If the lock cannot be acquired, nothing is closed or wiped.
static void closeStateManager(final Logger log,
                              final String logPrefix,
                              final boolean closeClean,
                              final boolean eosEnabled,
                              final ProcessorStateManager stateMgr,
                              final StateDirectory stateDirectory,
                              final TaskType taskType) {
    // if EOS is enabled, wipe out the whole state store for unclean close since it is now invalid
    final boolean wipeStateStore = !closeClean && eosEnabled;
    final TaskId id = stateMgr.taskId();
    log.trace("Closing state manager for {} task {}", taskType, id);
    final AtomicReference<ProcessorStateException> firstException = new AtomicReference<>(null);
    try {
        if (stateDirectory.lock(id)) {
            try {
                stateMgr.close();
            } catch (final ProcessorStateException e) {
                firstException.compareAndSet(null, e);
            } finally {
                try {
                    if (wipeStateStore) {
                        log.debug("Wiping state stores for {} task {}", taskType, id);
                        // we can just delete the whole dir of the task, including the state store images and the checkpoint files,
                        // and then we write an empty checkpoint file indicating that the previous close is graceful and we just
                        // need to re-bootstrap the restoration from the beginning
                        Utils.delete(stateMgr.baseDir());
                    }
                } finally {
                    // Always release the directory lock, even if close or wipe failed.
                    stateDirectory.unlock(id);
                }
            }
        } else {
            log.error("Failed to acquire lock while closing the state store for {} task {}", taskType, id);
        }
    } catch (final IOException e) {
        final ProcessorStateException exception = new ProcessorStateException(
            String.format("%sFatal error while trying to close the state manager for task %s", logPrefix, id), e
        );
        firstException.compareAndSet(null, exception);
    }
    final ProcessorStateException exception = firstException.get();
    if (exception != null) {
        throw exception;
    }
}
// When the task directory lock cannot be acquired, the state manager must be left
// untouched: no close(), no baseDir() wipe, no unlock().
@Test public void shouldNotCloseStateManagerIfUnableToLockTaskDirectory() { final InOrder inOrder = inOrder(stateManager, stateDirectory); when(stateManager.taskId()).thenReturn(taskId); when(stateDirectory.lock(taskId)).thenReturn(false); StateManagerUtil.closeStateManager( logger, "logPrefix:", true, false, stateManager, stateDirectory, TaskType.ACTIVE); inOrder.verify(stateManager).taskId(); inOrder.verify(stateDirectory).lock(taskId); verify(stateManager, never()).close(); verify(stateManager, never()).baseDir(); verify(stateDirectory, never()).unlock(taskId); verifyNoMoreInteractions(stateManager, stateDirectory); }
/**
 * Updates a port in the network store after validating that the port itself, its id,
 * and its network id are all present. Logs the update at info level.
 */
@Override
public void updatePort(Port osPort) {
    checkNotNull(osPort, ERR_NULL_PORT);
    final String portId = osPort.getId();
    checkArgument(!Strings.isNullOrEmpty(portId), ERR_NULL_PORT_ID);
    checkArgument(!Strings.isNullOrEmpty(osPort.getNetworkId()), ERR_NULL_PORT_NET_ID);
    osNetworkStore.updatePort(osPort);
    log.info(String.format(MSG_PORT, portId, MSG_UPDATED));
}
// A port with an id but no network id must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void testUpdatePortWithNullNetworkId() { final Port testPort = NeutronPort.builder().build(); testPort.setId(PORT_ID); target.updatePort(testPort); }
/**
 * Merges pending timeline events and artifacts into this summary.
 * Any actual change flips {@code synced} to false and bumps the runtime modify time.
 * Fix: consistently use the extracted {@code key}/{@code pending} locals instead of
 * mixing them with repeated {@code entry.getKey()}/{@code entry.getValue()} calls.
 */
public void mergeRuntimeUpdate(
    List<TimelineEvent> pendingTimeline, Map<String, Artifact> pendingArtifacts) {
  // NOTE(review): timeline.addAll tolerates a null argument per the existing tests.
  if (timeline.addAll(pendingTimeline)) {
    synced = false;
  }
  if (pendingArtifacts != null && !pendingArtifacts.isEmpty()) {
    for (Map.Entry<String, Artifact> entry : pendingArtifacts.entrySet()) {
      String key = entry.getKey();
      Artifact pending = entry.getValue();
      if (!pending.equals(artifacts.get(key))) {
        if (artifacts.containsKey(key)
            && artifacts.get(key).getType() == Artifact.Type.DEFAULT
            && pending.getType() == Artifact.Type.DEFAULT) {
          // Both DEFAULT artifacts: merge data maps field-by-field instead of replacing.
          artifacts.get(key).asDefault().getData().putAll(pending.asDefault().getData());
        } else {
          artifacts.put(key, pending);
        }
        synced = false;
      }
    }
  }
  if (!synced) {
    runtimeState.setModifyTime(System.currentTimeMillis());
  }
}
// Merging a DEFAULT artifact into an existing DEFAULT artifact must merge the data
// maps field-by-field (new keys added, existing keys like "value" overwritten) rather
// than replace the artifact; the summary must also round-trip through JSON unchanged.
@Test public void testMergeDefaultArtifact() throws Exception { StepRuntimeSummary summary = loadObject( "fixtures/execution/sample-step-runtime-summary-1.json", StepRuntimeSummary.class); DefaultArtifact artifact = summary.getArtifacts().get("artifact1").asDefault(); assertEquals(1L, artifact.getValue()); assertEquals("bar", artifact.getField("foo")); Map<String, Artifact> artifacts = new LinkedHashMap<>(); DefaultArtifact artifact1 = new DefaultArtifact(); artifact1.setValue(12L); artifact1.add("value", 123L); artifact1.add("bar", true); artifact1.add("baz", 123L); artifacts.put("artifact1", artifact1); assertTrue(summary.isSynced()); summary.mergeRuntimeUpdate(null, artifacts); assertFalse(summary.isSynced()); String ser1 = MAPPER.writeValueAsString(summary); StepRuntimeSummary actual = MAPPER.readValue(ser1, StepRuntimeSummary.class); String ser2 = MAPPER.writeValueAsString(actual); assertEquals(summary, actual); assertEquals(ser1, ser2); artifact1 = summary.getArtifacts().get("artifact1").asDefault(); assertEquals(123L, artifact1.getValue()); assertEquals(123L, artifact1.getField("value")); assertEquals("bar", artifact1.getField("foo")); assertEquals(true, artifact1.getField("bar")); assertEquals(123L, artifact1.getField("baz")); }
// Writes the configured "ended" line (after variable substitution) to the output
// writer, skipping blank/absent lines. Errors are logged, not rethrown.
// NOTE: inverted-looking convention — returns true when an error occurred,
// false on success.
protected boolean writeEndedLine() { boolean retval = false; try { String sLine = environmentSubstitute( meta.getEndedLine() ); if ( sLine != null ) { if ( sLine.trim().length() > 0 ) { data.writer.write( getBinaryString( sLine ) ); incrementLinesOutput(); } } } catch ( Exception e ) { logError( "Error writing ended tag line: " + e.toString() ); logError( Const.getStackTracker( e ) ); retval = true; } return retval; }
// The ended line supports variable substitution: ${endvar} must be expanded to the
// variable's value before being written to the output stream.
@Test public void testEndedLineVar() throws Exception { TextFileOutputData data = new TextFileOutputData(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); data.writer = baos; TextFileOutputMeta meta = new TextFileOutputMeta(); meta.setEndedLine( "${endvar}" ); meta.setDefault(); meta.setEncoding( StandardCharsets.UTF_8.name() ); stepMockHelper.stepMeta.setStepMetaInterface( meta ); TextFileOutput textFileOutput = new TextFileOutputTestHandler( stepMockHelper.stepMeta, data, 0, stepMockHelper.transMeta, stepMockHelper.trans ); textFileOutput.meta = meta; textFileOutput.data = data; textFileOutput.setVariable( "endvar", "this is the end" ); textFileOutput.writeEndedLine(); assertEquals( "this is the end", baos.toString( StandardCharsets.UTF_8.name() ) ); }
@Override public List<String> assignSegment(String segmentName, Map<String, Map<String, String>> currentAssignment, InstancePartitions instancePartitions, InstancePartitionsType instancePartitionsType) { int numPartitions = instancePartitions.getNumPartitions(); checkReplication(instancePartitions, _replication, _tableName); int partitionId; if (_partitionColumn == null || numPartitions == 1) { partitionId = 0; } else { // Uniformly spray the segment partitions over the instance partitions if (_tableConfig.getTableType() == TableType.OFFLINE) { partitionId = SegmentAssignmentUtils .getOfflineSegmentPartitionId(segmentName, _tableName, _helixManager, _partitionColumn) % numPartitions; } else { partitionId = SegmentAssignmentUtils .getRealtimeSegmentPartitionId(segmentName, _tableName, _helixManager, _partitionColumn) % numPartitions; } } return SegmentAssignmentUtils.assignSegmentWithReplicaGroup(currentAssignment, instancePartitions, partitionId); }
// Without a partition column, every segment maps to partition 0 and segments are
// round-robined across the instances of each replica group.
@Test
public void testAssignSegmentWithoutPartition() {
  int numInstancesPerReplicaGroup = NUM_INSTANCES / NUM_REPLICAS;
  Map<String, Map<String, String>> currentAssignment = new TreeMap<>();
  for (int segmentId = 0; segmentId < NUM_SEGMENTS; segmentId++) {
    String segmentName = SEGMENTS.get(segmentId);
    List<String> instancesAssigned = _segmentAssignmentWithoutPartition
        .assignSegment(segmentName, currentAssignment, _instancePartitionsMapWithoutPartition);
    assertEquals(instancesAssigned.size(), NUM_REPLICAS);
    // Segment 0 should be assigned to instance 0, 6, 12
    // Segment 1 should be assigned to instance 1, 7, 13
    // Segment 2 should be assigned to instance 2, 8, 14
    // Segment 3 should be assigned to instance 3, 9, 15
    // Segment 4 should be assigned to instance 4, 10, 16
    // Segment 5 should be assigned to instance 5, 11, 17
    // Segment 6 should be assigned to instance 0, 6, 12
    // Segment 7 should be assigned to instance 1, 7, 13
    // ...
    for (int replicaGroupId = 0; replicaGroupId < NUM_REPLICAS; replicaGroupId++) {
      int expectedAssignedInstanceId = segmentId % numInstancesPerReplicaGroup + replicaGroupId * numInstancesPerReplicaGroup;
      assertEquals(instancesAssigned.get(replicaGroupId), INSTANCES.get(expectedAssignedInstanceId));
    }
    currentAssignment
        .put(segmentName, SegmentAssignmentUtils.getInstanceStateMap(instancesAssigned, SegmentStateModel.ONLINE));
  }
}
/**
 * Translates unreadable request bodies into a 400 Bad Request whose body carries a
 * sanitized message extracted from the exception.
 */
@ExceptionHandler({HttpMessageNotReadableException.class})
@ResponseStatus(HttpStatus.BAD_REQUEST)
protected ResponseEntity<RestError> handleHttpMessageNotReadableException(HttpMessageNotReadableException httpMessageNotReadableException) {
    RestError body = new RestError(getExceptionMessage(httpMessageNotReadableException));
    return new ResponseEntity<>(body, HttpStatus.BAD_REQUEST);
}
// A null cause and null message must degrade to an empty response message, not an NPE.
@Test public void handleHttpMessageNotReadableException_whenCauseIsNotInvalidFormatExceptionAndMessageIsNull_shouldUseEmptyStringAsMessage() { HttpMessageNotReadableException exception = new HttpMessageNotReadableException(null, (Exception) null); ResponseEntity<RestError> responseEntity = underTest.handleHttpMessageNotReadableException(exception); assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.BAD_REQUEST); assertThat(responseEntity.getBody().message()).isEmpty(); }
/**
 * Linearly scans the bucket's slots for the first slot holding the given tag.
 * Returns its position with status OK, or (-1, -1, FAILURE_KEY_NOT_FOUND) when absent.
 */
@Override
public TagPosition findTag(int bucketIndex, int tag) {
    int slotIndex = 0;
    while (slotIndex < mTagsPerBucket) {
        if (readTag(bucketIndex, slotIndex) == tag) {
            return new TagPosition(bucketIndex, slotIndex, CuckooStatus.OK);
        }
        slotIndex++;
    }
    return new TagPosition(-1, -1, CuckooStatus.FAILURE_KEY_NOT_FOUND);
}
// Every tag written into a bucket must be findable again in that bucket.
// NOTE(review): nextInt(0xff) yields 0..254, so tag value 255 is never exercised.
@Test public void findTagTest() { CuckooTable cuckooTable = createCuckooTable(); Random random = new Random(); for (int i = 0; i < NUM_BUCKETS; i++) { for (int j = 0; j < TAGS_PER_BUCKET; j++) { int tag = random.nextInt(0xff); cuckooTable.writeTag(i, j, tag); assertEquals(CuckooStatus.OK, cuckooTable.findTag(i, tag).getStatus()); } } }
/**
 * INT overload of cot: propagates null, otherwise widens to double and delegates to
 * the DOUBLE implementation.
 */
@Udf(description = "Returns the cotangent of an INT value")
public Double cot(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the cotangent of."
    ) final Integer value
) {
    if (value == null) {
        return cot((Double) null);
    }
    return cot(value.doubleValue());
}
// Inputs beyond 2*pi must still be computed correctly; the int/long overloads must
// agree with the double implementation (7 vs 7L).
@Test public void shouldHandleMoreThanPositive2Pi() { assertThat(udf.cot(9.1), closeTo(-2.9699983263892054, 0.000000000000001)); assertThat(udf.cot(6.3), closeTo(59.46619211372627, 0.000000000000001)); assertThat(udf.cot(7), closeTo(1.1475154224051356, 0.000000000000001)); assertThat(udf.cot(7L), closeTo(1.1475154224051356, 0.000000000000001)); }
// Ensures the template code is unique: throws SMS_TEMPLATE_CODE_DUPLICATE when another
// template already uses the code. A null id means "create", so any existing template
// with the code is a duplicate; otherwise only a different-id owner is a duplicate.
@VisibleForTesting
public void validateSmsTemplateCodeDuplicate(Long id, String code) {
    SmsTemplateDO template = smsTemplateMapper.selectByCode(code);
    if (template == null) {
        return;
    }
    // If id is null, this is a create, so there is no "same id" record to exempt.
    if (id == null) {
        throw exception(SMS_TEMPLATE_CODE_DUPLICATE, code);
    }
    if (!template.getId().equals(id)) {
        throw exception(SMS_TEMPLATE_CODE_DUPLICATE, code);
    }
}
@Test public void testValidateDictDataValueUnique_valueDuplicateForUpdate() { // 准备参数 Long id = randomLongId(); String code = randomString(); // mock 数据 smsTemplateMapper.insert(randomSmsTemplateDO(o -> o.setCode(code))); // 调用,校验异常 assertServiceException(() -> smsTemplateService.validateSmsTemplateCodeDuplicate(id, code), SMS_TEMPLATE_CODE_DUPLICATE, code); }
@SuppressWarnings({"unchecked", "rawtypes"}) public static int compareTo(final Comparable thisValue, final Comparable otherValue, final OrderDirection orderDirection, final NullsOrderType nullsOrderType, final boolean caseSensitive) { if (null == thisValue && null == otherValue) { return 0; } if (null == thisValue) { return NullsOrderType.FIRST == nullsOrderType ? -1 : 1; } if (null == otherValue) { return NullsOrderType.FIRST == nullsOrderType ? 1 : -1; } if (!caseSensitive && thisValue instanceof String && otherValue instanceof String) { return compareToCaseInsensitiveString((String) thisValue, (String) otherValue, orderDirection); } return OrderDirection.ASC == orderDirection ? thisValue.compareTo(otherValue) : -thisValue.compareTo(otherValue); }
// DESC + NULLS FIRST: the null second value sorts first, so the non-null first value
// compares greater (returns 1).
@Test void assertCompareToWhenSecondValueIsNullForOrderByDescAndNullsFirst() { assertThat(CompareUtils.compareTo(1, null, OrderDirection.DESC, NullsOrderType.FIRST, caseSensitive), is(1)); }
/**
 * Extracts the trailing version number from a rule path, i.e. the digits after the
 * rule's versions node ("&lt;versionsNode&gt;/&lt;digits&gt;" at end of path).
 */
public static Optional<String> getVersion(final String ruleName, final String rulePath) {
    final String regex = getVersionsNode(ruleName) + "/(\\d+)$";
    Matcher matcher = Pattern.compile(regex, Pattern.CASE_INSENSITIVE).matcher(rulePath);
    if (matcher.find()) {
        return Optional.of(matcher.group(1));
    }
    return Optional.empty();
}
// "/rules/transaction/versions/0" must yield version "0".
@Test void assertGetVersion() { Optional<String> actual = GlobalNodePath.getVersion("transaction", "/rules/transaction/versions/0"); assertTrue(actual.isPresent()); assertThat(actual.get(), is("0")); }
// Validates a project key, throwing IllegalArgumentException (via checkArgument) when
// isValidProjectKey rejects it; the message includes the offending key and the
// allowed-characters hint.
public static void checkProjectKey(String keyCandidate) { checkArgument(isValidProjectKey(keyCandidate), MALFORMED_KEY_MESSAGE, keyCandidate, ALLOWED_CHARACTERS_MESSAGE); }
// An empty project key is malformed and must be rejected.
@Test public void checkProjectKey_fail_if_key_is_empty() { assertThatThrownBy(() -> ComponentKeys.checkProjectKey("")) .isInstanceOf(IllegalArgumentException.class); }
// Deletes the given keys. In queueing/pipelining mode each DEL is merely queued and
// null is returned (results arrive when the pipeline executes); otherwise all DELs run
// in one batch and the summed per-key delete counts are returned.
// NOTE(review): the queued path writes with LongCodec while the batch path uses
// StringCodec — looks inconsistent; confirm which codec RedisCommands.DEL expects.
@Override public Long del(byte[]... keys) { if (isQueueing() || isPipelined()) { for (byte[] key: keys) { write(key, LongCodec.INSTANCE, RedisCommands.DEL, key); } return null; } CommandBatchService es = new CommandBatchService(executorService); for (byte[] key: keys) { es.writeAsync(key, StringCodec.INSTANCE, RedisCommands.DEL, key); } BatchResult<Long> b = (BatchResult<Long>) es.execute(); return b.getResponses().stream().collect(Collectors.summarizingLong(v -> v)).getSum(); }
// Deleting 10 existing keys must report a total of 10 removed.
@Test public void testDel() { List<byte[]> keys = new ArrayList<>(); for (int i = 0; i < 10; i++) { byte[] key = ("test" + i).getBytes(); keys.add(key); connection.set(key, ("test" + i).getBytes()); } assertThat(connection.del(keys.toArray(new byte[0][]))).isEqualTo(10); }
/**
 * Builds a tombstone record for an offset commit: the key (version 1) identifies the
 * group/topic/partition entry and the value is null, signalling deletion.
 */
public static CoordinatorRecord newOffsetCommitTombstoneRecord(
    String groupId,
    String topic,
    int partitionId
) {
    OffsetCommitKey key = new OffsetCommitKey()
        .setGroup(groupId)
        .setTopic(topic)
        .setPartition(partitionId);
    return new CoordinatorRecord(new ApiMessageAndVersion(key, (short) 1), null);
}
// The tombstone must carry the offset-commit key at version 1 and a null value.
@Test public void testNewOffsetCommitTombstoneRecord() { CoordinatorRecord expectedRecord = new CoordinatorRecord( new ApiMessageAndVersion( new OffsetCommitKey() .setGroup("group-id") .setTopic("foo") .setPartition(1), (short) 1), null); CoordinatorRecord record = GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord("group-id", "foo", 1); assertEquals(expectedRecord, record); }
/**
 * Compares two raw IP addresses (IPv4 = 4 bytes, IPv6 = 16 bytes) under a
 * routing-prefix mask. Addresses of different lengths never match.
 * Fix: the previous exception message ("how many bytes does an IP address have
 * again?") was flippant and uninformative — it now reports the offending length.
 *
 * @throws IllegalArgumentException when both arrays share an unsupported length
 */
public static boolean isMatchWithPrefix(final byte[] candidate, final byte[] expected, final int prefixLength) {
    if (candidate.length != expected.length) {
        return false;
    }
    if (candidate.length == 4) {
        final int mask = prefixLengthToIpV4Mask(prefixLength);
        return (toInt(candidate) & mask) == (toInt(expected) & mask);
    }
    if (candidate.length == 16) {
        // IPv6: upper 64 bits masked by the first up-to-64 prefix bits, lower 64 by the remainder.
        final long upperMask = prefixLengthToIpV6Mask(min(prefixLength, 64));
        final long lowerMask = prefixLengthToIpV6Mask(max(prefixLength - 64, 0));
        return (upperMask & toLong(candidate, 0)) == (upperMask & toLong(expected, 0))
                && (lowerMask & toLong(candidate, 8)) == (lowerMask & toLong(expected, 8));
    }
    throw new IllegalArgumentException("Unsupported IP address length: " + candidate.length + " bytes (expected 4 or 16)");
}
// Arrays of different lengths never match, regardless of prefix length.
@Test void shouldNotMatchIfLengthsAreDifferent() { assertFalse(isMatchWithPrefix(new byte[0], new byte[3], 0)); assertFalse(isMatchWithPrefix(new byte[1], new byte[2], 0)); assertFalse(isMatchWithPrefix(new byte[5], new byte[5000], 0)); }
/**
 * Looks up a single config row by primary key, selecting only the columns needed to
 * populate a ConfigInfo. Returns null when no row matches (per queryOne semantics).
 */
@Override
public ConfigInfo findConfigInfo(long id) {
    ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO);
    final List<String> columns = Arrays.asList("id", "data_id", "group_id", "tenant_id", "app_name", "content");
    final String sql = configInfoMapper.select(columns, Collections.singletonList("id"));
    return databaseOperate.queryOne(sql, new Object[] {id}, CONFIG_INFO_ROW_MAPPER);
}
// NOTE(review): despite the pairing, this test exercises the (dataId, group, tenant)
// overload of findConfigInfo, not the findConfigInfo(long id) variant.
@Test void testFindConfigInfoByDataId() { String dataId = "dataId4567"; String group = "group3456789"; String tenant = "tenant4567890"; ConfigInfoWrapper configInfoWrapper = new ConfigInfoWrapper(); configInfoWrapper.setDataId(dataId); configInfoWrapper.setGroup(group); configInfoWrapper.setTenant(tenant); Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_INFO_WRAPPER_ROW_MAPPER))) .thenReturn(configInfoWrapper); ConfigInfo configReturn = embeddedConfigInfoPersistService.findConfigInfo(dataId, group, tenant); assertEquals(dataId, configReturn.getDataId()); }
/**
 * Resolves healthy instances of the given service and maps each to a ServiceDTO.
 * Best-effort: a NacosException is logged and an empty list is returned.
 */
@Override
public List<ServiceDTO> getServiceInstances(String serviceId) {
    try {
        return namingService.selectInstances(serviceId, true).stream()
                .map(instance -> toServiceDTO(instance, serviceId))
                .collect(Collectors.toList());
    } catch (NacosException ex) {
        logger.error(ex.getMessage(), ex);
        return Collections.emptyList();
    }
}
// One healthy instance must map to exactly one ServiceDTO whose app name is the
// service id and whose homepage URL is built from ip:port.
@Test public void testGetServiceInstances() throws Exception { String someIp = "1.2.3.4"; int somePort = 8080; String someInstanceId = "someInstanceId"; Instance someServiceInstance = mockServiceInstance(someInstanceId, someIp, somePort); when(nacosNamingService.selectInstances(someServiceId, true)).thenReturn( Lists.newArrayList(someServiceInstance)); List<ServiceDTO> serviceDTOList = nacosDiscoveryService.getServiceInstances(someServiceId); ServiceDTO serviceDTO = serviceDTOList.get(0); assertEquals(1, serviceDTOList.size()); assertEquals(someServiceId, serviceDTO.getAppName()); assertEquals("http://1.2.3.4:8080/", serviceDTO.getHomepageUrl()); }
// Convenience overload delegating with the third flag set to true.
// NOTE(review): per the paired test, this mode raises IllegalArgumentException for
// fields missing from idSourceSchema — presumably the flag enables strict matching;
// confirm against the three-arg overload.
public static Schema reassignIds(Schema schema, Schema idSourceSchema) { return reassignIds(schema, idSourceSchema, true); }
// A field absent from the source schema must raise IllegalArgumentException with a
// message naming the missing field.
@Test public void testReassignIdsIllegalArgumentException() { Schema schema = new Schema( required(1, "a", Types.IntegerType.get()), required(2, "b", Types.IntegerType.get())); Schema sourceSchema = new Schema(required(1, "a", Types.IntegerType.get())); assertThatThrownBy(() -> TypeUtil.reassignIds(schema, sourceSchema)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Field b not found in source schema"); }
// Performs a JAAS login and, when the resulting credential expires, starts a daemon
// Refresher thread that re-logs-in ahead of expiry. Returns the active LoginContext.
// No refresher thread is started when the credential does not expire, or when the
// clock appears skewed (credential already expired at login time).
public LoginContext login() throws LoginException {
    LoginContext tmpLoginContext = loginContextFactory.createLoginContext(this);
    tmpLoginContext.login();
    log.info("Successfully logged in.");
    loginContext = tmpLoginContext;
    subject = loginContext.getSubject();
    expiringCredential = expiringCredential();
    hasExpiringCredential = expiringCredential != null;
    if (!hasExpiringCredential) {
        // do not bother with re-logins.
        log.debug("No Expiring Credential");
        principalName = null;
        refresherThread = null;
        return loginContext;
    }
    principalName = expiringCredential.principalName();
    // Check for a clock skew problem
    long expireTimeMs = expiringCredential.expireTimeMs();
    long nowMs = currentMs();
    if (nowMs > expireTimeMs) {
        log.error(
            "[Principal={}]: Current clock: {} is later than expiry {}. This may indicate a clock skew problem."
                + " Check that this host's and remote host's clocks are in sync. Not starting refresh thread."
                + " This process is likely unable to authenticate SASL connections (for example, it is unlikely"
                + " to be able to authenticate a connection with a Kafka Broker).",
            principalLogText(), new Date(nowMs), new Date(expireTimeMs));
        return loginContext;
    }
    if (log.isDebugEnabled())
        log.debug("[Principal={}]: It is an expiring credential", principalLogText());
    /*
     * Re-login periodically. How often is determined by the expiration date of the
     * credential and refresh-related configuration values.
     */
    refresherThread = KafkaThread.daemon(String.format("kafka-expiring-relogin-thread-%s", principalName),
        new Refresher());
    refresherThread.start();
    loginContextFactory.refresherThreadStarted();
    return loginContext;
}
// Credential lifetime (10 min) is shorter than the configured min-period/buffer
// settings (> 5 min each), so the buffers cannot be honored; the refresher must fall
// back to refreshing at the configured percentage of the lifetime. Exactly one
// re-login is expected before the absolute last-refresh cutoff.
@Test public void testRefreshWithExpirationSmallerThanConfiguredBuffers() throws Exception { int numExpectedRefreshes = 1; boolean clientReloginAllowedBeforeLogout = true; final LoginContext mockLoginContext = mock(LoginContext.class); Subject subject = new Subject(); when(mockLoginContext.getSubject()).thenReturn(subject); MockTime mockTime = new MockTime(); long startMs = mockTime.milliseconds(); /* * Identify the lifetime of each expiring credential */ long lifetimeMinutes = 10L; /* * Identify the point at which refresh will occur in that lifetime */ long refreshEveryMinutes = 8L; /* * Set an absolute last refresh time that will cause the login thread to exit * after a certain number of re-logins (by adding an extra half of a refresh * interval). */ long absoluteLastRefreshMs = startMs + (1 + numExpectedRefreshes) * 1000 * 60 * refreshEveryMinutes - 1000 * 60 * refreshEveryMinutes / 2; /* * Identify buffer time on either side for the refresh algorithm that will cause * the entire lifetime to be taken up. In other words, make sure there is no way * to honor the buffers. */ short minPeriodSeconds = (short) (1 + lifetimeMinutes * 60 / 2); short bufferSeconds = minPeriodSeconds; /* * Define some listeners so we can keep track of who gets done and when. All * added listeners should end up done except the last, extra one, which should * not. 
*/ MockScheduler mockScheduler = new MockScheduler(mockTime); List<KafkaFutureImpl<Long>> waiters = addWaiters(mockScheduler, 1000 * 60 * refreshEveryMinutes, numExpectedRefreshes + 1); // Create the ExpiringCredentialRefreshingLogin instance under test TestLoginContextFactory testLoginContextFactory = new TestLoginContextFactory(); TestExpiringCredentialRefreshingLogin testExpiringCredentialRefreshingLogin = new TestExpiringCredentialRefreshingLogin( refreshConfigThatPerformsReloginEveryGivenPercentageOfLifetime( 1.0 * refreshEveryMinutes / lifetimeMinutes, minPeriodSeconds, bufferSeconds, clientReloginAllowedBeforeLogout), testLoginContextFactory, mockTime, 1000 * 60 * lifetimeMinutes, absoluteLastRefreshMs, clientReloginAllowedBeforeLogout); testLoginContextFactory.configure(mockLoginContext, testExpiringCredentialRefreshingLogin); /* * Perform the login, wait up to a certain amount of time for the refresher * thread to exit, and make sure the correct calls happened at the correct times */ long expectedFinalMs = startMs + numExpectedRefreshes * 1000 * 60 * refreshEveryMinutes; assertFalse(testLoginContextFactory.refresherThreadStartedFuture().isDone()); assertFalse(testLoginContextFactory.refresherThreadDoneFuture().isDone()); testExpiringCredentialRefreshingLogin.login(); assertTrue(testLoginContextFactory.refresherThreadStartedFuture().isDone()); testLoginContextFactory.refresherThreadDoneFuture().get(1L, TimeUnit.SECONDS); assertEquals(expectedFinalMs, mockTime.milliseconds()); for (int i = 0; i < numExpectedRefreshes; ++i) { KafkaFutureImpl<Long> waiter = waiters.get(i); assertTrue(waiter.isDone()); assertEquals((i + 1) * 1000 * 60 * refreshEveryMinutes, waiter.get() - startMs); } assertFalse(waiters.get(numExpectedRefreshes).isDone()); InOrder inOrder = inOrder(mockLoginContext); inOrder.verify(mockLoginContext).login(); for (int i = 0; i < numExpectedRefreshes; ++i) { inOrder.verify(mockLoginContext).login(); inOrder.verify(mockLoginContext).logout(); } }
/**
 * Lists the config files in the checkout directory matching the effective pattern.
 * An explicit "pattern" property in the repo configuration overrides the default.
 */
public File[] getFiles(File configRepoCheckoutDirectory, PartialConfigLoadContext context) {
    return getFiles(configRepoCheckoutDirectory, patternFor(context));
}

// Resolves the glob pattern: an explicit "pattern" property wins over the default.
private String patternFor(PartialConfigLoadContext context) {
    Configuration configuration = context.configuration();
    if (configuration == null) {
        return defaultPattern;
    }
    ConfigurationProperty explicitPattern = configuration.getProperty("pattern");
    return explicitPattern == null ? defaultPattern : explicitPattern.getValue();
}
// When the repo configuration supplies a "pattern" property, only files matching that
// pattern (here *.myextension) are returned; the default gocd xml patterns are ignored.
@Test public void shouldUseExplicitPatternWhenProvided() throws Exception { GoConfigMother mother = new GoConfigMother(); PipelineConfig pipe1 = mother.cruiseConfigWithOnePipelineGroup().getAllPipelineConfigs().get(0); File file1 = helper.addFileWithPipeline("pipe1.myextension", pipe1); File file2 = helper.addFileWithPipeline("pipe1.gcd.xml", pipe1); File file3 = helper.addFileWithPipeline("subdir/pipe1.gocd.xml", pipe1); File file4 = helper.addFileWithPipeline("subdir/sub/pipe1.gocd.xml", pipe1); PartialConfigLoadContext context = mock(PartialConfigLoadContext.class); Configuration configs = new Configuration(); configs.addNewConfigurationWithValue("pattern","*.myextension",false); when(context.configuration()).thenReturn(configs); File[] matchingFiles = xmlPartialProvider.getFiles(tmpFolder, context); File[] expected = new File[1]; expected[0] = file1; assertArrayEquals(expected,matchingFiles); }
/**
 * Extracts MSH-18 (character set) from the MSH segment of an HL7 message, or ""
 * when the message is null/empty or the field is absent or empty.
 * Fix: the local previously named {@code startOfMsh19} marks the start of MSH-18
 * here (apparently copy-pasted from a findMsh19 sibling) — renamed to match.
 */
public String findMsh18(byte[] hl7Message, Charset charset) {
    String answer = "";
    if (hl7Message != null && hl7Message.length > 0) {
        List<Integer> fieldSeparatorIndexes = findFieldSeparatorIndicesInSegment(hl7Message, 0);
        // MSH-18 lies between the 17th and 18th field separators of the segment
        // (MSH-1 is the separator itself, so field n sits after separator n-2).
        if (fieldSeparatorIndexes.size() > 17) {
            int startOfMsh18 = fieldSeparatorIndexes.get(16) + 1;
            int length = fieldSeparatorIndexes.get(17) - fieldSeparatorIndexes.get(16) - 1;
            if (length > 0) {
                answer = new String(hl7Message, startOfMsh18, length, charset);
            }
        }
    }
    return answer;
}
// MSH-18 is absent but trailing field separators are present: the empty string
// must be returned rather than a separator or neighbouring field content.
@Test
public void testFindMsh18WhenMissingWithTrailingPipe() {
    final String testMessage = MSH_SEGMENT + "|||||||" + '\r' + REMAINING_SEGMENTS;
    assertEquals("", hl7util.findMsh18(testMessage.getBytes(), charset));
}
/**
 * Renders a {@link JavaSerializationFilterConfig} as an ordered map suitable for
 * YAML generation: the defaults-disabled flag (only when non-null), then the
 * blacklist and whitelist class filters.
 */
static Map<String, Object> javaSerializationFilterGenerator(JavaSerializationFilterConfig jsfConfig) {
    Map<String, Object> filterAsMap = new LinkedHashMap<>();
    addNonNullToMap(filterAsMap, "defaults-disabled", jsfConfig.isDefaultsDisabled());
    // Preserve the original evaluation order: whitelist generated first,
    // but the blacklist entry is inserted into the map ahead of it.
    Map<String, Object> whitelistAsMap = classFilterGenerator(jsfConfig.getWhitelist());
    Map<String, Object> blacklistAsMap = classFilterGenerator(jsfConfig.getBlacklist());
    filterAsMap.put("blacklist", blacklistAsMap);
    filterAsMap.put("whitelist", whitelistAsMap);
    return filterAsMap;
}
// The defaults-disabled flag and the configured blacklist must survive the
// conversion into the YAML-oriented map representation.
@Test
public void testJavaSerializationConfig() {
    JavaSerializationFilterConfig jsfConfig = new JavaSerializationFilterConfig();
    jsfConfig.setDefaultsDisabled(true);
    jsfConfig.setBlacklist(createClassFilter());

    Map<String, Object> jsfAsMap = DynamicConfigYamlGenerator.javaSerializationFilterGenerator(jsfConfig);

    assertTrue((boolean) jsfAsMap.get("defaults-disabled"));
    assertClassFilterAsMap((Map<String, Object>) jsfAsMap.get("blacklist"));
}
/** Returns {@code true} when {@code text} fully matches the precompiled URL pattern. */
public static boolean isUrl(String text) {
    return IS_URL_TEST.matcher(text).matches();
}
// A well-formed https URL matches; arbitrary prose does not.
@Test
public void testIsUrl() {
    String text = "https://github.com";
    assertTrue(UrlStringUtils.isUrl(text));
    text = "simple text";
    assertFalse(UrlStringUtils.isUrl(text));
}
/**
 * Updates an existing floating IP from the JSON request body.
 * When this node is not the HA-active instance (and no default active IP is
 * configured), the request is forwarded to the active peer via syncPut.
 *
 * @param id    floating IP identifier from the URL path
 * @param input JSON representation of the floating IP
 * @return 200 OK on success
 * @throws IOException if the request body cannot be read
 */
@PUT
@Path("{id}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response updateFloatingIp(@PathParam("id") String id, InputStream input) throws IOException {
    log.trace(String.format(MESSAGE, "UPDATE " + id));
    String inputStr = IOUtils.toString(input, REST_UTF8);

    // Delegate to the active HA node when this instance is passive.
    if (!haService.isActive()
            && !DEFAULT_ACTIVE_IP_ADDRESS.equals(haService.getActiveIp())) {
        return syncPut(haService, FLOATING_IPS, id, inputStr);
    }

    final NeutronFloatingIP floatingIp = (NeutronFloatingIP)
            jsonToModelEntity(inputStr, NeutronFloatingIP.class);

    adminService.updateFloatingIp(floatingIp);

    return status(Response.Status.OK).build();
}
// Updating a floating IP whose ID does not exist (admin service throws
// IllegalArgumentException) must surface as HTTP 400 BAD_REQUEST.
@Test
public void testUpdateFloatingIpWithNonexistId() {
    expect(mockOpenstackHaService.isActive()).andReturn(true).anyTimes();
    replay(mockOpenstackHaService);
    mockOpenstackRouterAdminService.updateFloatingIp(anyObject());
    expectLastCall().andThrow(new IllegalArgumentException());
    replay(mockOpenstackRouterAdminService);

    final WebTarget wt = target();
    InputStream jsonStream = OpenstackFloatingIpWebResourceTest.class
            .getResourceAsStream("openstack-floatingip1.json");

    Response response = wt.path(PATH + "/2f245a7b-796b-4f26-9cf9-9e82d248fda7")
            .request(MediaType.APPLICATION_JSON_TYPE)
            .put(Entity.json(jsonStream));
    final int status = response.getStatus();

    assertThat(status, is(400));

    verify(mockOpenstackRouterAdminService);
}
/**
 * Builds a Graphite reporter for the given registry, choosing UDP or TCP
 * transport based on the configured {@code transport} value ("udp" selects UDP;
 * anything else falls back to the TCP Graphite sender).
 */
@Override
public ScheduledReporter build(MetricRegistry registry) {
    GraphiteReporter.Builder builder = builder(registry);

    if ("udp".equalsIgnoreCase(transport)) {
        return builder.build(new GraphiteUDP(host, port));
    } else {
        return builder.build(new Graphite(host, port));
    }
}
// A factory built from the default (empty) YAML configuration must report no
// explicit reporting frequency.
@Test
void createDefaultFactory() throws Exception {
    final GraphiteReporterFactory factory = new YamlConfigurationFactory<>(GraphiteReporterFactory.class,
        BaseValidator.newValidator(), Jackson.newObjectMapper(), "dw")
        .build();
    assertThat(factory.getFrequency()).isNotPresent();
}
/** Returns the fixed lifecycle phase in which this analyzer runs. */
@Override
public AnalysisPhase getAnalysisPhase() {
    return ANALYSIS_PHASE;
}
// The analyzer must report the post-information-collection phase.
// NOTE(review): the expected constant name POST_INFORMATION_COLLECTION1 has an
// unusual trailing "1" — confirm it matches the AnalysisPhase enum declaration.
@Test
public void testGetAnalysisPhase() {
    DependencyMergingAnalyzer instance = new DependencyMergingAnalyzer();
    AnalysisPhase expResult = AnalysisPhase.POST_INFORMATION_COLLECTION1;
    AnalysisPhase result = instance.getAnalysisPhase();
    assertEquals(expResult, result);
}
/**
 * Seeks the consumer to the end offset of every supplied intermediate topic
 * partition, printing a summary of the known topics being reset. Does nothing
 * when the partition set is empty.
 */
public void maybeSeekToEnd(final String groupId,
                           final Consumer<byte[], byte[]> client,
                           final Set<TopicPartition> intermediateTopicPartitions) {
    // Nothing to reset when no intermediate topics were supplied.
    if (intermediateTopicPartitions.isEmpty()) {
        return;
    }
    System.out.println("Following intermediate topics offsets will be reset to end (for consumer group "
        + groupId + ")");
    for (final TopicPartition partition : intermediateTopicPartitions) {
        // Only echo topics this resetter actually knows about.
        if (allTopics.contains(partition.topic())) {
            System.out.println("Topic: " + partition.topic());
        }
    }
    client.seekToEnd(intermediateTopicPartitions);
}
// Verifies maybeSeekToEnd repositions the consumer for the supplied
// intermediate partition using the mock consumer's registered end offsets.
@Test
public void shouldSeekToEndOffset() {
    final Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(topicPartition, 3L);
    consumer.updateEndOffsets(endOffsets);

    final Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(topicPartition, 0L);
    consumer.updateBeginningOffsets(beginningOffsets);

    final Set<TopicPartition> intermediateTopicPartitions = new HashSet<>();
    intermediateTopicPartitions.add(topicPartition);
    streamsResetter.maybeSeekToEnd("g1", consumer, intermediateTopicPartitions);

    final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
    assertEquals(2, records.count());
}
/** Returns the open price of the bar at {@code index} in the backing series. */
@Override
public Num getValue(int index) {
    return getBarSeries().getBar(index).getOpenPrice();
}
// Each indicator value must equal the open price of the corresponding bar.
@Test
public void indicatorShouldRetrieveBarOpenPrice() {
    for (int i = 0; i < 10; i++) {
        assertEquals(openPriceIndicator.getValue(i), barSeries.getBar(i).getOpenPrice());
    }
}
/**
 * Whether the container can be killed; delegates to the stoppable-state check,
 * so the killable and stoppable state sets are identical by construction.
 */
public static boolean isKillable(DockerContainerStatus containerStatus) {
    return isStoppable(containerStatus);
}
// Only RUNNING and RESTARTING containers are killable; every other status
// (including terminal and transitional ones) must report false.
@Test
public void testIsKIllable() {
    assertTrue(DockerCommandExecutor.isKillable(
        DockerContainerStatus.RUNNING));
    assertTrue(DockerCommandExecutor.isKillable(
        DockerContainerStatus.RESTARTING));
    assertFalse(DockerCommandExecutor.isKillable(
        DockerContainerStatus.EXITED));
    assertFalse(DockerCommandExecutor.isKillable(
        DockerContainerStatus.CREATED));
    assertFalse(DockerCommandExecutor.isKillable(
        DockerContainerStatus.DEAD));
    assertFalse(DockerCommandExecutor.isKillable(
        DockerContainerStatus.NONEXISTENT));
    assertFalse(DockerCommandExecutor.isKillable(
        DockerContainerStatus.REMOVING));
    assertFalse(DockerCommandExecutor.isKillable(
        DockerContainerStatus.STOPPED));
    assertFalse(DockerCommandExecutor.isKillable(
        DockerContainerStatus.UNKNOWN));
}
/**
 * Returns a new BloomFilter containing this filter's bits merged (OR-ed via
 * addAll) with every supplied filter. A null varargs array yields a plain copy.
 *
 * @throws IllegalArgumentException if any supplied filter is not a BloomFilter
 */
public Filter merge(Filter... filters) {
    final BloomFilter merged = new BloomFilter(this.getHashCount(), (BitSet) this.filter().clone());
    if (filters == null) {
        return merged;
    }
    for (final Filter candidate : filters) {
        if (!(candidate instanceof BloomFilter)) {
            throw new IllegalArgumentException("Cannot merge filters of different class");
        }
        merged.addAll((BloomFilter) candidate);
    }
    return merged;
}
// Merging bf (containing "a") into bf2 (containing "c") must report membership
// of both elements, and still reject an element never added.
@Test
public void testMerge() {
    bf.add("a");
    bf2.add("c");
    BloomFilter[] bfs = new BloomFilter[1];
    bfs[0] = bf;
    BloomFilter mergeBf = (BloomFilter) bf2.merge(bf);
    assertTrue(mergeBf.isPresent("a"));
    assertFalse(mergeBf.isPresent("b"));
    assertTrue(mergeBf.isPresent("c"));
}
/**
 * Looks up attributes for a file. The root path and bucket (container) paths
 * carry no per-object metadata here and return {@link PathAttributes#EMPTY};
 * all other paths are resolved via the details lookup.
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if (file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if (containerService.isContainer(file)) {
        return PathAttributes.EMPTY;
    }
    return this.toAttributes(this.details(file));
}
// A bucket (container) path must return the empty-attribute sentinel values:
// unknown size (-1) and no region.
@Test
public void testFindBucket() throws Exception {
    final Path container = new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final PathAttributes attributes = new SpectraAttributesFinderFeature(session).find(container);
    assertEquals(-1L, attributes.getSize());
    assertNull(attributes.getRegion());
    assertEquals(EnumSet.of(Path.Type.directory, Path.Type.volume), container.getType());
    new SpectraDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Splits a logger name into its dot-separated parts, using
 * getSeparatorIndexOf to locate each separator. The trailing segment
 * (after the last separator) is always included, so "a.b.c" yields
 * ["a", "b", "c"] and a name without separators yields a single part.
 */
public static List<String> computeNameParts(String loggerName) {
    List<String> partList = new ArrayList<String>();
    int start = 0;
    int separatorIndex;
    // Consume one segment per separator; the remainder is appended after the loop.
    while ((separatorIndex = getSeparatorIndexOf(loggerName, start)) != -1) {
        partList.add(loggerName.substring(start, separatorIndex));
        start = separatorIndex + 1;
    }
    partList.add(loggerName.substring(start));
    return partList;
}
// Smoke test: "a.b.c" splits into exactly ["a", "b", "c"].
@Test
public void smoke0() {
    List<String> witnessList = new ArrayList<String>();
    witnessList.add("a");
    witnessList.add("b");
    witnessList.add("c");
    List<String> partList = LoggerNameUtil.computeNameParts("a.b.c");
    assertEquals(witnessList, partList);
}
@Override public String authenticateRequest(final HttpServletRequest request) { final String smUser = request.getHeader(SITE_MINDER_HEADER.getValue()); if (smUser == null || smUser.trim().isEmpty()) { // SiteMinder has not authenticated the user return null; } else { return smUser; } }
// A blank SM_USER header must be treated as "not authenticated" (null result).
@Test
public void willNotAuthenticateABlankUser() {
    doReturn("").when(request).getHeader("SM_USER");
    final String authenticatedUser = authenticator.authenticateRequest(request);
    assertThat(authenticatedUser, is(nullValue()));
}
/**
 * Evaluates every staining rule against the exchange and returns the union of
 * labels from rules whose conditions all match. Returns an empty map when the
 * rule set is null or empty, or when no rule matches.
 */
Map<String, String> execute(ServerWebExchange exchange, StainingRule stainingRule) {
    if (stainingRule == null) {
        return Collections.emptyMap();
    }
    List<StainingRule.Rule> rules = stainingRule.getRules();
    if (CollectionUtils.isEmpty(rules)) {
        return Collections.emptyMap();
    }

    Map<String, String> stainedLabels = new HashMap<>();
    for (StainingRule.Rule rule : rules) {
        List<Condition> conditions = rule.getConditions();
        // Collect the distinct condition keys so their actual values can be
        // resolved from the exchange in a single pass.
        Set<String> conditionKeys = new HashSet<>();
        for (Condition condition : conditions) {
            conditionKeys.add(condition.getKey());
        }
        Map<String, String> actualValues = SpringWebExpressionLabelUtils.resolve(exchange, conditionKeys);
        if (ConditionUtils.match(actualValues, conditions)) {
            stainedLabels.putAll(KVPairUtils.toMap(rule.getLabels()));
        }
    }
    return stainedLabels;
}
// One condition matches (source=wx) but the other does not (uid=10001 != 1000),
// so no labels may be stained: the result must be an empty map.
@Test
public void testNotMatchCondition() {
    Condition condition1 = new Condition();
    condition1.setKey("${http.header.uid}");
    condition1.setOperation(Operation.EQUALS.toString());
    condition1.setValues(Collections.singletonList("1000"));

    Condition condition2 = new Condition();
    condition2.setKey("${http.query.source}");
    condition2.setOperation(Operation.IN.toString());
    condition2.setValues(Collections.singletonList("wx"));

    StainingRule.Rule rule = new StainingRule.Rule();
    rule.setConditions(Arrays.asList(condition1, condition2));

    KVPair kvPair = new KVPair();
    kvPair.setKey("env");
    kvPair.setValue("blue");
    rule.setLabels(Collections.singletonList(kvPair));

    StainingRule stainingRule = new StainingRule();
    stainingRule.setRules(Collections.singletonList(rule));

    MockServerHttpRequest request = MockServerHttpRequest.get("/users")
            .queryParam("source", "wx")
            .header("uid", "10001").build();
    MockServerWebExchange exchange = new MockServerWebExchange.Builder(request).build();

    RuleStainingExecutor executor = new RuleStainingExecutor();

    Map<String, String> stainedLabels = executor.execute(exchange, stainingRule);

    assertThat(stainedLabels).isNotNull();
    assertThat(stainedLabels.size()).isEqualTo(0);
}
/** Lists every table by delegating to the database-scoped query with an empty filter. */
@Override
public Iterable<HouseTable> findAll() {
    return this.findAllByDatabaseId("");
}
// findAll must return every table the HTS server reports, across databases.
// The mock server's response body is populated via reflection because the
// generated response type exposes no public setter for "results".
@Test
public void testListOfAllTables() {
    List<UserTable> tables = new ArrayList<>();
    tables.add(houseTableMapper.toUserTable(HOUSE_TABLE));
    tables.add(houseTableMapper.toUserTable(HOUSE_TABLE_SAME_DB));
    tables.add(houseTableMapper.toUserTable(HOUSE_TABLE_DIFF_DB));

    GetAllEntityResponseBodyUserTable listResponse = new GetAllEntityResponseBodyUserTable();
    Field resultField = ReflectionUtils.findField(GetAllEntityResponseBodyUserTable.class, "results");
    Assertions.assertNotNull(resultField);
    ReflectionUtils.makeAccessible(resultField);
    ReflectionUtils.setField(resultField, listResponse, tables);

    mockHtsServer.enqueue(
        new MockResponse()
            .setResponseCode(200)
            .setBody((new Gson()).toJson(listResponse))
            .addHeader("Content-Type", "application/json"));

    Iterable<HouseTable> returnList = htsRepo.findAll();
    assertThat(returnList).hasSize(3);
}
/** Static factory returning a fresh, empty {@code Inner<T>} instance. */
public static <T> Inner<T> create() {
    return new Inner<T>();
}
// Filtering by named fields: only elements with field1 == "pass" AND
// field2 > 50 may pass through; exactly one of the three inputs qualifies.
@Test
@Category(NeedsRunner.class)
public void testFilterFieldsByName() {
    // Pass only elements where field1 == "pass && field2 > 50.
    PCollection<AutoValue_FilterTest_Simple> filtered =
        pipeline
            .apply(
                Create.of(
                    new AutoValue_FilterTest_Simple("pass", 52, 2),
                    new AutoValue_FilterTest_Simple("pass", 2, 2),
                    new AutoValue_FilterTest_Simple("fail", 100, 100)))
            .apply(
                Filter.<AutoValue_FilterTest_Simple>create()
                    .whereFieldName("field1", s -> "pass".equals(s))
                    .whereFieldName("field2", (Integer i) -> i > 50));
    PAssert.that(filtered).containsInAnyOrder(new AutoValue_FilterTest_Simple("pass", 52, 2));
    pipeline.run();
}
/**
 * Starts a Vert.x HTTP server and blocks the calling thread, shutting the
 * server and Vert.x down on exit.
 *
 * @return 0 on (eventual) completion
 * @throws Exception if server startup fails
 */
@Override
public Integer doCall() throws Exception {
    Vertx vertx = null;
    HttpServer server = null;
    try {
        CountDownLatch latch = new CountDownLatch(1);
        vertx = Vertx.vertx();
        server = serve(vertx).toCompletableFuture().get();
        // NOTE(review): nothing ever counts this latch down, so await() blocks
        // indefinitely — presumably intentional, to keep serving until the
        // thread is interrupted; confirm this is the intended lifecycle.
        latch.await();
    } finally {
        if (server != null) {
            server.close();
        }
        if (vertx != null) {
            vertx.close();
        }
    }
    return 0;
}
// Disabled smoke test: doCall() blocks serving HTTP, so it cannot run in CI.
@Disabled
@Test
public void testCall() throws Exception {
    cmd().doCall();
}
/**
 * Mines association rules with at least {@code confidence} from the FP-tree,
 * streaming them lazily (sequentially) from the ARM spliterator over a
 * total-support tree built from {@code tree}.
 */
public static Stream<AssociationRule> apply(double confidence, FPTree tree) {
    TotalSupportTree ttree = new TotalSupportTree(tree);
    ARM arm = new ARM(confidence, ttree);
    return StreamSupport.stream(arm.spliterator(), false);
}
// Regression check: the pima dataset at 0.9 confidence yields exactly 6803 rules.
@Test
public void testPima() {
    System.out.println("pima");

    FPTree tree = FPTree.of(20, () -> ItemSetTestData.read("transaction/pima.D38.N768.C2"));
    Stream<AssociationRule> rules = ARM.apply(0.9, tree);
    assertEquals(6803, rules.count());
}
/**
 * Decides whether the request should be retried after {@code response}.
 * Retries only while the configured predicate accepts the response and the
 * execution count has not exceeded {@code maxRetries}; records the computed
 * delay in {@code retryInterval} and notifies the retry/failure consumers.
 *
 * @param response       the response just received
 * @param executionCount number of executions so far (1-based)
 * @param ctx            the HTTP execution context
 * @return true to retry, false to give up
 */
@Override
public boolean retryRequest(HttpResponse response, int executionCount, HttpContext ctx) {
    log.fine(() -> String.format("retryRequest(responseCode='%s', executionCount='%d', ctx='%s'",
                                 response.getStatusLine().getStatusCode(), executionCount, ctx));
    HttpClientContext clientCtx = HttpClientContext.adapt(ctx);
    if (!predicate.test(response, clientCtx)) {
        log.fine(() -> String.format("Not retrying for '%s'", ctx));
        return false;
    }
    if (executionCount > maxRetries) {
        // Budget exhausted: report terminal failure and stop.
        log.fine(() -> String.format("Max retries exceeded for '%s'", ctx));
        retryFailedConsumer.onRetryFailed(response, executionCount, clientCtx);
        return false;
    }
    Duration delay = delaySupplier.getDelay(executionCount);
    log.fine(() -> String.format("Retrying after %s for '%s'", delay, ctx));
    // The interval is consumed elsewhere to actually sleep before the retry.
    retryInterval.set(delay.toMillis());
    retryConsumer.onRetry(response, delay, executionCount, clientCtx);
    return true;
}
// Every retry must notify the retry consumer with the fixed delay, and the
// attempt beyond maxRetries must notify the retry-failed consumer instead.
@Test
@SuppressWarnings("unchecked")
void retry_consumers_are_invoked() {
    RetryConsumer<HttpResponse> retryConsumer = mock(RetryConsumer.class);
    RetryFailedConsumer<HttpResponse> retryFailedConsumer = mock(RetryFailedConsumer.class);
    Duration delay = Duration.ofSeconds(10);
    int maxRetries = 5;
    DelayedResponseLevelRetryHandler handler = DelayedResponseLevelRetryHandler.Builder
            .withFixedDelay(delay, maxRetries)
            .onRetry(retryConsumer)
            .onRetryFailed(retryFailedConsumer)
            .build();

    HttpResponse response = createResponse(HttpStatus.SC_SERVICE_UNAVAILABLE);
    HttpClientContext ctx = new HttpClientContext();

    int lastExecutionCount = maxRetries + 1;
    for (int i = 1; i <= lastExecutionCount; i++) {
        handler.retryRequest(response, i, ctx);
    }

    verify(retryFailedConsumer).onRetryFailed(response, lastExecutionCount, ctx);
    for (int i = 1; i < lastExecutionCount; i++) {
        verify(retryConsumer).onRetry(response, delay, i, ctx);
    }
}
/**
 * Splits {@code list} into at most {@code expectedSize} sub-lists, distributing
 * elements round-robin so the resulting sizes differ by at most one
 * (e.g. 7 elements into 3 lists yields sizes 3, 2, 2).
 *
 * <p>When {@code expectedSize == 1} the returned (singleton) outer list wraps
 * the input list itself — no copy is made, so callers must not rely on
 * isolation from the original in that case.
 *
 * @param list         the list to split; must not be null
 * @param expectedSize maximum number of sub-lists; must be positive
 * @return a list of round-robin chunks, at most {@code expectedSize} of them
 * @throws NullPointerException     if {@code list} is null
 * @throws IllegalArgumentException if {@code expectedSize} is not positive
 */
public static <T> List<List<T>> splitBySize(List<T> list, int expectedSize)
        throws NullPointerException, IllegalArgumentException {
    // Stdlib-equivalent of Guava's Preconditions checks: same exception
    // types and messages, no third-party dependency.
    if (list == null) {
        throw new NullPointerException("list must not be null");
    }
    if (expectedSize <= 0) {
        throw new IllegalArgumentException("expectedSize must larger than 0");
    }

    if (1 == expectedSize) {
        // Fast path: a single chunk is the input list itself (no copy).
        return Collections.singletonList(list);
    }

    // Never create more chunks than there are elements.
    int splitSize = Math.min(expectedSize, list.size());
    List<List<T>> result = new ArrayList<List<T>>(splitSize);
    for (int i = 0; i < splitSize; i++) {
        result.add(new ArrayList<>());
    }

    // Round-robin distribution keeps chunk sizes within one of each other.
    int index = 0;
    for (T t : list) {
        result.get(index).add(t);
        index = (index + 1) % splitSize;
    }
    return result;
}
// Seven elements into three chunks must split round-robin as sizes 3, 2, 2.
@Test
public void testSplitBySizeNormal() {
    List<Integer> lists = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7);
    int expectSize = 3;
    List<List<Integer>> splitLists = ListUtil.splitBySize(lists, expectSize);
    Assert.assertEquals(splitLists.size(), 3);
    Assert.assertEquals(splitLists.get(0).size(), 3);
    Assert.assertEquals(splitLists.get(1).size(), 2);
    Assert.assertEquals(splitLists.get(2).size(), 2);
}
/** Adds the (info, value) tag to the shared tag cache and returns the resulting tag. */
public static MetricsTag tag(MetricsInfo info, String value) {
    return Tags.INSTANCE.cache.add(info, value);
}
// The tag cache is bounded: exceeding MAX_TAG_NAMES evicts the oldest name,
// and exceeding MAX_TAG_VALUES for one name evicts its oldest value.
@Test
public void testTagOverflow() {
    MetricsTag t0 = tag("t0", "t desc", "t value");
    for (int i = 0; i < MAX_TAG_NAMES + 1; ++i) {
        tag("t"+ i, "t desc", "t value");
        if (i < MAX_TAG_NAMES) {
            assertSame("t0 still there", t0, tag("t0", "t desc", "t value"));
        }
    }
    assertNotSame("t0 is gone", t0, tag("t0", "t desc", "t value"));

    MetricsTag t1 = tag("t1", "t desc", "t value");
    for (int i = 0; i < MAX_TAG_VALUES; ++i) {
        tag("t1", "t desc", "t value"+ i);
        if (i < MAX_TAG_VALUES -1) {
            assertSame("t1 is still there", t1, tag("t1", "t desc", "t value"));
        }
    }
    assertNotSame("t1 is gone", t1, tag("t1", "t desc", "t value"));
}
/** Decodes a hexadecimal string into raw bytes; delegates to the CharSequence overload. */
public static byte[] decodeHex(String hexStr) {
    return decodeHex((CharSequence) hexStr);
}
// Decoding must be case-insensitive: lower- and upper-case hex yield the same bytes.
@Test
public void decodeTest(){
    final String str = "e8c670380cb220095268f40221fc748fa6ac39d6e930e63c30da68bad97f885d";
    assertArrayEquals(HexUtil.decodeHex(str), HexUtil.decodeHex(str.toUpperCase()));
}
/**
 * Downloads a blob. When the exchange's message body carries no OutputStream,
 * the blob is returned as an InputStream (with headers from its properties);
 * otherwise the blob is written into that OutputStream via downloadWithResponse
 * and the response headers are propagated.
 *
 * @param exchange the Camel exchange; body may supply a target OutputStream
 * @return response wrapping either the blob InputStream or the OutputStream
 * @throws IOException if closing the output stream fails
 */
public BlobOperationResponse getBlob(final Exchange exchange) throws IOException {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Getting a blob [{}] from exchange [{}]...", configurationProxy.getBlobName(exchange), exchange);
    }
    final Message message = BlobUtils.getInMessage(exchange);
    final OutputStream outputStream = ObjectHelper.isEmpty(message) ? null : message.getBody(OutputStream.class);
    final BlobRange blobRange = configurationProxy.getBlobRange(exchange);
    final BlobCommonRequestOptions blobCommonRequestOptions = getCommonRequestOptions(exchange);

    if (outputStream == null) {
        // No target stream supplied: hand the caller an input stream instead.
        final Map<String, Object> blobInputStream
                = client.openInputStream(blobRange, blobCommonRequestOptions.getBlobRequestConditions());
        final BlobExchangeHeaders blobExchangeHeaders = BlobExchangeHeaders
                .createBlobExchangeHeadersFromBlobProperties((BlobProperties) blobInputStream.get("properties"));

        return BlobOperationResponse.create(blobInputStream.get("inputStream"), blobExchangeHeaders.toMap());
    }
    // we have an outputStream set, so we use it
    final DownloadRetryOptions downloadRetryOptions = getDownloadRetryOptions(configurationProxy);

    try {
        final ResponseBase<BlobDownloadHeaders, Void> response
                = client.downloadWithResponse(outputStream, blobRange, downloadRetryOptions,
                        blobCommonRequestOptions.getBlobRequestConditions(),
                        blobCommonRequestOptions.getContentMD5() != null,
                        blobCommonRequestOptions.getTimeout());

        final BlobExchangeHeaders blobExchangeHeaders
                = BlobExchangeHeaders.createBlobExchangeHeadersFromBlobDownloadHeaders(response.getDeserializedHeaders())
                        .httpHeaders(response.getHeaders());

        return BlobOperationResponse.create(outputStream, blobExchangeHeaders.toMap());
    } finally {
        // Close only when configured: the caller may want to keep writing.
        if (configurationProxy.getConfiguration().isCloseStreamAfterRead()) {
            outputStream.close();
        }
    }
}
// Covers all three getBlob paths: no exchange (input-stream result), exchange
// without an output stream, and exchange whose body supplies an OutputStream
// (downloadWithResponse path with propagated download headers).
@Test
void testGetBlob() throws IOException {
    // mocking
    final Map<String, Object> mockedResults = new HashMap<>();
    mockedResults.put("inputStream", new ByteArrayInputStream("testInput".getBytes(Charset.defaultCharset())));
    mockedResults.put("properties", createBlobProperties());

    when(client.openInputStream(any(), any())).thenReturn(mockedResults);

    final Exchange exchange = new DefaultExchange(context);

    // first: test with no exchange provided
    final BlobOperations operations = new BlobOperations(configuration, client);

    final BlobOperationResponse response = operations.getBlob(null);

    assertNotNull(response);
    assertNotNull(response.getBody());
    assertNotNull(response.getHeaders());
    assertNotNull(response.getHeaders().get(BlobConstants.CREATION_TIME));
    assertEquals("testInput", new BufferedReader(new InputStreamReader((InputStream) response.getBody())).readLine());

    // second: test with exchange provided
    configuration.setBlobType(BlobType.blockblob);
    final BlobOperationResponse response2 = operations.getBlob(exchange);

    assertNotNull(response2);
    assertNotNull(response2.getBody());
    assertNotNull(response2.getHeaders());
    assertNotNull(response2.getHeaders().get(BlobConstants.CREATION_TIME));

    // third: test with exchange provided but with outputstream set
    // mocking
    final ResponseBase<BlobDownloadHeaders, Void> mockedResults2 = new ResponseBase<>(
        null, 200, new HttpHeaders().set("x-test-header", "123"), null,
        new BlobDownloadHeaders().setETag("tag1"));

    when(client.downloadWithResponse(any(), any(), any(), any(), anyBoolean(), any())).thenReturn(mockedResults2);

    exchange.getIn().setBody(new ByteArrayOutputStream());

    final BlobOperationResponse response3 = operations.getBlob(exchange);

    assertNotNull(response3);
    assertNotNull(response3.getBody());
    assertNotNull(response3.getHeaders());
    assertEquals("tag1", response3.getHeaders().get(BlobConstants.E_TAG));
}
/**
 * Backward-fetches values for {@code key} in the given time range from the
 * first underlying store that yields any results; exhausted iterators are
 * closed and the next store is tried. Returns an empty iterator when no store
 * has data.
 *
 * @throws NullPointerException       if {@code key} is null
 * @throws InvalidStateStoreException if an underlying store has migrated
 */
@Override
public WindowStoreIterator<V> backwardFetch(final K key, final Instant timeFrom, final Instant timeTo)
    throws IllegalArgumentException {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
    for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
        try {
            final WindowStoreIterator<V> result = windowStore.backwardFetch(key, timeFrom, timeTo);
            if (!result.hasNext()) {
                // Empty iterator: release it and fall through to the next store.
                result.close();
            } else {
                return result;
            }
        } catch (final InvalidStateStoreException e) {
            // NOTE(review): the original cause `e` is dropped here; consider
            // chaining it if InvalidStateStoreException offers a
            // (String, Throwable) constructor — confirm before changing.
            throw new InvalidStateStoreException(
                "State store is not available anymore and may have been migrated to another instance; " +
                    "please re-discover its location from the state metadata.");
        }
    }
    return KeyValueIterators.emptyWindowStoreIterator();
}
// Backward range fetch with a null upper key bound must span both underlying
// stores and return results in backward (newest-first) order per store.
@Test
public void shouldBackwardFetchKeyRangeAcrossStoresWithNullKeyTo() {
    final ReadOnlyWindowStoreStub<String, String> secondUnderlying = new ReadOnlyWindowStoreStub<>(WINDOW_SIZE);
    stubProviderTwo.addStore(storeName, secondUnderlying);
    underlyingWindowStore.put("a", "a", 0L);
    secondUnderlying.put("b", "b", 10L);
    secondUnderlying.put("c", "c", 10L);
    final List<KeyValue<Windowed<String>, String>> results =
        StreamsTestUtils.toList(windowStore.backwardFetch("a", null, ofEpochMilli(0), ofEpochMilli(10)));
    assertThat(results, equalTo(Arrays.asList(
        KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"),
        KeyValue.pair(new Windowed<>("c", new TimeWindow(10, 10 + WINDOW_SIZE)), "c"),
        KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b"))));
}
public static long getNumSector(String requestSize, String sectorSize) { Double memSize = Double.parseDouble(requestSize); Double sectorBytes = Double.parseDouble(sectorSize); Double nSectors = memSize / sectorBytes; Double memSizeKB = memSize / 1024; Double memSizeGB = memSize / (1024 * 1024 * 1024); Double memSize100GB = memSizeGB / 100; // allocation bitmap file: one bit per sector Double allocBitmapSize = nSectors / 8; // extend overflow file: 4MB, plus 4MB per 100GB Double extOverflowFileSize = memSize100GB * 1024 * 1024 * 4; // journal file: 8MB, plus 8MB per 100GB Double journalFileSize = memSize100GB * 1024 * 1024 * 8; // catalog file: 10bytes per KB Double catalogFileSize = memSizeKB * 10; // hot files: 5bytes per KB Double hotFileSize = memSizeKB * 5; // quota users file and quota groups file Double quotaUsersFileSize = (memSizeGB * 256 + 1) * 64; Double quotaGroupsFileSize = (memSizeGB * 32 + 1) * 64; Double metadataSize = allocBitmapSize + extOverflowFileSize + journalFileSize + catalogFileSize + hotFileSize + quotaUsersFileSize + quotaGroupsFileSize; Double allocSize = memSize + metadataSize; Double numSectors = allocSize / sectorBytes; System.out.println(numSectors.longValue() + 1); // round up return numSectors.longValue() + 1; }
// A zero-byte request still needs one sector because the count is rounded up.
@Test
public void getSectorTest0() {
    String testRequestSize = "0";
    String testSectorSize = "512";
    long result = HFSUtils.getNumSector(testRequestSize, testSectorSize);
    assertEquals(1L, result);
}
/**
 * Chooses a relative-reference URL builder from the base file's headers:
 * GitLab-specific when the GitLab file-name header (exact or lower-case key)
 * is present, otherwise the simple default builder.
 */
public static RelativeReferenceURLBuilder getRelativeReferenceURLBuilder(
        Map<String, List<String>> baseFileProperties) {
    if (baseFileProperties != null) {
        if (baseFileProperties.containsKey(GitLabReferenceURLBuilder.GITLAB_FILE_NAME_HEADER)
                || baseFileProperties.containsKey(GitLabReferenceURLBuilder.GITLAB_FILE_NAME_HEADER.toLowerCase())) {
            log.debug("Found a GitLab File specific header, returning a GitLabReferenceURLBuilder");
            return new GitLabReferenceURLBuilder();
        }
    }

    log.debug("Returning a SimpleReferenceURLBuilder");
    return new SimpleReferenceURLBuilder();
}
// Null or unrelated headers yield the simple builder; the GitLab file-name
// header — in either original or lower case — selects the GitLab builder.
@Test
void testGetRelativeReferenceURLBuilder() {
    RelativeReferenceURLBuilder builder = RelativeReferenceURLBuilderFactory.getRelativeReferenceURLBuilder(null);
    assertTrue(builder instanceof SimpleReferenceURLBuilder);

    Map<String, List<String>> properties = Map.of("key", List.of("value1", "value2"));
    builder = RelativeReferenceURLBuilderFactory.getRelativeReferenceURLBuilder(properties);
    assertTrue(builder instanceof SimpleReferenceURLBuilder);

    properties = Map.of(GitLabReferenceURLBuilder.GITLAB_FILE_NAME_HEADER, List.of("value1", "value2"));
    builder = RelativeReferenceURLBuilderFactory.getRelativeReferenceURLBuilder(properties);
    assertTrue(builder instanceof GitLabReferenceURLBuilder);

    properties = Map.of(GitLabReferenceURLBuilder.GITLAB_FILE_NAME_HEADER.toLowerCase(), List.of("value1", "value2"));
    builder = RelativeReferenceURLBuilderFactory.getRelativeReferenceURLBuilder(properties);
    assertTrue(builder instanceof GitLabReferenceURLBuilder);
}
/**
 * Copies transport header entries into the SofaRequest: tracer headers, baggage
 * headers (when enabled), and the remaining plain entries into the request
 * props — either added fresh or merged into/over the existing prop map.
 */
@VisibleForTesting
protected void parseRequestHeader(Map<String, String> headerMap, Object sofaRequest) {
    if (sofaRequest instanceof SofaRequest) {
        // handle tracer headers
        parseRequestHeader(RemotingConstants.RPC_TRACE_NAME, headerMap, (SofaRequest) sofaRequest);
        if (RpcInvokeContext.isBaggageEnable()) {
            parseRequestHeader(RemotingConstants.RPC_REQUEST_BAGGAGE, headerMap, (SofaRequest) sofaRequest);
        }
        Map<String, Object> requestProps = ((SofaRequest) sofaRequest).getRequestProps();
        if (requestProps == null) {
            // No existing props: add every header entry directly.
            for (Map.Entry<String, String> entry : headerMap.entrySet()) {
                ((SofaRequest) sofaRequest).addRequestProp(entry.getKey(), entry.getValue());
            }
        } else {
            // Merge header values over the existing prop map.
            replaceWithHeaderMap(headerMap, requestProps);
        }
    }
}
// Parsing twice must be idempotent: header values overwrite matching props,
// untouched props survive, and dotted trace/baggage headers are grouped into
// their respective nested maps.
@Test
public void testParseRequestHeader(){
    Map<String, String> headerMap = new HashMap<>();
    headerMap.put("testKey1","testValue1");
    headerMap.put("rpc_trace_context.sofaTraceId", "traceId");
    headerMap.put("rpc_trace_context.sofaRpcId", "rpcId");
    headerMap.put("rpc_req_baggage.testBaggageKey1", "testBaggageValue1");
    headerMap.put("rpc_req_baggage.testBaggageKey2", "testBaggageValue2");
    SofaRpcSerialization sofaRpcSerialization = new SofaRpcSerialization();
    SofaRequest sofaRequest = new SofaRequest();
    sofaRequest.addRequestProp("testKey1", "testValue11");
    sofaRequest.addRequestProp("testKey2", "testValue2");
    sofaRpcSerialization.parseRequestHeader(headerMap, sofaRequest);
    sofaRpcSerialization.parseRequestHeader(headerMap, sofaRequest);
    Assert.assertEquals("testValue1", sofaRequest.getRequestProp("testKey1"));
    Assert.assertEquals("testValue2", sofaRequest.getRequestProp("testKey2"));
    Object traceMap = sofaRequest.getRequestProp(RemotingConstants.RPC_TRACE_NAME);
    Assert.assertTrue(traceMap instanceof Map);
    Assert.assertEquals("traceId",((Map)traceMap).get("sofaTraceId"));
    Assert.assertEquals("rpcId",((Map)traceMap).get("sofaRpcId"));
    Object baggageMap = sofaRequest.getRequestProp(RemotingConstants.RPC_REQUEST_BAGGAGE);
    Assert.assertTrue(baggageMap instanceof Map);
    Assert.assertEquals("testBaggageValue1", ((Map) baggageMap).get("testBaggageKey1"));
    Assert.assertEquals("testBaggageValue2", ((Map) baggageMap).get("testBaggageKey2"));
}
/**
 * Fuzzy-searches permissions by (optional) role, paged. Requires console
 * read access on the permissions resource.
 *
 * @param pageNo   1-based page number
 * @param pageSize page size
 * @param role     role name fragment; empty matches all roles
 * @return the matching page of permission records
 */
@GetMapping(params = "search=blur")
@Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "permissions", action = ActionTypes.READ)
public Page<PermissionInfo> fuzzySearchPermission(@RequestParam int pageNo, @RequestParam int pageSize,
        @RequestParam(name = "role", defaultValue = StringUtils.EMPTY) String role) {
    return nacosRoleService.findPermissionsLike4Page(role, pageNo, pageSize);
}
// The controller must return exactly the page produced by the role service.
@Test
void testFuzzySearchPermission() {
    Page<PermissionInfo> permissionInfoPage = new Page<PermissionInfo>();
    when(nacosRoleService.findPermissionsLike4Page(anyString(), anyInt(), anyInt())).thenReturn(permissionInfoPage);
    Page<PermissionInfo> permissions = permissionController.fuzzySearchPermission(1, 10, "admin");
    assertEquals(permissionInfoPage, permissions);
}
/**
 * Lists a remote directory over SFTP, translating each entry's attributes and
 * type (directory/symlink/file), filtering via {@code post}, and notifying the
 * listener per configured chunk size. The directory handle is closed by
 * try-with-resources; I/O failures map to a background exception.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final AttributedList<Path> children = new AttributedList<Path>();
    try (RemoteDirectory handle = session.sftp().openDir(directory.getAbsolute())) {
        for(List<RemoteResourceInfo> list : ListUtils.partition(handle.scan(new RemoteResourceFilter() {
            @Override
            public boolean accept(RemoteResourceInfo remoteResourceInfo) {
                // Accept everything; filtering happens later via post().
                return true;
            }
        }), new HostPreferences(session.getHost()).getInteger("sftp.listing.chunksize"))) {
            for(RemoteResourceInfo f : list) {
                final PathAttributes attr = attributes.toAttributes(f.getAttributes());
                final EnumSet<Path.Type> type = EnumSet.noneOf(Path.Type.class);
                switch(f.getAttributes().getType()) {
                    case DIRECTORY:
                        type.add(Path.Type.directory);
                        break;
                    case SYMLINK:
                        type.add(Path.Type.symboliclink);
                        break;
                    default:
                        type.add(Path.Type.file);
                        break;
                }
                final Path file = new Path(directory, f.getName(), type, attr);
                if(this.post(file)) {
                    children.add(file);
                    // Report progress in chunks as entries accumulate.
                    listener.chunk(directory, children);
                }
            }
        }
        return children;
    }
    catch(IOException e) {
        throw new SFTPExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
}
// Listing a directory that does not exist on the server must raise NotfoundException.
@Test(expected = NotfoundException.class)
public void testListNotfound() throws Exception {
    final Path f = new Path(UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory));
    final SFTPListService service = new SFTPListService(session);
    service.list(f, new DisabledListProgressListener());
}
/** Serializes the update requirement to JSON without pretty-printing. */
public static String toJson(UpdateRequirement updateRequirement) {
    return toJson(updateRequirement, false);
}
// AssertTableDoesNotExist has no payload beyond its type tag in JSON.
@Test
public void testAssertTableDoesNotExistToJson() {
    String expected = "{\"type\":\"assert-create\"}";
    UpdateRequirement actual = new UpdateRequirement.AssertTableDoesNotExist();
    assertThat(UpdateRequirementParser.toJson(actual))
        .as("AssertTableDoesNotExist should convert to the correct JSON value")
        .isEqualTo(expected);
}
/**
 * Translates the pipeline, stages artifacts and the pipeline proto to GCS, and submits the
 * resulting job to the Dataflow service.
 *
 * <p>Side effects: this method mutates {@code options} (experiments, streaming flags,
 * pipeline URL) and may mutate {@code pipeline} via transform replacement and projection
 * pushdown. When {@code --templateLocation} is set, the job spec is written to the template
 * file and a {@link DataflowTemplateJob} is returned without contacting the service.
 *
 * @param pipeline the pipeline to execute
 * @return a handle to the submitted (or templated) job
 * @throws DataflowJobAlreadyUpdatedException if updating a job that was already updated
 * @throws DataflowJobAlreadyExistsException if a job with the same name is already active
 */
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
  // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
  // to Runner v2.
  if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
    List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
    if (!experiments.contains("use_runner_v2")) {
      LOG.info(
          "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
              + " transforms or pipeline needed a transform upgrade.");
      options.setExperiments(
          ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
    }
  }
  if (useUnifiedWorker(options)) {
    // Runner v2 cannot be simultaneously requested and disabled.
    if (hasExperiment(options, "disable_runner_v2")
        || hasExperiment(options, "disable_runner_v2_until_2023")
        || hasExperiment(options, "disable_prime_runner_v2")) {
      throw new IllegalArgumentException(
          "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
    }
    // Normalize the full set of experiments implied by unified-worker mode.
    List<String> experiments =
        new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
    if (!experiments.contains("use_runner_v2")) {
      experiments.add("use_runner_v2");
    }
    if (!experiments.contains("use_unified_worker")) {
      experiments.add("use_unified_worker");
    }
    if (!experiments.contains("beam_fn_api")) {
      experiments.add("beam_fn_api");
    }
    if (!experiments.contains("use_portable_job_submission")) {
      experiments.add("use_portable_job_submission");
    }
    options.setExperiments(ImmutableList.copyOf(experiments));
  }

  logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
  logWarningIfBigqueryDLQUnused(pipeline);

  if (shouldActAsStreaming(pipeline)) {
    options.setStreaming(true);

    if (useUnifiedWorker(options)) {
      options.setEnableStreamingEngine(true);
      // NOTE(review): the additions below modify a local copy but are never written back
      // via options.setExperiments(...) — confirm whether this is intentional upstream.
      List<String> experiments =
          new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
      if (!experiments.contains("enable_streaming_engine")) {
        experiments.add("enable_streaming_engine");
      }
      if (!experiments.contains("enable_windmill_service")) {
        experiments.add("enable_windmill_service");
      }
    }
  }

  if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
    ProjectionPushdownOptimizer.optimize(pipeline);
  }

  LOG.info(
      "Executing pipeline on the Dataflow Service, which will have billing implications "
          + "related to Google Compute Engine usage and other Google Cloud Services.");

  DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
  String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);

  // This incorrectly puns the worker harness container image (which implements v1beta3 API)
  // with the SDK harness image (which implements Fn API).
  //
  // The same Environment is used in different and contradictory ways, depending on whether
  // it is a v1 or v2 job submission.
  RunnerApi.Environment defaultEnvironmentForDataflow =
      Environments.createDockerEnvironment(workerHarnessContainerImageURL);

  // The SdkComponents for portable an non-portable job submission must be kept distinct. Both
  // need the default environment.
  SdkComponents portableComponents = SdkComponents.create();
  portableComponents.registerEnvironment(
      defaultEnvironmentForDataflow
          .toBuilder()
          .addAllDependencies(getDefaultArtifacts())
          .addAllCapabilities(Environments.getJavaCapabilities())
          .build());

  RunnerApi.Pipeline portablePipelineProto =
      PipelineTranslation.toProto(pipeline, portableComponents, false);
  // Note that `stageArtifacts` has to be called before `resolveArtifact` because
  // `resolveArtifact` updates local paths to staged paths in pipeline proto.
  portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
  List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
  portablePipelineProto = resolveArtifacts(portablePipelineProto);
  portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "Portable pipeline proto:\n{}",
        TextFormat.printer().printToString(portablePipelineProto));
  }
  // Stage the portable pipeline proto, retrieving the staged pipeline path, then update
  // the options on the new job
  // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
  LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
  byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();

  DataflowPackage stagedPipeline =
      options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
  dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());

  if (useUnifiedWorker(options)) {
    LOG.info("Skipping v1 transform replacements since job will run on v2.");
  } else {
    // Now rewrite things to be as needed for v1 (mutates the pipeline)
    // This way the job submitted is valid for v1 and v2, simultaneously
    replaceV1Transforms(pipeline);
  }
  // Capture the SdkComponents for look up during step translations
  SdkComponents dataflowV1Components = SdkComponents.create();
  dataflowV1Components.registerEnvironment(
      defaultEnvironmentForDataflow
          .toBuilder()
          .addAllDependencies(getDefaultArtifacts())
          .addAllCapabilities(Environments.getJavaCapabilities())
          .build());
  // No need to perform transform upgrading for the Runner v1 proto.
  RunnerApi.Pipeline dataflowV1PipelineProto =
      PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);

  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "Dataflow v1 pipeline proto:\n{}",
        TextFormat.printer().printToString(dataflowV1PipelineProto));
  }

  // Set a unique client_request_id in the CreateJob request.
  // This is used to ensure idempotence of job creation across retried
  // attempts to create a job. Specifically, if the service returns a job with
  // a different client_request_id, it means the returned one is a different
  // job previously created with the same job name, and that the job creation
  // has been effectively rejected. The SDK should return
  // Error::Already_Exists to user in that case.
  int randomNum = new Random().nextInt(9000) + 1000;
  String requestId =
      DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
              .withZone(DateTimeZone.UTC)
              .print(DateTimeUtils.currentTimeMillis())
          + "_"
          + randomNum;

  JobSpecification jobSpecification =
      translator.translate(
          pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);

  if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
    List<String> experiments =
        firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
    if (!experiments.contains("use_staged_dataflow_worker_jar")) {
      dataflowOptions.setExperiments(
          ImmutableList.<String>builder()
              .addAll(experiments)
              .add("use_staged_dataflow_worker_jar")
              .build());
    }
  }

  Job newJob = jobSpecification.getJob();
  try {
    newJob
        .getEnvironment()
        .setSdkPipelineOptions(
            MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
  } catch (IOException e) {
    throw new IllegalArgumentException(
        "PipelineOptions specified failed to serialize to JSON.", e);
  }
  newJob.setClientRequestId(requestId);

  DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
  String version = dataflowRunnerInfo.getVersion();
  checkState(
      !"${pom.version}".equals(version),
      "Unable to submit a job to the Dataflow service with unset version ${pom.version}");
  LOG.info("Dataflow SDK version: {}", version);

  newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
  // The Dataflow Service may write to the temporary directory directly, so
  // must be verified.
  if (!isNullOrEmpty(options.getGcpTempLocation())) {
    newJob
        .getEnvironment()
        .setTempStoragePrefix(
            dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
  }
  newJob.getEnvironment().setDataset(options.getTempDatasetId());

  if (options.getWorkerRegion() != null) {
    newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
  }
  if (options.getWorkerZone() != null) {
    newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
  }

  if (options.getFlexRSGoal()
      == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
    newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
  } else if (options.getFlexRSGoal()
      == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
    newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
  }

  // Represent the minCpuPlatform pipeline option as an experiment, if not already present.
  if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
    List<String> experiments =
        firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());

    List<String> minCpuFlags =
        experiments.stream()
            .filter(p -> p.startsWith("min_cpu_platform"))
            .collect(Collectors.toList());

    if (minCpuFlags.isEmpty()) {
      dataflowOptions.setExperiments(
          ImmutableList.<String>builder()
              .addAll(experiments)
              .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
              .build());
    } else {
      LOG.warn(
          "Flag min_cpu_platform is defined in both top level PipelineOption, "
              + "as well as under experiments. Proceed using {}.",
          minCpuFlags.get(0));
    }
  }

  newJob
      .getEnvironment()
      .setExperiments(
          ImmutableList.copyOf(
              firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));

  // Set the Docker container image that executes Dataflow worker harness, residing in Google
  // Container Registry. Translator is guaranteed to create a worker pool prior to this point.
  // For runner_v1, only worker_harness_container is set.
  // For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
  // value.
  String containerImage = getContainerImageForJob(options);
  for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
    workerPool.setWorkerHarnessContainerImage(containerImage);
  }

  configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);

  newJob.getEnvironment().setVersion(getEnvironmentVersion(options));

  if (hooks != null) {
    hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
  }

  // enable upload_graph when the graph is too large
  byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
  int jobGraphByteSize = jobGraphBytes.length;
  if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
      && !hasExperiment(options, "upload_graph")
      && !useUnifiedWorker(options)) {
    List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
    options.setExperiments(
        ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
    LOG.info(
        "The job graph size ({} in bytes) is larger than {}. Automatically add "
            + "the upload_graph option to experiments.",
        jobGraphByteSize,
        CREATE_JOB_REQUEST_LIMIT_BYTES);
  }

  if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
    ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
    while (experiments.remove("upload_graph")) {}
    options.setExperiments(experiments);
    LOG.warn(
        "The upload_graph experiment was specified, but it does not apply "
            + "to runner v2 jobs. Option has been automatically removed.");
  }

  // Upload the job to GCS and remove the graph object from the API call. The graph
  // will be downloaded from GCS by the service.
  if (hasExperiment(options, "upload_graph")) {
    DataflowPackage stagedGraph =
        options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
    newJob.getSteps().clear();
    newJob.setStepsLocation(stagedGraph.getLocation());
  }

  // Write the job spec to a local or GCS file (template creation / debugging) when requested.
  if (!isNullOrEmpty(options.getDataflowJobFile())
      || !isNullOrEmpty(options.getTemplateLocation())) {
    boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
    if (isTemplate) {
      checkArgument(
          isNullOrEmpty(options.getDataflowJobFile()),
          "--dataflowJobFile and --templateLocation are mutually exclusive.");
    }
    String fileLocation =
        firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
    checkArgument(
        fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
        "Location must be local or on Cloud Storage, got %s.",
        fileLocation);
    ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
    String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
    try (PrintWriter printWriter =
        new PrintWriter(
            new BufferedWriter(
                new OutputStreamWriter(
                    Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
                    UTF_8)))) {
      printWriter.print(workSpecJson);
      LOG.info("Printed job specification to {}", fileLocation);
    } catch (IOException ex) {
      String error = String.format("Cannot create output file at %s", fileLocation);
      if (isTemplate) {
        // Template creation must fail loudly; a plain job file is best-effort only.
        throw new RuntimeException(error, ex);
      } else {
        LOG.warn(error, ex);
      }
    }
    if (isTemplate) {
      LOG.info("Template successfully created.");
      return new DataflowTemplateJob();
    }
  }

  String jobIdToUpdate = null;
  if (options.isUpdate()) {
    jobIdToUpdate = getJobIdFromName(options.getJobName());
    newJob.setTransformNameMapping(options.getTransformNameMapping());
    newJob.setReplaceJobId(jobIdToUpdate);
  }
  if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
    newJob.setTransformNameMapping(options.getTransformNameMapping());
    newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
  }

  Job jobResult;
  try {
    jobResult = dataflowClient.createJob(newJob);
  } catch (GoogleJsonResponseException e) {
    String errorMessages = "Unexpected errors";
    if (e.getDetails() != null) {
      if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
        errorMessages =
            "The size of the serialized JSON representation of the pipeline "
                + "exceeds the allowable limit. "
                + "For more information, please see the documentation on job submission:\n"
                + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
      } else {
        errorMessages = e.getDetails().getMessage();
      }
    }
    throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
  } catch (IOException e) {
    throw new RuntimeException("Failed to create a workflow job", e);
  }

  // Use a raw client for post-launch monitoring, as status calls may fail
  // regularly and need not be retried automatically.
  DataflowPipelineJob dataflowPipelineJob =
      new DataflowPipelineJob(
          DataflowClient.create(options),
          jobResult.getId(),
          options,
          jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
          portablePipelineProto);

  // If the service returned client request id, the SDK needs to compare it
  // with the original id generated in the request, if they are not the same
  // (i.e., the returned job is not created by this request), throw
  // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
  // depending on whether this is a reload or not.
  if (jobResult.getClientRequestId() != null
      && !jobResult.getClientRequestId().isEmpty()
      && !jobResult.getClientRequestId().equals(requestId)) {
    // If updating a job.
    if (options.isUpdate()) {
      throw new DataflowJobAlreadyUpdatedException(
          dataflowPipelineJob,
          String.format(
              "The job named %s with id: %s has already been updated into job id: %s "
                  + "and cannot be updated again.",
              newJob.getName(), jobIdToUpdate, jobResult.getId()));
    } else {
      throw new DataflowJobAlreadyExistsException(
          dataflowPipelineJob,
          String.format(
              "There is already an active job named %s with id: %s. If you want to submit a"
                  + " second job, try again by setting a different name using --jobName.",
              newJob.getName(), jobResult.getId()));
    }
  }

  LOG.info(
      "To access the Dataflow monitoring console, please navigate to {}",
      MonitoringUtil.getJobMonitoringPageURL(
          options.getProject(), options.getRegion(), jobResult.getId()));
  LOG.info("Submitted job: {}", jobResult.getId());

  LOG.info(
      "To cancel the job using the 'gcloud' tool, run:\n> {}",
      MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));

  return dataflowPipelineJob;
}
@Test(expected = RuntimeException.class)
public void testRunWithMissingFiles() throws IOException {
    // Point --filesToStage at a path that cannot exist so that staging fails at run().
    final File missingFile = new File("/this/is/not/a/path/that/will/exist");
    final String alias = "alias.txt";

    when(mockGcsUtil.getObjects(anyListOf(GcsPath.class)))
        .thenReturn(
            ImmutableList.of(
                GcsUtil.StorageObjectOrIOException.create(new FileNotFoundException("some/path"))));

    DataflowPipelineOptions options = PipelineOptionsFactory.as(DataflowPipelineOptions.class);
    options.setFilesToStage(ImmutableList.of(alias + "=" + missingFile.getAbsolutePath()));
    options.setStagingLocation(VALID_STAGING_BUCKET);
    options.setTempLocation(VALID_TEMP_BUCKET);
    options.setTempDatasetId("somedataset");
    options.setProject(PROJECT_ID);
    options.setRegion(REGION_ID);
    options.setJobName("job");
    options.setDataflowClient(buildMockDataflow(mockJobs));
    options.setGcsUtil(mockGcsUtil);
    options.setGcpCredential(new TestCredential());

    // Any GCS write goes to a throwaway local temp file.
    when(mockGcsUtil.create(any(GcsPath.class), any(GcsUtil.CreateOptions.class)))
        .then(
            invocation ->
                FileChannel.open(
                    Files.createTempFile("channel-", ".tmp"),
                    StandardOpenOption.CREATE,
                    StandardOpenOption.WRITE,
                    StandardOpenOption.DELETE_ON_CLOSE));

    buildDataflowPipeline(options).run();
}
/**
 * Convenience overload checking meta-annotation by {@link Class} rather than by fully
 * qualified class name; delegates to {@code isMetaAnnotatedWith(String)}.
 *
 * @param type the annotation type to look for transitively
 * @return {@code true} if this class is meta-annotated with the given type
 */
@Override
@PublicAPI(usage = ACCESS)
public boolean isMetaAnnotatedWith(Class<? extends Annotation> type) {
    return isMetaAnnotatedWith(type.getName());
}
@Test
public void isMetaAnnotatedWith_predicate() {
    JavaClass parent = importClassesWithContext(Parent.class, SomeAnnotation.class).get(Parent.class);

    // A predicate matching anything must report the class as meta-annotated...
    assertThat(parent.isMetaAnnotatedWith(DescribedPredicate.alwaysTrue()))
            .as("predicate matches").isTrue();
    // ...while a predicate matching nothing must not.
    assertThat(parent.isMetaAnnotatedWith(DescribedPredicate.alwaysFalse()))
            .as("predicate matches").isFalse();
}
/**
 * Fetches the configuration info of a single connector from the Kafka Connect REST API
 * ({@code GET /connectors/<name>}).
 *
 * @param connector the connector name
 * @return the response, carrying either the {@link ConnectorInfo} or an error message
 * @throws KsqlServerException if the request could not be issued at all
 */
@Override
public ConnectResponse<ConnectorInfo> describe(final String connector) {
  try {
    LOG.debug("Issuing request to Kafka Connect at URI {} to get config for {}",
        connectUri, connector);

    final ConnectResponse<ConnectorInfo> connectResponse = withRetries(() -> Request
        .get(resolveUri(String.format("%s/%s", CONNECTORS, connector)))
        .setHeaders(requestHeaders)
        .responseTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
        .connectTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
        .execute(httpClient)
        .handleResponse(
            createHandler(HttpStatus.SC_OK, new TypeReference<ConnectorInfo>() {},
                Function.identity())));

    // Bug fix: the warning previously said "Could not list connectors" — a copy-paste
    // from the list() method. This method describes a single connector.
    connectResponse.error()
        .ifPresent(error -> LOG.warn("Could not describe connector {}: {}.", connector, error));

    return connectResponse;
  } catch (final Exception e) {
    throw new KsqlServerException(e);
  }
}
@Test public void testDescribe() throws JsonProcessingException { // Given: WireMock.stubFor( WireMock.get(WireMock.urlEqualTo(pathPrefix + "/connectors/foo")) .withHeader(AUTHORIZATION.toString(), new EqualToPattern(AUTH_HEADER)) .withHeader(CUSTOM_HEADER_NAME, new EqualToPattern(CUSTOM_HEADER_VALUE)) .willReturn(WireMock.aResponse() .withStatus(HttpStatus.SC_OK) .withBody(MAPPER.writeValueAsString(SAMPLE_INFO))) ); // When: final ConnectResponse<ConnectorInfo> response = client.describe("foo"); // Then: assertThat(response.datum(), OptionalMatchers.of(is(SAMPLE_INFO))); assertThat("Expected no error!", !response.error().isPresent()); }
@Subscribe
public void onChatMessage(ChatMessage chatMessage) {
    // Icons are loaded lazily; nothing can be substituted until they exist.
    if (iconIds == null) {
        return;
    }
    if (!isEmojiCandidate(chatMessage)) {
        return;
    }
    final MessageNode messageNode = chatMessage.getMessageNode();
    final String updatedMessage = updateMessage(messageNode.getValue());
    // updateMessage(...) returns null when the text contains no emoji triggers.
    if (updatedMessage != null) {
        messageNode.setValue(updatedMessage);
    }
}

/** Returns true for the chat message types in which emoji substitution applies. */
private static boolean isEmojiCandidate(ChatMessage chatMessage) {
    switch (chatMessage.getType()) {
        case PUBLICCHAT:
        case MODCHAT:
        case FRIENDSCHAT:
        case CLAN_CHAT:
        case CLAN_GUEST_CHAT:
        case CLAN_GIM_CHAT:
        case PRIVATECHAT:
        case PRIVATECHATOUT:
        case MODPRIVATECHAT:
            return true;
        default:
            return false;
    }
}
@Test
public void testGtLt() {
    // "<gt>:D<lt>" decodes to ">:D<", which is a single emoji trigger.
    MessageNode messageNode = mock(MessageNode.class);
    when(messageNode.getValue()).thenReturn("<gt>:D<lt>");

    ChatMessage chatMessage = new ChatMessage();
    chatMessage.setType(ChatMessageType.PUBLICCHAT);
    chatMessage.setMessageNode(messageNode);

    emojiPlugin.onChatMessage(chatMessage);

    verify(messageNode).setValue("<img=10>");
}
/**
 * Reads characters from the socket until a full line (terminated by {@code '\n'}) has been
 * received, buffering in chunks of {@code bufferSize} characters.
 *
 * @return the accumulated response, including the trailing newline
 * @throws IOException if the underlying stream ends before a newline is seen
 */
@Override
public String read() throws IOException {
    CharBuffer response = CharBuffer.allocate(bufferSize);
    StringBuilder result = new StringBuilder();
    do {
        response.clear();
        // Bug fix: the read() return value was previously ignored. At end-of-stream the
        // buffer position stays 0 and response.get(-1) below threw an unrelated
        // IndexOutOfBoundsException; surface a proper IOException instead.
        if (reader.read(response) == -1) {
            throw new IOException("Unexpected end of stream while reading response");
        }
        result.append(response.array(), response.arrayOffset(), response.position());
    } while (response.get(response.position() - 1) != '\n');
    return result.toString();
}
@Test public void testIpcService() throws IOException { unixDomainSocket = new UnixDomainSocket(reader, writer, RESPONSE.length()); doAnswer( invocation -> { Object[] args = invocation.getArguments(); ((CharBuffer) args[0]).append(RESPONSE); return RESPONSE.length(); // void method, so return null }) .when(reader) .read(any(CharBuffer.class)); runTest(); }
/**
 * Sets the strictness mode governing how leniently JSON is written.
 *
 * @param strictness the mode to apply to subsequent writes
 * @throws NullPointerException if {@code strictness} is {@code null}
 */
public final void setStrictness(Strictness strictness) {
    this.strictness = Objects.requireNonNull(strictness);
}
@Test
public void testSetStrictness() throws IOException {
    JsonWriter writer = new JsonWriter(new StringWriter());
    writer.setStrictness(Strictness.STRICT);
    // The getter must reflect the mode just set.
    assertThat(writer.getStrictness()).isEqualTo(Strictness.STRICT);
    writer.value(false);
    writer.close();
}
@Override
protected void validateDataImpl(TenantId tenantId, RuleChain ruleChain) {
    validateString("Rule chain name", ruleChain.getName());
    // Default the type for legacy callers that never set it.
    if (ruleChain.getType() == null) {
        ruleChain.setType(RuleChainType.CORE);
    }
    TenantId owner = ruleChain.getTenantId();
    if (owner == null || owner.isNullUid()) {
        throw new DataValidationException("Rule chain should be assigned to tenant!");
    }
    if (!tenantService.tenantExists(owner)) {
        throw new DataValidationException("Rule chain is referencing to non-existent tenant!");
    }
    // At most one root chain of each type is allowed per tenant; the chain itself may
    // already be that root, which is fine.
    if (ruleChain.isRoot()) {
        if (RuleChainType.CORE.equals(ruleChain.getType())) {
            RuleChain currentRoot = ruleChainService.getRootTenantRuleChain(owner);
            if (currentRoot != null && !currentRoot.getId().equals(ruleChain.getId())) {
                throw new DataValidationException("Another root rule chain is present in scope of current tenant!");
            }
        } else if (RuleChainType.EDGE.equals(ruleChain.getType())) {
            RuleChain currentRoot = ruleChainService.getEdgeTemplateRootRuleChain(owner);
            if (currentRoot != null && !currentRoot.getId().equals(ruleChain.getId())) {
                throw new DataValidationException("Another edge template root rule chain is present in scope of current tenant!");
            }
        }
    }
}
@Test
void testValidateNameInvocation() {
    RuleChain ruleChain = new RuleChain();
    ruleChain.setName("generate daily report");
    ruleChain.setType(RuleChainType.CORE);
    ruleChain.setTenantId(tenantId);

    validator.validateDataImpl(tenantId, ruleChain);

    // The chain name must be run through the shared string validator.
    verify(validator).validateString("Rule chain name", ruleChain.getName());
}
/**
 * Sets the number of synchronous backups for this map.
 *
 * <p>The value is validated together with the current async backup count via
 * {@code checkBackupCount}; an invalid combination is rejected with
 * {@link IllegalArgumentException} (see the accompanying tests, e.g. a count of 200).
 *
 * @param backupCount the number of synchronous backups
 * @return this config instance, for call chaining
 */
public MapConfig setBackupCount(final int backupCount) {
    this.backupCount = checkBackupCount(backupCount, asyncBackupCount);
    return this;
}
@Test(expected = IllegalArgumentException.class) public void setBackupCount_tooLarge() { MapConfig config = new MapConfig(); // max allowed is 6 config.setBackupCount(200); }
public Iterator<Optional<Page>> process(SqlFunctionProperties properties, DriverYieldSignal yieldSignal, LocalMemoryContext memoryContext, Page page) {
    // Wrap the page in a work processor and expose it as a yielding iterator.
    return createWorkProcessor(properties, yieldSignal, memoryContext, page).yieldingIterator();
}
@Test
public void testSelectNoneFilter() {
    PageProcessor pageProcessor = new PageProcessor(
        Optional.of(new SelectNoneFilter()),
        ImmutableList.of(createInputPageProjectionWithOutputs(0, BIGINT, 0)));
    LocalMemoryContext memoryContext =
        newSimpleAggregatedMemoryContext().newLocalMemoryContext(PageProcessor.class.getSimpleName());

    Iterator<Optional<Page>> output = pageProcessor.process(
        SESSION.getSqlFunctionProperties(),
        new DriverYieldSignal(),
        memoryContext,
        new Page(createLongSequenceBlock(0, 100)));

    // A filter that selects nothing must retain no memory and emit no pages.
    assertEquals(memoryContext.getBytes(), 0);
    assertEquals(ImmutableList.copyOf(output).size(), 0);
}
/**
 * Explodes the Spring Boot fat JAR into {@code targetExplodedJarRoot} and partitions its
 * contents into container image layers. If the JAR contains a {@code BOOT-INF/layers.idx},
 * layering is delegated to {@code createLayersForLayeredSpringBootJar}; otherwise the
 * contents are split into non-snapshot dependencies, snapshot dependencies, the
 * spring-boot-loader classes, resources, and application classes.
 *
 * @return the layers, in the order: dependencies, loader, snapshots, resources, classes
 * @throws IOException if the JAR cannot be read or the target directory cannot be prepared
 */
@Override
public List<FileEntriesLayer> createLayers() throws IOException {
  // Clear the exploded-artifact root first
  if (Files.exists(targetExplodedJarRoot)) {
    MoreFiles.deleteRecursively(targetExplodedJarRoot, RecursiveDeleteOption.ALLOW_INSECURE);
  }

  try (JarFile jarFile = new JarFile(jarPath.toFile())) {
    ZipUtil.unzip(jarPath, targetExplodedJarRoot, true);
    // A layers.idx means the build already declared its own layering — honor it.
    ZipEntry layerIndex = jarFile.getEntry(BOOT_INF + "/layers.idx");
    if (layerIndex != null) {
      return createLayersForLayeredSpringBootJar(targetExplodedJarRoot);
    }

    Predicate<Path> isFile = Files::isRegularFile;

    // Non-snapshot layer
    Predicate<Path> isInBootInfLib =
        path -> path.startsWith(targetExplodedJarRoot.resolve(BOOT_INF).resolve("lib"));
    Predicate<Path> isSnapshot = path -> path.getFileName().toString().contains("SNAPSHOT");
    Predicate<Path> isInBootInfLibAndIsNotSnapshot = isInBootInfLib.and(isSnapshot.negate());
    Predicate<Path> nonSnapshotPredicate = isFile.and(isInBootInfLibAndIsNotSnapshot);
    FileEntriesLayer nonSnapshotLayer =
        ArtifactLayers.getDirectoryContentsAsLayer(
            ArtifactLayers.DEPENDENCIES,
            targetExplodedJarRoot,
            nonSnapshotPredicate,
            JarLayers.APP_ROOT);

    // Snapshot layer
    Predicate<Path> isInBootInfLibAndIsSnapshot = isInBootInfLib.and(isSnapshot);
    Predicate<Path> snapshotPredicate = isFile.and(isInBootInfLibAndIsSnapshot);
    FileEntriesLayer snapshotLayer =
        ArtifactLayers.getDirectoryContentsAsLayer(
            ArtifactLayers.SNAPSHOT_DEPENDENCIES,
            targetExplodedJarRoot,
            snapshotPredicate,
            JarLayers.APP_ROOT);

    // Spring-boot-loader layer.
    Predicate<Path> isLoader = path -> path.startsWith(targetExplodedJarRoot.resolve("org"));
    Predicate<Path> loaderPredicate = isFile.and(isLoader);
    FileEntriesLayer loaderLayer =
        ArtifactLayers.getDirectoryContentsAsLayer(
            "spring-boot-loader", targetExplodedJarRoot, loaderPredicate, JarLayers.APP_ROOT);

    // Classes layer.
    Predicate<Path> isClass = path -> path.getFileName().toString().endsWith(".class");
    Predicate<Path> isInBootInfClasses =
        path -> path.startsWith(targetExplodedJarRoot.resolve(BOOT_INF).resolve("classes"));
    Predicate<Path> classesPredicate = isInBootInfClasses.and(isClass);
    FileEntriesLayer classesLayer =
        ArtifactLayers.getDirectoryContentsAsLayer(
            ArtifactLayers.CLASSES, targetExplodedJarRoot, classesPredicate, JarLayers.APP_ROOT);

    // Resources layer: everything under META-INF, plus non-class files under
    // BOOT-INF/classes.
    Predicate<Path> isInMetaInf =
        path -> path.startsWith(targetExplodedJarRoot.resolve("META-INF"));
    Predicate<Path> isResource = isInMetaInf.or(isInBootInfClasses.and(isClass.negate()));
    Predicate<Path> resourcesPredicate = isFile.and(isResource);
    FileEntriesLayer resourcesLayer =
        ArtifactLayers.getDirectoryContentsAsLayer(
            ArtifactLayers.RESOURCES,
            targetExplodedJarRoot,
            resourcesPredicate,
            JarLayers.APP_ROOT);

    return Arrays.asList(
        nonSnapshotLayer, loaderLayer, snapshotLayer, resourcesLayer, classesLayer);
  }
}
@Test
public void testCreateLayers_nonLayered() throws IOException, URISyntaxException {
    Path springBootJar = Paths.get(Resources.getResource(SPRING_BOOT_NOT_LAYERED).toURI());
    Path destDir = temporaryFolder.newFolder().toPath();
    SpringBootExplodedProcessor processor =
        new SpringBootExplodedProcessor(springBootJar, destDir, JAR_JAVA_VERSION);

    List<FileEntriesLayer> layers = processor.createLayers();

    // Without a layers.idx the processor derives exactly five layers, in this order.
    assertThat(layers.size()).isEqualTo(5);
    FileEntriesLayer dependencies = layers.get(0);
    FileEntriesLayer loader = layers.get(1);
    FileEntriesLayer snapshots = layers.get(2);
    FileEntriesLayer resources = layers.get(3);
    FileEntriesLayer classes = layers.get(4);

    assertThat(dependencies.getName()).isEqualTo("dependencies");
    assertThat(extractionPathsOf(dependencies))
        .containsExactly(
            AbsoluteUnixPath.get("/app/BOOT-INF/lib/dependency1.jar"),
            AbsoluteUnixPath.get("/app/BOOT-INF/lib/dependency2.jar"));

    assertThat(loader.getName()).isEqualTo("spring-boot-loader");
    assertThat(extractionPathsOf(loader))
        .containsExactly(
            AbsoluteUnixPath.get("/app/org/springframework/boot/loader/data/data1.class"),
            AbsoluteUnixPath.get("/app/org/springframework/boot/loader/launcher1.class"));

    assertThat(snapshots.getName()).isEqualTo("snapshot dependencies");
    assertThat(snapshots.getEntries().get(0).getExtractionPath())
        .isEqualTo(AbsoluteUnixPath.get("/app/BOOT-INF/lib/dependency3-SNAPSHOT.jar"));

    assertThat(resources.getName()).isEqualTo("resources");
    assertThat(resources.getEntries().get(0).getExtractionPath())
        .isEqualTo(AbsoluteUnixPath.get("/app/META-INF/MANIFEST.MF"));

    assertThat(classes.getName()).isEqualTo("classes");
    assertThat(extractionPathsOf(classes))
        .containsExactly(
            AbsoluteUnixPath.get("/app/BOOT-INF/classes/class1.class"),
            AbsoluteUnixPath.get("/app/BOOT-INF/classes/classDirectory/class2.class"));
}

/** Maps a layer to the list of container-side extraction paths of its entries. */
private static List<AbsoluteUnixPath> extractionPathsOf(FileEntriesLayer layer) {
    return layer.getEntries().stream()
        .map(FileEntry::getExtractionPath)
        .collect(Collectors.toList());
}
/**
 * Handles an OffsetCommit request: validates it, keeps the classic-group session alive
 * where applicable, and produces one offset-commit record per accepted partition together
 * with the per-partition response codes.
 *
 * @param context the request context
 * @param request the offset commit request
 * @return the records to append and the response to return to the client
 * @throws ApiException if the commit is rejected by {@code validateOffsetCommit}
 */
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
    RequestContext context,
    OffsetCommitRequestData request
) throws ApiException {
    Group group = validateOffsetCommit(context, request);

    // In the old consumer group protocol, the offset commits maintain the session if
    // the group is in Stable or PreparingRebalance state.
    if (group.type() == Group.GroupType.CLASSIC) {
        ClassicGroup classicGroup = (ClassicGroup) group;
        if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
            groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
                classicGroup,
                classicGroup.member(request.memberId())
            );
        }
    }

    final OffsetCommitResponseData response = new OffsetCommitResponseData();
    final List<CoordinatorRecord> records = new ArrayList<>();
    final long currentTimeMs = time.milliseconds();
    // Expiry is derived from the request's retention time, if any.
    final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);

    request.topics().forEach(topic -> {
        final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
        response.topics().add(topicResponse);

        topic.partitions().forEach(partition -> {
            // Oversized metadata fails just that partition, not the whole request.
            if (isMetadataInvalid(partition.committedMetadata())) {
                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
            } else {
                log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
                    request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
                    request.memberId(), partition.committedLeaderEpoch());

                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.NONE.code()));

                final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
                    partition,
                    currentTimeMs,
                    expireTimestampMs
                );

                records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
                    request.groupId(),
                    topic.name(),
                    partition.partitionIndex(),
                    offsetAndMetadata,
                    metadataImage.features().metadataVersion()
                ));
            }
        });
    });

    if (!records.isEmpty()) {
        metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
    }

    return new CoordinatorResult<>(records, response);
}
@Test public void testConsumerGroupOffsetCommitFromAdminClient() { OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build(); // Create an empty group. context.groupMetadataManager.getOrMaybeCreatePersistedConsumerGroup( "foo", true ); CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> result = context.commitOffset( new OffsetCommitRequestData() .setGroupId("foo") .setTopics(Collections.singletonList( new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("bar") .setPartitions(Collections.singletonList( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(100L) )) )) ); assertEquals( new OffsetCommitResponseData() .setTopics(Collections.singletonList( new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("bar") .setPartitions(Collections.singletonList( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code()) )) )), result.response() ); assertEquals( Collections.singletonList(GroupCoordinatorRecordHelpers.newOffsetCommitRecord( "foo", "bar", 0, new OffsetAndMetadata( 100L, OptionalInt.empty(), "", context.time.milliseconds(), OptionalLong.empty() ), MetadataImage.EMPTY.features().metadataVersion() )), result.records() ); }
public ClickHouseLogConfig getClickHouseLogConfig() { return Optional.ofNullable(clickHouseLogConfig).orElse(new ClickHouseLogConfig()); }
@Test public void testGetClickHouseLogConfig() { ClickHouseLogCollectConfig.ClickHouseLogConfig clickHouseLogConfig = clickHouseLogCollectConfig.getClickHouseLogConfig(); Assertions.assertEquals(clickHouseLogConfig, clickHouseLogConfig); }
public static SimpleTransform threshold(double min, double max) { return new SimpleTransform(Operation.threshold,min,max); }
@Test public void testThresholdAbove() { double min = Double.NEGATIVE_INFINITY; double max = 1.0; TransformationMap t = new TransformationMap(Collections.singletonList(SimpleTransform.threshold(min, max)),new HashMap<>()); testThresholding(t,min,max); }
@Override public boolean commitNeeded() { if (task.isActive()) { throw new UnsupportedOperationException("This task is read-only"); } return task.commitNeeded(); }
@Test public void shouldDelegateCommitNeededIfStandby() { final StandbyTask standbyTask = standbyTask(new TaskId(1, 0), mkSet(new TopicPartition("topic", 0))).build(); final ReadOnlyTask readOnlyTask = new ReadOnlyTask(standbyTask); readOnlyTask.commitNeeded(); verify(standbyTask).commitNeeded(); }
static int findSlot(Object object, int numElements) { // This performs a secondary hash using Knuth's multiplicative Fibonacci // hashing. Then, we choose some of the highest bits. The number of bits // we choose is based on the table size. If the size is 2, we need 1 bit; // if the size is 4, we need 2 bits, etc. int objectHashCode = object.hashCode(); int log2size = 32 - Integer.numberOfLeadingZeros(numElements); int shift = 65 - log2size; return (int) ((objectHashCode * -7046029254386353131L) >>> shift); }
@Test public void testFindSlot() { Random random = new Random(123); for (int i = 1; i <= 5; i++) { int numSlots = 2 << i; HashSet<Integer> slotsReturned = new HashSet<>(); while (slotsReturned.size() < numSlots) { int slot = BaseHashTable.findSlot(random.nextInt(), numSlots); assertTrue(slot >= 0); assertTrue(slot < numSlots); slotsReturned.add(slot); } } }
public static <V> SetOnceReference<V> ofNullable(final V value) { return new SetOnceReference<>(value); }
@Test public void testFromOfNullableWithNull() { checkUnsetReference(SetOnceReference.ofNullable(null)); }
@Override public void metricChange(final KafkaMetric metric) { if (!metric.metricName().name().equals("total-sst-files-size")) { return; } handleNewSstFilesSizeMetric( metric, metric.metricName().tags().getOrDefault(TASK_ID_TAG, ""), getQueryId(metric) ); }
@Test public void shouldCombineTaskMetricsToQueryMetricWithSharedRuntimeQueries() { // When: listener.metricChange(mockMetric( KAFKA_METRIC_GROUP, KAFKA_METRIC_NAME, BigInteger.valueOf(2), ImmutableMap.of("task-id", "CTAS_TEST_1__1_0", "thread-id", "THREAD_ID", "logical_cluster_id", "logical-id")) ); listener.metricChange(mockMetric( KAFKA_METRIC_GROUP, KAFKA_METRIC_NAME, BigInteger.valueOf(5), ImmutableMap.of("task-id", "CTAS_TEST_1__1_1", "thread-id", "THREAD_ID", "logical_cluster_id", "logical-id")) ); // Then: final Gauge<?> queryGauge = verifyAndGetRegisteredMetric(QUERY_STORAGE_METRIC, QUERY_TAGS); final Object queryValue = queryGauge.value(null, 0); final Map<String, String> task1 = ImmutableMap.of("logical_cluster_id", "logical-id", "query-id", "CTAS_TEST_1", "task-id", "CTAS_TEST_1__1_0"); final Gauge<?> taskGaugeOne = verifyAndGetRegisteredMetric(TASK_STORAGE_METRIC, task1); final Object taskValueOne = taskGaugeOne.value(null, 0); final Map<String, String> task2 = ImmutableMap.of("logical_cluster_id", "logical-id", "query-id", "CTAS_TEST_1", "task-id", "CTAS_TEST_1__1_1"); final Gauge<?> taskGaugeTwo = verifyAndGetRegisteredMetric(TASK_STORAGE_METRIC, task2); final Object taskValueTwo = taskGaugeTwo.value(null, 0); assertThat(taskValueOne, equalTo(BigInteger.valueOf(2))); assertThat(taskValueTwo, equalTo(BigInteger.valueOf(5))); assertThat(queryValue, equalTo(BigInteger.valueOf(7))); }
@InvokeOnHeader(Web3jConstants.ETH_GET_BLOCK_TRANSACTION_COUNT_BY_HASH) void ethGetBlockTransactionCountByHash(Message message) throws IOException { String blockHash = message.getHeader(Web3jConstants.BLOCK_HASH, configuration::getBlockHash, String.class); Request<?, EthGetBlockTransactionCountByHash> request = web3j.ethGetBlockTransactionCountByHash(blockHash); setRequestId(message, request); EthGetBlockTransactionCountByHash response = request.send(); boolean hasError = checkForError(message, response); if (!hasError) { message.setBody(response.getTransactionCount()); } }
@Test public void ethGetBlockTransactionCountByHashTest() throws Exception { EthGetBlockTransactionCountByHash response = Mockito.mock(EthGetBlockTransactionCountByHash.class); Mockito.when(mockWeb3j.ethGetBlockTransactionCountByHash(any())).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Mockito.when(response.getTransactionCount()).thenReturn(BigInteger.ONE); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_BLOCK_TRANSACTION_COUNT_BY_HASH); template.send(exchange); BigInteger body = exchange.getIn().getBody(BigInteger.class); assertEquals(BigInteger.ONE, body); }
@Override public ResultSet getTables(final String catalog, final String schemaPattern, final String tableNamePattern, final String[] types) throws SQLException { return createDatabaseMetaDataResultSet(getDatabaseMetaData().getTables(getActualCatalog(catalog), getActualSchema(schemaPattern), getActualTableNamePattern(tableNamePattern), types)); }
@Test void assertGetTables() throws SQLException { when(databaseMetaData.getTables("test", null, "%" + TABLE_NAME + "%", null)).thenReturn(resultSet); assertThat(shardingSphereDatabaseMetaData.getTables("test", null, TABLE_NAME, null), instanceOf(DatabaseMetaDataResultSet.class)); }
public Map<String, NamespaceIsolationDataImpl> getPolicies() { return this.policies; }
@Test public void testDefaultConstructor() throws Exception { NamespaceIsolationPolicies policies = new NamespaceIsolationPolicies(); assertTrue(policies.getPolicies().isEmpty()); byte[] outJson = ObjectMapperFactory.create().writeValueAsBytes(policies.getPolicies()); assertEquals(new String(outJson), "{}"); }
public boolean isOk() { return Code.isOk(code.code); }
@Test void isOk() { Assertions.assertTrue(TriRpcStatus.OK.isOk()); Assertions.assertFalse(TriRpcStatus.NOT_FOUND.isOk()); }
public static boolean isUnclosedQuote(final String line) { // CHECKSTYLE_RULES.ON: CyclomaticComplexity int quoteStart = -1; for (int i = 0; i < line.length(); ++i) { if (quoteStart < 0 && isQuoteChar(line, i)) { quoteStart = i; } else if (quoteStart >= 0 && isTwoQuoteStart(line, i) && !isEscaped(line, i)) { // Together, two quotes are effectively an escaped quote and don't act as a quote character. // Skip the next quote char, since it's coupled with the first. i++; } else if (quoteStart >= 0 && isQuoteChar(line, i) && !isEscaped(line, i)) { quoteStart = -1; } } final int commentInd = line.indexOf(COMMENT); if (commentInd < 0) { return quoteStart >= 0; } else if (quoteStart < 0) { return false; } else { return commentInd > quoteStart; } }
@Test public void shouldNotFindUnclosedQuote_endsNonQuote() { // Given: final String line = "some line 'this is in a quote' more"; // Then: assertThat(UnclosedQuoteChecker.isUnclosedQuote(line), is(false)); }
@Override public KTable<K, VOut> aggregate(final Initializer<VOut> initializer, final Materialized<K, VOut, KeyValueStore<Bytes, byte[]>> materialized) { return aggregate(initializer, NamedInternal.empty(), materialized); }
@Test public void shouldNotHaveNullInitializerOnAggregateWitMaterialized() { assertThrows(NullPointerException.class, () -> cogroupedStream.aggregate(null, Materialized.as("store"))); }
@Override public ExecuteContext before(ExecuteContext context) { if (context.getObject() instanceof Builder) { Builder builder = (Builder) context.getObject(); Optional<Object> connector = ReflectUtils.getFieldValue(builder, "connector"); if (connector.isPresent() && connector.get() instanceof JettyClientHttpConnector) { // There is a bug in the Jetty Client Http Connector, and the retry filter cannot be injected, // otherwise it will be reported when it is retried // IllegalStateException: multiple subscribers not supported return context; } // Initialize init(); // Injected retries will no longer be injected Optional<Object> filters = ReflectUtils.getFieldValue(builder, "filters"); if (!filters.isPresent()) { context.skip(getRetryWebClient(builder)); return context; } List<ExchangeFilterFunction> list = (List<ExchangeFilterFunction>) filters.get(); for (ExchangeFilterFunction filterFunction : list) { if (filterFunction instanceof AbstractRetryExchangeFilterFunction) { return context; } } context.skip(getRetryWebClient(builder)); } return context; }
@Test public void testReactorClientHttpConnector() { // Normal Builder builder = WebClient.builder(); builder.clientConnector(new ReactorClientHttpConnector()); ExecuteContext context = ExecuteContext.forMemberMethod(builder, method, null, null, null); interceptor.before(context); Assert.assertTrue(context.isSkip()); WebClient client = (WebClient) context.getResult(); Optional<Object> filters = ReflectUtils.getFieldValue(client.mutate(), "filters"); Assert.assertTrue(filters.isPresent()); Assert.assertEquals(1, ((List<?>) filters.get()).size()); Assert.assertTrue(((List<?>) filters.get()).get(0) instanceof AbstractRetryExchangeFilterFunction); Assert.assertNull(ReflectUtils.getFieldValue(builder, "filters").orElse(null)); // It has already been injected builder = WebClient.builder(); context = ExecuteContext.forMemberMethod(builder, method, null, null, null); builder.filter(new RetryExchangeFilterFunction()); interceptor.before(context); Assert.assertFalse(context.isSkip()); }
@Override public boolean nukeExistingCluster() throws Exception { log.info("Nuking metadata of existing cluster, ledger root path: {}", ledgersRootPath); if (!store.exists(ledgersRootPath + "/" + INSTANCEID).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) { log.info("There is no existing cluster with ledgersRootPath: {}, so exiting nuke operation", ledgersRootPath); return true; } @Cleanup RegistrationClient registrationClient = new PulsarRegistrationClient(store, ledgersRootPath); Collection<BookieId> rwBookies = registrationClient.getWritableBookies() .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS).getValue(); if (rwBookies != null && !rwBookies.isEmpty()) { log.error("Bookies are still up and connected to this cluster, " + "stop all bookies before nuking the cluster"); return false; } Collection<BookieId> roBookies = registrationClient.getReadOnlyBookies() .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS).getValue(); if (roBookies != null && !roBookies.isEmpty()) { log.error("Readonly Bookies are still up and connected to this cluster, " + "stop all bookies before nuking the cluster"); return false; } LayoutManager layoutManager = new PulsarLayoutManager(store, ledgersRootPath); LedgerManagerFactory ledgerManagerFactory = new PulsarLedgerManagerFactory(); ledgerManagerFactory.initialize(conf, layoutManager, LegacyHierarchicalLedgerManagerFactory.CUR_VERSION); return ledgerManagerFactory.validateAndNukeExistingCluster(conf, layoutManager); }
@Test(dataProvider = "impl") public void testNukeNonExistingCluster(String provider, Supplier<String> urlSupplier) throws Exception { methodSetup(urlSupplier); assertClusterNotExists(); assertTrue(registrationManager.nukeExistingCluster()); assertClusterNotExists(); }
@Override public boolean supportsMigration() { return true; }
@Test void postgres_does_supportMigration() { assertThat(underTest.supportsMigration()).isTrue(); }
public Map<String, Object> getConfigurationByPluginType(final String pluginType) { return configurations.stream() .filter(config -> config.type().equalsIgnoreCase(pluginType)) .map(PluginConfiguration::values) .reduce(new HashMap<>(), (accumulator, map) -> { accumulator.putAll(map); return accumulator; }); }
@Test void shouldGetOrderedMergeConfigurationProperties() { // Given PluginConfigurations configurations = new PluginConfigurations(List.of( new PluginConfiguration(0, PLUGIN_TEST, Map.of( "prop1", "v1", "prop2", "v1", "prop3", "v1" )), new PluginConfiguration(2, PLUGIN_TEST, Map.of( "prop1", "v1", "prop2", "v2", "prop3", "v3" )), new PluginConfiguration(1, PLUGIN_TEST, Map.of( "prop1", "v2", "prop2", "v2", "prop3", "v2" )) )); // When Map<String, Object> result = configurations.getConfigurationByPluginType(PLUGIN_TEST); // Then Assertions.assertEquals(Map.of( "prop1", "v1", "prop2", "v2", "prop3", "v3" ), result); }
public T remove(String key) { return delegate.remove(key.toLowerCase()); }
@Test public void testRemove() throws Exception { String someKey = "someKey"; Object someValue = mock(Object.class); when(someMap.remove(someKey.toLowerCase())).thenReturn(someValue); assertEquals(someValue, caseInsensitiveMapWrapper.remove(someKey)); verify(someMap, times(1)).remove(someKey.toLowerCase()); }
public Value evalForValue(String exp) { return context.eval(JS, exp); }
@Test void testJavaFunctionFactory() { Value v = je.evalForValue("Java.type('com.intuit.karate.graal.StaticPojo').sayHelloFactory()"); assertFalse(v.isMetaObject()); assertTrue(v.isHostObject()); assertTrue(v.canExecute()); }
@EventListener @Async void onApplicationEvent(HaloDocumentRebuildRequestEvent event) { getSearchEngine() .doOnNext(SearchEngine::deleteAll) .flatMap(searchEngine -> extensionGetter.getExtensions(HaloDocumentsProvider.class) .flatMap(HaloDocumentsProvider::fetchAll) .buffer(this.bufferSize) .doOnNext(searchEngine::addOrUpdate) .then()) .blockOptional(Duration.ofMinutes(1)); }
@Test void shouldAddDocsWhenReceivingAddRequestEvent() { var searchEngine = mock(SearchEngine.class); when(searchEngine.available()).thenReturn(true); when(extensionGetter.getEnabledExtension(SearchEngine.class)) .thenReturn(Mono.just(searchEngine)); var docs = List.of(new HaloDocument()); listener.onApplicationEvent(new HaloDocumentAddRequestEvent(this, docs)); verify(searchEngine).addOrUpdate(docs); }
@ConstantFunction.List(list = { @ConstantFunction(name = "subtract", argTypes = {DECIMALV2, DECIMALV2}, returnType = DECIMALV2), @ConstantFunction(name = "subtract", argTypes = {DECIMAL32, DECIMAL32}, returnType = DECIMAL32), @ConstantFunction(name = "subtract", argTypes = {DECIMAL64, DECIMAL64}, returnType = DECIMAL64), @ConstantFunction(name = "subtract", argTypes = {DECIMAL128, DECIMAL128}, returnType = DECIMAL128) }) public static ConstantOperator subtractDecimal(ConstantOperator first, ConstantOperator second) { return createDecimalConstant(first.getDecimal().subtract(second.getDecimal())); }
@Test public void subtractDecimal() { assertEquals("0", ScalarOperatorFunctions.subtractDecimal(O_DECIMAL_100, O_DECIMAL_100).getDecimal().toString()); assertEquals("0", ScalarOperatorFunctions.subtractDecimal(O_DECIMAL32P7S2_100, O_DECIMAL32P7S2_100).getDecimal() .toString()); assertEquals("0", ScalarOperatorFunctions.subtractDecimal(O_DECIMAL32P9S0_100, O_DECIMAL32P9S0_100).getDecimal() .toString()); assertEquals("0", ScalarOperatorFunctions.subtractDecimal(O_DECIMAL64P15S10_100, O_DECIMAL64P15S10_100).getDecimal() .toString()); assertEquals("0", ScalarOperatorFunctions.subtractDecimal(O_DECIMAL64P18S15_100, O_DECIMAL64P18S15_100).getDecimal() .toString()); assertEquals("0", ScalarOperatorFunctions.subtractDecimal(O_DECIMAL128P30S2_100, O_DECIMAL128P30S2_100).getDecimal() .toString()); assertEquals("0", ScalarOperatorFunctions.subtractDecimal(O_DECIMAL128P38S20_100, O_DECIMAL128P38S20_100).getDecimal() .toString()); assertTrue(ScalarOperatorFunctions.subtractDecimal(O_DECIMAL128P38S20_100, O_DECIMAL128P38S20_100).getType() .isDecimalV3()); }
public UnitExtension getUnitExtension(String extensionName) { return null; }
@Test void testConstGetSet() { final var name = "testName"; final var unit = new Unit(name); assertEquals(name, unit.getName()); final var newName = "newName"; unit.setName(newName); assertEquals(newName, unit.getName()); assertNull(unit.getUnitExtension("")); assertNull(unit.getUnitExtension("SoldierExtension")); assertNull(unit.getUnitExtension("SergeantExtension")); assertNull(unit.getUnitExtension("CommanderExtension")); }
public static <T> TypeInformation<T> of(Class<T> typeClass) { try { return TypeExtractor.createTypeInfo(typeClass); } catch (InvalidTypesException e) { throw new FlinkRuntimeException( "Cannot extract TypeInformation from Class alone, because generic parameters are missing. " + "Please use TypeInformation.of(TypeHint) instead, or another equivalent method in the API that " + "accepts a TypeHint instead of a Class. " + "For example for a Tuple2<Long, String> pass a 'new TypeHint<Tuple2<Long, String>>(){}'."); } }
@Test void testOfTypeHint() { assertThat(TypeInformation.of(String.class)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO); assertThat(TypeInformation.of(new TypeHint<String>() {})) .isEqualTo(BasicTypeInfo.STRING_TYPE_INFO); TypeInformation<Tuple3<String, Double, Boolean>> tupleInfo = new TupleTypeInfo<>( BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.DOUBLE_TYPE_INFO, BasicTypeInfo.BOOLEAN_TYPE_INFO); assertThat(TypeInformation.of(new TypeHint<Tuple3<String, Double, Boolean>>() {})) .isEqualTo(tupleInfo); }
@Override public Page<ConfigInfoWrapper> findAllConfigInfoFragment(final long lastMaxId, final int pageSize, boolean needContent) { ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO); MapperContext context = new MapperContext(0, pageSize); context.putContextParameter(ContextConstant.NEED_CONTENT, String.valueOf(needContent)); context.putWhereParameter(FieldConstant.ID, lastMaxId); MapperResult select = configInfoMapper.findAllConfigInfoFragment(context); PaginationHelper<ConfigInfoWrapper> helper = createPaginationHelper(); return helper.fetchPageLimit(select.getSql(), select.getParamList().toArray(), 1, pageSize, CONFIG_INFO_WRAPPER_ROW_MAPPER); }
@Test void testFindAllConfigInfoFragment() { //mock page list List<ConfigInfoWrapper> mockConfigs = new ArrayList<>(); mockConfigs.add(createMockConfigInfoWrapper(0)); mockConfigs.add(createMockConfigInfoWrapper(1)); mockConfigs.add(createMockConfigInfoWrapper(2)); long lastId = 10111L; when(databaseOperate.queryMany(anyString(), eq(new Object[] {lastId}), eq(CONFIG_INFO_WRAPPER_ROW_MAPPER))).thenReturn(mockConfigs); int pageSize = 100; //execute return mock obj Page<ConfigInfoWrapper> returnConfigPage = embeddedConfigInfoPersistService.findAllConfigInfoFragment(lastId, pageSize, true); //expect check assertEquals(mockConfigs, returnConfigPage.getPageItems()); }
public CardinalityEstimatorConfig setBackupCount(int backupCount) { this.backupCount = checkBackupCount(backupCount, asyncBackupCount); return this; }
@Test(expected = IllegalArgumentException.class) public void testSetBackupCount_withNegativeValue() { config.setBackupCount(-1); }
static InjectorFunction injectorFunction(InjectorFunction existing, InjectorFunction... update) { if (update == null) throw new NullPointerException("injectorFunctions == null"); LinkedHashSet<InjectorFunction> injectorFunctionSet = new LinkedHashSet<InjectorFunction>(Arrays.asList(update)); if (injectorFunctionSet.contains(null)) { throw new NullPointerException("injectorFunction == null"); } injectorFunctionSet.remove(InjectorFunction.NOOP); if (injectorFunctionSet.isEmpty()) return existing; if (injectorFunctionSet.size() == 1) return injectorFunctionSet.iterator().next(); return new CompositeInjectorFunction(injectorFunctionSet.toArray(new InjectorFunction[0])); }
@Test void injectorFunction_composite() { InjectorFunction existing = mock(InjectorFunction.class); CompositeInjectorFunction injectorFunction = (CompositeInjectorFunction) injectorFunction(existing, two, three); assertThat(injectorFunction.injectorFunctions) .containsExactly(two, three); injectorFunction.inject(setter, context, notRequest); assertThat(twoCount.getAndSet(0)).isOne(); assertThat(threeCount.getAndSet(0)).isOne(); }
public static ConfiguredKsqlPlan of( final KsqlPlan plan, final SessionConfig config ) { return new ConfiguredKsqlPlan(plan, config); }
@SuppressWarnings("UnstableApiUsage") @Test public void testEquality() { new EqualsTester() .addEqualityGroup( ConfiguredKsqlPlan.of(plan, config), ConfiguredKsqlPlan.of(plan, config) ) .addEqualityGroup(ConfiguredKsqlPlan.of(plan2, config2)) .testEquals(); }
@Override public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) { ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new); String columnName = shardingValue.getColumnName(); ShardingSpherePreconditions.checkState(algorithmExpression.contains(columnName), () -> new MismatchedInlineShardingAlgorithmExpressionAndColumnException(algorithmExpression, columnName)); try { return InlineExpressionParserFactory.newInstance(algorithmExpression).evaluateWithArgs(Collections.singletonMap(columnName, shardingValue.getValue())); } catch (final MissingMethodException ignored) { throw new MismatchedInlineShardingAlgorithmExpressionAndColumnException(algorithmExpression, columnName); } }
@Test void assertDoShardingWithNonExistNodes() { List<String> availableTargetNames = Arrays.asList("t_order_0", "t_order_1"); assertThat(inlineShardingAlgorithm.doSharding(availableTargetNames, new PreciseShardingValue<>("t_order", "order_id", DATA_NODE_INFO, 0)), is("t_order_0")); assertThat(inlineShardingAlgorithmWithSimplified.doSharding(availableTargetNames, new PreciseShardingValue<>("t_order", "order_id", DATA_NODE_INFO, 0)), is("t_order_0")); }
@Override public Collection<DatabasePacket> execute() { failedIfContainsMultiStatements(); MetaDataContexts metaDataContexts = ProxyContext.getInstance().getContextManager().getMetaDataContexts(); SQLParserRule sqlParserRule = metaDataContexts.getMetaData().getGlobalRuleMetaData().getSingleRule(SQLParserRule.class); DatabaseType databaseType = TypedSPILoader.getService(DatabaseType.class, "MySQL"); SQLStatement sqlStatement = sqlParserRule.getSQLParserEngine(databaseType).parse(packet.getSQL(), true); if (!MySQLComStmtPrepareChecker.isAllowedStatement(sqlStatement)) { throw new UnsupportedPreparedStatementException(); } SQLStatementContext sqlStatementContext = new SQLBindEngine(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData(), connectionSession.getCurrentDatabaseName(), packet.getHintValueContext()).bind(sqlStatement, Collections.emptyList()); int statementId = MySQLStatementIdGenerator.getInstance().nextStatementId(connectionSession.getConnectionId()); MySQLServerPreparedStatement serverPreparedStatement = new MySQLServerPreparedStatement(packet.getSQL(), sqlStatementContext, packet.getHintValueContext(), new CopyOnWriteArrayList<>()); connectionSession.getServerPreparedStatementRegistry().addPreparedStatement(statementId, serverPreparedStatement); return createPackets(sqlStatementContext, statementId, serverPreparedStatement); }
/**
 * Preparing an INSERT with four parameter markers must produce a prepare-OK
 * packet, one column-definition packet per parameter (with the expected
 * unsigned / primary-key flags), a terminating EOF, and must register the
 * prepared statement under the connection's first statement id.
 */
@Test
void assertPrepareInsertStatement() {
    String sql = "insert into user (id, name, age) values (1, ?, ?), (?, 'bar', ?)";
    when(packet.getSQL()).thenReturn(sql);
    when(packet.getHintValueContext()).thenReturn(new HintValueContext());
    int connectionId = 2;
    when(connectionSession.getConnectionId()).thenReturn(connectionId);
    when(connectionSession.getCurrentDatabaseName()).thenReturn("foo_db");
    MySQLStatementIdGenerator.getInstance().registerConnection(connectionId);
    ContextManager contextManager = mockContextManager();
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    Iterator<DatabasePacket> actualIterator = new MySQLComStmtPrepareExecutor(packet, connectionSession).execute().iterator();
    assertThat(actualIterator.next(), instanceOf(MySQLComStmtPrepareOKPacket.class));
    // Parameter order follows marker order: name, age (row 1), id, age (row 2).
    assertThat(actualIterator.next(), instanceOf(MySQLColumnDefinition41Packet.class));
    DatabasePacket firstAgeColumnDefinitionPacket = actualIterator.next();
    assertThat(firstAgeColumnDefinitionPacket, instanceOf(MySQLColumnDefinition41Packet.class));
    assertThat(getColumnDefinitionFlag((MySQLColumnDefinition41Packet) firstAgeColumnDefinitionPacket), is(MySQLColumnDefinitionFlag.UNSIGNED.getValue()));
    DatabasePacket idColumnDefinitionPacket = actualIterator.next();
    assertThat(idColumnDefinitionPacket, instanceOf(MySQLColumnDefinition41Packet.class));
    // The id column carries both the primary-key and unsigned flags.
    assertThat(getColumnDefinitionFlag((MySQLColumnDefinition41Packet) idColumnDefinitionPacket),
            is(MySQLColumnDefinitionFlag.PRIMARY_KEY.getValue() | MySQLColumnDefinitionFlag.UNSIGNED.getValue()));
    DatabasePacket secondAgeColumnDefinitionPacket = actualIterator.next();
    assertThat(secondAgeColumnDefinitionPacket, instanceOf(MySQLColumnDefinition41Packet.class));
    assertThat(getColumnDefinitionFlag((MySQLColumnDefinition41Packet) secondAgeColumnDefinitionPacket), is(MySQLColumnDefinitionFlag.UNSIGNED.getValue()));
    assertThat(actualIterator.next(), instanceOf(MySQLEofPacket.class));
    assertFalse(actualIterator.hasNext());
    // The statement is registered under id 1 for this fresh connection.
    MySQLServerPreparedStatement actualPreparedStatement = connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(1);
    assertThat(actualPreparedStatement.getSql(), is(sql));
    assertThat(actualPreparedStatement.getSqlStatementContext(), instanceOf(InsertStatementContext.class));
    assertThat(actualPreparedStatement.getSqlStatementContext().getSqlStatement(), instanceOf(MySQLInsertStatement.class));
    MySQLStatementIdGenerator.getInstance().unregisterConnection(connectionId);
}
static FEELFnResult<Boolean> matchFunctionWithFlags(String input, String pattern, String flags) { log.debug("Input: {} , Pattern: {}, Flags: {}", input, pattern, flags); if ( input == null ) { throw new InvalidParameterException("input"); } if ( pattern == null ) { throw new InvalidParameterException("pattern"); } final String flagsString; if (flags != null && !flags.isEmpty()) { checkFlags(flags); if(!flags.contains("U")){ flags += "U"; } flagsString = String.format("(?%s)", flags); } else { flagsString = ""; } log.debug("flagsString: {}", flagsString); String stringToBeMatched = flagsString + pattern; log.debug("stringToBeMatched: {}", stringToBeMatched); Pattern p=Pattern.compile(stringToBeMatched); Matcher m = p.matcher( input ); boolean matchFound=m.find(); log.debug("matchFound: {}", matchFound); return FEELFnResult.ofResult(matchFound); }
@Test void invokeWithoutFlagsNotMatch() { FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("test", "testt",null), false); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("foobar", "^fo*bb",null), false); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("fo\nbar", "fo.bar",null), false); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("h", "(.)\3",null), false); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("h", "(.)\2",null), false); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("input", "\3",null), false); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("fo\nbar", "(?iU)(?iU)(ab)[|cd]",null), false); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("fo\nbar", "(?x)(?i)hello world","i"), false); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("fo\nbar", "(?xi)hello world",null), false); }
/**
 * Decodes and renders an archive control request into a human-readable log line.
 * Writes the common log header, wraps the SBE message header, then dispatches on
 * the event code to the matching request decoder and append helper. Unknown
 * event codes are rendered as ": unknown command".
 *
 * @param eventCode the archive control event being dissected.
 * @param buffer    the buffer containing the encoded log entry.
 * @param offset    the absolute offset of the entry in the buffer.
 * @param builder   the output builder the textual form is appended to.
 */
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
    final ArchiveEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    // The log header and SBE message header precede every request body.
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
    HEADER_DECODER.wrap(buffer, offset + encodedLength);
    encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;

    // Each case wraps the flyweight decoder over the request body and appends its fields.
    switch (eventCode)
    {
        case CMD_IN_CONNECT:
            CONNECT_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendConnect(builder);
            break;

        case CMD_IN_CLOSE_SESSION:
            CLOSE_SESSION_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendCloseSession(builder);
            break;

        case CMD_IN_START_RECORDING:
            START_RECORDING_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING:
            STOP_RECORDING_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecording(builder);
            break;

        case CMD_IN_REPLAY:
            REPLAY_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplay(builder);
            break;

        case CMD_IN_STOP_REPLAY:
            STOP_REPLAY_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplay(builder);
            break;

        case CMD_IN_LIST_RECORDINGS:
            LIST_RECORDINGS_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordings(builder);
            break;

        case CMD_IN_LIST_RECORDINGS_FOR_URI:
            LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingsForUri(builder);
            break;

        case CMD_IN_LIST_RECORDING:
            LIST_RECORDING_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecording(builder);
            break;

        case CMD_IN_EXTEND_RECORDING:
            EXTEND_RECORDING_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording(builder);
            break;

        case CMD_IN_RECORDING_POSITION:
            RECORDING_POSITION_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendRecordingPosition(builder);
            break;

        case CMD_IN_TRUNCATE_RECORDING:
            TRUNCATE_RECORDING_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTruncateRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
            STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingSubscription(builder);
            break;

        case CMD_IN_STOP_POSITION:
            STOP_POSITION_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopPosition(builder);
            break;

        case CMD_IN_FIND_LAST_MATCHING_RECORD:
            FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendFindLastMatchingRecord(builder);
            break;

        case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
            LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingSubscriptions(builder);
            break;

        case CMD_IN_START_BOUNDED_REPLAY:
            BOUNDED_REPLAY_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartBoundedReplay(builder);
            break;

        case CMD_IN_STOP_ALL_REPLAYS:
            STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopAllReplays(builder);
            break;

        case CMD_IN_REPLICATE:
            REPLICATE_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate(builder);
            break;

        case CMD_IN_STOP_REPLICATION:
            STOP_REPLICATION_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplication(builder);
            break;

        case CMD_IN_START_POSITION:
            START_POSITION_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartPosition(builder);
            break;

        case CMD_IN_DETACH_SEGMENTS:
            DETACH_SEGMENTS_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDetachSegments(builder);
            break;

        case CMD_IN_DELETE_DETACHED_SEGMENTS:
            DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDeleteDetachedSegments(builder);
            break;

        case CMD_IN_PURGE_SEGMENTS:
            PURGE_SEGMENTS_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeSegments(builder);
            break;

        case CMD_IN_ATTACH_SEGMENTS:
            ATTACH_SEGMENTS_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAttachSegments(builder);
            break;

        case CMD_IN_MIGRATE_SEGMENTS:
            MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendMigrateSegments(builder);
            break;

        case CMD_IN_AUTH_CONNECT:
            AUTH_CONNECT_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAuthConnect(builder);
            break;

        case CMD_IN_KEEP_ALIVE:
            KEEP_ALIVE_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendKeepAlive(builder);
            break;

        case CMD_IN_TAGGED_REPLICATE:
            TAGGED_REPLICATE_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTaggedReplicate(builder);
            break;

        case CMD_IN_START_RECORDING2:
            START_RECORDING_REQUEST2_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording2(builder);
            break;

        case CMD_IN_EXTEND_RECORDING2:
            EXTEND_RECORDING_REQUEST2_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording2(builder);
            break;

        case CMD_IN_STOP_RECORDING_BY_IDENTITY:
            STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingByIdentity(builder);
            break;

        case CMD_IN_PURGE_RECORDING:
            PURGE_RECORDING_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeRecording(builder);
            break;

        case CMD_IN_REPLICATE2:
            REPLICATE_REQUEST2_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate2(builder);
            break;

        case CMD_IN_REQUEST_REPLAY_TOKEN:
            REPLAY_TOKEN_REQUEST_DECODER.wrap(buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplayToken(builder);
            break;

        default:
            builder.append(": unknown command");
    }
}
@Test void controlRequestKeepAlive() { internalEncodeLogHeader(buffer, 0, 3, 6, () -> 5_500_000_000L); final KeepAliveRequestEncoder requestEncoder = new KeepAliveRequestEncoder(); requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder) .controlSessionId(31) .correlationId(119); dissectControlRequest(CMD_IN_KEEP_ALIVE, buffer, 0, builder); assertEquals("[5.500000000] " + CONTEXT + ": " + CMD_IN_KEEP_ALIVE.name() + " [3/6]:" + " controlSessionId=31" + " correlationId=119", builder.toString()); }
public void setExchange(final ServerWebExchange exchange) { this.exchange = exchange; }
@Test public void testSetExchange() throws NoSuchFieldException, IllegalAccessException { loggingServerHttpResponse.setExchange(exchange); Field field = loggingServerHttpResponse.getClass().getDeclaredField("exchange"); field.setAccessible(true); Assertions.assertEquals(field.get(loggingServerHttpResponse), exchange); }
public static String getType(String fileStreamHexHead) { if(StrUtil.isBlank(fileStreamHexHead)){ return null; } if (MapUtil.isNotEmpty(FILE_TYPE_MAP)) { for (final Entry<String, String> fileTypeEntry : FILE_TYPE_MAP.entrySet()) { if (StrUtil.startWithIgnoreCase(fileStreamHexHead, fileTypeEntry.getKey())) { return fileTypeEntry.getValue(); } } } byte[] bytes = HexUtil.decodeHex(fileStreamHexHead); return FileMagicNumber.getMagicNumber(bytes).getExtension(); }
@Test @Disabled public void docTest() { final File file = FileUtil.file("f:/test/test.doc"); final String type = FileTypeUtil.getType(file); Console.log(type); }