Dataset schema:
- focal_method: string, lengths 13 – 60.9k characters
- test_case: string, lengths 25 – 109k characters
/**
 * Updates the fatigue level by delegating to the underlying giant model.
 *
 * @param fatigue the new fatigue level to apply
 */
public void setFatigue(Fatigue fatigue) {
    giant.setFatigue(fatigue);
}
/** Every Fatigue value set through the Action must reach the model and show up in toString(). */
@Test
void testSetFatigue() {
    final var giantModel = new GiantModel("giant1", Health.HEALTHY, Fatigue.ALERT, Nourishment.SATURATED);
    final var action = new Action(giantModel);
    assertEquals(Fatigue.ALERT, giantModel.getFatigue());
    final var expectedFormat = "Giant giant1, The giant looks healthy, %s and saturated.";
    for (final var level : Fatigue.values()) {
        action.setFatigue(level);
        assertEquals(level, giantModel.getFatigue());
        assertEquals(String.format(expectedFormat, level), giantModel.toString());
    }
}
public static Table resolveCalciteTable(SchemaPlus schemaPlus, List<String> tablePath) { Schema subSchema = schemaPlus; // subSchema.getSubschema() for all except last for (int i = 0; i < tablePath.size() - 1; i++) { subSchema = subSchema.getSubSchema(tablePath.get(i)); if (subSchema == null) { throw new IllegalStateException( String.format( "While resolving table path %s, no sub-schema found for component %s (\"%s\")", tablePath, i, tablePath.get(i))); } } // for the final one call getTable() return subSchema.getTable(Iterables.getLast(tablePath)); }
@Test
public void testMissingTableInSubschema() {
    // The sub-schema resolves, but the table lookup inside it yields null.
    final String schemaName = "fake_schema";
    final String missingTable = "fake_table";
    when(mockSchemaPlus.getSubSchema(schemaName)).thenReturn(innerSchemaPlus);
    when(innerSchemaPlus.getTable(missingTable)).thenReturn(null);
    final Table resolved =
        TableResolution.resolveCalciteTable(mockSchemaPlus, ImmutableList.of(schemaName, missingTable));
    assertThat(resolved, Matchers.nullValue());
}
// Chat-message dispatcher for the woodcutting plugin. Only SPAM, GAMEMESSAGE and
// MESBOX messages are processed. It (1) counts chopped logs and anima-infused bark
// into the current WoodcuttingSession (creating one lazily), (2) raises a bird-nest
// notification unless a clue nest at or above the configured tier just spawned,
// clearing clueTierSpawned afterwards, (3) records which sapling ingredient the
// "The sapling seems to love..." message refers to into saplingOrder by matching the
// object name against saplingIngredients, and (4) clears activeFlowers when a
// pollination message indicates the last interacted flower is unavailable.
// NOTE(review): this row appears whitespace-flattened from multi-line source;
// kept byte-identical below.
@Subscribe public void onChatMessage(ChatMessage event) { if (event.getType() != ChatMessageType.SPAM && event.getType() != ChatMessageType.GAMEMESSAGE && event.getType() != ChatMessageType.MESBOX) { return; } final var msg = event.getMessage(); if (WOOD_CUT_PATTERN.matcher(msg).matches()) { if (session == null) { session = new WoodcuttingSession(); } session.setLastChopping(); session.incrementLogsCut(); } var matcher = ANIMA_BARK_PATTERN.matcher(msg); if (matcher.matches()) { if (session == null) { session = new WoodcuttingSession(); } session.setLastChopping(); int num = Integer.parseInt(matcher.group(1)); session.incrementBark(num); } if (msg.contains("A bird's nest falls out of the tree")) { if (clueTierSpawned == null || clueTierSpawned.ordinal() >= config.clueNestNotifyTier().ordinal()) { notifier.notify(config.showNestNotification(), "A bird nest has spawned!"); } // Clear the clue tier that has previously spawned clueTierSpawned = null; } if (msg.startsWith("The sapling seems to love")) { int ingredientNum = msg.contains("first") ? 1 : (msg.contains("second") ? 2 : (msg.contains("third") ? 3 : -1)); if (ingredientNum == -1) { log.debug("unable to find ingredient index from message: {}", msg); return; } GameObject ingredientObj = saplingIngredients.stream() .filter(obj -> msg.contains(client.getObjectDefinition(obj.getId()).getName().toLowerCase())) .findAny() .orElse(null); if (ingredientObj == null) { log.debug("unable to find ingredient from message: {}", msg); return; } saplingOrder[ingredientNum - 1] = ingredientObj; } if (msg.equals("There are no open, unpollinated flowers on this bush yet.") || msg.equals("The flowers on this bush have not yet opened enough to harvest pollen.") || msg.equals("<col=06600c>The bush is already fruiting and won't benefit from <col=06600c>any more pollen.</col>")) { if (activeFlowers.contains(lastInteractFlower)) { log.debug("Flowers reset"); activeFlowers.clear(); } } }
@Test
public void testMushrooms() {
    // A "You get some mushrooms." spam message must start a woodcutting session.
    final ChatMessage message = new ChatMessage(null, ChatMessageType.SPAM, "", "You get some mushrooms.", "", 0);
    woodcuttingPlugin.onChatMessage(message);
    assertNotNull(woodcuttingPlugin.getSession());
}
/**
 * Returns an immutable point-in-time copy of the current checkpoint counter values.
 */
CheckpointStatsCounts createSnapshot() {
    return new CheckpointStatsCounts(
            numRestoredCheckpoints,
            numTotalCheckpoints,
            numInProgressCheckpoints,
            numCompletedCheckpoints,
            numFailedCheckpoints);
}
// Verifies that createSnapshot() captures the counter values at call time and that
// the snapshot is isolated: further increments on the live CheckpointStatsCounts
// must not change any value observed through the snapshot.
@Test void testCreateSnapshot() { CheckpointStatsCounts counts = new CheckpointStatsCounts(); counts.incrementRestoredCheckpoints(); counts.incrementRestoredCheckpoints(); counts.incrementRestoredCheckpoints(); counts.incrementInProgressCheckpoints(); counts.incrementCompletedCheckpoints(); counts.incrementInProgressCheckpoints(); counts.incrementCompletedCheckpoints(); counts.incrementInProgressCheckpoints(); counts.incrementCompletedCheckpoints(); counts.incrementInProgressCheckpoints(); counts.incrementCompletedCheckpoints(); counts.incrementInProgressCheckpoints(); counts.incrementFailedCheckpoints(); long restored = counts.getNumberOfRestoredCheckpoints(); long total = counts.getTotalNumberOfCheckpoints(); long inProgress = counts.getNumberOfInProgressCheckpoints(); long completed = counts.getNumberOfCompletedCheckpoints(); long failed = counts.getNumberOfFailedCheckpoints(); CheckpointStatsCounts snapshot = counts.createSnapshot(); assertThat(snapshot.getNumberOfRestoredCheckpoints()).isEqualTo(restored); assertThat(snapshot.getTotalNumberOfCheckpoints()).isEqualTo(total); assertThat(snapshot.getNumberOfInProgressCheckpoints()).isEqualTo(inProgress); assertThat(snapshot.getNumberOfCompletedCheckpoints()).isEqualTo(completed); assertThat(snapshot.getNumberOfFailedCheckpoints()).isEqualTo(failed); // Update the original counts.incrementRestoredCheckpoints(); counts.incrementRestoredCheckpoints(); counts.incrementInProgressCheckpoints(); counts.incrementCompletedCheckpoints(); counts.incrementInProgressCheckpoints(); counts.incrementFailedCheckpoints(); assertThat(snapshot.getNumberOfRestoredCheckpoints()).isEqualTo(restored); assertThat(snapshot.getTotalNumberOfCheckpoints()).isEqualTo(total); assertThat(snapshot.getNumberOfInProgressCheckpoints()).isEqualTo(inProgress); assertThat(snapshot.getNumberOfCompletedCheckpoints()).isEqualTo(completed); assertThat(snapshot.getNumberOfFailedCheckpoints()).isEqualTo(failed); }
/**
 * Derives the VIP channel address of a broker: same host, port decreased by 2.
 * When {@code isChange} is false the address is returned untouched.
 *
 * @param isChange   whether to rewrite the port to the VIP port
 * @param brokerAddr broker address in {@code host:port} form
 * @return {@code host:(port-2)} when {@code isChange} is true, otherwise {@code brokerAddr}
 */
public static String brokerVIPChannel(final boolean isChange, final String brokerAddr) {
    if (!isChange) {
        return brokerAddr;
    }
    // Split on the last ':' so IPv6-style hosts with embedded colons keep working.
    final int separator = brokerAddr.lastIndexOf(":");
    final String host = brokerAddr.substring(0, separator);
    final int vipPort = Integer.parseInt(brokerAddr.substring(separator + 1)) - 2;
    return host + ":" + vipPort;
}
@Test
public void testBrokerVIPChannel() {
    // The VIP channel port is the broker port shifted down by 2.
    final String vipAddr = MixAll.brokerVIPChannel(true, "127.0.0.1:10911");
    assertThat(vipAddr).isEqualTo("127.0.0.1:10909");
}
/**
 * Decides whether an issue on a component passes the exclusion/inclusion filters.
 * Non-file components, or an empty filter configuration, are always accepted;
 * otherwise exclusions win over inclusions.
 */
public boolean accept(DefaultIssue issue, Component component) {
    if (component.getType() != FILE) {
        return true;
    }
    if (exclusionPatterns.isEmpty() && inclusionPatterns.isEmpty()) {
        return true;
    }
    return !isExclude(issue, component) && isInclude(issue, component);
}
@Test
public void include_many_rules() {
    // Two inclusion rules sharing the same path pattern: only File1 components are accepted.
    final IssueFilter filter = newIssueFilter(newSettings(
        Collections.emptyList(),
        asList("xoo:x1", "**/xoo/File1*", "xoo:x2", "**/xoo/File1*")));
    assertThat(filter.accept(ISSUE_1, COMPONENT_1)).isTrue();
    assertThat(filter.accept(ISSUE_1, COMPONENT_2)).isFalse();
    assertThat(filter.accept(ISSUE_2, COMPONENT_1)).isTrue();
    assertThat(filter.accept(ISSUE_2, COMPONENT_2)).isFalse();
}
/**
 * Creates a new, empty {@link Builder} for constructing instances of this type.
 *
 * @return a fresh builder instance
 */
public static Builder builder() { return new Builder(); }
@Test
public void testEqualsAndHashCode() {
    // Two builders with identical state must yield equal objects with equal hash codes,
    // so a HashSet deduplicates them to a single element.
    final AppAuthData first = AppAuthData.builder()
            .appKey("appKey").appSecret("appSecret").enabled(true).open(true)
            .pathDataList(new ArrayList<>(0)).paramDataList(new ArrayList<>(0))
            .build();
    final AppAuthData second = AppAuthData.builder()
            .appKey("appKey").appSecret("appSecret").enabled(true).open(true)
            .pathDataList(new ArrayList<>(0)).paramDataList(new ArrayList<>(0))
            .build();
    final Set<AppAuthData> deduplicated = new HashSet<>();
    deduplicated.add(first);
    deduplicated.add(second);
    assertThat(deduplicated, hasSize(1));
}
/**
 * Reads a byte[] field, routing through incompatible-field handling so a field
 * stored under a different type is resolved against the BYTE_ARRAY kind.
 */
@Override
@Nullable
public byte[] readByteArray(@Nonnull String fieldName) throws IOException {
    return readIncompatibleField(fieldName, BYTE_ARRAY, name -> super.readByteArray(name));
}
// Reading the "byte" field via readByteArray against an incompatible stored type
// must raise IncompatibleClassChangeError.
// NOTE(review): the method name says DoubleArray but the body exercises
// readByteArray — this looks like a copy-paste naming slip; confirm against the
// sibling tests before renaming (a testReadByteArray_IncompatibleClass may already exist).
@Test(expected = IncompatibleClassChangeError.class) public void testReadDoubleArray_IncompatibleClass() throws Exception { reader.readByteArray("byte"); }
// Rewrites every persistent id of this OLAP table for a restore: allocates a fresh
// table id, remaps all materialized-index ids (keeping indexNameToId/indexIdToMeta
// consistent and tracking the new base index id), then remaps partition ids in the
// partition info maps (range-partitioned and single-partitioned paths are handled
// separately but symmetrically), reassigns physical-partition ids and their
// parent links, forces the replication number to restoreReplicationNum, and finally
// recreates tablets for every index of every physical partition in the original
// tablet order via createTabletsForRestore. Returns the first non-OK status from
// tablet creation, or Status.OK.
// NOTE(review): the single-partition branch draws newPartId once OUTSIDE the loop —
// presumably safe because a single-partitioned table has exactly one entry; verify.
// NOTE(review): this row is whitespace-flattened source (inline // comments swallow
// the rest of their physical line here); kept byte-identical below.
public Status resetIdsForRestore(GlobalStateMgr globalStateMgr, Database db, int restoreReplicationNum, MvRestoreContext mvRestoreContext) { // copy an origin index id to name map Map<Long, String> origIdxIdToName = Maps.newHashMap(); for (Map.Entry<String, Long> entry : indexNameToId.entrySet()) { origIdxIdToName.put(entry.getValue(), entry.getKey()); } // reset table id setId(globalStateMgr.getNextId()); // reset all 'indexIdToXXX' map for (Map.Entry<Long, String> entry : origIdxIdToName.entrySet()) { long newIdxId = globalStateMgr.getNextId(); if (entry.getValue().equals(name)) { // base index baseIndexId = newIdxId; } indexIdToMeta.put(newIdxId, indexIdToMeta.remove(entry.getKey())); indexIdToMeta.get(newIdxId).setIndexIdForRestore(newIdxId); indexNameToId.put(entry.getValue(), newIdxId); } // generate a partition name to id map Map<String, Long> origPartNameToId = Maps.newHashMap(); for (Partition partition : idToPartition.values()) { origPartNameToId.put(partition.getName(), partition.getId()); LOG.info("partition id {} sub partition {}", partition.getId(), partition.getSubPartitions()); } // reset partition info and idToPartition map if (partitionInfo.isRangePartition()) { RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo; for (Map.Entry<String, Long> entry : origPartNameToId.entrySet()) { long newPartId = globalStateMgr.getNextId(); rangePartitionInfo.idToDataProperty.put(newPartId, rangePartitionInfo.idToDataProperty.remove(entry.getValue())); rangePartitionInfo.idToReplicationNum.remove(entry.getValue()); rangePartitionInfo.idToReplicationNum.put(newPartId, (short) restoreReplicationNum); rangePartitionInfo.getIdToRange(false).put(newPartId, rangePartitionInfo.getIdToRange(false).remove(entry.getValue())); rangePartitionInfo.idToInMemory .put(newPartId, rangePartitionInfo.idToInMemory.remove(entry.getValue())); idToPartition.get(entry.getValue()).getSubPartitions().forEach(physicalPartition -> { 
physicalPartitionIdToPartitionId.remove(physicalPartition.getId()); physicalPartitionNameToPartitionId.remove(physicalPartition.getName()); }); idToPartition.put(newPartId, idToPartition.remove(entry.getValue())); Partition partition = idToPartition.get(newPartId); partition.setIdForRestore(newPartId); partition.getSubPartitions().forEach(physicalPartition -> { if (physicalPartition.getId() != newPartId) { partition.removeSubPartition(physicalPartition.getId()); physicalPartition.setIdForRestore(globalStateMgr.getNextId()); physicalPartition.setParentId(newPartId); partition.addSubPartition(physicalPartition); } physicalPartitionIdToPartitionId.put(physicalPartition.getId(), newPartId); physicalPartitionNameToPartitionId.put(physicalPartition.getName(), newPartId); }); } } else { // Single partitioned long newPartId = globalStateMgr.getNextId(); for (Map.Entry<String, Long> entry : origPartNameToId.entrySet()) { partitionInfo.idToDataProperty.put(newPartId, partitionInfo.idToDataProperty.remove(entry.getValue())); partitionInfo.idToReplicationNum.remove(entry.getValue()); partitionInfo.idToReplicationNum.put(newPartId, (short) restoreReplicationNum); partitionInfo.idToInMemory.put(newPartId, partitionInfo.idToInMemory.remove(entry.getValue())); idToPartition.get(entry.getValue()).getSubPartitions().forEach(physicalPartition -> { physicalPartitionIdToPartitionId.remove(physicalPartition.getId()); physicalPartitionNameToPartitionId.remove(physicalPartition.getName()); }); idToPartition.put(newPartId, idToPartition.remove(entry.getValue())); Partition partition = idToPartition.get(newPartId); partition.setIdForRestore(newPartId); partition.getSubPartitions().forEach(physicalPartition -> { if (physicalPartition.getId() != newPartId) { partition.removeSubPartition(physicalPartition.getId()); physicalPartition.setIdForRestore(globalStateMgr.getNextId()); physicalPartition.setParentId(newPartId); partition.addSubPartition(physicalPartition); } 
physicalPartitionIdToPartitionId.put(physicalPartition.getId(), newPartId); physicalPartitionNameToPartitionId.put(physicalPartition.getName(), newPartId); }); } } // reset replication number for olaptable setReplicationNum((short) restoreReplicationNum); // for each partition, reset rollup index map for (Map.Entry<Long, Partition> entry : idToPartition.entrySet()) { Partition partition = entry.getValue(); for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { for (Map.Entry<Long, String> entry2 : origIdxIdToName.entrySet()) { System.out.println("entry2.getValue():" + entry2.getValue() + " baseIndex: " + physicalPartition.getBaseIndex()); MaterializedIndex idx = physicalPartition.getIndex(entry2.getKey()); long newIdxId = indexNameToId.get(entry2.getValue()); int schemaHash = indexIdToMeta.get(newIdxId).getSchemaHash(); idx.setIdForRestore(newIdxId); if (newIdxId != baseIndexId) { // not base table, reset physicalPartition.deleteRollupIndex(entry2.getKey()); physicalPartition.createRollupIndex(idx); } // generate new tablets in origin tablet order int tabletNum = idx.getTablets().size(); idx.clearTabletsForRestore(); Status status = createTabletsForRestore(tabletNum, idx, globalStateMgr, partitionInfo.getReplicationNum(entry.getKey()), physicalPartition.getVisibleVersion(), schemaHash, physicalPartition.getId(), physicalPartition.getShardGroupId(), db); if (!status.ok()) { return status; } } } } return Status.OK; }
// Smoke test for OlapTable.resetIdsForRestore: builds a test database, mocks
// GlobalStateMgr.getNextId() to always return 0, and runs the id reset over every
// OLAP table with replication number 3. No assertions — it only checks the reset
// completes without throwing under a constant id generator.
@Test public void testSetIdForRestore() { Database db = UnitTestUtil.createDb(1, 2, 3, 4, 5, 6, 7, KeysType.AGG_KEYS); List<Table> tables = db.getTables(); final long id = 0; new MockUp<GlobalStateMgr>() { @Mock long getNextId() { return id; } }; for (Table table : tables) { if (table.getType() != TableType.OLAP) { continue; } ((OlapTable) table).resetIdsForRestore(GlobalStateMgr.getCurrentState(), db, 3, new MvRestoreContext()); } }
/**
 * Creates a TableSchema backed by the given columns, preserving their order.
 *
 * @param columns the schema columns
 * @return a new AutoValue-generated TableSchema instance
 */
public static TableSchema of(Column... columns) { return new AutoValue_TableSchema(Arrays.asList(columns)); }
@Test
public void testParseEnum16() {
    // Enum16 values may be negative, zero, or positive.
    final Map<String, Integer> values = ImmutableMap.of("a", -1, "b", 0, "c", 42);
    assertEquals(
        ColumnType.enum16(values),
        ColumnType.parse("Enum16('a' = -1, 'b' = 0, 'c' = 42)"));
}
// Maven mojo entry point: for each configured ServiceNow object it assembles the
// connection/auth parameters plus per-object field include/exclude settings, asks a
// throwaway ServiceNowComponent's MetaDataExtension for the object's JSON schema,
// validates it, and generates the corresponding bean source. Throws
// MojoExecutionException when no metadata payload can be obtained for an object.
@Override public void execute() throws MojoExecutionException, MojoFailureException { final DefaultCamelContext context = new DefaultCamelContext(); final ServiceNowComponent component = new ServiceNowComponent(context); for (String objectName : objects) { Map<String, Object> parameters = new HashMap<>(); parameters.put("instanceName", instanceName); parameters.put("userName", userName); parameters.put("password", userPassword); parameters.put("oauthClientId", oauthClientId); parameters.put("oauthClientSecret", oauthClientSecret); parameters.put("objectType", "table"); parameters.put("objectName", objectName); for (Map.Entry<String, String> entry : fields.entrySet()) { parameters.put("object." + entry.getKey() + ".fields", entry.getValue()); } for (Map.Entry<String, String> entry : fieldsExcludePattern.entrySet()) { parameters.put("object." + entry.getKey() + ".fields.exclude.pattern", entry.getValue()); } JsonNode schema = component.getExtension(MetaDataExtension.class) .flatMap(e -> e.meta(parameters)) .flatMap(m -> Optional.ofNullable(m.getPayload(JsonNode.class))) .orElseThrow(() -> new MojoExecutionException("Unable to get grab MetaData for object: " + objectName)); validateSchema(schema); generateBean(objectName, schema); } }
@Test
public void testExecute() throws Exception {
    // Configure the mojo for a single "incident" object with one field and a sys_* exclusion.
    final CamelServiceNowGenerateMojo mojo = createMojo();
    mojo.objects = Arrays.asList("incident");
    mojo.fields = Collections.singletonMap("incident", "sys_id");
    mojo.fieldsExcludePattern = Collections.singletonMap("incident", "^sys_.*$");

    mojo.execute();

    // Bean generation must leave a non-empty output directory behind.
    assertTrue(mojo.outputDirectory.exists(), "Output directory was not created");
    assertTrue(mojo.outputDirectory.list().length > 0, "Output directory is empty");
}
/**
 * Right-pads {@code input} with repeated copies of {@code padding} up to exactly
 * {@code targetLen} characters, truncating when the input is already longer.
 * Returns null for a null input, a null/empty padding, or a null/negative target length.
 */
@Udf
public String rpad(
    @UdfParameter(description = "String to be padded") final String input,
    @UdfParameter(description = "Target length") final Integer targetLen,
    @UdfParameter(description = "Padding string") final String padding) {
  if (input == null) {
    return null;
  }
  if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) {
    return null;
  }
  // Append whole copies of the padding until the deficit is covered, then trim to
  // the exact target length (this also truncates an over-long input).
  final StringBuilder padded = new StringBuilder(targetLen + padding.length());
  padded.append(input);
  final int deficit = Math.max(targetLen - input.length(), 0);
  for (int appended = 0; appended < deficit; appended += padding.length()) {
    padded.append(padding);
  }
  padded.setLength(targetLen);
  return padded.toString();
}
@Test
public void shouldReturnNullForNullPaddingBytes() {
    // Null padding yields a null result regardless of input and target length.
    final ByteBuffer padded = udf.rpad(BYTES_123, 4, null);
    assertThat(padded, is(nullValue()));
}
public void storeNewShortIds( final ReportWorkItemStatusRequest request, final ReportWorkItemStatusResponse reply) { checkArgument( request.getWorkItemStatuses() != null && reply.getWorkItemServiceStates() != null && request.getWorkItemStatuses().size() == reply.getWorkItemServiceStates().size(), "RequestWorkItemStatus request and response are unbalanced, status: %s, states: %s", request.getWorkItemStatuses(), reply.getWorkItemServiceStates()); for (int i = 0; i < request.getWorkItemStatuses().size(); i++) { WorkItemServiceState state = reply.getWorkItemServiceStates().get(i); WorkItemStatus status = request.getWorkItemStatuses().get(i); if (state.getMetricShortId() == null) { continue; } checkArgument( status.getCounterUpdates() != null, "Response has shortids but no corresponding CounterUpdate"); for (MetricShortId shortIdMsg : state.getMetricShortId()) { int metricIndex = MoreObjects.firstNonNull(shortIdMsg.getMetricIndex(), 0); checkArgument( metricIndex < status.getCounterUpdates().size(), "Received aggregate index outside range of sent update %s >= %s", shortIdMsg.getMetricIndex(), status.getCounterUpdates().size()); CounterUpdate update = status.getCounterUpdates().get(metricIndex); cache.insert(update, checkNotNull(shortIdMsg.getShortId(), "Shortid should be non-null")); } } }
@Test
public void testValidateAggregateIndexOutOfRange() {
    // One counter is sent, but the returned short-id points at metric index 1000.
    final CounterShortIdCache shortIdCache = new CounterShortIdCache();
    final ReportWorkItemStatusRequest statusRequest = new ReportWorkItemStatusRequest();
    final ReportWorkItemStatusResponse statusReply = new ReportWorkItemStatusResponse();
    statusRequest.setWorkItemStatuses(createWorkStatusNameAndKind(new String[] {"counter"}));
    statusReply.setWorkItemServiceStates(
        createWorkServiceState(new MetricShortId[] {createMetricShortId(1000, 1000L)}));

    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("Received aggregate index outside range of sent update");
    shortIdCache.storeNewShortIds(statusRequest, statusReply);
}
/**
 * Loads config data from Polaris for the given resource. Failures are logged;
 * optional resources degrade to {@code null} while mandatory ones rethrow as
 * {@link ConfigDataResourceNotFoundException}.
 */
@Override
public ConfigData load(ConfigDataLoaderContext context, PolarisConfigDataResource resource)
        throws ConfigDataResourceNotFoundException {
    try {
        return load(context.getBootstrapContext(), resource);
    }
    catch (Exception cause) {
        log.warn("Error getting properties from polaris: " + resource, cause);
        if (resource.isOptional()) {
            // Optional resources contribute no data instead of failing startup.
            return null;
        }
        throw new ConfigDataResourceNotFoundException(resource, cause);
    }
}
// Verifies PolarisConfigDataLoader.load() for a custom config group/file with no
// active profile: the internal application/bootstrap files are mocked empty, a
// custom group1/file1.properties carries k1..k3, the loader-state flags are reset,
// and the resulting property sources (merged through a CompositePropertySource)
// must expose k1=v1, k2=v2, k3=v3.
// NOTE(review): this row is whitespace-flattened source — the trailing "// " on the
// first physical line originally introduced a "file1.properties" comment; kept
// byte-identical below.
@Test public void loadConfigDataCustomConfigFilesTestWithProfile() { try (MockedStatic<ConfigFileServiceFactory> mockedStatic = mockStatic(ConfigFileServiceFactory.class)) { ConfigDataLoaderContext context = mock(ConfigDataLoaderContext.class); PolarisConfigDataResource polarisConfigDataResource = mock(PolarisConfigDataResource.class); ConfigurableBootstrapContext bootstrapContext = mock(ConfigurableBootstrapContext.class); PolarisConfigProperties polarisConfigProperties = mock(PolarisConfigProperties.class); PolarisContextProperties polarisContextProperties = mock(PolarisContextProperties.class); ConfigFileService configFileService = mock(ConfigFileService.class); Profiles profiles = mock(Profiles.class); Map<String, Object> emptyMap = new HashMap<>(); ConfigKVFile emptyConfigFile = new MockedConfigKVFile(emptyMap); when(configFileService.getConfigPropertiesFile(testNamespace, testServiceName, "application.properties")).thenReturn(emptyConfigFile); when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "application.yml")).thenReturn(emptyConfigFile); when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "application.yaml")).thenReturn(emptyConfigFile); when(configFileService.getConfigPropertiesFile(testNamespace, testServiceName, "bootstrap.properties")).thenReturn(emptyConfigFile); when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "bootstrap.yml")).thenReturn(emptyConfigFile); when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "bootstrap.yaml")).thenReturn(emptyConfigFile); String customGroup = "group1"; String customFile1 = "file1.properties"; when(polarisConfigDataResource.getFileName()).thenReturn(customFile1); when(polarisConfigDataResource.getGroupName()).thenReturn(customGroup); when(polarisConfigProperties.getGroups()).thenReturn(null); when(polarisConfigProperties.isInternalEnabled()).thenReturn(true); when(profiles.getActive()).thenReturn(Lists.newArrayList()); // 
file1.properties Map<String, Object> file1Map = new HashMap<>(); file1Map.put("k1", "v1"); file1Map.put("k2", "v2"); file1Map.put("k3", "v3"); ConfigKVFile file1 = new MockedConfigKVFile(file1Map); when(configFileService.getConfigPropertiesFile(testNamespace, customGroup, customFile1)).thenReturn(file1); when(context.getBootstrapContext()).thenReturn(bootstrapContext); when(polarisContextProperties.getNamespace()).thenReturn(testNamespace); when(polarisContextProperties.getService()).thenReturn(testServiceName); when(polarisConfigProperties.getGroups()).thenReturn(null); when(profiles.getActive()).thenReturn(Lists.newArrayList()); PolarisConfigDataLoader polarisConfigDataLoader = new PolarisConfigDataLoader(new DeferredLogs()); if (INTERNAL_CONFIG_FILES_LOADED.get()) { INTERNAL_CONFIG_FILES_LOADED.compareAndSet(true, false); } if (CUSTOM_POLARIS_CONFIG_FILE_LOADED.get()) { CUSTOM_POLARIS_CONFIG_FILE_LOADED.compareAndSet(true, false); } when(polarisConfigDataResource.getPolarisConfigProperties()).thenReturn(polarisConfigProperties); when(polarisConfigDataResource.getPolarisContextProperties()).thenReturn(polarisContextProperties); when(polarisConfigDataResource.getServiceName()).thenReturn(testServiceName); when(polarisConfigDataResource.getProfiles()).thenReturn(profiles); mockedStatic.when(() -> { ConfigFileServiceFactory.createConfigFileService(any(SDKContext.class)); }).thenReturn(configFileService); ConfigData configData = polarisConfigDataLoader.load(context, polarisConfigDataResource); List<PropertySource<?>> propertySources = configData.getPropertySources(); CompositePropertySource compositePropertySource = new CompositePropertySource(polarisConfigPropertySourceName); propertySources.forEach(compositePropertySource::addPropertySource); assertThat(compositePropertySource.getProperty("k1")).isEqualTo("v1"); assertThat(compositePropertySource.getProperty("k2")).isEqualTo("v2"); assertThat(compositePropertySource.getProperty("k3")).isEqualTo("v3"); } }
/**
 * Returns the Stitch configuration backing this endpoint.
 */
public StitchConfiguration getConfiguration() { return configuration; }
@Test
void testNormalProperties() {
    // Every query parameter of the endpoint URI must surface in the configuration.
    final StitchEndpoint endpoint = context.getEndpoint(
        "stitch:my_table?token=mytoken&region=north_america", StitchEndpoint.class);
    final StitchConfiguration config = endpoint.getConfiguration();
    assertEquals("my_table", config.getTableName());
    assertEquals("mytoken", config.getToken());
    assertEquals(StitchRegion.NORTH_AMERICA, config.getRegion());
}
// Maps endpoint exceptions onto HTTP failures on the routing context. For a
// CompletionException the cause is unwrapped and logged, then:
//   - KsqlStatementException  -> 400 with the unlogged message + SQL statement
//   - KsqlRateLimitException  -> 429 with ERROR_CODE_TOO_MANY_REQUESTS
//   - KsqlApiException        -> 400, passed through as-is
// Any other cause — and any non-CompletionException throwable (logged directly) —
// falls through to a generic 500 that deliberately hides the internal error message
// from the public API. Note the fall-through: the three matched branches return
// early; everything else reaches the final routingContext.fail(...).
static Void handleEndpointException( final Throwable t, final RoutingContext routingContext, final String logMsg) { if (t instanceof CompletionException) { final Throwable actual = t.getCause(); log.error(logMsg, actual); if (actual instanceof KsqlStatementException) { routingContext.fail( BAD_REQUEST.code(), new KsqlApiException( ((KsqlStatementException) actual).getUnloggedMessage(), ERROR_CODE_BAD_STATEMENT, ((KsqlStatementException) actual).getSqlStatement() ) ); return null; } else if (actual instanceof KsqlRateLimitException) { routingContext.fail(TOO_MANY_REQUESTS.code(), new KsqlApiException(actual.getMessage(), ERROR_CODE_TOO_MANY_REQUESTS)); return null; } else if (actual instanceof KsqlApiException) { routingContext.fail(BAD_REQUEST.code(), actual); return null; } } else { log.error(logMsg, t); } // We don't expose internal error message via public API routingContext.fail(INTERNAL_SERVER_ERROR.code(), new KsqlApiException( "The server encountered an internal error when processing the query." + " Please consult the server logs for more information.", ERROR_CODE_SERVER_ERROR)); return null; }
@Test
public void testHandleServerRateLimitException() {
    // Rate-limit failures must map to 429 with ERROR_CODE_TOO_MANY_REQUESTS.
    final String limitMessage = "Host is at rate limit for pull queries. Currently set to 1 QPS";
    final CompletionException wrapped =
        new CompletionException(new KsqlRateLimitException(limitMessage));

    ServerUtils.handleEndpointException(wrapped, routingContext, "test msg");

    verify(routingContext).fail(eq(TOO_MANY_REQUESTS.code()), argThat(failure -> {
        assertThat(failure.getMessage(), equalTo(limitMessage));
        assertThat(((KsqlApiException) failure).getErrorCode(), equalTo(ERROR_CODE_TOO_MANY_REQUESTS));
        return true;
    }));
}
/**
 * Strips a trailing "[...]" argument suffix from an attribute name:
 * "car.wheel[front]" becomes "car.wheel"; names without brackets pass through unchanged.
 *
 * @throws IllegalArgumentException for malformed bracket usage (e.g. lone or reversed brackets)
 */
public static String extractAttributeNameNameWithoutArguments(String attributeNameWithArguments) {
    final int open = StringUtil.lastIndexOf(attributeNameWithArguments, '[');
    final int close = StringUtil.lastIndexOf(attributeNameWithArguments, ']');
    // Well-formed suffix: '[' after at least one name character, ']' after the '['.
    if (open > 0 && close > open) {
        return attributeNameWithArguments.substring(0, open);
    }
    // No brackets at all: nothing to strip.
    if (open < 0 && close < 0) {
        return attributeNameWithArguments;
    }
    throw new IllegalArgumentException("Wrong argument input passed " + attributeNameWithArguments);
}
@Test
public void extractAttributeName_correctArguments() {
    // Bracketed suffixes of any content (including empty) are stripped;
    // a plain name passes through untouched.
    final String expected = "car.wheel";
    assertEquals(expected, extractAttributeNameNameWithoutArguments("car.wheel[left-front]"));
    assertEquals(expected, extractAttributeNameNameWithoutArguments("car.wheel[123]"));
    assertEquals(expected, extractAttributeNameNameWithoutArguments("car.wheel[.';'.]"));
    assertEquals(expected, extractAttributeNameNameWithoutArguments("car.wheel[]"));
    assertEquals(expected, extractAttributeNameNameWithoutArguments("car.wheel"));
}
/**
 * Translates the user-supplied list-transactions options into a wire-level
 * request builder for the given broker. The {@code keys} are not consulted here:
 * the same filters apply to every broker.
 */
@Override
public ListTransactionsRequest.Builder buildBatchedRequest(
    int brokerId,
    Set<AllBrokersStrategy.BrokerKey> keys
) {
    ListTransactionsRequestData data = new ListTransactionsRequestData();
    data.setProducerIdFilters(new ArrayList<>(options.filteredProducerIds()));
    final List<String> stateNames = options.filteredStates().stream()
        .map(TransactionState::toString)
        .collect(Collectors.toList());
    data.setStateFilters(stateNames);
    data.setDurationFilter(options.filteredDuration());
    return new ListTransactionsRequest.Builder(data);
}
// Exercises the durationFilter across request versions: (1) defaults to -1 on both
// v0 and v1, (2) a positive filter is carried on v1, (3) building a v0 request with
// a non-default duration filter throws UnsupportedVersionException, and (4) resetting
// the filter to -1 makes v0 build succeed again.
@Test public void testBuildRequestWithDurationFilter() { int brokerId = 1; BrokerKey brokerKey = new BrokerKey(OptionalInt.of(brokerId)); ListTransactionsOptions options = new ListTransactionsOptions(); ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext); // case 1: check the default value (-1L) for durationFilter ListTransactionsRequest request = handler.buildBatchedRequest(brokerId, singleton(brokerKey)).build((short) 1); assertEquals(-1L, request.data().durationFilter()); request = handler.buildBatchedRequest(brokerId, singleton(brokerKey)).build((short) 0); assertEquals(-1L, request.data().durationFilter()); // case 2: able to set a valid duration filter when using API version 1 options.filterOnDuration(10L); request = handler.buildBatchedRequest(brokerId, singleton(brokerKey)).build((short) 1); assertEquals(10L, request.data().durationFilter()); assertEquals(Collections.emptyList(), request.data().producerIdFilters()); // case 3: unable to set a valid duration filter when using API version 0 assertThrows(UnsupportedVersionException.class, () -> handler.buildBatchedRequest(brokerId, singleton(brokerKey)).build((short) 0)); // case 4: able to set duration filter to -1L when using API version 0 options.filterOnDuration(-1L); ListTransactionsRequest request1 = handler.buildBatchedRequest(brokerId, singleton(brokerKey)).build((short) 0); assertEquals(-1L, request1.data().durationFilter()); }
/**
 * A log table counts as defined only when it has both a database connection
 * and a non-empty table name.
 */
boolean isLogTableDefined( TransLogTable logTable ) {
  if ( logTable.getDatabaseMeta() == null ) {
    return false;
  }
  return !Utils.isEmpty( logTable.getTableName() );
}
@Test
public void testIsLogTableDefinedLogTableNotDefined() {
  // A connection alone is not enough: without a table name the log table is undefined.
  final DatabaseMeta connection = mock( DatabaseMeta.class );
  doReturn( connection ).when( transLogTable ).getDatabaseMeta();

  assertFalse( delegate.isLogTableDefined( transLogTable ) );
}
/**
 * Projects {@code record}, written with {@code source}, into {@code target} after a
 * compatibility check. When an optional source is projected onto a required target,
 * a null record is replaced by the target's default value; a required target without
 * a default cannot accept a null record.
 *
 * @throws SchemaProjectorException if the schemas are incompatible or no default exists
 */
public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException {
    checkMaybeCompatible(source, target);
    final boolean optionalToRequired = source.isOptional() && !target.isOptional();
    if (!optionalToRequired) {
        // Nullability is compatible: nulls pass straight through.
        return record == null ? null : projectRequiredSchema(source, record, target);
    }
    if (target.defaultValue() == null) {
        throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value.");
    }
    // Required target with a default: substitute it for a missing value.
    return record == null ? target.defaultValue() : projectRequiredSchema(source, record, target);
}
@Test
public void testMaybeCompatible() {
    final Schema source = SchemaBuilder.int32().name("source").build();
    final Schema target = SchemaBuilder.int32().name("target").build();
    // Mismatched schema names are incompatible.
    assertThrows(SchemaProjectorException.class,
        () -> SchemaProjector.project(source, 12, target),
        "Source name and target name mismatch.");

    // Mismatched schema parameters are incompatible too.
    final Schema targetWithParameters =
        SchemaBuilder.int32().parameters(Collections.singletonMap("key", "value"));
    assertThrows(SchemaProjectorException.class,
        () -> SchemaProjector.project(source, 34, targetWithParameters),
        "Source parameters and target parameters mismatch.");
}
/**
 * Removes the given issues of a project from the issue index.
 */
@Override
public void onIssuesRemoval(String projectUuid, List<String> issueKeys) {
    issueIndexer.deleteByKeys(projectUuid, issueKeys);
}
@Test
public void test_onIssuesRemoval() {
    // The removal request must be forwarded verbatim to the indexer.
    underTest.onIssuesRemoval("P1", asList("ISSUE1", "ISSUE2"));

    verify(issueIndexer).deleteByKeys("P1", asList("ISSUE1", "ISSUE2"));
}
// Copies every field of the given Flow (including the deprecated listeners field,
// hence the suppression) into a FlowWithSource, attaching the provided raw source
// text. NOTE(review): any field added to Flow must also be added here, or it will
// silently be dropped during the conversion — the paired test guards this with
// equalsWithoutRevision.
@SuppressWarnings("deprecation") public static FlowWithSource of(Flow flow, String source) { return FlowWithSource.builder() .tenantId(flow.tenantId) .id(flow.id) .namespace(flow.namespace) .revision(flow.revision) .description(flow.description) .labels(flow.labels) .inputs(flow.inputs) .outputs(flow.outputs) .variables(flow.variables) .tasks(flow.tasks) .errors(flow.errors) .listeners(flow.listeners) .triggers(flow.triggers) .pluginDefaults(flow.pluginDefaults) .disabled(flow.disabled) .deleted(flow.deleted) .source(source) .concurrency(flow.concurrency) .build(); }
// Builds a Flow populating every copyable field (labels, inputs, variables, tasks,
// errors, deprecated listeners, triggers, plugin defaults, concurrency) and checks
// that FlowWithSource.of() transmits all of them (equalsWithoutRevision) and keeps
// the provided source text verbatim, including the appended comment.
@SuppressWarnings("deprecation") @Test void of() { // test that all fields are transmitted to FlowWithSource Flow flow = Flow.builder() .tenantId("tenantId") .id(IdUtils.create()) .namespace("io.kestra.unittest") .description("description") .labels(List.of( new Label("key", "value") )) .inputs(List.of( StringInput.builder().id("strInput").build() )) .variables(Map.of( "varKey", "varValue" )) .tasks(List.of( Log.builder() .id(IdUtils.create()) .type(Log.class.getName()) .message("Hello World") .build() )) .errors(List.of( Log.builder() .id(IdUtils.create()) .type(Log.class.getName()) .message("Error") .build() )) .listeners(List.of( Listener.builder() .conditions(List.of(ExpressionCondition.builder().expression("true").build())) .build() )) .triggers(List.of( Schedule.builder().id("schedule").cron("0 1 9 * * *").build() )) .pluginDefaults(List.of( PluginDefault.builder() .type(Log.class.getName()) .forced(true) .values(Map.of( "message", "Default message" )) .build() )) .concurrency( Concurrency.builder() .behavior(Concurrency.Behavior.CANCEL) .limit(2) .build() ) .build(); String expectedSource = flow.generateSource() + " # additional comment"; FlowWithSource of = FlowWithSource.of(flow, expectedSource); assertThat(of.equalsWithoutRevision(flow), is(true)); assertThat(of.getSource(), is(expectedSource)); }
@Override public void calculate(TradePriceCalculateReqBO param, TradePriceCalculateRespBO result) { if (param.getDeliveryType() == null) { return; } // TODO @puhui999:需要校验,是不是存在商品不能门店自提,或者不能快递发货的情况。就是说,配送方式不匹配哈 if (DeliveryTypeEnum.PICK_UP.getType().equals(param.getDeliveryType())) { calculateByPickUp(param); } else if (DeliveryTypeEnum.EXPRESS.getType().equals(param.getDeliveryType())) { calculateExpress(param, result); } }
/**
 * Verifies the per-item ("by count") shipping charge when the order does NOT
 * qualify for free shipping: the fee is split across the selected items.
 */
@Test
@DisplayName("按件计算运费不包邮的情况")
public void testCalculate_expressTemplateCharge() {
    // SKU 1 : 100 * 2 = 200
    // SKU 2 : 200 * 10 = 2000
    // shipping: first item 1000 + additional items 2000 = 3000
    // mock the template lookup
    when(deliveryExpressTemplateService.getExpressTemplateMapByIdsAndArea(eq(asSet(1L)), eq(10)))
        .thenReturn(MapUtil.of(1L, templateRespBO));
    // invoke
    calculator.calculate(reqBO, resultBO);
    // assert: overall price totals
    TradePriceCalculateRespBO.Price price = resultBO.getPrice();
    assertThat(price)
        .extracting("totalPrice", "discountPrice", "couponPrice", "pointPrice", "deliveryPrice", "payPrice")
        .containsExactly(2200, 0, 0, 0, 3000, 5200);
    assertThat(resultBO.getItems()).hasSize(3);
    // assert: SKU1
    assertThat(resultBO.getItems().get(0))
        .extracting("price", "count", "discountPrice", "couponPrice", "pointPrice", "deliveryPrice", "payPrice")
        .containsExactly(100, 2, 0, 0, 0, 500, 700);
    // assert: SKU2
    assertThat(resultBO.getItems().get(1))
        .extracting("price", "count", "discountPrice", "couponPrice", "pointPrice", "deliveryPrice", "payPrice")
        .containsExactly(200, 10, 0, 0, 0, 2500, 4500);
    // assert: SKU3 (not selected, so no delivery fee is apportioned to it)
    assertThat(resultBO.getItems().get(2))
        .extracting("price", "count", "discountPrice", "couponPrice", "pointPrice", "deliveryPrice", "payPrice")
        .containsExactly(300, 1, 0, 0, 0, 0, 300);
}
/**
 * Searches the given working directory with the configured default listing chunk size.
 *
 * @param workdir  directory to search under
 * @param regex    filename filter
 * @param listener progress listener notified while listing
 * @return the matching files
 * @throws BackgroundException on connectivity or protocol failure
 */
@Override
public AttributedList<Path> search(final Path workdir, final Filter<Path> regex, final ListProgressListener listener) throws BackgroundException {
    // Read the configured chunk size once, then delegate to the chunked variant.
    final int chunksize = new HostPreferences(session.getHost()).getInteger("sds.listing.chunksize");
    return this.search(workdir, regex, listener, chunksize);
}
/**
 * Verifies recursive search behavior: matches by full and partial (substring)
 * filename, scoping to a directory, a NotfoundException for a missing scope,
 * and that a hit in a subdirectory reports the correct parent.
 */
@Test
public void testSearch() throws Exception {
    // Use a decomposed (NFD) unicode name to also exercise normalization handling.
    final String name = new NFDNormalizer().normalize(String.format("ä%s", new AlphanumericRandomStringService().random())).toString();
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path directory = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path file = new SDSTouchFeature(session, nodeid).touch(new Path(directory, name, EnumSet.of(Path.Type.file)), new TransferStatus());
    final SDSSearchFeature feature = new SDSSearchFeature(session, nodeid);
    // Exact name, plus substring matches from the middle, head and a deeper scope.
    assertTrue(feature.search(room, new SearchFilter(name), new DisabledListProgressListener(), 1).contains(file));
    assertTrue(feature.search(room, new SearchFilter(StringUtils.substring(name, 2)), new DisabledListProgressListener(), 1).contains(file));
    assertTrue(feature.search(room, new SearchFilter(StringUtils.substring(name, 0, name.length() - 2)), new DisabledListProgressListener(), 1).contains(file));
    assertTrue(feature.search(directory, new SearchFilter(StringUtils.substring(name, 0, name.length() - 2)), new DisabledListProgressListener(), 1).contains(file));
    // Searching under a directory that does not exist must fail with NotfoundException.
    try {
        assertFalse(feature.search(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new SearchFilter(name), new DisabledListProgressListener(), 1).contains(file));
        fail();
    }
    catch(NotfoundException e) {
        //
    }
    // A sibling subdirectory must not contain the file.
    final Path subdir = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    assertNull(feature.search(subdir, new SearchFilter(name), new DisabledListProgressListener(), 1).find(new SimplePathPredicate(file)));
    // A file created in the subdirectory must be found from the parent scope with the correct parent path.
    final Path filesubdir = new SDSTouchFeature(session, nodeid).touch(new Path(subdir, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    {
        final AttributedList<Path> result = feature.search(directory, new SearchFilter(filesubdir.getName()), new DisabledListProgressListener(), 1);
        assertNotNull(result.find(new SimplePathPredicate(filesubdir)));
        assertEquals(subdir, result.find(new SimplePathPredicate(filesubdir)).getParent());
    }
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Notifies the elastic agent plugin that the job assigned to the given elastic agent
 * has completed, delegating to the version-specific extension resolved for the plugin.
 *
 * @param pluginId                    id of the elastic agent plugin to notify
 * @param elasticAgentId              id of the elastic agent that ran the job
 * @param jobIdentifier               identifies the completed job
 * @param elasticProfileConfiguration the job's elastic profile settings
 * @param clusterProfileConfiguration the associated cluster profile settings
 */
public void reportJobCompletion(String pluginId, String elasticAgentId, JobIdentifier jobIdentifier,
                                Map<String, String> elasticProfileConfiguration,
                                Map<String, String> clusterProfileConfiguration) {
    getVersionedElasticAgentExtension(pluginId).jobCompletion(pluginId, elasticAgentId, jobIdentifier, elasticProfileConfiguration, clusterProfileConfiguration);
}
/**
 * Verifies that reporting job completion submits exactly one request to the
 * elastic agent extension for the resolved plugin version.
 */
@Test
public void shouldMakeJobCompletionCall() {
    when(pluginManager.resolveExtensionVersion(PLUGIN_ID, ELASTIC_AGENT_EXTENSION, SUPPORTED_VERSIONS)).thenReturn("4.0");
    final String elasticAgentId = "ea1";
    final JobIdentifier jobIdentifier = new JobIdentifier("up42", 2, "Test", "up42_stage", "10", "up42_job");
    final Map<String, String> elasticProfileConfiguration = Map.of("Image", "alpine:latest");
    final Map<String, String> clusterProfileConfiguration = Map.of("ServerURL", "https://example.com/go");
    when(pluginManager.submitTo(eq(PLUGIN_ID), eq(ELASTIC_AGENT_EXTENSION), requestArgumentCaptor.capture())).thenReturn(DefaultGoPluginApiResponse.success(null));
    extension.reportJobCompletion(PLUGIN_ID, elasticAgentId, jobIdentifier, elasticProfileConfiguration, clusterProfileConfiguration);
    // Exactly one submission must reach the plugin.
    verify(pluginManager, times(1)).submitTo(eq(PLUGIN_ID), eq(ELASTIC_AGENT_EXTENSION), any(GoPluginApiRequest.class));
}
/**
 * Creates a ULID for the given message from its timestamp and sequence number.
 * Falls back to a plain timestamp-based ULID if sequence-aware creation fails,
 * so ULID generation never breaks message processing.
 *
 * @param message the message supplying timestamp and sequence number
 * @return the generated ULID string
 */
public String createULID(Message message) {
    checkTimestamp(message.getTimestamp().getMillis());
    try {
        return createULID(message.getTimestamp().getMillis(), message.getSequenceNr());
    } catch (Exception e) {
        // Deliberate broad catch: log and degrade to a timestamp-only ULID.
        LOG.error("Exception while creating ULID.", e);
        return ulid.nextULID(message.getTimestamp().getMillis());
    }
}
/**
 * Verifies that ULIDs generated for the same timestamp sort in sequence-number
 * order, including sequence values whose int cast is negative (wrap-around).
 */
@Test
public void testUlidSorting() {
    final MessageULIDGenerator generator = new MessageULIDGenerator(new ULID());
    final long ts = Tools.nowUTC().getMillis();
    // Includes values above Integer.MAX_VALUE that become negative when cast to int.
    final long[] wrappedLongs = new long[]{0, 1, Integer.MAX_VALUE, 0xFFFF_FFFEL, 0xFFFF_FFFFL};
    final ArrayList<String> ulids = new ArrayList<>();
    for (long seq : wrappedLongs) {
        ulids.add(generator.createULID(ts, (int) seq));
    }
    // Lexicographic ULID order must match generation (sequence) order.
    final List<String> sortedUlids = ulids.stream().sorted().collect(Collectors.toList());
    assertThat(ulids).isEqualTo(sortedUlids);
}
/**
 * Matches raw (newly analyzed) issues against non-closed base issues, applying
 * progressively looser keys so the most precise matches win first. The order of
 * the passes is significant: each pass only considers still-unmatched issues.
 *
 * @param rawInput  issues from the current analysis
 * @param baseInput previously known (non-closed) issues
 * @return the tracking result pairing raw issues with their base counterparts
 */
public NonClosedTracking<RAW, BASE> trackNonClosed(Input<RAW> rawInput, Input<BASE> baseInput) {
    NonClosedTracking<RAW, BASE> tracking = NonClosedTracking.of(rawInput, baseInput);

    // 1. match by rule, line, line hash and message
    match(tracking, LineAndLineHashAndMessage::new);

    // 2. match issues with same rule, same line and same line hash, but not necessarily with same message
    match(tracking, LineAndLineHashKey::new);

    // 3. detect code moves by comparing blocks of codes
    detectCodeMoves(rawInput, baseInput, tracking);

    // 4. match issues with same rule, same message and same line hash
    match(tracking, LineHashAndMessageKey::new);

    // 5. match issues with same rule, same line and same message
    match(tracking, LineAndMessageKey::new);

    // 6. match issues with same rule and same line hash but different line and different message.
    // See SONAR-2812
    match(tracking, LineHashKey::new);
    return tracking;
}
/**
 * Verifies code-move detection: two issues raised on methods that moved further
 * down the file are matched to their base issues by block comparison, while the
 * newly added methods produce unmatched raw issues.
 */
@Test
public void recognize_blocks_1() {
    FakeInput baseInput = FakeInput.createForSourceLines(
        "package example1;",
        "",
        "public class Toto {",
        "",
        "  public void doSomething() {",
        "    // doSomething",
        "  }",
        "",
        "  public void doSomethingElse() {",
        "    // doSomethingElse",
        "  }",
        "}");
    Issue base1 = baseInput.createIssueOnLine(7, RULE_SYSTEM_PRINT, "Indentation");
    Issue base2 = baseInput.createIssueOnLine(11, RULE_SYSTEM_PRINT, "Indentation");

    // Same methods, shifted down by the insertion of two new methods above them.
    FakeInput rawInput = FakeInput.createForSourceLines(
        "package example1;",
        "",
        "public class Toto {",
        "",
        "  public Toto(){}",
        "",
        "  public void doSomethingNew() {",
        "    // doSomethingNew",
        "  }",
        "",
        "  public void doSomethingElseNew() {",
        "    // doSomethingElseNew",
        "  }",
        "",
        "  public void doSomething() {",
        "    // doSomething",
        "  }",
        "",
        "  public void doSomethingElse() {",
        "    // doSomethingElse",
        "  }",
        "}");
    Issue raw1 = rawInput.createIssueOnLine(9, RULE_SYSTEM_PRINT, "Indentation");
    Issue raw2 = rawInput.createIssueOnLine(13, RULE_SYSTEM_PRINT, "Indentation");
    Issue raw3 = rawInput.createIssueOnLine(17, RULE_SYSTEM_PRINT, "Indentation");
    Issue raw4 = rawInput.createIssueOnLine(21, RULE_SYSTEM_PRINT, "Indentation");

    Tracking<Issue, Issue> tracking = tracker.trackNonClosed(rawInput, baseInput);
    // Issues on the brand-new methods have no base counterpart.
    assertThat(tracking.baseFor(raw1)).isNull();
    assertThat(tracking.baseFor(raw2)).isNull();
    // Issues on the moved methods are matched to the original base issues.
    assertThat(tracking.baseFor(raw3)).isSameAs(base1);
    assertThat(tracking.baseFor(raw4)).isSameAs(base2);
    assertThat(tracking.getUnmatchedBases()).isEmpty();
}
/**
 * Customizes the embedded web server factory.
 *
 * @param server the factory to customize
 */
@Override
public void customize(WebServerFactory server) {
    // When running in an IDE or with ./mvnw spring-boot:run, set location of the static web assets.
    setLocationForStaticAssets(server);
}
/**
 * Verifies that customizing the servlet container in production registers the
 * expected MIME mappings and (when set) points the document root at the built
 * static assets directory.
 */
@Test
void shouldCustomizeServletContainer() {
    env.setActiveProfiles(JHipsterConstants.SPRING_PROFILE_PRODUCTION);
    UndertowServletWebServerFactory container = new UndertowServletWebServerFactory();
    webConfigurer.customize(container);
    assertThat(container.getMimeMappings().get("abs")).isEqualTo("audio/x-mpeg");
    assertThat(container.getMimeMappings().get("html")).isEqualTo("text/html");
    assertThat(container.getMimeMappings().get("json")).isEqualTo("application/json");
    // Document root is only set when the static assets directory exists on disk.
    if (container.getDocumentRoot() != null) {
        assertThat(container.getDocumentRoot()).isEqualTo(new File("target/classes/static/"));
    }
}
/**
 * Returns the path associated with this intent.
 *
 * @return the intent's path
 */
public Path path() {
    return path;
}
/**
 * Verifies the accessor basics of PathIntent for the three construction
 * variants, including the optional resource group.
 */
@Test
public void basics() {
    PathIntent intent = createOne();
    assertEquals("incorrect id", APPID, intent.appId());
    assertEquals("incorrect match", MATCH, intent.selector());
    assertEquals("incorrect action", NOP, intent.treatment());
    assertEquals("incorrect path", PATH1, intent.path());
    assertEquals("incorrect key", KEY, intent.key());

    intent = createAnother();
    assertEquals("incorrect id", APPID, intent.appId());
    assertEquals("incorrect match", MATCH, intent.selector());
    assertEquals("incorrect action", NOP, intent.treatment());
    assertEquals("incorrect path", PATH2, intent.path());
    assertEquals("incorrect key", KEY, intent.key());

    // Variant that also carries a resource group.
    intent = createWithResourceGroup();
    assertEquals("incorrect id", APPID, intent.appId());
    assertEquals("incorrect match", MATCH, intent.selector());
    assertEquals("incorrect action", NOP, intent.treatment());
    assertEquals("incorrect path", PATH2, intent.path());
    assertEquals("incorrect key", KEY, intent.key());
    assertEquals("incorrect resource group", RESOURCE_GROUP, intent.resourceGroup());
}
/**
 * Decides whether a member is an acceptable log source given the configured
 * source type and the currently known leader.
 *
 * @param leaderMemberId id of the known leader, or NULL_VALUE if unknown
 * @param memberId       id of the candidate source member
 * @return true if the member may serve as a log source
 */
boolean isAcceptable(final long leaderMemberId, final long memberId)
{
    switch (sourceType)
    {
        case LEADER:
            // Only the known leader qualifies; with no known leader nothing matches.
            return leaderMemberId == memberId && NULL_VALUE != leaderMemberId;

        case FOLLOWER:
            // Anyone but the known leader qualifies; with no known leader everyone does.
            return leaderMemberId != memberId || NULL_VALUE == leaderMemberId;

        case ANY:
            return true;

        default:
            throw new IllegalStateException("Unknown sourceType=" + sourceType);
    }
}
/**
 * Verifies that the ANY source type accepts every member/leader combination,
 * including unknown (NULL_VALUE) leaders and members.
 */
@Test
void anyLogSourceTypeShouldAny() {
    final LogSourceValidator logSourceValidator = new LogSourceValidator(ClusterBackup.SourceType.ANY);
    final long leaderMemberId = 123;
    final long followerMemberId = 456;
    assertTrue(logSourceValidator.isAcceptable(leaderMemberId, leaderMemberId));
    assertTrue(logSourceValidator.isAcceptable(leaderMemberId, followerMemberId));
    assertTrue(logSourceValidator.isAcceptable(NULL_VALUE, NULL_VALUE));
    assertTrue(logSourceValidator.isAcceptable(leaderMemberId, NULL_VALUE));
    assertTrue(logSourceValidator.isAcceptable(NULL_VALUE, followerMemberId));
}
/**
 * Returns a deserializer that first attempts deserialization against the static
 * topic and, on failure, retries against the record's source topic, invoking the
 * failure callback only when the retry succeeds. Errors from both attempts are
 * surfaced as the static-topic error.
 */
@Override
public Deserializer<T> deserializer() {
    final Deserializer<T> deserializer = delegate.deserializer();
    if (deserializer instanceof LoggingDeserializer<?>) {
        final LoggingDeserializer<T> loggingDeserializer = (LoggingDeserializer<T>) deserializer;
        return (topic, data) -> {
            // First attempt: deserialize as if the record came from the static topic.
            final DelayedResult<T> staticResult = loggingDeserializer.tryDeserialize(this.topic, data);
            if (!staticResult.isError()) {
                return staticResult.get();
            }

            // if both attempts error, then staticResult.get() will log the error to
            // the processing log and throw - do not call the callback in this case
            final DelayedResult<T> sourceResult = loggingDeserializer.tryDeserialize(topic, data);
            if (sourceResult.isError()) {
                return staticResult.get();
            }

            // Static-topic attempt failed but the source-topic attempt succeeded:
            // report the mismatch and return the successful result.
            onFailure.onDeserializationFailure(topic, this.topic, data);
            return sourceResult.get();
        };
    }
    return (topic, data) -> {
        try {
            // First attempt against the static topic; fall back to the source topic.
            return deserializer.deserialize(this.topic, data);
        } catch (final Exception e) {
            final T object = deserializer.deserialize(topic, data);
            onFailure.onDeserializationFailure(topic, this.topic, data);
            return object;
        }
    };
}
/**
 * Verifies that deserialization is delegated using the static topic (not the
 * record's source topic) and that no failure callback fires on success.
 */
@Test
public void shouldUseDelegateDeserializerWithStaticTopic() {
    // When:
    final Object deserialized = staticSerde.deserializer().deserialize(SOURCE_TOPIC, SOME_BYTES);

    // Then:
    verify(delegateD).deserialize(STATIC_TOPIC, SOME_BYTES);
    assertThat(deserialized, is(SOME_OBJECT));
    verifyNoMoreInteractions(callback);
}
/**
 * Compares two calculator functions for equality by their XML serialization,
 * i.e. two instances are equal when every persisted setting matches.
 *
 * @param obj the object to compare against
 * @return true if {@code obj} is the same class and serializes to identical XML
 */
@Override
public boolean equals( Object obj ) {
  // Identity fast-path: avoids serializing both instances to XML unnecessarily.
  if ( this == obj ) {
    return true;
  }
  if ( obj == null || !obj.getClass().equals( this.getClass() ) ) {
    return false;
  }
  CalculatorMetaFunction mf = (CalculatorMetaFunction) obj;
  // NOTE(review): hashCode() must be consistent with this XML-based definition of
  // equality — verify it is derived from the same persisted settings.
  return getXML().equals( mf.getXML() );
}
/**
 * Verifies the equals contract of CalculatorMetaFunction: a clone is equal
 * (though not the same instance), null/foreign objects are not equal, and
 * changing a setting breaks equality.
 */
@Test
public void testEquals() {
    CalculatorMetaFunction meta1 = new CalculatorMetaFunction();
    CalculatorMetaFunction meta2 = (CalculatorMetaFunction) meta1.clone();
    assertNotSame( meta1, meta2 );
    assertFalse( meta1.equals( null ) );
    assertFalse( meta1.equals( new Object() ) );
    assertTrue( meta1.equals( meta2 ) );
    // Any setting change must break XML-based equality.
    meta2.setCalcType( CalculatorMetaFunction.CALC_ADD_DAYS );
    assertFalse( meta1.equals( meta2 ) );
}
/**
 * Tells whether the current cluster-wide version is strictly greater than the
 * given version.
 *
 * @param version the version to compare the cluster version against
 * @return true if the cluster version is strictly greater
 */
protected boolean isClusterVersionGreaterThan(Version version) {
    final Version current = getNodeEngine().getClusterService().getClusterVersion();
    return current.isGreaterThan(version);
}
/**
 * Verifies that the current cluster version compares as greater than the
 * previous cluster version.
 */
@Test
public void testClusterVersion_isGreaterThan_previousVersion() {
    assertTrue(object.isClusterVersionGreaterThan(VersionsTest.getPreviousClusterVersion()));
}
/**
 * Converts a Beam Java field value into the corresponding ZetaSQL {@link Value}.
 * Null inputs become typed null values. For logical types (DATE, TIME, DATETIME)
 * both the base representation (Long/Row) and the input representation
 * (LocalDate/LocalTime/LocalDateTime) are accepted.
 *
 * @param object    the Beam value to convert (may be null)
 * @param fieldType the Beam field type describing {@code object}
 * @return the equivalent ZetaSQL value
 * @throws UnsupportedOperationException for unknown field or logical types
 */
public static Value toZetaSqlValue(@Nullable Object object, FieldType fieldType) {
  if (object == null) {
    return Value.createNullValue(toZetaSqlType(fieldType));
  }
  switch (fieldType.getTypeName()) {
    case INT64:
      return Value.createInt64Value((Long) object);
    case DOUBLE:
      return Value.createDoubleValue((Double) object);
    case BOOLEAN:
      return Value.createBoolValue((Boolean) object);
    case STRING:
      return Value.createStringValue((String) object);
    case BYTES:
      return Value.createBytesValue(ByteString.copyFrom((byte[]) object));
    case DECIMAL:
      return Value.createNumericValue((BigDecimal) object);
    case DATETIME:
      // Millis -> micros with overflow detection.
      return Value.createTimestampValueFromUnixMicros(
          LongMath.checkedMultiply(((Instant) object).getMillis(), MICROS_PER_MILLI));
    case LOGICAL_TYPE:
      String identifier = fieldType.getLogicalType().getIdentifier();
      if (SqlTypes.DATE.getIdentifier().equals(identifier)) {
        if (object instanceof Long) { // base type: epoch days
          return Value.createDateValue(((Long) object).intValue());
        } else { // input type
          return Value.createDateValue((LocalDate) object);
        }
      } else if (SqlTypes.TIME.getIdentifier().equals(identifier)) {
        LocalTime localTime;
        if (object instanceof Long) { // base type: nanos of day
          localTime = LocalTime.ofNanoOfDay((Long) object);
        } else { // input type
          localTime = (LocalTime) object;
        }
        return Value.createTimeValue(localTime);
      } else if (SqlTypes.DATETIME.getIdentifier().equals(identifier)) {
        LocalDateTime datetime;
        if (object instanceof Row) { // base type: row of (epoch days, nanos of day)
          datetime =
              LocalDateTime.of(
                  LocalDate.ofEpochDay(((Row) object).getInt64(DateTime.DATE_FIELD_NAME)),
                  LocalTime.ofNanoOfDay(((Row) object).getInt64(DateTime.TIME_FIELD_NAME)));
        } else { // input type
          datetime = (LocalDateTime) object;
        }
        return Value.createDatetimeValue(datetime);
      } else {
        throw new UnsupportedOperationException("Unknown Beam logical type: " + identifier);
      }
    case ARRAY:
      return toZetaSqlArrayValue((List<Object>) object, fieldType.getCollectionElementType());
    case ROW:
      return toZetaSqlStructValue((Row) object, fieldType.getRowSchema());
    default:
      throw new UnsupportedOperationException(
          "Unknown Beam fieldType: " + fieldType.getTypeName());
  }
}
/**
 * Verifies conversion of a Beam Row into the expected ZetaSQL value.
 */
@Test
public void testJavaObjectToZetaSqlValue() {
  // assertEquals takes (expected, actual): put the expected constant first so
  // failure messages read correctly.
  assertEquals(TEST_VALUE, ZetaSqlBeamTranslationUtils.toZetaSqlValue(TEST_ROW, TEST_FIELD_TYPE));
}
/**
 * Converts the value to a "0x"-prefixed hex string, padding single-digit
 * results to two hex digits.
 *
 * @param value the value to encode
 * @return the prefixed hex representation, at least two hex digits long
 * @deprecated retained for backward compatibility
 */
@Deprecated
public static String toHexStringWithPrefixSafe(BigInteger value) {
    final String hex = toHexStringNoPrefix(value);
    // Pad to a minimum of two hex digits before prefixing.
    return HEX_PREFIX + (hex.length() < 2 ? Strings.zeros(1) + hex : hex);
}
/**
 * Verifies hex encoding of quantities: zero is padded to two digits, and
 * larger values are encoded without extra leading zeros.
 */
@Test
public void testQuantityEncodeLeadingZero() {
    // assertEquals takes (expected, actual): expected literal first for clear failure messages.
    assertEquals("0x00", Numeric.toHexStringWithPrefixSafe(BigInteger.valueOf(0L)));
    assertEquals("0x400", Numeric.toHexStringWithPrefixSafe(BigInteger.valueOf(1024L)));
    assertEquals(
            "0x7fffffffffffffff",
            Numeric.toHexStringWithPrefixSafe(BigInteger.valueOf(Long.MAX_VALUE)));
    assertEquals(
            "0x99dc848b94efc27edfad28def049810f",
            Numeric.toHexStringWithPrefixSafe(
                    new BigInteger("204516877000845695339750056077105398031")));
}
/**
 * SQL {@code <} operator for BIGINT operands.
 *
 * @param left  left operand
 * @param right right operand
 * @return true if {@code left} is strictly less than {@code right}
 */
@ScalarOperator(LESS_THAN)
@SqlType(StandardTypes.BOOLEAN)
public static boolean lessThan(@SqlType(StandardTypes.BIGINT) long left, @SqlType(StandardTypes.BIGINT) long right) {
    return left < right;
}
/**
 * Verifies the BIGINT {@code <} operator for equal, greater and lesser operands.
 */
@Test
public void testLessThan() {
    assertFunction("100000000037 < 100000000037", BOOLEAN, false);
    assertFunction("100000000037 < 100000000017", BOOLEAN, false);
    assertFunction("100000000017 < 100000000037", BOOLEAN, true);
    assertFunction("100000000017 < 100000000017", BOOLEAN, false);
}
/**
 * Processes a local-resource lifecycle event (request, localized, release,
 * localization-failed, recovered) and keeps the resource cache consistent:
 * missing resources are re-requested, failed ones evicted, downloading
 * resources with no remaining references removed on release, and completed
 * localizations persisted to the state store.
 *
 * <p>Synchronized so cache updates and resource state transitions are atomic
 * per event.
 *
 * @param event the resource event to handle
 */
@Override
public synchronized void handle(ResourceEvent event) {
  LocalResourceRequest req = event.getLocalResourceRequest();
  LocalizedResource rsrc = localrsrc.get(req);
  switch (event.getType()) {
  case LOCALIZED:
    if (useLocalCacheDirectoryManager) {
      // Localization finished: drop in-progress bookkeeping for the directory manager.
      inProgressLocalResourcesMap.remove(req);
    }
    break;
  case REQUEST:
    // A cached entry whose on-disk file vanished must be localized again.
    if (rsrc != null && (!isResourcePresent(rsrc))) {
      LOG.info("Resource " + rsrc.getLocalPath()
          + " is missing, localizing it again");
      removeResource(req);
      rsrc = null;
    }
    if (null == rsrc) {
      rsrc = new LocalizedResource(req, dispatcher);
      localrsrc.put(req, rsrc);
    }
    break;
  case RELEASE:
    if (null == rsrc) {
      // The container sent a release event on a resource which
      // 1) Failed
      // 2) Removed for some reason (ex. disk is no longer accessible)
      ResourceReleaseEvent relEvent = (ResourceReleaseEvent) event;
      LOG.info("Container " + relEvent.getContainer()
          + " sent RELEASE event on a resource request " + req
          + " not present in cache.");
      return;
    }
    break;
  case LOCALIZATION_FAILED:
    /*
     * If resource localization fails then Localized resource will be
     * removed from local cache.
     */
    removeResource(req);
    break;
  case RECOVERED:
    if (rsrc != null) {
      LOG.warn("Ignoring attempt to recover existing resource " + rsrc);
      return;
    }
    rsrc = recoverResource(req, (ResourceRecoveredEvent) event);
    localrsrc.put(req, rsrc);
    break;
  }
  if (rsrc == null) {
    // e.g. LOCALIZED/LOCALIZATION_FAILED arriving for an already-evicted request.
    LOG.warn("Received " + event.getType() + " event for request " + req
        + " but localized resource is missing");
    return;
  }
  // Forward the event to the resource's own state machine.
  rsrc.handle(event);

  // Remove the resource if its downloading and its reference count has
  // become 0 after RELEASE. This maybe because a container was killed while
  // localizing and no other container is referring to the resource.
  // NOTE: This should NOT be done for public resources since the
  // download is not associated with a container-specific localizer.
  if (event.getType() == ResourceEventType.RELEASE) {
    if (rsrc.getState() == ResourceState.DOWNLOADING
        && rsrc.getRefCount() <= 0
        && rsrc.getRequest().getVisibility() != LocalResourceVisibility.PUBLIC) {
      removeResource(req);
    }
  }

  if (event.getType() == ResourceEventType.LOCALIZED) {
    if (rsrc.getLocalPath() != null) {
      // Persist the completed localization so it can be recovered after an NM restart.
      try {
        stateStore.finishResourceLocalization(user, appId,
            buildLocalizedResourceProto(rsrc));
      } catch (IOException ioe) {
        LOG.error("Error storing resource state for " + rsrc, ioe);
      }
    } else {
      LOG.warn("Resource " + rsrc + " localized without a location");
    }
  }
}
/**
 * End-to-end exercise of the local resource cache: request/ref-counting by two
 * containers, eviction on localization failure, silent handling of a release
 * for an evicted resource, re-request, successful localization notification,
 * and final ref-count drop to zero.
 */
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void testLocalResourceCache() {
  String user = "testuser";
  DrainDispatcher dispatcher = null;
  try {
    Configuration conf = new Configuration();
    dispatcher = createDispatcher(conf);
    EventHandler<LocalizerEvent> localizerEventHandler =
        mock(EventHandler.class);
    EventHandler<ContainerEvent> containerEventHandler =
        mock(EventHandler.class);
    // Registering event handlers.
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);

    ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc =
        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
        null, dispatcher, localrsrc, true, conf,
        new NMNullStateStoreService(), null);

    LocalResourceRequest lr =
        createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);

    // Creating 2 containers for same application which will be requesting
    // same local resource.
    // Container 1 requesting local resource.
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
    ResourceEvent reqEvent1 =
        new ResourceRequestEvent(lr, LocalResourceVisibility.PRIVATE, lc1);

    // No resource request is initially present in local cache
    Assert.assertEquals(0, localrsrc.size());

    // Container-1 requesting local resource.
    tracker.handle(reqEvent1);
    dispatcher.await();

    // New localized Resource should have been added to local resource map
    // and the requesting container will be added to its waiting queue.
    Assert.assertEquals(1, localrsrc.size());
    Assert.assertTrue(localrsrc.containsKey(lr));
    Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
    Assert.assertTrue(localrsrc.get(lr).ref.contains(cId1));
    Assert.assertEquals(ResourceState.DOWNLOADING, localrsrc.get(lr)
        .getState());

    // Container 2 requesting the resource
    ContainerId cId2 = BuilderUtils.newContainerId(1, 1, 1, 2);
    LocalizerContext lc2 = new LocalizerContext(user, cId2, null);
    ResourceEvent reqEvent2 =
        new ResourceRequestEvent(lr, LocalResourceVisibility.PRIVATE, lc2);
    tracker.handle(reqEvent2);
    dispatcher.await();

    // Container 2 should have been added to the waiting queue of the local
    // resource
    Assert.assertEquals(2, localrsrc.get(lr).getRefCount());
    Assert.assertTrue(localrsrc.get(lr).ref.contains(cId2));

    // Failing resource localization
    ResourceEvent resourceFailedEvent = new ResourceFailedLocalizationEvent(
        lr, (new Exception("test").getMessage()));

    // Backing up the resource to track its state change as it will be
    // removed after the failed event.
    LocalizedResource localizedResource = localrsrc.get(lr);

    tracker.handle(resourceFailedEvent);
    dispatcher.await();

    // After receiving failed resource event; all waiting containers will be
    // notified with Container Resource Failed Event.
    Assert.assertEquals(0, localrsrc.size());
    verify(containerEventHandler, timeout(1000).times(2)).handle(
        isA(ContainerResourceFailedEvent.class));
    Assert.assertEquals(ResourceState.FAILED, localizedResource.getState());

    // Container 1 trying to release the resource (This resource is already
    // deleted from the cache. This call should return silently without
    // exception.
    ResourceReleaseEvent relEvent1 = new ResourceReleaseEvent(lr, cId1);
    tracker.handle(relEvent1);
    dispatcher.await();

    // Container-3 now requests for the same resource. This request call
    // is coming prior to Container-2's release call.
    ContainerId cId3 = BuilderUtils.newContainerId(1, 1, 1, 3);
    LocalizerContext lc3 = new LocalizerContext(user, cId3, null);
    ResourceEvent reqEvent3 =
        new ResourceRequestEvent(lr, LocalResourceVisibility.PRIVATE, lc3);
    tracker.handle(reqEvent3);
    dispatcher.await();

    // Local resource cache now should have the requested resource and the
    // number of waiting containers should be 1.
    Assert.assertEquals(1, localrsrc.size());
    Assert.assertTrue(localrsrc.containsKey(lr));
    Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
    Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3));

    // Container-2 Releases the resource
    ResourceReleaseEvent relEvent2 = new ResourceReleaseEvent(lr, cId2);
    tracker.handle(relEvent2);
    dispatcher.await();

    // Making sure that there is no change in the cache after the release.
    Assert.assertEquals(1, localrsrc.size());
    Assert.assertTrue(localrsrc.containsKey(lr));
    Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
    Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3));

    // Sending ResourceLocalizedEvent to tracker. In turn resource should
    // send Container Resource Localized Event to waiting containers.
    Path localizedPath = new Path("/tmp/file1");
    ResourceLocalizedEvent localizedEvent =
        new ResourceLocalizedEvent(lr, localizedPath, 123L);
    tracker.handle(localizedEvent);
    dispatcher.await();

    // Verifying ContainerResourceLocalizedEvent .
    verify(containerEventHandler, timeout(1000).times(1)).handle(
        isA(ContainerResourceLocalizedEvent.class));
    Assert.assertEquals(ResourceState.LOCALIZED, localrsrc.get(lr)
        .getState());
    Assert.assertEquals(1, localrsrc.get(lr).getRefCount());

    // Container-3 releasing the resource.
    ResourceReleaseEvent relEvent3 = new ResourceReleaseEvent(lr, cId3);
    tracker.handle(relEvent3);
    dispatcher.await();

    Assert.assertEquals(0, localrsrc.get(lr).getRefCount());
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
/**
 * Parses the given natural-language date expression relative to the current time.
 *
 * @param string the natural-language date expression
 * @return the parsed from/to result
 * @throws DateNotParsableException if the expression cannot be interpreted
 */
public Result parse(final String string) throws DateNotParsableException {
    return this.parse(string, new Date());
}
/**
 * This test should be ignored for now. The problem is, that the to-date has the hour
 * subtracted by 1 - which is not reasonable at all in this context but without further
 * digging into Natty not solvable. And that effort would be too much by now.
 */
@Test
@Disabled
public void multipleDaytestParseAlignToAGivenTime() throws Exception {
    final DateTimeFormatter df = DateTimeFormat.forPattern("HH:mm:ss");
    for (String[] test : multipleDaytestsThatAlignToAGivenTime) {
        NaturalDateParser.Result result = naturalDateParser.parse(test[0]);
        assertNotNull(result.getFrom());
        assertNotNull(result.getTo());
        // Both bounds must align to the expected time-of-day from the fixture.
        assertThat(df.print(result.getFrom())).as("time part of date should equal " + test[1] + " in").isEqualTo(test[1]);
        assertThat(df.print(result.getTo())).as("time part of date should equal " + test[1] + " in").isEqualTo(test[1]);
    }
}
public String getId(String name) { // Use the id directly if it is unique and the length is less than max if (name.length() <= maxHashLength && usedIds.add(name)) { return name; } // Pick the last bytes of hashcode and use hex format final String hexString = Integer.toHexString(name.hashCode()); final String origId = hexString.length() <= maxHashLength ? hexString : hexString.substring(Math.max(0, hexString.length() - maxHashLength)); String id = origId; int suffixNum = 2; while (!usedIds.add(id)) { // A duplicate! Retry. id = origId + "-" + suffixNum++; } LOG.info("Name {} is mapped to id {}", name, id); return id; }
/**
 * Verifies that repeated requests for the same (long) name yield pairwise
 * distinct ids via collision-suffixing.
 */
@Test
public void testLongHash() {
  final HashIdGenerator idGenerator = new HashIdGenerator(10);
  String id1 = idGenerator.getId(Count.perKey().getName());
  String id2 = idGenerator.getId(Count.perKey().getName());
  String id3 = idGenerator.getId(Count.perKey().getName());
  String id4 = idGenerator.getId(Count.perKey().getName());
  // All four generated ids must be pairwise distinct (the original test
  // only checked three of the six pairs).
  Assert.assertNotEquals(id1, id2);
  Assert.assertNotEquals(id1, id3);
  Assert.assertNotEquals(id1, id4);
  Assert.assertNotEquals(id2, id3);
  Assert.assertNotEquals(id2, id4);
  Assert.assertNotEquals(id3, id4);
}
/**
 * Replaces the value meta at the given index, renaming the new meta if another
 * entry with the same name already exists elsewhere in the row, and updating the
 * name-to-index cache accordingly. A null {@code valueMeta} is ignored.
 *
 * @param index     position in the row to replace
 * @param valueMeta the new value meta (ignored when null)
 */
@Override
public void setValueMeta( int index, ValueMetaInterface valueMeta ) {
  if ( valueMeta != null ) {
    // Write lock: list mutation, cache update and the rename check must be atomic.
    lock.writeLock().lock();
    try {
      ValueMetaInterface old = valueMetaList.get( index );
      ValueMetaInterface newMeta = valueMeta;

      // try to check if a ValueMeta with the same name already exists
      int existsIndex = indexOfValue( valueMeta.getName() );

      // if it exists and it's not in the requested position
      // we need to take care of renaming
      if ( existsIndex >= 0 && existsIndex != index ) {
        newMeta = renameValueMetaIfInRow( valueMeta, null );
      }
      valueMetaList.set( index, newMeta );
      cache.replaceMapping( old.getName(), newMeta.getName(), index );
      // Invalidate the lazily-built clone; it no longer matches the list.
      needRealClone = null;
    } finally {
      lock.writeLock().unlock();
    }
  }
}
/**
 * Verifies that setting a value meta at an occupied index replaces (not inserts)
 * the entry: size is unchanged and the old name is no longer resolvable.
 */
@Test
public void testSetValueMeta() {
    rowMeta.setValueMeta( 1, charly );
    assertEquals( 1, rowMeta.getValueMetaList().indexOf( charly ) );
    assertEquals( "There is still 3 elements:", 3, rowMeta.size() );
    // The replaced entry's name must be gone from the row.
    assertEquals( -1, rowMeta.indexOfValue( "integer" ) );
}
/**
 * Parses the given command-line arguments into a runtime options builder.
 *
 * @param args the arguments to parse
 * @return the builder populated from the arguments
 */
public RuntimeOptionsBuilder parse(String... args) {
    return parse(Arrays.asList(args));
}
/**
 * Verifies that repeating the same feature path with different line numbers
 * merges the line filters into a single entry for that feature.
 */
@Test
void combines_line_filters_from_repeated_features() {
    RuntimeOptions options = parser
            .parse("classpath:somewhere_else.feature:3", "classpath:somewhere_else.feature:5")
            .build();
    // The feature path appears once; both requested lines are combined.
    assertThat(options.getFeaturePaths(), contains(uri("classpath:somewhere_else.feature")));
    Set<Integer> lines = new HashSet<>(asList(3, 5));
    assertThat(options.getLineFilters(), hasEntry(uri("classpath:somewhere_else.feature"), lines));
}
/**
 * Removes the dependency edge from {@code from} to {@code to}, updating both
 * the outgoing and incoming adjacency maps under the write lock.
 *
 * @param from the dependent node
 * @param to   the node {@code from} depends on
 * @throws IllegalArgumentException if the dependency does not exist
 */
public void removeDependency(T from, T to) {
    long stamp = lock.writeLock();
    try {
        Set<T> dependencies = outgoingEdges.get(from);
        if (dependencies == null || !dependencies.contains(to)) {
            throw new IllegalArgumentException("Inexistent dependency");
        }
        dependencies.remove(to);
        // NOTE(review): assumes incomingEdges always has an entry for `to` when the
        // outgoing edge exists — presumably an invariant kept by addDependency; confirm.
        incomingEdges.get(to).remove(from);
    } finally {
        lock.unlockWrite(stamp);
    }
}
/**
 * Verifies that removing a dependency (and adding its reverse) changes the
 * topological order accordingly, and that clearing empties the graph.
 */
@Test
public void testRemoveDependency() throws CyclicDependencyException {
    DependencyGraph<String> g = new DependencyGraph<>();
    g.addDependency("E", "B");
    g.addDependency("E", "C");
    g.addDependency("E", "D");
    g.addDependency("B", "D");
    g.addDependency("B", "C");
    g.addDependency("C", "D");
    assertEquals(g.topologicalSort(), Arrays.asList("E", "B", "C", "D"));
    // Reverse the E->B edge; B must now precede E in the order.
    g.removeDependency("E", "B");
    g.addDependency("B", "E");
    assertEquals(g.topologicalSort(), Arrays.asList("B", "E", "C", "D"));
    g.clearAll();
    assertTrue(g.topologicalSort().isEmpty());
}
/**
 * Runs statistics estimation by visiting the operator of the bound expression
 * context. The computed statistics are presumably stored on the context by the
 * visitor rather than returned (callers read them via the context afterwards).
 */
public void estimatorStats() {
    expressionContext.getOp().accept(this, expressionContext);
}
/**
 * Verifies statistics estimation for a logical UNION: output row count is the
 * sum of the children's, and output column statistics are merged (unioned
 * ranges, combined distinct counts) from the corresponding child columns.
 */
@Test
public void testLogicalUnion() throws Exception {
    // child 1 output column
    ColumnRefOperator v1 = columnRefFactory.create("v1", Type.INT, true);
    ColumnRefOperator v2 = columnRefFactory.create("v2", Type.INT, true);
    // child 2 output column
    ColumnRefOperator v3 = columnRefFactory.create("v3", Type.INT, true);
    ColumnRefOperator v4 = columnRefFactory.create("v4", Type.INT, true);
    // union node output column
    ColumnRefOperator v5 = columnRefFactory.create("v3", Type.INT, true);
    ColumnRefOperator v6 = columnRefFactory.create("v4", Type.INT, true);
    // child 1 statistics
    Statistics.Builder childBuilder1 = Statistics.builder();
    childBuilder1.setOutputRowCount(10000);
    childBuilder1.addColumnStatistics(ImmutableMap.of(v1, new ColumnStatistic(0, 100, 0, 10, 50)));
    childBuilder1.addColumnStatistics(ImmutableMap.of(v2, new ColumnStatistic(0, 50, 0, 10, 50)));
    Group childGroup1 = new Group(0);
    childGroup1.setStatistics(childBuilder1.build());
    // child 2 statistics
    Statistics.Builder childBuilder2 = Statistics.builder();
    childBuilder2.setOutputRowCount(20000);
    childBuilder2.addColumnStatistics(ImmutableMap.of(v3, new ColumnStatistic(100, 200, 0, 10, 50)));
    childBuilder2.addColumnStatistics(ImmutableMap.of(v4, new ColumnStatistic(0, 100, 0, 10, 100)));
    Group childGroup2 = new Group(1);
    childGroup2.setStatistics(childBuilder2.build());
    // construct group expression
    LogicalUnionOperator unionOperator = new LogicalUnionOperator(Lists.newArrayList(v5, v6),
        Lists.newArrayList(Lists.newArrayList(v1, v2), Lists.newArrayList(v3, v4)), true);
    GroupExpression groupExpression =
        new GroupExpression(unionOperator, Lists.newArrayList(childGroup1, childGroup2));
    groupExpression.setGroup(new Group(2));
    ExpressionContext expressionContext = new ExpressionContext(groupExpression);
    StatisticsCalculator statisticsCalculator =
        new StatisticsCalculator(expressionContext, columnRefFactory, optimizerContext);
    statisticsCalculator.estimatorStats();

    ColumnStatistic columnStatisticV5 = expressionContext.getStatistics().getColumnStatistic(v5);
    ColumnStatistic columnStatisticV6 = expressionContext.getStatistics().getColumnStatistic(v6);
    // Row count: 10000 + 20000.
    Assert.assertEquals(30000, expressionContext.getStatistics().getOutputRowCount(), 0.001);
    // Merged ranges and distinct counts from the corresponding child columns.
    Assert.assertEquals(new StatisticRangeValues(0, 200, 99), StatisticRangeValues.from(columnStatisticV5));
    Assert.assertEquals(new StatisticRangeValues(0, 100, 100), StatisticRangeValues.from(columnStatisticV6));
}
@Override public SingleStreamSpiller create(List<Type> types, SpillContext spillContext, LocalMemoryContext memoryContext) { Optional<SpillCipher> spillCipher = Optional.empty(); if (spillEncryptionEnabled) { spillCipher = Optional.of(new AesSpillCipher()); } PagesSerde serde = serdeFactory.createPagesSerdeForSpill(spillCipher); return new FileSingleStreamSpiller(serde, executor, getNextSpillPath(), spillerStats, spillContext, memoryContext, spillCipher); }
@Test public void testDistributesSpillOverPaths() throws Exception { List<Type> types = ImmutableList.of(BIGINT); BlockEncodingSerde blockEncodingSerde = new BlockEncodingManager(); List<Path> spillPaths = ImmutableList.of(spillPath1.toPath(), spillPath2.toPath()); FileSingleStreamSpillerFactory spillerFactory = new FileSingleStreamSpillerFactory( executor, // executor won't be closed, because we don't call destroy() on the spiller factory blockEncodingSerde, new SpillerStats(), spillPaths, 1.0, false, false); assertEquals(listFiles(spillPath1.toPath()).size(), 0); assertEquals(listFiles(spillPath2.toPath()).size(), 0); Page page = buildPage(); List<SingleStreamSpiller> spillers = new ArrayList<>(); for (int i = 0; i < 10; ++i) { SingleStreamSpiller singleStreamSpiller = spillerFactory.create(types, new TestingSpillContext(), newSimpleAggregatedMemoryContext().newLocalMemoryContext("test")); getUnchecked(singleStreamSpiller.spill(page)); spillers.add(singleStreamSpiller); } assertEquals(listFiles(spillPath1.toPath()).size(), 5); assertEquals(listFiles(spillPath2.toPath()).size(), 5); spillers.forEach(SingleStreamSpiller::close); assertEquals(listFiles(spillPath1.toPath()).size(), 0); assertEquals(listFiles(spillPath2.toPath()).size(), 0); }
@Override public BaseB2Response upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final long partSize; if(file.getType().contains(Path.Type.encrypted)) { // For uploads to vault part size must be a multiple of 32 * 1024. Recommended partsize from B2 API may not meet that requirement. partSize = PreferencesFactory.get().getLong("b2.upload.largeobject.size"); } else { partSize = this.partSize; } return this.upload(file, local, throttle, listener, status, callback, partSize < status.getLength() ? partSize : PreferencesFactory.get().getLong("b2.upload.largeobject.size.minimum")); }
/**
 * End-to-end large-upload test: writes random content, uploads it in parts,
 * verifies checksum/size/completeness, reads it back, and cleans up.
 */
@Test
public void testUpload() throws Exception {
    final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final Local local = new Local(System.getProperty("java.io.tmpdir"), new AlphanumericRandomStringService().random());
    // Each segment, except the last, must be larger than 100MB.
    // Content is 5MB + 1 byte so the upload spans more than one 5MB part.
    final int length = 5 * 1000 * 1000 + 1;
    final byte[] content = RandomUtils.nextBytes(length);
    final OutputStream out = local.getOutputStream(false);
    IOUtils.write(content, out);
    out.close();
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    // Precompute the SHA-1 so the upload result can be compared against it.
    final Checksum checksum = new SHA1ChecksumCompute().compute(new ByteArrayInputStream(content), new TransferStatus());
    status.setChecksum(checksum);
    final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
    final B2LargeUploadService upload = new B2LargeUploadService(session, fileid, new B2WriteFeature(session, fileid), 5 * 1000L * 1000L, 5);
    upload.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(), status, new DisabledConnectionCallback());
    // Remote attributes must reflect the uploaded checksum.
    final PathAttributes attr = new B2AttributesFinderFeature(session, fileid).find(test);
    assertNotEquals(Checksum.NONE, attr.getChecksum());
    assertEquals(checksum, attr.getChecksum());
    status.validate();
    assertTrue(status.isComplete());
    assertEquals(content.length, status.getResponse().getSize());
    assertTrue(new DefaultFindFeature(session).find(test));
    // Read the object back and verify the bytes round-trip unchanged.
    final InputStream in = new B2ReadFeature(session, fileid).read(test, new TransferStatus(), new DisabledConnectionCallback());
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length);
    new StreamCopier(status, status).transfer(in, buffer);
    in.close();
    buffer.close();
    assertArrayEquals(content, buffer.toByteArray());
    // Cleanup: delete remote object and local temp file.
    new B2DeleteFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    local.delete();
}
@SuppressWarnings("unused") static void onTimeoutNowReturned(final ThreadId id, final Status status, final TimeoutNowRequest request, final TimeoutNowResponse response, final boolean stopAfterFinish) { final Replicator r = (Replicator) id.lock(); if (r == null) { return; } final boolean isLogDebugEnabled = LOG.isDebugEnabled(); StringBuilder sb = null; if (isLogDebugEnabled) { sb = new StringBuilder("Node "). // append(r.options.getGroupId()).append(":").append(r.options.getServerId()). // append(" received TimeoutNowResponse from "). // append(r.options.getPeerId()); } if (!status.isOk()) { if (isLogDebugEnabled) { sb.append(" fail:").append(status); LOG.debug(sb.toString()); } notifyReplicatorStatusListener(r, ReplicatorEvent.ERROR, status); if (stopAfterFinish) { r.notifyOnCaughtUp(RaftError.ESTOP.getNumber(), true); r.destroy(); } else { id.unlock(); } return; } if (isLogDebugEnabled) { sb.append(response.getSuccess() ? " success" : " fail"); LOG.debug(sb.toString()); } if (response.getTerm() > r.options.getTerm()) { final NodeImpl node = r.options.getNode(); r.notifyOnCaughtUp(RaftError.EPERM.getNumber(), true); r.destroy(); node.increaseTermTo(response.getTerm(), new Status(RaftError.EHIGHERTERMRESPONSE, "Leader receives higher term timeout_now_response from peer:%s, group:%s", r.options.getPeerId(), r.options.getGroupId())); return; } if (stopAfterFinish) { r.notifyOnCaughtUp(RaftError.ESTOP.getNumber(), true); r.destroy(); } else { id.unlock(); } }
@Test public void testOnTimeoutNowReturnedRpcErrorAndStop() { final Replicator r = getReplicator(); final RpcRequests.TimeoutNowRequest request = createTimeoutnowRequest(); this.id.unlock(); Replicator.onTimeoutNowReturned(this.id, new Status(-1, "test"), request, null, true); assertNull(r.id); }
@ScalarOperator(MULTIPLY) @SqlType(StandardTypes.REAL) public static long multiply(@SqlType(StandardTypes.REAL) long left, @SqlType(StandardTypes.REAL) long right) { return floatToRawIntBits(intBitsToFloat((int) left) * intBitsToFloat((int) right)); }
@Test public void testMultiply() { assertFunction("REAL'12.34' * REAL'56.78'", REAL, 12.34f * 56.78f); assertFunction("REAL'-17.34' * REAL'-22.891'", REAL, -17.34f * -22.891f); assertFunction("REAL'-89.123' * REAL'754.0'", REAL, -89.123f * 754.0f); assertFunction("REAL'-0.0' * REAL'0.0'", REAL, -0.0f * 0.0f); assertFunction("REAL'-17.71' * REAL'-1.0'", REAL, -17.71f * -1.0f); }
@Override public int deserializeKV(DataInputStream in, SizedWritable<?> key, SizedWritable<?> value) throws IOException { if (!in.hasUnReadData()) { return 0; } key.length = in.readInt(); value.length = in.readInt(); keySerializer.deserialize(in, key.length, key.v); valueSerializer.deserialize(in, value.length, value.v); return key.length + value.length + KV_HEAD_LENGTH; }
@Test public void testDeserializerNoData() throws IOException { final DataInputStream in = Mockito.mock(DataInputStream.class); Mockito.when(in.hasUnReadData()).thenReturn(false); Assert.assertEquals(0, serializer.deserializeKV(in, key, value)); }
public int maxValue() { final int initialValue = this.initialValue; int max = 0 == size ? initialValue : Integer.MIN_VALUE; for (final int value : values) { if (initialValue != value) { max = Math.max(max, value); } } return max; }
@Test void shouldHaveNoMaxValueForEmptyCollection() { assertEquals(INITIAL_VALUE, map.maxValue()); }
/**
 * Serializes a span as a Zipkin v2 JSON object into the buffer.
 *
 * Fields are written in a fixed order, each only when present; writeFieldBegin
 * handles the comma/field-name prelude and tracks whether any field has been
 * written yet via {@code wroteField}.
 */
@Override
public void write(MutableSpan span, WriteBuffer b) {
    b.writeByte('{');
    boolean wroteField = false;
    if (span.traceId() != null) {
        wroteField = writeFieldBegin(b, "traceId", wroteField);
        b.writeByte('"');
        b.writeAscii(span.traceId());
        b.writeByte('"');
    }
    if (span.parentId() != null) {
        wroteField = writeFieldBegin(b, "parentId", wroteField);
        b.writeByte('"');
        b.writeAscii(span.parentId());
        b.writeByte('"');
    }
    if (span.id() != null) {
        wroteField = writeFieldBegin(b, "id", wroteField);
        b.writeByte('"');
        b.writeAscii(span.id());
        b.writeByte('"');
    }
    if (span.kind() != null) {
        wroteField = writeFieldBegin(b, "kind", wroteField);
        b.writeByte('"');
        b.writeAscii(span.kind().toString());
        b.writeByte('"');
    }
    if (span.name() != null) {
        wroteField = writeFieldBegin(b, "name", wroteField);
        b.writeByte('"');
        // Names are user-supplied and must be JSON-escaped, unlike the hex IDs above.
        jsonEscape(span.name(), b);
        b.writeByte('"');
    }
    long startTimestamp = span.startTimestamp(), finishTimestamp = span.finishTimestamp();
    if (startTimestamp != 0L) {
        wroteField = writeFieldBegin(b, "timestamp", wroteField);
        b.writeAscii(startTimestamp);
        // Duration is only meaningful when both endpoints are known.
        if (finishTimestamp != 0L) {
            wroteField = writeFieldBegin(b, "duration", wroteField);
            b.writeAscii(finishTimestamp - startTimestamp);
        }
    }
    if (span.localServiceName() != null || span.localIp() != null) {
        wroteField = writeFieldBegin(b, "localEndpoint", wroteField);
        writeEndpoint(b, span.localServiceName(), span.localIp(), span.localPort());
    }
    if (span.remoteServiceName() != null || span.remoteIp() != null) {
        wroteField = writeFieldBegin(b, "remoteEndpoint", wroteField);
        writeEndpoint(b, span.remoteServiceName(), span.remoteIp(), span.remotePort());
    }
    int annotationLength = span.annotationCount();
    if (annotationLength > 0) {
        wroteField = writeFieldBegin(b, "annotations", wroteField);
        b.writeByte('[');
        for (int i = 0; i < annotationLength; ) {
            long timestamp = span.annotationTimestampAt(i);
            String value = span.annotationValueAt(i);
            writeAnnotation(timestamp, value, b);
            if (++i < annotationLength) b.writeByte(',');
        }
        b.writeByte(']');
    }
    int tagCount = span.tagCount();
    // The error tag is synthesized from span.error() unless the span already carries
    // a tag with the same key (then the explicit tag wins).
    String errorValue = errorTag.value(span.error(), null);
    String errorTagName = errorValue != null ? errorTag.key() : null;
    boolean writeError = errorTagName != null;
    if (tagCount > 0 || writeError) {
        wroteField = writeFieldBegin(b, "tags", wroteField);
        b.writeByte('{');
        for (int i = 0; i < tagCount; ) {
            String key = span.tagKeyAt(i);
            if (writeError && key.equals(errorTagName)) writeError = false;
            writeKeyValue(b, key, span.tagValueAt(i));
            if (++i < tagCount) b.writeByte(',');
        }
        if (writeError) {
            if (tagCount > 0) b.writeByte(',');
            writeKeyValue(b, errorTagName, errorValue);
        }
        b.writeByte('}');
    }
    if (Boolean.TRUE.equals(span.debug())) {
        wroteField = writeFieldBegin(b, "debug", wroteField);
        b.writeAscii("true");
    }
    if (Boolean.TRUE.equals(span.shared())) {
        // Last possible field: the returned wroteField flag is intentionally unused.
        writeFieldBegin(b, "shared", wroteField);
        b.writeAscii("true");
    }
    b.writeByte('}');
}
@Test void writeClientSpan() { jsonWriter.write(clientSpan, buffer); assertThat(buffer.toString()).isEqualTo("{" + "\"traceId\":\"0000000000000001\",\"parentId\":\"0000000000000002\",\"id\":\"0000000000000003\"," + "\"kind\":\"CLIENT\",\"name\":\"get\",\"timestamp\":1000,\"duration\":200," + "\"localEndpoint\":{\"serviceName\":\"frontend\",\"ipv4\":\"127.0.0.1\"}," + "\"remoteEndpoint\":{\"serviceName\":\"backend\",\"ipv4\":\"192.168.99.101\",\"port\":9000}," + "\"annotations\":[{\"timestamp\":1100,\"value\":\"foo\"}]," + "\"tags\":{\"http.path\":\"/api\",\"clnt/finagle.version\":\"6.45.0\"}" + "}"); }
BackgroundJobRunner getBackgroundJobRunner(Job job) { assertJobExists(job.getJobDetails()); return backgroundJobRunners.stream() .filter(jobRunner -> jobRunner.supports(job)) .findFirst() .orElseThrow(() -> problematicConfigurationException("Could not find a BackgroundJobRunner: either no JobActivator is registered, your Background Job Class is not registered within the IoC container or your Job does not have a default no-arg constructor.")); }
@Test void getBackgroundJobRunnerForNonIoCJobWithoutInstance() { jobActivator.clear(); final Job job = anEnqueuedJob() .<TestService>withJobDetails(ts -> ts.doWork()) .build(); assertThat(backgroundJobServer.getBackgroundJobRunner(job)) .isNotNull() .isInstanceOf(BackgroundJobWithoutIocRunner.class); }
public static Map<String, String> decodeMap(String data) { if (data == null) { return null; } Map<String, String> map = new HashMap<>(); if (StringUtils.isBlank(data)) { return map; } String[] kvPairs = data.split(PAIR_SPLIT); if (kvPairs.length == 0) { return map; } for (String kvPair : kvPairs) { if (StringUtils.isNullOrEmpty(kvPair)) { continue; } String[] kvs = kvPair.split(KV_SPLIT); if (kvs.length != 2) { continue; } map.put(kvs[0], kvs[1]); } return map; }
@Test public void decodeMap() { Assertions.assertNull(CollectionUtils.decodeMap(null)); Map<String, String> map = CollectionUtils.decodeMap(""); Assertions.assertEquals(0, map.size()); map = CollectionUtils.decodeMap("&"); Assertions.assertEquals(0, map.size()); map = CollectionUtils.decodeMap("="); Assertions.assertEquals(0, map.size()); map = CollectionUtils.decodeMap("&="); Assertions.assertEquals(0, map.size()); map = CollectionUtils.decodeMap("x=1"); Assertions.assertEquals(1, map.size()); Assertions.assertEquals("1", map.get("x")); map = CollectionUtils.decodeMap("x=1&y=2"); Assertions.assertEquals(2, map.size()); Assertions.assertEquals("2", map.get("y")); }
@Override public Map<String, Set<String>> readPluginsStorages() { log.debug("Reading extensions storages from plugins"); Map<String, Set<String>> result = new LinkedHashMap<>(); List<PluginWrapper> plugins = pluginManager.getPlugins(); for (PluginWrapper plugin : plugins) { String pluginId = plugin.getPluginId(); log.debug("Reading extensions storages for plugin '{}'", pluginId); final Set<String> bucket = new HashSet<>(); try { Enumeration<URL> urls = findExtensionResource((PluginClassLoader) plugin.getPluginClassLoader()); if (urls.hasMoreElements()) { collectExtensions(urls, bucket); } else { log.debug("Cannot find '{}'", EXTENSIONS_RESOURCE); } debugExtensions(bucket); result.put(pluginId, bucket); } catch (IOException | URISyntaxException e) { log.error(e.getMessage(), e); } } return result; }
@Test void readPluginsStorages() { String pluginId = "testPlugin"; PluginWrapper pluginWrapper = mock(PluginWrapper.class); when(pluginWrapper.getPluginId()).thenReturn(pluginId); when(pluginWrapper.getPluginClassLoader()).thenReturn(null); // not needed for this test PluginManager pluginManager = mock(PluginManager.class); when(pluginManager.getPlugins()).thenReturn(Collections.singletonList(pluginWrapper)); ServiceProviderExtensionFinder finder = new ServiceProviderExtensionFinder(pluginManager) { @Override Enumeration<URL> findExtensionResource(PluginClassLoader classLoader) throws IOException { return getExtensionEnumeration(); } }; Map<String, Set<String>> storages = finder.readPluginsStorages(); assertNotNull(storages); assertTrue(storages.containsKey(pluginId)); Set<String> extensions = storages.get(pluginId); assertEquals(2, extensions.size()); assertThat(extensions, containsInAnyOrder(HELLO_GREETER_EXTENSION, WELCOME_GREETER_EXTENSION)); }
@Override @CacheEvict(value = RedisKeyConstants.PERMISSION_MENU_ID_LIST, key = "#createReqVO.permission", condition = "#createReqVO.permission != null") public Long createMenu(MenuSaveVO createReqVO) { // 校验父菜单存在 validateParentMenu(createReqVO.getParentId(), null); // 校验菜单(自己) validateMenu(createReqVO.getParentId(), createReqVO.getName(), null); // 插入数据库 MenuDO menu = BeanUtils.toBean(createReqVO, MenuDO.class); initMenuProperty(menu); menuMapper.insert(menu); // 返回 return menu.getId(); }
@Test public void testCreateMenu_success() { // mock 数据(构造父菜单) MenuDO menuDO = buildMenuDO(MenuTypeEnum.MENU, "parent", 0L); menuMapper.insert(menuDO); Long parentId = menuDO.getId(); // 准备参数 MenuSaveVO reqVO = randomPojo(MenuSaveVO.class, o -> { o.setParentId(parentId); o.setName("testSonName"); o.setType(MenuTypeEnum.MENU.getType()); }).setId(null); // 防止 id 被赋值 Long menuId = menuService.createMenu(reqVO); // 校验记录的属性是否正确 MenuDO dbMenu = menuMapper.selectById(menuId); assertPojoEquals(reqVO, dbMenu, "id"); }
void handleDirectoriesOffline( int brokerId, long brokerEpoch, List<Uuid> offlineDirs, List<ApiMessageAndVersion> records ) { BrokerRegistration registration = clusterControl.registration(brokerId); List<Uuid> newOfflineDirs = registration.directoryIntersection(offlineDirs); if (!newOfflineDirs.isEmpty()) { for (Uuid newOfflineDir : newOfflineDirs) { TimelineHashSet<TopicIdPartition> parts = directoriesToPartitions.get(newOfflineDir); Iterator<TopicIdPartition> iterator = (parts == null) ? Collections.emptyIterator() : parts.iterator(); generateLeaderAndIsrUpdates( "handleDirectoriesOffline[" + brokerId + ":" + newOfflineDir + "]", brokerId, NO_LEADER, NO_LEADER, records, iterator); } List<Uuid> newOnlineDirs = registration.directoryDifference(offlineDirs); records.add(new ApiMessageAndVersion(new BrokerRegistrationChangeRecord(). setBrokerId(brokerId).setBrokerEpoch(brokerEpoch). setLogDirs(newOnlineDirs), (short) 2)); log.warn("Directories {} in broker {} marked offline, remaining directories: {}", newOfflineDirs, brokerId, newOnlineDirs); } }
/**
 * Marking one of broker b1's directories offline must (a) re-register b1 with
 * only its remaining directory, (b) move leadership of the partitions hosted in
 * the offline directory to b2, and (c) ignore directory ids not registered to b1.
 */
@Test
void testHandleDirectoriesOffline() {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().build();
    int b1 = 101, b2 = 102;
    Uuid dir1b1 = Uuid.fromString("suitdzfTTdqoWcy8VqmkUg");
    Uuid dir2b1 = Uuid.fromString("yh3acnzGSeurSTj8aIhOjw");
    Uuid dir1b2 = Uuid.fromString("OmpmJ8RjQliQlEFht56DwQ");
    Uuid dir2b2 = Uuid.fromString("w05baLpsT5Oz0LvKTKXoDw");
    ctx.registerBrokersWithDirs(b1, asList(dir1b1, dir2b1), b2, asList(dir1b2, dir2b2));
    ctx.unfenceBrokers(b1, b2);
    Uuid topicA = ctx.createTestTopic("a", new int[][]{new int[]{b1, b2}, new int[]{b1, b2}}).topicId();
    Uuid topicB = ctx.createTestTopic("b", new int[][]{new int[]{b1, b2}, new int[]{b1, b2}}).topicId();
    // Partition 0 of each topic lives in dir1b1, partition 1 in dir2b1.
    ctx.assignReplicasToDirs(b1, new HashMap<TopicIdPartition, Uuid>() {{
        put(new TopicIdPartition(topicA, 0), dir1b1);
        put(new TopicIdPartition(topicA, 1), dir2b1);
        put(new TopicIdPartition(topicB, 0), dir1b1);
        put(new TopicIdPartition(topicB, 1), dir2b1);
    }});
    ctx.assignReplicasToDirs(b2, new HashMap<TopicIdPartition, Uuid>() {{
        put(new TopicIdPartition(topicA, 0), dir1b2);
        put(new TopicIdPartition(topicA, 1), dir2b2);
        put(new TopicIdPartition(topicB, 0), dir1b2);
        put(new TopicIdPartition(topicB, 1), dir2b2);
    }});

    List<ApiMessageAndVersion> records = new ArrayList<>();
    ctx.replicationControl.handleDirectoriesOffline(b1, defaultBrokerEpoch(b1), asList(
            dir1b1,
            dir1b2 // should not cause update to dir1b2 as it's not registered to b1
    ), records);
    // b1 must be re-registered with only dir2b1 remaining.
    assertEquals(
            singletonList(new ApiMessageAndVersion(new BrokerRegistrationChangeRecord()
                    .setBrokerId(b1).setBrokerEpoch(defaultBrokerEpoch(b1))
                    .setLogDirs(singletonList(dir2b1)), (short) 2)),
            filter(records, BrokerRegistrationChangeRecord.class)
    );
    short partitionChangeRecordVersion = ctx.featureControl.metadataVersion().partitionChangeRecordVersion();
    // Leadership of the partitions in dir1b1 moves to b2.
    assertEquals(
            sortPartitionChangeRecords(asList(
                    new ApiMessageAndVersion(new PartitionChangeRecord().setTopicId(topicA).setPartitionId(0)
                            .setLeader(b2).setIsr(singletonList(b2)), partitionChangeRecordVersion),
                    new ApiMessageAndVersion(new PartitionChangeRecord().setTopicId(topicB).setPartitionId(0)
                            .setLeader(b2).setIsr(singletonList(b2)), partitionChangeRecordVersion)
            )),
            sortPartitionChangeRecords(filter(records, PartitionChangeRecord.class))
    );
    assertEquals(3, records.size());
    ctx.replay(records);
    assertEquals(Collections.singletonList(dir2b1), ctx.clusterControl.registration(b1).directories());
}
static Schema getSchema(Class<? extends Message> clazz) { return getSchema(ProtobufUtil.getDescriptorForClass(clazz)); }
@Test public void testMapPrimitiveSchema() { assertEquals( TestProtoSchemas.MAP_PRIMITIVE_SCHEMA, ProtoSchemaTranslator.getSchema(Proto3SchemaMessages.MapPrimitive.class)); }
@Subscribe public void inputUpdated(InputUpdated inputUpdatedEvent) { final String inputId = inputUpdatedEvent.id(); LOG.debug("Input updated: {}", inputId); final Input input; try { input = inputService.find(inputId); } catch (NotFoundException e) { LOG.warn("Received InputUpdated event but could not find input {}", inputId, e); return; } final boolean startInput; final IOState<MessageInput> inputState = inputRegistry.getInputState(inputId); if (inputState != null) { startInput = inputState.getState() == IOState.Type.RUNNING; inputRegistry.remove(inputState); } else { startInput = false; } if (startInput && (input.isGlobal() || this.nodeId.getNodeId().equals(input.getNodeId()))) { startInput(input); } }
@Test public void inputUpdatedRestartsGlobalInputOnAnyNode() throws Exception { final String inputId = "input-id"; final Input input = mock(Input.class); @SuppressWarnings("unchecked") final IOState<MessageInput> inputState = mock(IOState.class); when(inputState.getState()).thenReturn(IOState.Type.RUNNING); when(inputService.find(inputId)).thenReturn(input); when(inputRegistry.getInputState(inputId)).thenReturn(inputState); when(input.getNodeId()).thenReturn(OTHER_NODE_ID); when(input.isGlobal()).thenReturn(true); final MessageInput messageInput = mock(MessageInput.class); when(inputService.getMessageInput(input)).thenReturn(messageInput); listener.inputUpdated(InputUpdated.create(inputId)); verify(inputLauncher, times(1)).launch(messageInput); }
public StatementExecutorResponse execute( final ConfiguredStatement<? extends Statement> statement, final KsqlExecutionContext executionContext, final KsqlSecurityContext securityContext ) { final String commandRunnerWarningString = commandRunnerWarning.get(); if (!commandRunnerWarningString.equals("")) { throw new KsqlServerException("Failed to handle Ksql Statement." + System.lineSeparator() + commandRunnerWarningString); } final InjectorWithSideEffects injector = InjectorWithSideEffects.wrap( injectorFactory.apply(executionContext, securityContext.getServiceContext())); final ConfiguredStatementWithSideEffects<?> injectedWithSideEffects = injector.injectWithSideEffects(statement); try { return executeInjected( injectedWithSideEffects.getStatement(), statement, executionContext, securityContext); } catch (Exception e) { injector.revertSideEffects(injectedWithSideEffects); throw e; } }
@Test public void shouldNotInitTransactionWhenCommandRunnerWarningPresent() { // When: when(commandRunnerWarning.get()).thenReturn(DefaultErrorMessages.COMMAND_RUNNER_DEGRADED_INCOMPATIBLE_COMMANDS_ERROR_MESSAGE); // Then: assertThrows( KsqlServerException.class, () -> distributor.execute(CONFIGURED_STATEMENT, executionContext, securityContext) ); verify(transactionalProducer, never()).initTransactions(); }
public static Ip4Prefix valueOf(int address, int prefixLength) { return new Ip4Prefix(Ip4Address.valueOf(address), prefixLength); }
@Test(expected = NullPointerException.class) public void testInvalidValueOfNullAddress() { Ip4Address ipAddress; Ip4Prefix ipPrefix; ipAddress = null; ipPrefix = Ip4Prefix.valueOf(ipAddress, 24); }
@Override public MapperResult selectGroupInfoBySize(MapperContext context) { String sql = "SELECT id, group_id FROM group_capacity WHERE id > ? LIMIT ?"; return new MapperResult(sql, CollectionUtils.list(context.getWhereParameter(FieldConstant.ID), context.getPageSize())); }
@Test void testSelectGroupInfoBySize() { Object id = 1; context.putWhereParameter(FieldConstant.ID, id); MapperResult mapperResult = groupCapacityMapperByMysql.selectGroupInfoBySize(context); assertEquals("SELECT id, group_id FROM group_capacity WHERE id > ? LIMIT ?", mapperResult.getSql()); context.putWhereParameter(FieldConstant.GMT_CREATE, createTime); assertArrayEquals(new Object[] {id, pageSize}, mapperResult.getParamList().toArray()); }
/**
 * Builds the module state snapshot for the Raft subsystem: each well-known
 * configuration key is resolved from properties, falling back to its default.
 *
 * Fix: the RAFT_READ_INDEX_TYPE state was registered twice with identical
 * arguments; the duplicate registration has been removed.
 *
 * @return populated ModuleState for RAFT_STATE
 */
@Override
public ModuleState build() {
    ModuleState moduleState = new ModuleState(RaftSysConstants.RAFT_STATE);
    // Election timeout is floored at the default — configured values below it are
    // ignored (NOTE(review): presumably to guarantee a sane minimum; confirm).
    moduleState.newState(RaftSysConstants.RAFT_ELECTION_TIMEOUT_MS,
            Math.max(stringToInt(RaftSysConstants.RAFT_ELECTION_TIMEOUT_MS, RaftSysConstants.DEFAULT_ELECTION_TIMEOUT),
                    RaftSysConstants.DEFAULT_ELECTION_TIMEOUT));
    moduleState.newState(RaftSysConstants.RAFT_SNAPSHOT_INTERVAL_SECS,
            stringToInt(RaftSysConstants.RAFT_SNAPSHOT_INTERVAL_SECS, RaftSysConstants.DEFAULT_RAFT_SNAPSHOT_INTERVAL_SECS));
    moduleState.newState(RaftSysConstants.RAFT_CORE_THREAD_NUM,
            stringToInt(RaftSysConstants.RAFT_CORE_THREAD_NUM, 8));
    moduleState.newState(RaftSysConstants.RAFT_CLI_SERVICE_THREAD_NUM,
            stringToInt(RaftSysConstants.RAFT_CLI_SERVICE_THREAD_NUM, RaftSysConstants.DEFAULT_RAFT_CLI_SERVICE_THREAD_NUM));
    // Registered once (previously duplicated).
    moduleState.newState(RaftSysConstants.RAFT_READ_INDEX_TYPE,
            getProperty(RaftSysConstants.RAFT_READ_INDEX_TYPE));
    moduleState.newState(RaftSysConstants.RAFT_RPC_REQUEST_TIMEOUT_MS,
            stringToInt(RaftSysConstants.RAFT_RPC_REQUEST_TIMEOUT_MS, RaftSysConstants.DEFAULT_RAFT_RPC_REQUEST_TIMEOUT_MS));
    moduleState.newState(RaftSysConstants.MAX_BYTE_COUNT_PER_RPC,
            stringToInt(RaftSysConstants.MAX_BYTE_COUNT_PER_RPC, RaftSysConstants.DEFAULT_MAX_BYTE_COUNT_PER_RPC));
    moduleState.newState(RaftSysConstants.MAX_ENTRIES_SIZE,
            stringToInt(RaftSysConstants.MAX_ENTRIES_SIZE, RaftSysConstants.DEFAULT_MAX_ENTRIES_SIZE));
    moduleState.newState(RaftSysConstants.MAX_BODY_SIZE,
            stringToInt(RaftSysConstants.MAX_BODY_SIZE, RaftSysConstants.DEFAULT_MAX_BODY_SIZE));
    moduleState.newState(RaftSysConstants.MAX_APPEND_BUFFER_SIZE,
            stringToInt(RaftSysConstants.MAX_APPEND_BUFFER_SIZE, RaftSysConstants.DEFAULT_MAX_APPEND_BUFFER_SIZE));
    moduleState.newState(RaftSysConstants.MAX_ELECTION_DELAY_MS,
            stringToInt(RaftSysConstants.MAX_ELECTION_DELAY_MS, RaftSysConstants.DEFAULT_MAX_ELECTION_DELAY_MS));
    moduleState.newState(RaftSysConstants.ELECTION_HEARTBEAT_FACTOR,
            stringToInt(RaftSysConstants.ELECTION_HEARTBEAT_FACTOR, RaftSysConstants.DEFAULT_ELECTION_HEARTBEAT_FACTOR));
    moduleState.newState(RaftSysConstants.APPLY_BATCH,
            stringToInt(RaftSysConstants.APPLY_BATCH, RaftSysConstants.DEFAULT_APPLY_BATCH));
    moduleState.newState(RaftSysConstants.SYNC,
            stringToBoolean(RaftSysConstants.SYNC, RaftSysConstants.DEFAULT_SYNC));
    moduleState.newState(RaftSysConstants.SYNC_META,
            stringToBoolean(RaftSysConstants.SYNC_META, RaftSysConstants.DEFAULT_SYNC_META));
    moduleState.newState(RaftSysConstants.DISRUPTOR_BUFFER_SIZE,
            stringToInt(RaftSysConstants.DISRUPTOR_BUFFER_SIZE, RaftSysConstants.DEFAULT_DISRUPTOR_BUFFER_SIZE));
    moduleState.newState(RaftSysConstants.REPLICATOR_PIPELINE,
            stringToBoolean(RaftSysConstants.REPLICATOR_PIPELINE, RaftSysConstants.DEFAULT_REPLICATOR_PIPELINE));
    moduleState.newState(RaftSysConstants.MAX_REPLICATOR_INFLIGHT_MSGS,
            stringToInt(RaftSysConstants.MAX_REPLICATOR_INFLIGHT_MSGS, RaftSysConstants.DEFAULT_MAX_REPLICATOR_INFLIGHT_MSGS));
    moduleState.newState(RaftSysConstants.ENABLE_LOG_ENTRY_CHECKSUM,
            stringToBoolean(RaftSysConstants.ENABLE_LOG_ENTRY_CHECKSUM, RaftSysConstants.DEFAULT_ENABLE_LOG_ENTRY_CHECKSUM));
    return moduleState;
}
/**
 * With no properties configured, every Raft module state entry must carry its
 * documented default (and RAFT_READ_INDEX_TYPE, which has no default, is null).
 */
@Test
void testBuild() {
    ModuleState actual = new RaftModuleStateBuilder().build();
    Map<String, Object> states = actual.getStates();
    assertEquals(RaftSysConstants.RAFT_STATE, actual.getModuleName());
    assertEquals(RaftSysConstants.DEFAULT_ELECTION_TIMEOUT, states.get(RaftSysConstants.RAFT_ELECTION_TIMEOUT_MS));
    assertEquals(RaftSysConstants.DEFAULT_RAFT_SNAPSHOT_INTERVAL_SECS, states.get(RaftSysConstants.RAFT_SNAPSHOT_INTERVAL_SECS));
    assertEquals(RaftSysConstants.DEFAULT_RAFT_CLI_SERVICE_THREAD_NUM, states.get(RaftSysConstants.RAFT_CLI_SERVICE_THREAD_NUM));
    // No property configured and no default: value is absent.
    assertNull(states.get(RaftSysConstants.RAFT_READ_INDEX_TYPE));
    assertEquals(RaftSysConstants.DEFAULT_RAFT_RPC_REQUEST_TIMEOUT_MS, states.get(RaftSysConstants.RAFT_RPC_REQUEST_TIMEOUT_MS));
    assertEquals(RaftSysConstants.DEFAULT_MAX_BYTE_COUNT_PER_RPC, states.get(RaftSysConstants.MAX_BYTE_COUNT_PER_RPC));
    assertEquals(RaftSysConstants.DEFAULT_MAX_ENTRIES_SIZE, states.get(RaftSysConstants.MAX_ENTRIES_SIZE));
    assertEquals(RaftSysConstants.DEFAULT_MAX_BODY_SIZE, states.get(RaftSysConstants.MAX_BODY_SIZE));
    assertEquals(RaftSysConstants.DEFAULT_MAX_APPEND_BUFFER_SIZE, states.get(RaftSysConstants.MAX_APPEND_BUFFER_SIZE));
    assertEquals(RaftSysConstants.DEFAULT_MAX_ELECTION_DELAY_MS, states.get(RaftSysConstants.MAX_ELECTION_DELAY_MS));
    assertEquals(RaftSysConstants.DEFAULT_ELECTION_HEARTBEAT_FACTOR, states.get(RaftSysConstants.ELECTION_HEARTBEAT_FACTOR));
    assertEquals(RaftSysConstants.DEFAULT_APPLY_BATCH, states.get(RaftSysConstants.APPLY_BATCH));
    assertEquals(RaftSysConstants.DEFAULT_SYNC, states.get(RaftSysConstants.SYNC));
    assertEquals(RaftSysConstants.DEFAULT_SYNC_META, states.get(RaftSysConstants.SYNC_META));
    assertEquals(RaftSysConstants.DEFAULT_DISRUPTOR_BUFFER_SIZE, states.get(RaftSysConstants.DISRUPTOR_BUFFER_SIZE));
    assertEquals(RaftSysConstants.DEFAULT_REPLICATOR_PIPELINE, states.get(RaftSysConstants.REPLICATOR_PIPELINE));
    assertEquals(RaftSysConstants.DEFAULT_MAX_REPLICATOR_INFLIGHT_MSGS, states.get(RaftSysConstants.MAX_REPLICATOR_INFLIGHT_MSGS));
    assertEquals(RaftSysConstants.DEFAULT_ENABLE_LOG_ENTRY_CHECKSUM, states.get(RaftSysConstants.ENABLE_LOG_ENTRY_CHECKSUM));
}
public static String getDefNamespace(Reader in) { List<String> defLines = getDefLines(in); String declaredPackage = getDirective(defLines, packageDirectivePattern); String declaredNamespace = getDirective(defLines, namespaceDirectivePattern); return declaredPackage != null ? declaredPackage : declaredNamespace != null ? declaredNamespace : ""; }
@Test public void testGetNamespace() { // namespace after version StringReader reader = new StringReader("version=1\nnamespace=a\nint a default=0"); assertEquals("a", ConfigUtils.getDefNamespace(reader)); // namespace first reader = new StringReader("namespace=a\nversion=1\nint a default=0"); assertEquals("a", ConfigUtils.getDefNamespace(reader)); // package after namespace reader = new StringReader("namespace=a\npackage=b\nint a default=0"); assertEquals("b", ConfigUtils.getDefNamespace(reader)); // package before namespace reader = new StringReader("package=b\nnamespace=a\nint a default=0"); assertEquals("b", ConfigUtils.getDefNamespace(reader)); // no actual package assertEquals("package (or namespace) must consist of one or more segments joined by single dots (.), " + "each starting with a lowercase letter (a-z), and then containing one or more lowercase letters (a-z), " + "digits (0-9), or underscores (_)", assertThrows(IllegalArgumentException.class, () -> ConfigUtils.getDefNamespace(new StringReader("package= \t \nint a default=0"))) .getMessage()); // too relaxed namespace assertEquals("package (or namespace) must consist of one or more segments joined by single dots (.), " + "each starting with a lowercase letter (a-z), and then containing one or more lowercase letters (a-z), " + "digits (0-9), or underscores (_)", assertThrows(IllegalArgumentException.class, () -> ConfigUtils.getDefNamespace(new StringReader("namespace=a/b\nint a default=0"))) .getMessage()); // No namespace reader = new StringReader("version=1\nint a default=0"); assertEquals("", ConfigUtils.getDefNamespace(reader)); // comment lines reader = new StringReader("#comment\nversion=1\n#comment2\nint a default=0"); assertEquals("", ConfigUtils.getDefNamespace(reader)); try { ConfigUtils.getDefNamespace(null); fail(); } catch (IllegalArgumentException e) { // } }
public Optional<File> fetchSnapshot(long offset) { return Optional.ofNullable(snapshots.get(offset)).map(x -> x.file()); }
@Test public void testFetchSnapshotEmptySnapShot() { int offset = 1; assertEquals(Optional.empty(), stateManager.fetchSnapshot(offset)); }
public static void redirectSystemOutAndError(Configuration conf) { SystemOutMode systemOutMode = conf.get(TASK_MANAGER_SYSTEM_OUT_MODE); switch (systemOutMode) { case LOG: redirectToCurrentLog( conf.get(TASK_MANAGER_SYSTEM_OUT_LOG_CACHE_SIZE).getBytes(), conf.get(TASK_MANAGER_SYSTEM_OUT_LOG_THREAD_NAME)); break; case IGNORE: ignoreSystemOutAndError(); break; case DEFAULT: default: break; } }
@Test void testDefaultSystemOutAndErr() { ByteArrayOutputStream outStream = new ByteArrayOutputStream(); ByteArrayOutputStream errStream = new ByteArrayOutputStream(); System.setOut(new PrintStream(outStream)); System.setErr(new PrintStream(errStream)); SystemOutRedirectionUtils.redirectSystemOutAndError(new Configuration()); String logContext = "This is log context!"; System.out.print(logContext); assertThat(outStream.toString()).isEqualTo(logContext); System.err.print(logContext); assertThat(errStream.toString()).isEqualTo(logContext); }
/** @return the display name of this CI system ("CirrusCI"). */
@Override public String getName() { return "CirrusCI"; }
// Pins the CI system's display name to "CirrusCI".
@Test public void getName() { assertThat(underTest.getName()).isEqualTo("CirrusCI"); }
// Polls one batch of SQS messages: resets per-poll bookkeeping (shutdownRunningTask,
// pendingExchanges), invokes the polling task, marks the consumer ready once AWS has
// responded, converts the messages to exchanges, and processes them as a single batch.
@Override protected int poll() throws Exception { // must reset for each poll shutdownRunningTask = null; pendingExchanges = 0; List<software.amazon.awssdk.services.sqs.model.Message> messages = pollingTask.call(); // okay we have some response from aws so lets mark the consumer as ready forceConsumerAsReady(); Queue<Exchange> exchanges = createExchanges(messages); return processBatch(CastUtils.cast(exchanges)); }
// With autoCreateQueue enabled and the client always throwing QueueDoesNotExistException,
// a poll returns zero messages, issues exactly one GetQueueUrl and one CreateQueue request,
// and bounds the number of receive attempts.
@Test void shouldAutomaticallyCreateQueueOnQueueDoesNotExistExceptionOnce() throws Exception { // given configuration.setAutoCreateQueue(true); sqsClientMock.setReceiveRequestHandler(request -> { throw QueueDoesNotExistException.builder().build(); }); sqsClientMock.setQueueUrl(null); try (var tested = createConsumer(1555)) { // when var polledMessagesCount = tested.poll(); // then assertThat(polledMessagesCount).isZero(); assertThat(receivedExchanges).isEmpty(); // the request execution will be ignored once non-existing queue is detected assertThat(sqsClientMock.getReceiveRequests()).hasSizeLessThanOrEqualTo(156); assertThat(sqsClientMock.getQueueUrlRequests()).containsExactly(GetQueueUrlRequest.builder() .queueName(configuration.getQueueName()) .build()); assertThat(sqsClientMock.getCreateQueueRequets()).containsExactly(CreateQueueRequest.builder() .queueName(configuration.getQueueName()) .attributes(emptyMap()) .build()); } }
// Executes all SET items of the statement. When the statement touches user variables, the
// session's user-variable map is cloned first and registered as a copy-in-write snapshot so
// that a multi-variable SET applies atomically: the clone is committed (modifyUserVariables)
// only if every item succeeded; on any failure the copy-in-write state is reset and discarded.
public void execute() throws DdlException { Map<String, UserVariable> clonedUserVars = new ConcurrentHashMap<>(); boolean hasUserVar = stmt.getSetListItems().stream().anyMatch(var -> var instanceof UserVariable); boolean executeSuccess = true; if (hasUserVar) { clonedUserVars.putAll(ctx.getUserVariables()); ctx.modifyUserVariablesCopyInWrite(clonedUserVars); } try { for (SetListItem var : stmt.getSetListItems()) { setVariablesOfAllType(var); } } catch (Throwable e) { if (hasUserVar) { executeSuccess = false; } throw e; } finally { //If the set sql contains more than one user variable, //the atomicity of the modification of this set of variables must be ensured. if (hasUserVar) { ctx.resetUserVariableCopyInWrite(); if (executeSuccess) { ctx.modifyUserVariables(clonedUserVars); } } } }
// Verifies SET of user variables with casts: decimal cast yields a DecimalV3 literal "10",
// and boolean casts of 1/0 yield true/false BoolLiterals readable back from the context.
@Test public void testUserDefineVariable2() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String sql = "set @var = cast(10 as decimal)"; SetStmt stmt = (SetStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); SetExecutor executor = new SetExecutor(ctx, stmt); executor.execute(); UserVariable userVariable = ctx.getUserVariable("var"); Assert.assertTrue(userVariable.getEvaluatedExpression().getType().isDecimalV3()); LiteralExpr literalExpr = (LiteralExpr) userVariable.getEvaluatedExpression(); Assert.assertEquals("10", literalExpr.getStringValue()); sql = "set @var = cast(1 as boolean)"; stmt = (SetStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); executor = new SetExecutor(ctx, stmt); executor.execute(); userVariable = ctx.getUserVariable("var"); Assert.assertTrue(userVariable.getEvaluatedExpression().getType().isBoolean()); BoolLiteral literal = (BoolLiteral) userVariable.getEvaluatedExpression(); Assert.assertTrue(literal.getValue()); sql = "set @var = cast(0 as boolean)"; stmt = (SetStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); executor = new SetExecutor(ctx, stmt); executor.execute(); userVariable = ctx.getUserVariable("var"); Assert.assertTrue(userVariable.getEvaluatedExpression().getType().isBoolean()); literal = (BoolLiteral) userVariable.getEvaluatedExpression(); Assert.assertFalse(literal.getValue()); }
public void submit() { //Transmit information to our transfer object to communicate between layers saveToWorker(); //call the service layer to register our worker service.registerWorker(worker); //check for any errors if (worker.getNotification().hasErrors()) { indicateErrors(); LOGGER.info("Not registered, see errors"); } else { LOGGER.info("Registration Succeeded"); } }
// Submitting a form built entirely from nulls must leave the worker's fields null and
// accumulate exactly four validation errors in its notification.
@Test void submitWithErrors() { // Set up the worker with a notification containing errors registerWorkerForm = new RegisterWorkerForm(null, null, null); // Submit the form registerWorkerForm.submit(); // Verify that the worker's properties remain unchanged assertNull(registerWorkerForm.worker.getName()); assertNull(registerWorkerForm.worker.getOccupation()); assertNull(registerWorkerForm.worker.getDateOfBirth()); // Verify the presence of errors assertEquals(registerWorkerForm.worker.getNotification().getErrors().size(), 4); }
/**
 * Concatenates the given JSON documents. When every argument parses to a JSON
 * object the objects are merged; otherwise all arguments are coerced to arrays
 * and appended in order. Returns null when the argument array itself is null,
 * when any element is null, or when any element is not valid JSON.
 */
@Udf
public String concat(@UdfParameter final String... jsonStrings) {
    if (jsonStrings == null) {
        return null;
    }
    final List<JsonNode> parsed = new ArrayList<>(jsonStrings.length);
    boolean objectsOnly = true;
    for (final String json : jsonStrings) {
        if (json == null) {
            return null;
        }
        final JsonNode node = UdfJsonMapper.parseJson(json);
        if (node.isMissingNode()) {
            return null;
        }
        // Once any non-object is seen, the whole concatenation is array-based.
        objectsOnly = objectsOnly && node.isObject();
        parsed.add(node);
    }
    JsonNode accumulated = parsed.get(0);
    for (int i = 1; i < parsed.size(); i++) {
        accumulated = objectsOnly
                ? concatObjects((ObjectNode) accumulated, (ObjectNode) parsed.get(i))
                : concatArrays(toArrayNode(accumulated), toArrayNode(parsed.get(i)));
    }
    return UdfJsonMapper.writeValueAsJson(accumulated);
}
// A null element among the arguments makes concat() return null.
@Test public void shouldReturnNullIfTheFirstArgIsNull() { assertNull(udf.concat(null, "1")); }
// Commit-rate gate for a transaction. Compaction transactions are never throttled. Otherwise:
// update the recorded write duration, compute the one-time allow-commit timestamp for the
// touched partitions, then (1) fail permanently if the allowed commit time is at/after the
// transaction's abort deadline, (2) ask the caller to retry later if the allowed time is still
// in the future, and (3) delay by 1s if any partition's compaction score exceeds the bound.
public void check(@NotNull Set<Long> partitionIds, long currentTimeMs) throws CommitRateExceededException, CommitFailedException { Preconditions.checkNotNull(partitionIds, "partitionIds is null"); // Does not limit the commit rate of compaction transactions if (transactionState.getSourceType() == TransactionState.LoadJobSourceType.LAKE_COMPACTION) { return; } updateWriteDuration(transactionState); setAllowCommitTimeOnce(partitionIds); long txnId = transactionState.getTransactionId(); long abortTime = transactionState.getPrepareTime() + transactionState.getTimeoutMs(); if (transactionState.getAllowCommitTimeMs() >= abortTime) { throw new CommitFailedException("Txn " + txnId + " timed out due to ingestion slowdown", txnId); } if (transactionState.getAllowCommitTimeMs() > currentTimeMs) { LOG.info("delay commit of txn {} for {}ms, write took {}ms", transactionState.getTransactionId(), transactionState.getAllowCommitTimeMs() - currentTimeMs, transactionState.getWriteDurationMs()); throw new CommitRateExceededException(txnId, transactionState.getAllowCommitTimeMs()); } long upperBound = compactionScoreUpperBound(); if (upperBound > 0 && anyCompactionScoreExceedsUpperBound(partitionIds, upperBound)) { throw new CommitRateExceededException(txnId, currentTimeMs + 1000/* delay 1s */); } }
// A transaction whose write just finished and whose partition score sits at the threshold must
// pass check() without throwing, and its allow-commit time must equal the write end time.
@Test public void testNotThrottled() throws CommitRateExceededException { long partitionId = 54321; Set<Long> partitions = new HashSet<>(Collections.singletonList(partitionId)); long currentTimeMs = System.currentTimeMillis(); transactionState.setPrepareTime(currentTimeMs - 100); transactionState.setWriteEndTimeMs(currentTimeMs); Assert.assertTrue(ratio > 0.01); Assert.assertTrue(threshold > 0); compactionMgr.handleLoadingFinished(new PartitionIdentifier(dbId, tableId, partitionId), 3, currentTimeMs, Quantiles.compute(Lists.newArrayList(threshold))); limiter.check(partitions, currentTimeMs); Assert.assertEquals(transactionState.getWriteEndTimeMs(), transactionState.getAllowCommitTimeMs()); }
// Executes the Redis GET command for the raw key, decoding the reply as a byte array.
@Override public byte[] get(byte[] key) { return read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key); }
// Spring Data RedisTemplate geo round-trip over the Redisson connection factory: adds three
// members, then a radius query around the first point must return that member ("a") first.
@Test public void testGeo() { RedisTemplate<String, String> redisTemplate = new RedisTemplate<>(); redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson)); redisTemplate.afterPropertiesSet(); String key = "test_geo_key"; Point point = new Point(116.401001, 40.119499); redisTemplate.opsForGeo().add(key, point, "a"); point = new Point(111.545998, 36.133499); redisTemplate.opsForGeo().add(key, point, "b"); point = new Point(111.483002, 36.030998); redisTemplate.opsForGeo().add(key, point, "c"); Circle within = new Circle(116.401001, 40.119499, 80000); RedisGeoCommands.GeoRadiusCommandArgs args = RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs().includeCoordinates(); GeoResults<RedisGeoCommands.GeoLocation<String>> res = redisTemplate.opsForGeo().radius(key, within, args); assertThat(res.getContent().get(0).getContent().getName()).isEqualTo("a"); }
/** @return the current interval value. */
@Override public int getInterval() { return currentInterval; }
// Builds a PredicateIntervalStore where each doc id carries bounds of three different shapes
// (diff >= i for ids < 30, diff < i for ids < 60, i < diff < i+10 otherwise), then walks a
// BoundsPostingList at two "value" positions (5 and 40) and checks which intervals survive the
// bounds filtering, including skip-ahead behaviour.
@Test void requireThatPostingListChecksBounds() { PredicateIntervalStore.Builder builder = new PredicateIntervalStore.Builder(); List<Integer> docIds = new ArrayList<>(); List<Integer> dataRefs = new ArrayList<>(); for (int id = 1; id < 100; ++id) { List<IntervalWithBounds> boundsList = new ArrayList<>(); for (int i = 0; i <= id; ++i) { int bounds; if (id < 30) { bounds = 0x80000000 | i; // diff >= i } else if (id < 60) { bounds = 0x40000000 | i; // diff < i } else { bounds = (i << 16) | (i + 10); // i < diff < i + 10 } boundsList.add(new IntervalWithBounds((i + 1) << 16 | 0xffff, bounds)); } docIds.add(id); dataRefs.add(builder.insert(boundsList.stream().flatMap(IntervalWithBounds::stream).toList())); } PredicateIntervalStore store = builder.build(); BoundsPostingList postingList = new BoundsPostingList( store, Ints.toArray(docIds), Ints.toArray(dataRefs), 0xffffffffffffffffL, 5); assertEquals(-1, postingList.getDocId()); assertEquals(0, postingList.getInterval()); assertEquals(0xffffffffffffffffL, postingList.getSubquery()); checkNext(postingList, 0, 1, 2); // [0..] .. [1..] checkNext(postingList, 1, 2, 3); // [0..] .. [2..] checkNext(postingList, 10, 11, 6); // [0..] .. [5..] checkNext(postingList, 20, 21, 6); checkNext(postingList, 30, 31, 26); // [..5] .. [..30] checkNext(postingList, 50, 51, 46); checkNext(postingList, 60, 61, 6); // [0..10] .. [5..15] postingList = new BoundsPostingList(store, Ints.toArray(docIds), Ints.toArray(dataRefs), 0xffffffffffffffffL, 40); checkNext(postingList, 0, 1, 2); checkNext(postingList, 20, 21, 22); checkNext(postingList, 30, 31, 0); // skip ahead to match checkNext(postingList, 32, 33, 0); // skip ahead to match checkNext(postingList, 33, 34, 0); // skip ahead to match checkNext(postingList, 40, 41, 1); checkNext(postingList, 50, 51, 11); // [..40] .. [..50] checkNext(postingList, 60, 61, 10); // [31..40] .. [40..49] }
/**
 * Appends the given VPLS to the JSON configuration: serialises its name,
 * interfaces and encapsulation into an object node and adds it to the VPLS
 * array, initialising that array first when no VPLS has been configured yet.
 *
 * @param vpls the VPLS configuration to add
 */
public void addVpls(VplsConfig vpls) {
    ObjectNode newVplsNode = JsonNodeFactory.instance.objectNode();
    newVplsNode.put(NAME, vpls.name());
    ArrayNode interfacesNode = newVplsNode.putArray(INTERFACE);
    vpls.ifaces().forEach(interfacesNode::add);
    newVplsNode.put(ENCAPSULATION, vpls.encap().toString());
    ArrayNode allVplsNode;
    if (vplss().isEmpty()) {
        allVplsNode = initVplsConfiguration();
    } else {
        allVplsNode = (ArrayNode) object.get(VPLS);
    }
    allVplsNode.add(newVplsNode);
}
// Adding a new VPLS must grow the configured VPLS list by exactly one.
@Test public void addVpls() { int initialSize = vplsAppConfig.vplss().size(); VplsConfig newVpls = createNewVpls(); vplsAppConfig.addVpls(newVpls); assertEquals("The new VPLS has not been added correctly to the list of" + "existing VPLSs", initialSize + 1, vplsAppConfig.vplss().size()); vplss.add(newVpls); }
/**
 * Notifies every registered listener that the given projects were rekeyed.
 * Rejects a null set; an empty set is a no-op. Listener failures are contained
 * by the safelyCallListener wrapper so one listener cannot break the others.
 */
@Override
public void onProjectsRekeyed(Set<RekeyedProject> rekeyedProjects) {
    checkNotNull(rekeyedProjects, "rekeyedProjects can't be null");
    if (!rekeyedProjects.isEmpty()) {
        Arrays.stream(listeners)
            .forEach(safelyCallListener(listener -> listener.onProjectsRekeyed(rekeyedProjects)));
    }
}
// A null set must be rejected with an NPE carrying the documented message.
@Test public void onProjectsRekeyed_throws_NPE_if_set_is_null() { assertThatThrownBy(() -> underTestWithListeners.onProjectsRekeyed(null)) .isInstanceOf(NullPointerException.class) .hasMessage("rekeyedProjects can't be null"); }
/**
 * Lists every keyspace of the given cluster, flagging for each one its durable-writes
 * setting, replication options, and whether it is a system keyspace.
 *
 * @param clusterId id used to resolve the CQL session from the session factory
 * @return result wrapper (wasApplied=true) containing one entry per keyspace
 */
public KeyspaceDTO.ClusterKeyspaceListResult generalKeyspaceList(String clusterId) {
    CqlSession session = cqlSessionFactory.get(clusterId);
    List<KeyspaceResult> results = new ArrayList<>();
    session.getMetadata().getKeyspaces().forEach((identifier, metadata) -> {
        String keyspaceName = identifier.asCql(true);
        results.add(
            KeyspaceResult.builder()
                .keyspaceName(keyspaceName)
                .durableWrites(metadata.isDurableWrites())
                .replication(metadata.getReplication())
                .systemKeyspace(ClusterUtils.isSystemKeyspace(session.getContext(), keyspaceName))
                .build()
        );
    });
    return KeyspaceDTO.ClusterKeyspaceListResult.builder()
        .wasApplied(true)
        .keyspaceList(results)
        .build();
}
// Every keyspace returned by generalKeyspaceList must not be a system keyspace
// according to ClusterUtils (the comment below is Korean for "system keyspace check").
@Test void generalKeyspaceList() { //system keyspace 체크 KeyspaceDTO.ClusterKeyspaceListResult keyspaceNameList = clusterKeyspaceCommander.generalKeyspaceList(CLUSTER_ID); for (KeyspaceResult keyspaceResult : keyspaceNameList.getKeyspaceList()) { Assertions.assertFalse( ClusterUtils.isSystemKeyspace(makeSession().getContext(), keyspaceResult.getKeyspaceName()), "include system table" ); } }
/**
 * Determines whether the given app version differs from the one last persisted, and if so
 * stores the new version. Any storage failure is logged and treated as a new version
 * (fail-open), matching the original behaviour.
 *
 * @param context     Android context (unused by the lookup itself)
 * @param currVersion current app version string; empty/null is never considered new
 * @return true when the version changed (or storage failed), false otherwise
 */
public static boolean checkVersionIsNew(Context context, String currVersion) {
    try {
        String storedVersion = SAStoreManager.getInstance().getString(SHARED_PREF_APP_VERSION, "");
        boolean versionChanged = !TextUtils.isEmpty(currVersion) && !currVersion.equals(storedVersion);
        if (versionChanged) {
            SAStoreManager.getInstance().setString(SHARED_PREF_APP_VERSION, currVersion);
        }
        return versionChanged;
    } catch (Exception ex) {
        SALog.printStackTrace(ex);
        // Fail open: when storage is unreadable, report the version as new.
        return true;
    }
}
// Two distinct version strings in sequence must each be reported as new.
@Test public void checkVersionIsNew() { Assert.assertTrue(SensorsDataUtils.checkVersionIsNew(mApplication, "5.1.0-pre")); Assert.assertTrue(SensorsDataUtils.checkVersionIsNew(mApplication, "15.1.0-pre")); }
/**
 * Collects every vulnerability-detector plugin matched against the scan targets
 * in the given reconnaissance report.
 *
 * @param reconnaissanceReport the report whose network services drive plugin matching
 * @return matching results for all plugins that are vuln detectors
 */
public ImmutableList<PluginMatchingResult<VulnDetector>> getVulnDetectors(
    ReconnaissanceReport reconnaissanceReport) {
  return tsunamiPlugins.entrySet().stream()
      // Only vuln-detector plugins participate; scanners/fingerprinters are skipped.
      .filter(pluginEntry -> isVulnDetector(pluginEntry.getKey()))
      .map(
          pluginEntry ->
              matchAllVulnDetectors(
                  pluginEntry.getKey(), pluginEntry.getValue(), reconnaissanceReport))
      .flatMap(Streams::stream)
      .collect(toImmutableList());
}
// A remote detector whose service-name filter matches nothing must still appear as a matched
// plugin, but with an empty service list.
@Test public void getVulnDetectors_whenRemoteDetectorWithServiceNameHasNoMatch_returnsNoServices() { NetworkService httpsService = NetworkService.newBuilder() .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 443)) .setTransportProtocol(TransportProtocol.TCP) .setServiceName("https") .build(); ReconnaissanceReport fakeReconnaissanceReport = ReconnaissanceReport.newBuilder() .setTargetInfo(TargetInfo.getDefaultInstance()) .addNetworkServices(httpsService) .build(); PluginManager pluginManager = Guice.createInjector( new FakePortScannerBootstrapModule(), new FakeServiceFingerprinterBootstrapModule(), FakeFilteringRemoteDetector.getModule()) .getInstance(PluginManager.class); ImmutableList<PluginMatchingResult<VulnDetector>> vulnDetectors = pluginManager.getVulnDetectors(fakeReconnaissanceReport); assertThat(vulnDetectors).hasSize(1); ImmutableList<MatchedPlugin> matchedResult = ((FakeFilteringRemoteDetector) vulnDetectors.get(0).tsunamiPlugin()).getMatchedPlugins(); assertThat(matchedResult.get(0).getServicesList()).isEmpty(); }
static Properties adminClientConfiguration(String bootstrapHostnames, PemTrustSet kafkaCaTrustSet, PemAuthIdentity authIdentity, Properties config) { if (config == null) { throw new InvalidConfigurationException("The config parameter should not be null"); } config.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapHostnames); // configuring TLS encryption if requested if (kafkaCaTrustSet != null) { config.putIfAbsent(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL"); config.setProperty(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "PEM"); config.setProperty(SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_CONFIG, kafkaCaTrustSet.trustedCertificatesString()); } // configuring TLS client authentication if (authIdentity != null) { config.putIfAbsent(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL"); config.setProperty(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "PEM"); config.setProperty(SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG, authIdentity.certificateChainAsPem()); config.setProperty(SslConfigs.SSL_KEYSTORE_KEY_CONFIG, authIdentity.privateKeyAsPem()); } config.putIfAbsent(AdminClientConfig.METADATA_MAX_AGE_CONFIG, "30000"); config.putIfAbsent(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10000"); config.putIfAbsent(AdminClientConfig.RETRIES_CONFIG, "3"); config.putIfAbsent(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, "40000"); return config; }
// mTLS path: with both a trust set and an auth identity, the config must contain SSL protocol,
// PEM trust/key stores, both CA certs (order not guaranteed), and the user cert/key.
@Test public void testMTlsConnection() { Properties config = DefaultAdminClientProvider.adminClientConfiguration("my-kafka:9092", mockPemTrustSet(), mockPemAuthIdentity(), new Properties()); assertThat(config.size(), is(11)); assertDefaultConfigs(config); assertThat(config.get(AdminClientConfig.SECURITY_PROTOCOL_CONFIG), is("SSL")); assertThat(config.get(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG), is("PEM")); assertThat(config.get(SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_CONFIG).toString(), containsString("ca1")); // The order is not deterministic. So we check both certificates are present assertThat(config.get(SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_CONFIG).toString(), containsString("ca2")); assertThat(config.get(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG).toString(), is("PEM")); assertThat(config.get(SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG).toString(), is("user-cert")); assertThat(config.get(SslConfigs.SSL_KEYSTORE_KEY_CONFIG).toString(), is("user-key")); }
/**
 * Loads the CSV file at the given path into a fresh mutable dataset, using the
 * named column as the response (label) column.
 *
 * @throws IOException if the file could not be read
 */
public MutableDataset<T> load(Path csvPath, String responseName) throws IOException { return new MutableDataset<>(loadDataSource(csvPath, responseName)); }
// Loading the same CSV with a single response name and with a singleton set of names
// must produce equivalent data sources.
@Test public void testLoad() throws IOException { URL path = CSVLoaderTest.class.getResource("/org/tribuo/data/csv/test.csv"); CSVLoader<MockOutput> loader = new CSVLoader<>(new MockOutputFactory()); checkDataTestCsv(loader.loadDataSource(path, "RESPONSE")); checkDataTestCsv(loader.loadDataSource(path, Collections.singleton("RESPONSE"))); }
// Decodes an HPACK header block into the given Http2Headers. Dynamic table size updates are
// consumed first (RFC 7541 §4.2 requires them at the start of a block), then the headers are
// streamed into a sink; validation is deferred to sink.finish() so that a validation failure
// cannot leave the dynamic table partially updated (table corruption).
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception { Http2HeadersSink sink = new Http2HeadersSink( streamId, headers, maxHeaderListSize, validateHeaders); // Check for dynamic table size updates, which must occur at the beginning: // https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2 decodeDynamicTableSizeUpdates(in); decode(in, sink); // Now that we've read all of our headers we can perform the validation steps. We must // delay throwing until this point to prevent dynamic table corruption. sink.finish(); }
// A Huffman-encoded literal whose padding bits are not all ones must be rejected
// with an Http2Exception; the input buffer is released either way.
@Test public void testLiteralHuffmanEncodedWithPaddingNotCorrespondingToMSBThrows() throws Http2Exception { byte[] input = {0, (byte) 0x81, 0}; final ByteBuf in = Unpooled.wrappedBuffer(input); try { assertThrows(Http2Exception.class, new Executable() { @Override public void execute() throws Throwable { hpackDecoder.decode(0, in, mockHeaders, true); } }); } finally { in.release(); } }
// Builds the StateSerdes for a store: resolves the key serde via the standard
// WrappingNullableUtils helper and the value serde via the caller-supplied PrepareFunc,
// both against a SerdeGetter wrapping the context. The boolean argument distinguishes
// key (true) from value (false) resolution inside prepareSerde.
static <K, V> StateSerdes<K, V> prepareStoreSerde(final StateStoreContext context, final String storeName, final String changelogTopic, final Serde<K> keySerde, final Serde<V> valueSerde, final PrepareFunc<V> prepareValueSerdeFunc) { return new StateSerdes<>( changelogTopic, prepareSerde(WrappingNullableUtils::prepareKeySerde, storeName, keySerde, new SerdeGetter(context), true, context.taskId()), prepareSerde(prepareValueSerdeFunc, storeName, valueSerde, new SerdeGetter(context), false, context.taskId()) ); }
// When value-serde preparation fails with a ConfigException, prepareStoreSerde must wrap it in
// a StreamsException naming the store, preserving the original message as the cause.
@Test public void shouldThrowStreamsExceptionOnUndefinedValueSerdeForProcessorContext() { final MockInternalNewProcessorContext<String, String> context = new MockInternalNewProcessorContext<>(); utilsMock.when(() -> WrappingNullableUtils.prepareValueSerde(any(), any())) .thenThrow(new ConfigException("Please set StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG")); final Throwable exception = assertThrows(StreamsException.class, () -> StoreSerdeInitializer.prepareStoreSerde((ProcessorContext) context, "myStore", "topic", new Serdes.StringSerde(), new Serdes.StringSerde(), WrappingNullableUtils::prepareValueSerde)); assertThat(exception.getMessage(), equalTo("Failed to initialize value serdes for store myStore")); assertThat(exception.getCause().getMessage(), equalTo("Please set StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG")); }
/**
 * Completes the snapshot and returns a state handle for the snapshot directory.
 *
 * @return a handle to the snapshot directory, or {@code null}
 * @throws IOException if the snapshot cannot be completed
 *         (e.g. the directory is no longer in a completable state — TODO confirm exact conditions)
 */
@Nullable public abstract DirectoryStateHandle completeSnapshotAndGetHandle() throws IOException;
// A completed permanent snapshot survives cleanup(), its handle points at the directory and
// discarding the handle deletes it; completing after cleanup on a fresh instance throws IOException.
@Test void completeSnapshotAndGetHandle() throws Exception { File folderRoot = temporaryFolder.toFile(); File folderA = new File(folderRoot, String.valueOf(UUID.randomUUID())); assertThat(folderA.mkdirs()).isTrue(); Path folderAPath = folderA.toPath(); SnapshotDirectory snapshotDirectory = SnapshotDirectory.permanent(folderAPath); // check that completed checkpoint dirs are not deleted as incomplete. DirectoryStateHandle handle = snapshotDirectory.completeSnapshotAndGetHandle(); assertThat(handle).isNotNull(); assertThat(snapshotDirectory.cleanup()).isTrue(); assertThat(folderA).isDirectory(); assertThat(handle.getDirectory()).isEqualTo(folderAPath); handle.discardState(); assertThat(folderA).doesNotExist(); assertThat(folderA.mkdirs()).isTrue(); SnapshotDirectory newSnapshotDirectory = SnapshotDirectory.permanent(folderAPath); assertThat(newSnapshotDirectory.cleanup()).isTrue(); assertThatThrownBy(newSnapshotDirectory::completeSnapshotAndGetHandle) .isInstanceOf(IOException.class); }
/**
 * Finds the encrypt table registered under the given table name.
 *
 * @param tableName table name to look up
 * @return the matching encrypt table, or empty when the table has no encrypt configuration
 */
@HighFrequencyInvocation
public Optional<EncryptTable> findEncryptTable(final String tableName) {
    final EncryptTable encryptTable = tables.get(tableName);
    return Optional.ofNullable(encryptTable);
}
// A configured table ("t_encrypt") must be found by findEncryptTable().
@Test void assertFindEncryptTable() { assertTrue(new EncryptRule("foo_db", createEncryptRuleConfiguration()).findEncryptTable("t_encrypt").isPresent()); }
// Converts a Connect schema to an Avro ParsedSchema: first rewrites the Connect schema into an
// Avro-compatible form (applying the configured full schema name), then translates it with
// avroData. SchemaParseException (e.g. illegal Avro field names) is rewrapped as KsqlException
// with the original as cause.
@Override public ParsedSchema fromConnectSchema(final Schema schema) { final Schema avroCompatibleSchema = AvroSchemas .getAvroCompatibleConnectSchema(schema, formatProps.getFullSchemaName()); try { return new AvroSchema(avroData.fromConnectSchema(avroCompatibleSchema)); } catch (final SchemaParseException e) { throw new KsqlException("Schema is not compatible with Avro: " + e.getMessage(), e); } }
// A field name starting with a digit is illegal in Avro: translation must fail with a
// KsqlException carrying the Avro parser's message.
@Test public void shouldThrowWhenBuildingAvroSchemafSchemaContainsInvalidAvroNames() { // Given: final Schema schema = SchemaBuilder.struct() .field("2Bad", Schema.OPTIONAL_INT32_SCHEMA) .build(); // When: final Exception e = assertThrows( KsqlException.class, () -> translator.fromConnectSchema(schema) ); // Then: assertThat(e.getMessage(), is("Schema is not compatible with Avro: Illegal initial character: 2Bad")); }
/**
 * Converts a Flink {@code RowType} into a {@code TableSchema}, mapping every row
 * field's logical type to its corresponding DataType.
 *
 * @param rowType the row type to convert
 * @return the equivalent table schema, fields in the row type's order
 */
public static TableSchema toSchema(RowType rowType) {
    TableSchema.Builder builder = TableSchema.builder();
    rowType.getFields()
        .forEach(field -> builder.field(field.getName(), TypeConversions.fromLogicalToDataType(field.getType())));
    return builder.build();
}
// An Iceberg schema with identifier fields {1,2} must convert to a TableSchema whose primary
// key covers exactly the "int" and "string" columns.
@Test public void testConvertFlinkSchemaWithPrimaryKeys() { Schema icebergSchema = new Schema( Lists.newArrayList( Types.NestedField.required(1, "int", Types.IntegerType.get()), Types.NestedField.required(2, "string", Types.StringType.get())), Sets.newHashSet(1, 2)); TableSchema tableSchema = FlinkSchemaUtil.toSchema(icebergSchema); assertThat(tableSchema.getPrimaryKey()) .isPresent() .get() .satisfies(k -> assertThat(k.getColumns()).containsExactly("int", "string")); }
// Builds the "System" section of the system-info protobuf: server id/version/edition, lines of
// code, container flag, external provisioning/authentication details, identity providers,
// HA flag (hard-coded false here), official-distribution check, forced authentication, the
// home/data/temp directories from configuration, and the available processor count.
@Override public ProtobufSystemInfo.Section toProtobuf() { ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder(); protobuf.setName("System"); setAttribute(protobuf, "Server ID", server.getId()); setAttribute(protobuf, "Version", getVersion()); setAttribute(protobuf, "Edition", sonarRuntime.getEdition().getLabel()); setAttribute(protobuf, NCLOC.getName(), statisticsSupport.getLinesOfCode()); setAttribute(protobuf, "Container", containerSupport.isRunningInContainer()); setAttribute(protobuf, "External Users and Groups Provisioning", commonSystemInformation.getManagedInstanceProviderName()); setAttribute(protobuf, "External User Authentication", commonSystemInformation.getExternalUserAuthentication()); addIfNotEmpty(protobuf, "Accepted external identity providers", commonSystemInformation.getEnabledIdentityProviders()); addIfNotEmpty(protobuf, "External identity providers whose users are allowed to sign themselves up", commonSystemInformation.getAllowsToSignUpEnabledIdentityProviders()); setAttribute(protobuf, "High Availability", false); setAttribute(protobuf, "Official Distribution", officialDistribution.check()); setAttribute(protobuf, "Force authentication", commonSystemInformation.getForceAuthentication()); setAttribute(protobuf, "Home Dir", config.get(PATH_HOME.getKey()).orElse(null)); setAttribute(protobuf, "Data Dir", config.get(PATH_DATA.getKey()).orElse(null)); setAttribute(protobuf, "Temp Dir", config.get(PATH_TEMP.getKey()).orElse(null)); setAttribute(protobuf, "Processors", Runtime.getRuntime().availableProcessors()); return protobuf.build(); }
// When the distribution check passes, the section must report "Official Distribution" = true.
@Test public void official_distribution() { when(officialDistribution.check()).thenReturn(true); ProtobufSystemInfo.Section protobuf = underTest.toProtobuf(); assertThatAttributeIs(protobuf, "Official Distribution", true); }
// Precise (equality) sharding: rejects a null sharding value, hashes it modulo the configured
// shard count to get the target suffix, then resolves the actual target name among the
// available ones via the data-node info; returns null when no target matches.
@Override public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) { ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new); String suffix = String.valueOf(hashShardingValue(shardingValue.getValue()) % shardingCount); return ShardingAutoTableAlgorithmUtils.findMatchedTargetName(availableTargetNames, suffix, shardingValue.getDataNodeInfo()).orElse(null); }
// Hash-mod sharding of value "a" over four targets must land on "t_order_1".
@Test void assertPreciseDoSharding() { List<String> availableTargetNames = Arrays.asList("t_order_0", "t_order_1", "t_order_2", "t_order_3"); assertThat(shardingAlgorithm.doSharding(availableTargetNames, new PreciseShardingValue<>("t_order", "order_type", DATA_NODE_INFO, "a")), is("t_order_1")); }
/**
 * Builds a JSON error response for Jersey in the shape
 * {@code {"RemoteException": {message, exception, javaClassName}}}, preserving
 * insertion order of the error fields.
 *
 * @param status HTTP status to return
 * @param ex     the throwable whose one-line message and class names are reported
 * @return a response with the given status and an application/json error entity
 */
public static Response createJerseyExceptionResponse(Response.Status status, Throwable ex) {
    Map<String, Object> errorDetails = new LinkedHashMap<String, Object>();
    errorDetails.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex));
    errorDetails.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName());
    errorDetails.put(ERROR_CLASSNAME_JSON, ex.getClass().getName());
    return Response.status(status)
            .type(MediaType.APPLICATION_JSON)
            .entity(Collections.singletonMap(ERROR_JSON, errorDetails))
            .build();
}
// The generated response must carry the requested status, a JSON content type, and an error
// entity exposing the exception's class name, simple name and message.
@Test public void testCreateJerseyException() throws IOException { Exception ex = new IOException("Hello IOEX"); Response response = HttpExceptionUtils.createJerseyExceptionResponse( Response.Status.INTERNAL_SERVER_ERROR, ex); Assert.assertEquals(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus()); Assert.assertArrayEquals( Arrays.asList(MediaType.APPLICATION_JSON_TYPE).toArray(), response.getMetadata().get("Content-Type").toArray()); Map entity = (Map) response.getEntity(); entity = (Map) entity.get(HttpExceptionUtils.ERROR_JSON); Assert.assertEquals(IOException.class.getName(), entity.get(HttpExceptionUtils.ERROR_CLASSNAME_JSON)); Assert.assertEquals(IOException.class.getSimpleName(), entity.get(HttpExceptionUtils.ERROR_EXCEPTION_JSON)); Assert.assertEquals("Hello IOEX", entity.get(HttpExceptionUtils.ERROR_MESSAGE_JSON)); }
// Processes exactly one alter clause by dispatching it to the SystemHandler visitor. The method
// is synchronized so that only one system alter statement executes at a time; the database and
// table arguments are unused placeholders for this handler. Always returns null (no result set).
@Override // add synchronized to avoid process 2 or more stmts at same time public synchronized ShowResultSet process(List<AlterClause> alterClauses, Database dummyDb, OlapTable dummyTbl) throws UserException { Preconditions.checkArgument(alterClauses.size() == 1); AlterClause alterClause = alterClauses.get(0); alterClause.accept(SystemHandler.Visitor.getInstance(), null); return null; }
// Decommissioning a host that is not a registered backend must fail with
// "Backend does not exist".
@Test public void testDecommissionInvalidBackend() throws UserException { List<String> hostAndPorts = Lists.newArrayList("192.168.1.11:1234"); DecommissionBackendClause decommissionBackendClause = new DecommissionBackendClause(hostAndPorts); Analyzer.analyze(new AlterSystemStmt(decommissionBackendClause), new ConnectContext()); expectedException.expect(RuntimeException.class); expectedException.expectMessage("Backend does not exist"); systemHandler.process(Lists.newArrayList(decommissionBackendClause), null, null); }
/**
 * Tests whether the string is a hexadecimal literal: an optional leading '-' sign,
 * an optional "0x"/"0X" prefix, then one or more hex digits.
 *
 * @param str candidate string; may be null
 * @return {@code HEX_RADIX} (16) when the string is hexadecimal, {@code -1} otherwise
 */
public static int isHexadecimal(String str) {
    if (str == null || str.isEmpty()) {
        return -1;
    }
    boolean isHex = str.matches("^-?(0[xX])?[0-9a-fA-F]+$");
    return isHex ? HEX_RADIX : -1;
}
// Valid hex returns radix 16; a string with a non-hex character returns -1.
@Test public void isHexadecimal_Test() { Assertions.assertEquals(16, TbUtils.isHexadecimal("F5D7039")); Assertions.assertEquals(-1, TbUtils.isHexadecimal("K100110")); }
// Converts a NetworkEndpoint proto into Guava's HostAndPort: IP-only and hostname-only
// variants map to host-only values; the *_PORT variants include the port. Hostname is
// preferred over IP when both are present (IP_HOSTNAME*). Unspecified/unrecognized types
// are programming errors and trigger an AssertionError.
public static HostAndPort toHostAndPort(NetworkEndpoint networkEndpoint) { switch (networkEndpoint.getType()) { case IP: return HostAndPort.fromHost(networkEndpoint.getIpAddress().getAddress()); case IP_PORT: return HostAndPort.fromParts( networkEndpoint.getIpAddress().getAddress(), networkEndpoint.getPort().getPortNumber()); case HOSTNAME: case IP_HOSTNAME: return HostAndPort.fromHost(networkEndpoint.getHostname().getName()); case HOSTNAME_PORT: case IP_HOSTNAME_PORT: return HostAndPort.fromParts( networkEndpoint.getHostname().getName(), networkEndpoint.getPort().getPortNumber()); case UNRECOGNIZED: case TYPE_UNSPECIFIED: throw new AssertionError("Type for NetworkEndpoint must be specified."); } throw new AssertionError( String.format( "Should never happen. Unchecked NetworkEndpoint type: %s", networkEndpoint.getType())); }
// An IP_PORT endpoint converts to HostAndPort.fromParts(ip, port).
@Test public void toHostAndPort_withIpAddressAndPort_returnsHostWithIpAndPort() { NetworkEndpoint ipV4AndPortEndpoint = NetworkEndpoint.newBuilder() .setType(NetworkEndpoint.Type.IP_PORT) .setPort(Port.newBuilder().setPortNumber(8888)) .setIpAddress( IpAddress.newBuilder().setAddress("1.2.3.4").setAddressFamily(AddressFamily.IPV4)) .build(); assertThat(NetworkEndpointUtils.toHostAndPort(ipV4AndPortEndpoint)) .isEqualTo(HostAndPort.fromParts("1.2.3.4", 8888)); }
/**
 * Parses the natural-language date expression relative to the current time.
 * Delegates to {@code parse(String, Date)} with "now" as the reference date.
 */
public Result parse(final String string) throws DateNotParsableException { return this.parse(string, new Date()); }
// "last monday" relative to Sat 2021-06-12 must start at Mon 2021-05-31 00:00:00 UTC,
// with a "to" timestamp that differs from the "from".
@Test public void testParseLastMonday() throws Exception { DateTime reference = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("12.06.2021 09:45:23"); NaturalDateParser.Result result = naturalDateParser.parse("last monday", reference.toDate()); DateTime lastMonday = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("31.05.2021 00:00:00"); assertThat(result.getFrom()).as("should be equal to").isEqualTo(lastMonday); assertThat(result.getTo()).as("should differ from").isNotEqualTo(lastMonday); }
/**
 * Formats the duration in a compact human-readable form.
 * Delegates to {@code printDuration(uptime, false)}, i.e. the non-verbose variant.
 */
public static String printDuration(Duration uptime) { return printDuration(uptime, false); }
// Pins the compact formatting of millisecond durations in both non-verbose (ms truncated)
// and verbose (ms included) modes, across s/m/h/d magnitudes. Note: these calls use the
// long-millis overload of printDuration, not the Duration overload above.
@Test public void testPrintDuration() { assertEquals("0s", TimeUtils.printDuration(123)); assertEquals("123ms", TimeUtils.printDuration(123, true)); assertEquals("1s", TimeUtils.printDuration(1250)); assertEquals("1s250ms", TimeUtils.printDuration(1250, true)); assertEquals("33s", TimeUtils.printDuration(33000)); assertEquals("33s", TimeUtils.printDuration(33001)); assertEquals("33s1ms", TimeUtils.printDuration(33001, true)); assertEquals("33s", TimeUtils.printDuration(33444)); assertEquals("33s444ms", TimeUtils.printDuration(33444, true)); assertEquals("1m0s", TimeUtils.printDuration(60000)); assertEquals("1m1s", TimeUtils.printDuration(61000)); assertEquals("1m1s", TimeUtils.printDuration(61002)); assertEquals("1m1s2ms", TimeUtils.printDuration(61002, true)); assertEquals("30m55s", TimeUtils.printDuration(1855123)); assertEquals("30m55s123ms", TimeUtils.printDuration(1855123, true)); assertEquals("1h30m", TimeUtils.printDuration(5400000)); assertEquals("1h30m1s", TimeUtils.printDuration(5401000, true)); assertEquals("2d23h", TimeUtils.printDuration(259032000)); assertEquals("2d23h57m12s", TimeUtils.printDuration(259032000, true)); }
/**
 * Multiplies two unscaled decimals and returns the product in a freshly
 * allocated slice, delegating to the three-argument in-place overload.
 */
public static Slice multiply(Slice left, Slice right) {
    Slice product = unscaledDecimal();
    multiply(left, right, product);
    return product;
}
// Exercises unscaled-decimal multiplication: identity/zero/sign cases against MAX/MIN,
// cross-word carries with 128-bit operands, int-boundary products, and large literal products.
@Test public void testMultiply() { assertEquals(multiply(unscaledDecimal(0), MAX_DECIMAL), unscaledDecimal(0)); assertEquals(multiply(unscaledDecimal(1), MAX_DECIMAL), MAX_DECIMAL); assertEquals(multiply(unscaledDecimal(1), MIN_DECIMAL), MIN_DECIMAL); assertEquals(multiply(unscaledDecimal(-1), MAX_DECIMAL), MIN_DECIMAL); assertEquals(multiply(unscaledDecimal(-1), MIN_DECIMAL), MAX_DECIMAL); assertEquals(multiply(wrappedIntArray(0xFFFFFFFF, 0xFFFFFFFF, 0, 0), wrappedIntArray(0xFFFFFFFF, 0x00FFFFFF, 0, 0)), wrappedLongArray(0xff00000000000001L, 0xfffffffffffffeL)); assertEquals(multiply(wrappedLongArray(0xFFFFFF0096BFB800L, 0), wrappedLongArray(0x39003539D9A51600L, 0)), wrappedLongArray(0x1CDBB17E11D00000L, 0x39003500FB00AB76L)); assertEquals(multiply(unscaledDecimal(Integer.MAX_VALUE), unscaledDecimal(Integer.MIN_VALUE)), unscaledDecimal((long) Integer.MAX_VALUE * Integer.MIN_VALUE)); assertEquals(multiply(unscaledDecimal("99999999999999"), unscaledDecimal("-1000000000000000000000000")), unscaledDecimal("-99999999999999000000000000000000000000")); assertEquals(multiply(unscaledDecimal("12380837221737387489365741632769922889"), unscaledDecimal("3")), unscaledDecimal("37142511665212162468097224898309768667")); }
/**
 * Injects topic properties into the given statement using a default
 * {@link TopicProperties.Builder}.
 *
 * @param <T>       the concrete statement type
 * @param statement the configured statement to process
 * @return the (possibly rewritten) configured statement
 */
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
  // Delegate to the two-arg overload with a fresh default builder.
  final TopicProperties.Builder defaultBuilder = new TopicProperties.Builder();
  return inject(statement, defaultBuilder);
}
/**
 * CLEANUP_POLICY is inferred from the source type (STREAM or TABLE), so a
 * user-supplied cleanup_policy in the WITH clause of a CREATE TABLE AS
 * statement must be rejected with a descriptive error.
 */
@Test
public void shouldThrowIfCleanupPolicyConfigPresentInCreateTableAs() {
    // Given:
    givenStatement("CREATE TABLE foo_bar WITH (kafka_topic='foo', partitions=1, cleanup_policy='whatever') AS SELECT * FROM SOURCE;");

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> injector.inject(statement, builder)
    );

    // Then:
    assertThat(
        e.getMessage(),
        containsString("Invalid config variable in the WITH clause: CLEANUP_POLICY.\n"
            + "The CLEANUP_POLICY config is automatically inferred based on the type of source (STREAM or TABLE).\n"
            + "Users can't set the CLEANUP_POLICY config manually."));
}
/**
 * Performs a lightweight, pattern-only validation of a credit code: blank
 * input is rejected, otherwise the value is matched against
 * {@code CREDIT_CODE_PATTERN}. No check-digit verification happens here.
 *
 * @param creditCode candidate credit code, may be {@code null} or blank
 * @return {@code true} if the input is non-blank and matches the pattern
 */
public static boolean isCreditCodeSimple(CharSequence creditCode) {
    // A blank (null/empty/whitespace) value can never be a valid code;
    // short-circuiting keeps the regex from seeing it.
    return !StrUtil.isBlank(creditCode) && ReUtil.isMatch(CREDIT_CODE_PATTERN, creditCode);
}
@Test
public void isCreditCodeBySimple() {
    // A syntactically valid 18-character unified social credit code must
    // pass the pattern-only check.
    final String sampleCode = "91310115591693856A";
    assertTrue(CreditCodeUtil.isCreditCodeSimple(sampleCode));
}
/**
 * Deletes all the ZooKeeper related resources. The deletion steps are
 * chained with {@code compose}, so they run strictly in sequence and the
 * first failing step short-circuits the rest of the chain and fails the
 * returned future.
 *
 * @return a future that completes when every resource has been deleted
 */
public Future<Void> reconcile() {
    LOGGER.infoCr(reconciliation, "Deleting all the ZooKeeper related resources");
    return jmxSecret()  // NOTE(review): presumably removes the JMX secret — name only; confirm in jmxSecret()
            .compose(i -> deleteNetworkPolicy())
            .compose(i -> deleteServiceAccount())
            .compose(i -> deleteService())
            .compose(i -> deleteHeadlessService())
            .compose(i -> deleteCertificateSecret())
            .compose(i -> deleteLoggingAndMetricsConfigMap())
            .compose(i -> deletePodDisruptionBudget())
            .compose(i -> deletePodSet())
            // PVC removal runs last, after the pods are gone.
            .compose(i -> deletePersistentClaims());
}
/**
 * Verifies that when the service-account deletion times out, the eraser's
 * sequential deletion chain stops at that step: the JMX secret and network
 * policy (which precede it) are deleted exactly once, everything after it
 * is never touched, and the failure carries the timeout message.
 */
@Test
public void testZookeeperEraserReconcileFailedDueToServiceAccountDeletionTimeout(VertxTestContext context) {
    // Mocked operators for every resource type the eraser touches.
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
    ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations;
    ServiceOperator mockServiceOps = supplier.serviceOperations;
    NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator;
    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator;
    PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
    SecretOperator mockSecretOps = supplier.secretOperations;
    PvcOperator mockPvcOps = supplier.pvcOperations;
    SharedEnvironmentProvider sharedEnvironmentProvider = supplier.sharedEnvironmentProvider;
    ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(RECONCILIATION, KAFKA, VERSIONS, sharedEnvironmentProvider);

    ArgumentCaptor<String> podSetDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockPodSetOps.deleteAsync(any(), anyString(), podSetDeletionCaptor.capture(), anyBoolean())).thenAnswer(i -> Future.succeededFuture());
    ArgumentCaptor<String> secretDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockSecretOps.deleteAsync(any(), anyString(), secretDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> saDeletionCaptor = ArgumentCaptor.forClass(String.class);
    // The failing step: service-account deletion times out, which should
    // short-circuit all deletions that come after it in the chain.
    when(mockSaOps.deleteAsync(any(), anyString(), saDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.failedFuture(new TimeoutException("Timed out while deleting the resource")));
    ArgumentCaptor<String> serviceDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockServiceOps.deleteAsync(any(), anyString(), serviceDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> netPolicyDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockNetPolicyOps.deleteAsync(any(), anyString(), netPolicyDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> cmDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.deleteAsync(any(), anyString(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> pdbDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockPdbOps.deleteAsync(any(), anyString(), pdbDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());

    // Mock the PVC Operator
    Map<String, PersistentVolumeClaim> zkPvcs = createZooPvcs(NAMESPACE, zkCluster.getStorage(), zkCluster.nodes(), (replica, storageId) -> VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(KAFKA.getMetadata().getName(), replica), deleteClaim(KAFKA.getSpec().getZookeeper().getStorage()));
    ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
    when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture());
    when(mockPvcOps.getAsync(anyString(), ArgumentMatchers.startsWith("data-")))
            .thenAnswer(invocation -> {
                String pvcName = invocation.getArgument(1);
                if (pvcName.contains(zkCluster.getComponentName())) {
                    return Future.succeededFuture(zkPvcs.get(pvcName));
                }
                return Future.succeededFuture(null);
            });
    when(mockPvcOps.listAsync(anyString(), ArgumentMatchers.any(Labels.class)))
            .thenAnswer(invocation -> Future.succeededFuture(zkPvcs.values().stream().toList()));

    // test reconcile
    ZooKeeperEraser zkEraser = new ZooKeeperEraser(
            RECONCILIATION,
            supplier
    );

    Checkpoint async = context.checkpoint();
    zkEraser.reconcile()
            .onComplete(context.failing(v -> context.verify(() -> {
                // Steps after the failing SA deletion must never run ...
                verify(mockCmOps, never()).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockSaOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockServiceOps, never()).deleteAsync(any(), any(), any(), anyBoolean());
                // ... while the steps preceding it ran exactly once.
                verify(mockSecretOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockNetPolicyOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockPodSetOps, never()).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockPdbOps, never()).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockPvcOps, never()).getAsync(any(), any());
                verify(mockPvcOps, never()).listAsync(any(), ArgumentMatchers.any(Labels.class));
                // no reconcile since there was no PVC deletion
                verify(mockPvcOps, never()).reconcile(any(), any(), any(), any());
                assertThat(pvcCaptor.getAllValues().size(), is(0));
                assertThat(netPolicyDeletionCaptor.getAllValues(), is(List.of("my-cluster-network-policy-zookeeper")));
                assertThat(saDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper")));
                assertThat(secretDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-jmx")));
                // asserting error message
                assertThat(v.getMessage(), is("Timed out while deleting the resource"));
                async.flag();
            })));
}
/**
 * Schedules the periodic check task: it first fires after the configured
 * initial delay and then repeats at the enqueue-delay interval (seconds).
 */
@Override
public void start() {
    final long firstRunDelay = getInitialDelay();
    final long repeatInterval = getEnqueueDelay();
    executorService.scheduleAtFixedRate(this::checks, firstRunDelay, repeatInterval, SECONDS);
}
@Test
public void doNothingIfLocked() {
    // Simulate another node already holding the lock.
    when(lockManager.tryLock(any(), anyInt())).thenReturn(false);

    underTest.start();
    executorService.runCommand();

    // With the lock unavailable, the scheduled task must bail out before
    // touching the database at all.
    verifyNoInteractions(dbClient);
}
/**
 * Checks whether the given URL uses the service-discovery registry protocol.
 *
 * @param url the URL whose protocol is inspected
 * @return {@code true} if the URL's protocol equals
 *         {@code SERVICE_REGISTRY_PROTOCOL}, ignoring case
 */
public static boolean hasServiceDiscoveryRegistryProtocol(URL url) {
    final String protocol = url.getProtocol();
    // Calling equalsIgnoreCase on the constant side also tolerates a null protocol.
    return SERVICE_REGISTRY_PROTOCOL.equalsIgnoreCase(protocol);
}
@Test
public void testHasServiceDiscoveryRegistryProtocol() {
    // A plain HTTP URL must NOT be recognized as a service-discovery registry.
    final URL httpUrl = UrlUtils.parseURL("http://root:alibaba@127.0.0.1:9090/dubbo.test.api", null);
    assertFalse(UrlUtils.hasServiceDiscoveryRegistryProtocol(httpUrl));

    // A URL with the dedicated protocol must be recognized.
    final URL registryUrl = UrlUtils.parseURL("service-discovery-registry://root:alibaba@127.0.0.1:9090/dubbo.test.api", null);
    assertTrue(UrlUtils.hasServiceDiscoveryRegistryProtocol(registryUrl));
}
/**
 * Builds the provider-side filter chain for the given provider config.
 * The filters are selected from the provider auto-active set and the
 * chain terminates in {@code lastFilter}.
 *
 * @param providerConfig provider configuration the chain is built for
 * @param lastFilter     terminal invoker executed after all filters
 * @return the assembled provider filter chain
 */
public static FilterChain buildProviderChain(ProviderConfig<?> providerConfig, FilterInvoker lastFilter) {
    return new FilterChain(
        selectActualFilters(providerConfig, PROVIDER_AUTO_ACTIVES),
        lastFilter,
        providerConfig);
}
/**
 * Builds a provider chain and a consumer chain around numbered test
 * filters and checks the traversal order recorded in the response string
 * for both "sync" and "callback" invoke types.
 */
@Test
public void buildProviderChain() {
    ProviderConfig providerConfig = new ProviderConfig();
    providerConfig.setInterfaceId(Serializer.class.getName());

    ConsumerConfig consumerConfig = new ConsumerConfig();
    ArrayList<Filter> list = new ArrayList<Filter>();
    list.add(new TestChainFilter1());
    list.add(new TestChainFilter2());
    list.add(new TestChainFilter3());
    list.add(new TestChainFilter4());
    // The "-" prefix excludes the filter aliased "testChainFilter5".
    list.add(new ExcludeFilter("-testChainFilter5"));
    consumerConfig.setFilterRef(list);
    consumerConfig.setInterfaceId(Serializer.class.getName());

    // mock provider chain (0,6,7)
    FilterChain providerChain = FilterChain.buildProviderChain(providerConfig,
        new TestProviderFilterInvoker(providerConfig));
    // mock consumer chain(0,7,2,4)
    FilterChain consumerChain = FilterChain.buildConsumerChain(consumerConfig,
        new TestConsumerFilterInvoker(consumerConfig, providerChain));
    Assert.assertNotNull(consumerChain.getChain());

    // Sync call: the "_q" markers record the request path through consumer
    // then provider filters; the "_s" markers record the response unwinding
    // in reverse order.
    SofaRequest request = new SofaRequest();
    request.setMethodArgs(new String[] { "xxx" });
    request.setInvokeType("sync");
    String result = (String) consumerChain.invoke(request).getAppResponse();
    Assert.assertEquals("xxx_q0_q7_q2_q4_q0_q6_q7_s7_s6_s0_s4_s2_s7_s0", result);

    // Callback call: the async response path is driven explicitly via
    // onAsyncResponse and records "_a" markers instead of "_s".
    request = new SofaRequest();
    request.setMethodArgs(new String[] { "xxx" });
    request.setInvokeType("callback");
    SofaResponse response = consumerChain.invoke(request);
    consumerChain.onAsyncResponse(consumerConfig, request, response, null);
    result = (String) response.getAppResponse();
    Assert.assertEquals("xxx_q0_q7_q2_q4_q0_q6_q7_a4_a2_a7_a0", result);
}