focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Spring Boot {@code WebServerFactoryCustomizer} callback for the embedded server.
 */
@Override
public void customize(WebServerFactory server) {
    // When running in an IDE or with ./mvnw spring-boot:run, set location of the static web assets.
    setLocationForStaticAssets(server);
}
// Verifies that customize() registers the expected MIME mappings in production profile
// and, when a document root is configured, points it at the built static assets directory.
@Test
void shouldCustomizeServletContainer() {
    env.setActiveProfiles(JHipsterConstants.SPRING_PROFILE_PRODUCTION);
    UndertowServletWebServerFactory container = new UndertowServletWebServerFactory();
    webConfigurer.customize(container);
    assertThat(container.getMimeMappings().get("abs")).isEqualTo("audio/x-mpeg");
    assertThat(container.getMimeMappings().get("html")).isEqualTo("text/html");
    assertThat(container.getMimeMappings().get("json")).isEqualTo("application/json");
    // Document root is only set when the static assets directory actually exists (e.g. after a build).
    if (container.getDocumentRoot() != null) {
        assertThat(container.getDocumentRoot()).isEqualTo(new File("target/classes/static/"));
    }
}
/**
 * Counts how many rolling-period boundaries lie between {@code start} and {@code end}.
 * Both instants are first floored to the start of their containing period (with GMT
 * offset correction, so DST transitions are accounted for), then the floored difference
 * is divided by the fixed period length. Months have variable length and are counted
 * separately from the raw instants.
 *
 * @param start start instant in epoch millis
 * @param end   end instant in epoch millis; must not precede {@code start}
 * @return number of period barriers crossed
 * @throws IllegalArgumentException if {@code start > end}
 * @throws IllegalStateException    if the periodicity type is unknown
 */
public long periodBarriersCrossed(long start, long end) {
    if (start > end)
        throw new IllegalArgumentException("Start cannot come before end");
    long startFloored = getStartOfCurrentPeriodWithGMTOffsetCorrection(start, getTimeZone());
    long endFloored = getStartOfCurrentPeriodWithGMTOffsetCorrection(end, getTimeZone());
    long diff = endFloored - startFloored;
    switch (periodicityType) {
    case TOP_OF_MILLISECOND:
        return diff;
    case TOP_OF_SECOND:
        return diff / MILLIS_IN_ONE_SECOND;
    case TOP_OF_MINUTE:
        return diff / MILLIS_IN_ONE_MINUTE;
    case TOP_OF_HOUR:
        return diff / MILLIS_IN_ONE_HOUR;
    case TOP_OF_DAY:
        return diff / MILLIS_IN_ONE_DAY;
    case TOP_OF_WEEK:
        return diff / MILLIS_IN_ONE_WEEK;
    case TOP_OF_MONTH:
        // Months vary in length, so use the raw instants rather than the floored diff.
        return diffInMonths(start, end);
    default:
        throw new IllegalStateException("Unknown periodicity type.");
    }
}
// Crossing the CET -> CEST spring-forward transition must still count exactly one
// daily barrier, even though the wall-clock day is only 23 hours long.
@Test
public void testPeriodBarriersCrossedWhenGoingIntoDaylightSaving() {
    RollingCalendar rc = new RollingCalendar(dailyPattern, TimeZone.getTimeZone("CET"), Locale.US);
    // Sun Mar 26 00:02:03 CET 2017, GMT offset = -1h
    long start = 1490482923333L;
    // Mon Mar 27 00:02:03 CEST 2017, GMT offset = -2h
    long end = 1490565723333L;
    assertEquals(1, rc.periodBarriersCrossed(start, end));
}
/**
 * Installs an optical path satisfying the given bandwidth/latency constraints and
 * returns its connectivity ID, or {@code null} when no usable intents can be produced
 * (mismatched cross-connect device types, or an empty intent list).
 */
@Override
public OpticalConnectivityId setupPath(Path path, Bandwidth bandwidth, Duration latency) {
    checkNotNull(path);
    log.debug("setupPath({}, {}, {})", path, bandwidth, latency);
    // map of cross connect points (optical port -> packet port)
    Map<ConnectPoint, ConnectPoint> crossConnectPointMap = new HashMap<>();
    // list of (src, dst) pair of optical ports between which optical path should be installed
    List<Pair<ConnectPoint, ConnectPoint>> crossConnectPoints = new ArrayList<>();
    // Scan path to find pairs of connect points between which optical intent is installed
    // opticalSrcPort works as a flag parameter to show scanning status
    ConnectPoint opticalSrcPort = null;
    for (Link link : path.links()) {
        if (!isCrossConnectLink(link)) {
            continue;
        }
        if (opticalSrcPort != null) {
            // opticalSrcPort!=null means src port was already found
            // in this case link.src() is optical layer, and link.dst() is packet layer
            // Check if types of src port and dst port matches
            Device srcDevice = checkNotNull(deviceService.getDevice(opticalSrcPort.deviceId()),
                    "Unknown device ID");
            Device dstDevice = checkNotNull(deviceService.getDevice(link.src().deviceId()),
                    "Unknown device ID");
            if (srcDevice.type() != dstDevice.type()) {
                log.error("Unsupported mix of cross connect points : {}, {}",
                        srcDevice.type(), dstDevice.type());
                return null;
            }
            // Update cross connect points map
            crossConnectPointMap.put(link.src(), link.dst());
            // Add optical ports pair to list
            crossConnectPoints.add(Pair.of(opticalSrcPort, link.src()));
            // Reset flag parameter
            opticalSrcPort = null;
        } else {
            // opticalSrcPort==null means src port was not found yet
            // in this case link.src() is packet layer, and link.dst() is optical layer
            // Update cross connect points map
            crossConnectPointMap.put(link.dst(), link.src());
            // Set opticalSrcPort to src of link (optical port)
            opticalSrcPort = link.dst();
        }
    }
    // create intents from cross connect points
    List<Intent> intents = createIntents(crossConnectPoints);
    if (intents.isEmpty()) {
        log.error("No intents produced from {}", crossConnectPoints);
        return null;
    }
    // create set of PacketLinkRealizedByOptical
    Set<PacketLinkRealizedByOptical> packetLinks =
            createPacketLinkSet(crossConnectPoints, intents, crossConnectPointMap);
    // create OpticalConnectivity object and store information to distributed store
    OpticalConnectivity connectivity = createConnectivity(path, bandwidth, latency, packetLinks);
    // store cross connect port usage
    path.links().stream().filter(this::isCrossConnectLink)
            .forEach(usedCrossConnectLinkSet::add);
    // Submit the intents
    for (Intent i : intents) {
        intentService.submit(i);
        log.debug("Submitted an intent: {}", i);
    }
    return connectivity.id();
}
// setupPath over a 6-link path must produce a non-null connectivity ID and submit
// exactly one OpticalConnectivityIntent between the expected optical ports CP31 -> CP52.
@Test
public void testSetupPath() {
    Bandwidth bandwidth = Bandwidth.bps(100);
    Duration latency = Duration.ofMillis(10);
    List<Link> links = Stream.of(LINK1, LINK2, LINK3, LINK4, LINK5, LINK6)
            .collect(Collectors.toList());
    Path path = new DefaultPath(PROVIDER_ID, links, new ScalarWeight(0));
    OpticalConnectivityId cid = target.setupPath(path, bandwidth, latency);
    assertNotNull(cid);
    // Checks intents are installed as expected
    assertEquals(1, intentService.submitted.size());
    assertEquals(OpticalConnectivityIntent.class, intentService.submitted.get(0).getClass());
    OpticalConnectivityIntent connIntent = (OpticalConnectivityIntent) intentService.submitted.get(0);
    assertEquals(CP31, connIntent.getSrc());
    assertEquals(CP52, connIntent.getDst());
}
/**
 * Reconciles the member's current task set with a freshly received assignment:
 * already-owned tasks are kept (input partitions updated), tasks whose active/standby
 * role changed are recycled, no-longer-owned tasks are closed, and any remaining new
 * tasks are created. Failures accumulated along the way are rethrown at the end.
 *
 * @param activeTasks  new active tasks with their assigned partitions
 * @param standbyTasks new standby tasks with their assigned partitions
 */
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
                             final Map<TaskId, Set<TopicPartition>> standbyTasks) {
    log.info("Handle new assignment with:\n" +
            "\tNew active tasks: {}\n" +
            "\tNew standby tasks: {}\n" +
            "\tExisting active tasks: {}\n" +
            "\tExisting standby tasks: {}",
        activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());
    topologyMetadata.addSubscribedTopicsFromAssignment(
        activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
        logPrefix
    );
    final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
    final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
    final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
    final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));
    // Only lock tasks that appear in the new assignment; others are handled separately.
    final Set<TaskId> tasksToLock = tasks.allTaskIds().stream()
        .filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
        .collect(Collectors.toSet());
    maybeLockTasks(tasksToLock);
    // first put aside those unrecognized tasks because of unknown named-topologies
    tasks.clearPendingTasksToCreate();
    tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
    tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));
    // first rectify all existing tasks:
    // 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
    // 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
    // 3. otherwise, close them since they are no longer owned
    final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
    if (stateUpdater == null) {
        handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
    } else {
        handleTasksWithStateUpdater(
            activeTasksToCreate,
            standbyTasksToCreate,
            tasksToRecycle,
            tasksToCloseClean,
            failedTasks
        );
        failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
    }
    final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);
    maybeUnlockTasks(tasksToLock);
    failedTasks.putAll(taskCloseExceptions);
    maybeThrowTaskExceptions(failedTasks);
    createNewTasks(activeTasksToCreate, standbyTasksToCreate);
}
// A task first assigned as standby and then re-assigned as active must be converted
// in place (createActiveTaskFromStandby) rather than closed and re-created.
@Test
public void shouldConvertStandbyTaskToActiveTask() {
    final StandbyTask standbyTask = mock(StandbyTask.class);
    when(standbyTask.id()).thenReturn(taskId00);
    when(standbyTask.isActive()).thenReturn(false);
    when(standbyTask.prepareCommit()).thenReturn(Collections.emptyMap());
    final StreamTask activeTask = mock(StreamTask.class);
    when(activeTask.id()).thenReturn(taskId00);
    when(activeTask.inputPartitions()).thenReturn(taskId00Partitions);
    when(standbyTaskCreator.createTasks(taskId00Assignment)).thenReturn(singletonList(standbyTask));
    when(activeTaskCreator.createActiveTaskFromStandby(eq(standbyTask), eq(taskId00Partitions), any()))
        .thenReturn(activeTask);
    // First assignment makes the task a standby; second promotes it to active.
    taskManager.handleAssignment(Collections.emptyMap(), taskId00Assignment);
    taskManager.handleAssignment(taskId00Assignment, Collections.emptyMap());
    verify(activeTaskCreator, times(2)).createTasks(any(), eq(emptyMap()));
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
    verifyNoInteractions(consumer);
}
/**
 * Plugins are not permitted to enable other plugins through this wrapper; every
 * invocation fails fast.
 *
 * @param pluginId the plugin the caller tried to enable (ignored)
 * @throws IllegalAccessError always, naming the offending plugin
 */
@Override
public boolean enablePlugin(String pluginId) {
    final String message = PLUGIN_PREFIX + currentPluginId + " tried to execute enablePlugin!";
    throw new IllegalAccessError(message);
}
// Enabling any plugin through the wrapped manager must be rejected,
// whether targeting another plugin or the wrapper's own plugin.
@Test
public void enablePlugin() {
    pluginManager.loadPlugins();
    assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.enablePlugin(OTHER_PLUGIN_ID));
    assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.enablePlugin(THIS_PLUGIN_ID));
}
/**
 * Tells whether {@code startDate}, shifted forward by {@code timeLength} units of
 * {@code dateField}, falls strictly after {@code endDate}.
 *
 * @param startDate  the base date
 * @param dateField  the calendar field to offset by
 * @param timeLength number of units to offset
 * @param endDate    the deadline to compare against
 * @return true when the offset date is after {@code endDate}
 * @deprecated retained for backwards compatibility
 */
@Deprecated
public static boolean isExpired(Date startDate, DateField dateField, int timeLength, Date endDate) {
    return offset(startDate, dateField, timeLength).after(endDate);
}
// 2019-12-01 plus 3 days (2019-12-04) is after 2019-12-02, so the period is expired.
@Test
public void isExpiredTest() {
    final DateTime startDate = DateUtil.parse("2019-12-01 17:02:30");
    final DateTime endDate = DateUtil.parse("2019-12-02 17:02:30");
    final int length = 3;
    //noinspection deprecation
    final boolean expired = DateUtil.isExpired(startDate, DateField.DAY_OF_YEAR, length, endDate);
    assertTrue(expired);
}
public String getContent(String path) { String htmlPath = HTML_PATHS.contains(path) ? path : INDEX_HTML_PATH; checkState(servletContext != null, "init has not been called"); // Optimization to not have to call platform.currentStatus on each call if (Objects.equals(status, UP)) { return indexHtmlByPath.get(htmlPath); } Status currentStatus = platform.status(); if (!Objects.equals(status, currentStatus)) { generate(currentStatus); } return indexHtmlByPath.get(htmlPath); }
// The served content must be regenerated when the platform status changes
// between two calls (STARTING -> UP).
@Test
public void content_is_updated_when_status_has_changed() {
    doInit();
    when(platform.status()).thenReturn(STARTING);
    assertThat(underTest.getContent("/foo"))
        .contains(STARTING.name());
    when(platform.status()).thenReturn(UP);
    assertThat(underTest.getContent("/foo"))
        .contains(UP.name());
}
/**
 * Returns whether the given component currently holds leadership under the given
 * session ID. Answers {@code false} (with a debug log) when the service is closed
 * or the component was never registered; all checks happen under the service lock.
 */
@Override
protected boolean hasLeadership(String componentId, UUID leaderSessionId) {
    synchronized (lock) {
        if (leaderElectionDriver != null) {
            if (leaderContenderRegistry.containsKey(componentId)) {
                // Leadership requires both the driver-level grant and a matching session ID.
                return leaderElectionDriver.hasLeadership() && leaderSessionId.equals(issuedLeaderSessionID);
            } else {
                LOG.debug(
                        "hasLeadership is called for component '{}' while there is no contender registered under that ID in the service, returning false.",
                        componentId);
                return false;
            }
        } else {
            LOG.debug("hasLeadership is called after the service is closed, returning false.");
            return false;
        }
    }
}
// After leadership is granted and then revoked (revoke event fully processed),
// hasLeadership must be false for both the original session ID and any other ID.
@Test
void testHasLeadershipWithLeadershipLostAndRevokeEventProcessed() throws Exception {
    new Context() {
        {
            runTestWithSynchronousEventHandling(
                    () -> {
                        final UUID expectedSessionID = UUID.randomUUID();
                        grantLeadership(expectedSessionID);
                        revokeLeadership();
                        applyToBothContenderContexts(
                                ctx -> {
                                    assertThat(
                                                    leaderElectionService.hasLeadership(
                                                            ctx.componentId, expectedSessionID))
                                            .isFalse();
                                    assertThat(
                                                    leaderElectionService.hasLeadership(
                                                            ctx.componentId, UUID.randomUUID()))
                                            .isFalse();
                                });
                    });
        }
    };
}
/**
 * Decodes an MPI-encoded number (OpenSSL BN_bn2mpi format): an optional 4-byte
 * big-endian length prefix followed by the magnitude bytes, where the top bit of
 * the first payload byte is the sign bit.
 *
 * @param mpi       the encoded number, with or without the length prefix
 * @param hasLength whether the 4-byte length prefix is present
 * @return the decoded value; an empty payload decodes to {@link BigInteger#ZERO}
 */
public static BigInteger decodeMPI(byte[] mpi, boolean hasLength) {
    byte[] buf;
    if (hasLength) {
        int length = (int) readUint32BE(mpi, 0);
        buf = new byte[length];
        System.arraycopy(mpi, 4, buf, 0, length);
    } else {
        // Work on a copy: the original code masked the sign bit directly into the
        // caller's array, silently mutating the input when the value was negative.
        buf = mpi.clone();
    }
    if (buf.length == 0)
        return BigInteger.ZERO;
    boolean isNegative = (buf[0] & 0x80) == 0x80;
    if (isNegative)
        buf[0] &= 0x7f; // clear the sign bit to obtain the magnitude
    BigInteger result = new BigInteger(buf);
    return isNegative ? result.negate() : result;
}
// An empty payload without a length prefix must decode to zero.
@Test
public void testDecodeMPI() {
    assertEquals(BigInteger.ZERO, ByteUtils.decodeMPI(new byte[]{}, false));
}
/**
 * Windowed left join overload for a key-less {@link ValueJoiner}: adapts the joiner
 * to a {@code ValueJoinerWithKey} and delegates to the primary overload.
 */
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
                                        final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                        final JoinWindows windows) {
    return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows);
}
// A null ValueJoinerWithKey must be rejected with an NPE carrying the expected message.
@SuppressWarnings("deprecation")
@Test
public void shouldNotAllowNullValueJoinerWithKeyOnLeftJoin() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.leftJoin(testStream,
            (ValueJoinerWithKey<? super String, ? super String, ? super String, ?>) null,
            JoinWindows.of(ofMillis(10))));
    assertThat(exception.getMessage(), equalTo("joiner can't be null"));
}
/**
 * A field-type profile may only be attached to index sets whose template type does
 * not mandate immutable field types; an absent template type imposes no restriction.
 */
@JsonIgnore
public boolean canHaveProfile() {
    final boolean immutableFieldTypes = this.indexTemplateType()
            .map(TEMPLATE_TYPES_FOR_INDEX_SETS_WITH_IMMUTABLE_FIELD_TYPES::contains)
            .orElse(false);
    return !immutableFieldTypes;
}
// Event index sets have immutable field types, so attaching a profile is illegal.
@Test
public void testEventIndexWithProfileSetIsIllegal() {
    assertFalse(testIndexSetConfig(EVENT_TEMPLATE_TYPE, null, "profile").canHaveProfile());
}
/**
 * Builds the Drools AST for a PMML {@code TreeModel}: derives the target field type,
 * collects the model's output fields (empty when no Output element is present), and
 * generates rules starting from the tree's root node.
 *
 * @param fields       the PMML fields in scope
 * @param model        the tree model to translate
 * @param fieldTypeMap mapping from original field names to generated types
 * @param types        the generated Drools types to embed in the AST
 * @return the assembled AST of types and rules
 */
public static KiePMMLDroolsAST getKiePMMLDroolsAST(final List<Field<?>> fields,
                                                   final TreeModel model,
                                                   final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap,
                                                   final List<KiePMMLDroolsType> types) {
    logger.trace("getKiePMMLDroolsAST {} {}", fields, model);
    DATA_TYPE targetType = getTargetFieldType(fields, model);
    List<OutputField> outputFields = model.getOutput() != null ? model.getOutput().getOutputFields() : Collections.emptyList();
    List<KiePMMLDroolsRule> rules = KiePMMLTreeModelNodeASTFactory.factory(fieldTypeMap, outputFields, model.getNoTrueChildStrategy(), targetType).declareRulesFromRootNode(model.getNode(), "");
    return new KiePMMLDroolsAST(types, rules);
}
// The golfing sample model must yield a non-null AST that carries the provided
// (empty) type list unchanged and a non-empty set of generated rules.
@Test
void getKiePMMLDroolsGolfingAST() {
    final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap =
        getFieldTypeMap(golfingPmml.getDataDictionary(), golfingPmml.getTransformationDictionary(), golfingModel.getLocalTransformations());
    List<KiePMMLDroolsType> types = Collections.emptyList();
    KiePMMLDroolsAST retrieved = KiePMMLTreeModelASTFactory.getKiePMMLDroolsAST(
        getFieldsFromDataDictionary(golfingPmml.getDataDictionary()), golfingModel, fieldTypeMap, types);
    assertThat(retrieved).isNotNull();
    assertThat(retrieved.getTypes()).isEqualTo(types);
    assertThat(retrieved.getRules()).isNotEmpty();
}
/**
 * Kicks off a single asynchronous archive run on the service executor.
 *
 * @return the pending archive future (completes with {@code true} on success)
 *         paired with the executor running it
 */
@Override
protected Pair<CompletableFuture, ExecutorService> startService() {
    LOG.info("Starting async archive service...");
    return Pair.of(CompletableFuture.supplyAsync(() -> {
        writeClient.archive();
        return true;
    }, executor), executor);
}
// startService must invoke writeClient.archive() and complete its future with true.
@Test
void startServiceShouldInvokeCallArchiveMethod() throws ExecutionException, InterruptedException {
    AsyncArchiveService service = new AsyncArchiveService(writeClient);
    assertEquals(true, service.startService().getLeft().get());
    verify(writeClient).archive();
}
/**
 * Creates a per-key approximate-count-distinct transform configured with the
 * default HyperLogLog++ precision.
 */
public static <K, V> PerKey<K, V> perKey() {
    final AutoValue_ApproximateCountDistinct_PerKey.Builder<K, V> builder =
            new AutoValue_ApproximateCountDistinct_PerKey.Builder<>();
    builder.setPrecision(HllCount.DEFAULT_PRECISION);
    return builder.build();
}
// Each of the three keys receives the same string set, so each must produce
// the same distinct-count estimate.
@Test
@Category(NeedsRunner.class)
public void testStandardTypesPerKeyForStrings() {
    List<KV<Integer, String>> strings = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        for (int k : INTS1) {
            strings.add(KV.of(i, String.valueOf(k)));
        }
    }
    PCollection<KV<Integer, Long>> result =
        p.apply("Str", Create.of(strings)).apply("StrHLL", ApproximateCountDistinct.perKey());
    PAssert.that(result)
        .containsInAnyOrder(
            ImmutableList.of(
                KV.of(0, INTS1_ESTIMATE), KV.of(1, INTS1_ESTIMATE), KV.of(2, INTS1_ESTIMATE)));
    p.run();
}
/**
 * Parses a raw User-Agent header value into a structured {@code UserAgent}.
 *
 * @param userAgentString the raw User-Agent header value
 * @return the parsed user agent
 */
public static UserAgent parse(String userAgentString) {
    return UserAgentParser.parse(userAgentString);
}
// A Chrome-on-Windows-8.1/Server-2012R2 UA string must be decomposed into the
// expected browser, engine, OS, platform and version fields.
@Test
public void parseChromeOnWindowsServer2012R2Test() {
    final String uaStr = "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36";
    final UserAgent ua = UserAgentUtil.parse(uaStr);
    assertEquals("Chrome", ua.getBrowser().toString());
    assertEquals("63.0.3239.132", ua.getVersion());
    assertEquals("Webkit", ua.getEngine().toString());
    assertEquals("537.36", ua.getEngineVersion());
    assertEquals("Windows 8.1 or Windows Server 2012R2", ua.getOs().toString());
    assertEquals("6.3", ua.getOsVersion());
    assertEquals("Windows", ua.getPlatform().toString());
    assertFalse(ua.isMobile());
}
/**
 * Validates the MRZ of a Dutch driving licence (BAP configuration).
 *
 * @param mrz the machine-readable zone string; must be exactly 30 characters
 * @throws VerificationException if any structural check or the check digit fails
 */
public static void checkDrivingLicenceMrz(String mrz) {
    // Validate the length first: the positional checks below would otherwise throw
    // StringIndexOutOfBoundsException (instead of VerificationException) on short input.
    if (mrz.length() != 30) {
        throw new VerificationException("Dutch MRZ should have length of 30");
    }
    if (mrz.charAt(0) != 'D') {
        throw new VerificationException("MRZ should start with D");
    }
    if (mrz.charAt(1) != '1') {
        throw new VerificationException("Only BAP configuration is supported (1)");
    }
    if (!mrz.substring(2, 5).equals("NLD")) {
        throw new VerificationException("Only Dutch driving licence supported");
    }
    checkMrzCheckDigit(mrz);
}
// A 30-character MRZ that fails the structural checks must raise VerificationException.
@Test
public void checkDrivingLicenceMrzCheckDigitWrong() {
    assertThrows(VerificationException.class, () -> {
        MrzUtils.checkDrivingLicenceMrz("PPPPPPPPPPPPPPPPPPPPPPPPPPPPPP");
    });
}
/**
 * Updates the issue severity unless it already matches. Issues whose severity was
 * set manually are immutable and cause an {@code IllegalStateException}.
 *
 * @return true when the issue was actually modified
 */
public boolean setSeverity(DefaultIssue issue, String severity, IssueChangeContext context) {
    checkState(!issue.manualSeverity(), "Severity can't be changed");
    // Guard clause: nothing to do when the severity is unchanged.
    if (Objects.equals(severity, issue.severity())) {
        return false;
    }
    issue.setFieldChange(context, SEVERITY, issue.severity(), severity);
    issue.setSeverity(severity);
    issue.setUpdateDate(context.date());
    issue.setChanged(true);
    return true;
}
// Setting the same severity again must be a no-op: no update, no notification, no change log.
@Test
void not_change_severity() {
    issue.setSeverity("MINOR");
    boolean updated = underTest.setSeverity(issue, "MINOR", context);
    assertThat(updated).isFalse();
    assertThat(issue.mustSendNotifications()).isFalse();
    assertThat(issue.currentChange()).isNull();
}
@Override public UserDetails loadUserByUsername(String userId) throws UsernameNotFoundException { User user = null; try { user = this.identityService.createUserQuery() .userId(userId) .singleResult(); } catch (FlowableException ex) { // don't care } if (null == user) { throw new UsernameNotFoundException( String.format("user (%s) could not be found", userId)); } return createFlowableUser(user); }
// A loaded FlowableUserDetails must survive a Java serialization round-trip with
// all account flags, credentials, authorities, user fields and groups intact.
@Test
public void testSerializingUserDetailsShouldWorkCorrectly() throws IOException, ClassNotFoundException {
    UserDetails kermit = userDetailsService.loadUserByUsername("kermit");
    byte[] serialized;
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    ObjectOutputStream outputStream = new ObjectOutputStream(buffer);
    outputStream.writeObject(kermit);
    outputStream.close();
    serialized = buffer.toByteArray();
    ByteArrayInputStream inputStream = new ByteArrayInputStream(serialized);
    ObjectInputStream stream = new ObjectInputStream(inputStream);
    Object deserialized = stream.readObject();
    stream.close();
    assertThat(deserialized).isInstanceOf(FlowableUserDetails.class);
    kermit = (UserDetails) deserialized;
    assertThat(kermit.isCredentialsNonExpired()).as("credentialsNonExpired").isTrue();
    assertThat(kermit.isAccountNonLocked()).as("accountNonLocked").isTrue();
    assertThat(kermit.isAccountNonExpired()).as("accountNonExpired").isTrue();
    assertThat(kermit.isEnabled()).as("enabled").isTrue();
    assertThat(kermit.getUsername()).as("username").isEqualTo("kermit");
    assertThat(kermit.getPassword()).as("password").isEqualTo("kermit");
    assertThat(kermit.getAuthorities())
        .extracting(GrantedAuthority::getAuthority)
        .as("granted authorities")
        .containsExactly(
            "access admin application",
            "access modeler application",
            "start processes"
        );
    // The Flowable-specific payload (user + groups) must also round-trip.
    FlowableUserDetails kermitFlowable = (FlowableUserDetails) kermit;
    User user = kermitFlowable.getUser();
    assertThat(user.getId()).isEqualTo("kermit");
    assertThat(user.getFirstName()).isEqualTo("Kermit");
    assertThat(user.getLastName()).isEqualTo("the Frog");
    assertThat(user.getDisplayName()).isEqualTo("Kermit the Frog");
    assertThat(user.getEmail()).isEqualTo("kermit@muppetshow.com");
    assertThat(user.getPassword()).isEqualTo("kermit");
    assertThat(kermitFlowable.getGroups())
        .extracting(Group::getId, Group::getName, Group::getType)
        .as("Groups")
        .containsExactlyInAnyOrder(
            tuple("admins", "Admins", "user"),
            tuple("sales", "Sales", "user"),
            tuple("engineering", "Engineering", "tech")
        );
}
/**
 * Tells whether the given intent was created to launch the app from a notification,
 * i.e. carries the launch-flag extra; defaults to {@code false} when the extra is absent.
 */
public boolean isLaunchIntentOfNotification(Intent intent) {
    return intent.getBooleanExtra(LAUNCH_FLAG_KEY_NAME, false);
}
// An intent without the launch flag extra must not be treated as a notification launch.
@Test
public void isLaunchIntentOfNotification_noFlagInBundle_returnFalse() throws Exception {
    Intent intent = mock(Intent.class);
    final AppLaunchHelper uut = getUUT();
    boolean result = uut.isLaunchIntentOfNotification(intent);
    assertFalse(result);
}
/**
 * Returns the vtaps associated with the given device, delegating to the backing store.
 */
@Override
public Set<OpenstackVtap> getVtapsByDeviceId(DeviceId deviceId) {
    return store.getVtapsByDeviceId(deviceId);
}
// VTAP_2 must be discoverable from both of the devices it is attached to.
@Test
public void testGetDeviceIdsFromVtap() {
    assertTrue(ERR_NOT_FOUND, target.getVtapsByDeviceId(DEVICE_ID_3).contains(VTAP_2));
    assertTrue(ERR_NOT_FOUND, target.getVtapsByDeviceId(DEVICE_ID_4).contains(VTAP_2));
}
/**
 * Transitions the scheduler from this state into Canceling, carrying over the
 * execution graph, its handlers, and the failures accumulated so far.
 */
@Override
public void cancel() {
    context.goToCanceling(
            getExecutionGraph(),
            getExecutionGraphHandler(),
            getOperatorCoordinatorHandler(),
            getFailures());
}
// cancel() while in the Failing state must trigger a transition to Canceling.
@Test
void testTransitionToCancelingOnCancel() throws Exception {
    try (MockFailingContext ctx = new MockFailingContext()) {
        StateTrackingMockExecutionGraph meg = new StateTrackingMockExecutionGraph();
        Failing failing = createFailingState(ctx, meg);
        ctx.setExpectCanceling(assertNonNull());
        failing.cancel();
    }
}
/**
 * Directory modification dates must be set explicitly for this protocol.
 */
@Override
public DirectoryTimestamp getDirectoryTimestamp() {
    return DirectoryTimestamp.explicit;
}
// The iRODS protocol must report case-sensitive paths and explicit directory timestamps.
@Test
public void testFeatures() {
    assertEquals(Protocol.Case.sensitive, new IRODSProtocol().getCaseSensitivity());
    assertEquals(Protocol.DirectoryTimestamp.explicit, new IRODSProtocol().getDirectoryTimestamp());
}
/**
 * Exposes the wrapped message's source for JSON serialization.
 */
@JsonProperty
public String getSource() {
    return message.getSource();
}
// The summary must expose exactly the wrapped message's source.
@Test
public void testGetSource() throws Exception {
    assertEquals(message.getSource(), messageSummary.getSource());
}
@Override public List<KsqlPartitionLocation> locate( final List<KsqlKey> keys, final RoutingOptions routingOptions, final RoutingFilterFactory routingFilterFactory, final boolean isRangeScan ) { if (isRangeScan && keys.isEmpty()) { throw new IllegalStateException("Query is range scan but found no range keys."); } final ImmutableList.Builder<KsqlPartitionLocation> partitionLocations = ImmutableList.builder(); final Set<Integer> filterPartitions = routingOptions.getPartitions(); final Optional<Set<KsqlKey>> keySet = keys.isEmpty() ? Optional.empty() : Optional.of(Sets.newHashSet(keys)); // Depending on whether this is a key-based lookup, determine which metadata method to use. // If we don't have keys, find the metadata for all partitions since we'll run the query for // all partitions of the state store rather than a particular one. //For issue #7174. Temporarily turn off metadata finding for a partition with keys //if there are more than one key. final List<PartitionMetadata> metadata; if (keys.size() == 1 && keys.get(0).getKey().size() == 1 && !isRangeScan) { metadata = getMetadataForKeys(keys, filterPartitions); } else { metadata = getMetadataForAllPartitions(filterPartitions, keySet); } if (metadata.isEmpty()) { final MaterializationException materializationException = new MaterializationException( "Cannot determine which host contains the required partitions to serve the pull query. \n" + "The underlying persistent query may be restarting (e.g. as a result of " + "ALTER SYSTEM) view the status of your by issuing <DESCRIBE foo>."); LOG.debug(materializationException.getMessage()); throw materializationException; } // Go through the metadata and group them by partition. 
for (PartitionMetadata partitionMetadata : metadata) { LOG.debug("Handling pull query for partition {} of state store {}.", partitionMetadata.getPartition(), storeName); final HostInfo activeHost = partitionMetadata.getActiveHost(); final Set<HostInfo> standByHosts = partitionMetadata.getStandbyHosts(); final int partition = partitionMetadata.getPartition(); final Optional<Set<KsqlKey>> partitionKeys = partitionMetadata.getKeys(); LOG.debug("Active host {}, standby {}, partition {}.", activeHost, standByHosts, partition); // For a given partition, find the ordered, filtered list of hosts to consider final List<KsqlNode> filteredHosts = getFilteredHosts(routingOptions, routingFilterFactory, activeHost, standByHosts, partition); partitionLocations.add(new PartitionLocation(partitionKeys, partition, filteredHosts)); } return partitionLocations.build(); }
@Test public void shouldReturnStandBysWhenActiveDown() { // Given: getActiveAndStandbyMetadata(); when(livenessFilter.filter(eq(ACTIVE_HOST))) .thenReturn(Host.exclude(ACTIVE_HOST, "liveness")); // When: final List<KsqlPartitionLocation> result = locator.locate(ImmutableList.of(KEY), routingOptions, routingFilterFactoryStandby, false); // Then: List<KsqlNode> nodeList = result.get(0).getNodes().stream() .filter(node -> node.getHost().isSelected()) .collect(Collectors.toList());; assertThat(nodeList.size(), is(2)); assertThat(nodeList, containsInAnyOrder(standByNode1, standByNode2)); }
/**
 * Registers a generated source under the given key in the model's sources map.
 */
@Override
public void addSourceMap(String key, String value) {
    sourcesMap.put(key, value);
}
// Adding an entry must make it visible through getSourcesMap with the exact value.
@Test
void addSourceMap() {
    Map<String, String> retrieved = kiePMMLModelWithSources.getSourcesMap();
    assertThat(retrieved).isEmpty();
    kiePMMLModelWithSources.addSourceMap("KEY", "VALUE");
    retrieved = kiePMMLModelWithSources.getSourcesMap();
    assertThat(retrieved).containsKey("KEY");
    assertThat(retrieved.get("KEY")).isEqualTo("VALUE");
}
/**
 * Tells whether the current cluster version is unknown, or less than or equal to
 * the given version.
 */
protected boolean isClusterVersionUnknownOrLessOrEqual(Version version) {
    Version clusterVersion = getNodeEngine().getClusterService().getClusterVersion();
    return clusterVersion.isUnknownOrLessOrEqual(version);
}
// The check must hold when comparing against the current cluster version itself.
@Test
public void testClusterVersion_isUnknownLessOrEqual_currentVersion() {
    assertTrue(object.isClusterVersionUnknownOrLessOrEqual(CURRENT_CLUSTER_VERSION));
}
/**
 * Evaluates a literal expression: structured types (collections/complex types) are
 * converted recursively, simple types are evaluated directly.
 */
@Override
public Object evaluateLiteralExpression(String rawExpression, String className, List<String> genericClasses) {
    return isStructuredInput(className)
            ? convertResult(rawExpression, className, genericClasses)
            : internalLiteralEvaluation(rawExpression, className);
}
// A null raw expression must evaluate to null for simple and structured types alike.
@Test
public void evaluateLiteralExpression() {
    assertThat(expressionEvaluator.evaluateLiteralExpression(null, String.class.getCanonicalName(), null)).isNull();
    assertThat(expressionEvaluator.evaluateLiteralExpression(null, List.class.getCanonicalName(), null)).isNull();
    assertThat(expressionEvaluator.evaluateLiteralExpression(null, Map.class.getCanonicalName(), null)).isNull();
}
/**
 * Creates a getter backed by a reflective method call, inferring the traversal
 * type from the method's return type and the given modifier (e.g. "[any]").
 *
 * @param object   the object the getter chain starts from
 * @param parent   the preceding getter in the chain, or null for the root
 * @param method   the method to invoke
 * @param modifier collection/array access modifier applied to the result
 */
public static Getter newMethodGetter(Object object, Getter parent, Method method, String modifier) throws Exception {
    return newGetter(object, parent, modifier, method.getReturnType(), method::invoke,
            (t, et) -> new MethodGetter(parent, method, modifier, t, et));
}
// When the traversed array field is null but the parent produced a non-empty
// multi-result, the getter must still infer the element return type (Integer).
@Test
public void newMethodGetter_whenExtractingFromNull_Array_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType() throws Exception {
    OuterObject object = new OuterObject("name", InnerObject.nullInner("inner"));
    Getter parentGetter = GetterFactory.newMethodGetter(object, null, innersArrayMethod, "[any]");
    Getter innerObjectNameGetter = GetterFactory.newMethodGetter(object, parentGetter, innerAttributesArrayMethod, "[any]");
    Class<?> returnType = innerObjectNameGetter.getReturnType();
    assertEquals(Integer.class, returnType);
}
/**
 * Compiles a point-to-point intent into installable intents. A valid, available
 * suggested path wins; otherwise a zero-hop, unprotected, protected, or (on disjoint
 * path failure) single-path compilation is attempted, in that order.
 */
@Override
public List<Intent> compile(PointToPointIntent intent, List<Intent> installable) {
    log.trace("compiling {} {}", intent, installable);
    ConnectPoint ingressPoint = intent.filteredIngressPoint().connectPoint();
    ConnectPoint egressPoint = intent.filteredEgressPoint().connectPoint();
    //TODO: handle protected path case with suggested path!!
    //Idea: use suggested path as primary and another path from path service as protection
    if (intent.suggestedPath() != null && intent.suggestedPath().size() > 0) {
        Path path = new DefaultPath(PID, intent.suggestedPath(), new ScalarWeight(1));
        //Check intent constraints against suggested path and suggested path availability
        if (checkPath(path, intent.constraints()) && pathAvailable(intent)) {
            allocateIntentBandwidth(intent, path);
            return asList(createLinkCollectionIntent(ImmutableSet.copyOf(intent.suggestedPath()),
                    DEFAULT_COST, intent));
        }
    }
    if (ingressPoint.deviceId().equals(egressPoint.deviceId())) {
        return createZeroHopLinkCollectionIntent(intent);
    }
    // proceed with no protected paths
    if (!ProtectionConstraint.requireProtectedPath(intent)) {
        return createUnprotectedLinkCollectionIntent(intent);
    }
    try {
        // attempt to compute and implement backup path
        return createProtectedIntent(ingressPoint, egressPoint, intent, installable);
    } catch (PathNotFoundException e) {
        log.warn("Could not find disjoint Path for {}", intent);
        // no disjoint path extant -- maximum one path exists between devices
        return createSinglePathIntent(ingressPoint, egressPoint, intent, installable);
    }
}
// Compilation with a suggested path but insufficient bandwidth must fail with
// PathNotFoundException rather than silently producing intents.
@Test
public void testSuggestedPathBandwidthConstrainedIntentFailure() {
    final double bpsTotal = 10.0;
    final ResourceService resourceService =
        MockResourceService.makeCustomBandwidthResourceService(bpsTotal);
    final List<Constraint> constraints =
        Collections.singletonList(new BandwidthConstraint(Bandwidth.bps(BPS_TO_RESERVE)));
    String[] suggestedPathHops = {S1, S4, S5, S3};
    List<Link> suggestedPath = NetTestTools.createPath(suggestedPathHops).links();
    try {
        final PointToPointIntent intent = makeIntentSuggestedPath(
            new ConnectPoint(DID_1, PORT_1),
            new ConnectPoint(DID_3, PORT_2),
            suggestedPath,
            constraints);
        String[][] paths = {{S1, S2, S3}, suggestedPathHops};
        final PointToPointIntentCompiler compiler = makeCompilerSuggestedPath(paths, resourceService);
        compiler.compile(intent, null);
        fail("Point to Point compilation with insufficient bandwidth does "
            + "not throw exception.");
    } catch (PathNotFoundException noPath) {
        assertThat(noPath.getMessage(), containsString("No path"));
    }
}
/**
 * Applies the points-deduction promotion to a price calculation: verifies that point
 * usage was requested, enabled in the member config, and that the user holds points;
 * then computes the discount, splits it across the selected order items, records the
 * promotion detail, and recalculates item and total prices.
 */
@Override
public void calculate(TradePriceCalculateReqBO param, TradePriceCalculateRespBO result) {
    // Default to using 0 points
    result.setUsePoint(0);
    // 1.1 Check whether point usage was requested at all
    if (!BooleanUtil.isTrue(param.getPointStatus())) {
        result.setUsePoint(0);
        return;
    }
    // 1.2 Check whether point deduction is enabled in the member configuration
    MemberConfigRespDTO config = memberConfigApi.getConfig();
    if (!isDeductPointEnable(config)) {
        return;
    }
    // 1.3 Check the user's point balance
    MemberUserRespDTO user = memberUserApi.getUser(param.getUserId());
    if (user.getPoint() == null || user.getPoint() <= 0) {
        return;
    }
    // 2.1 Compute the discount amount granted by the points
    int pointPrice = calculatePointPrice(config, user.getPoint(), result);
    // 2.2 Split the used points and the discount across the selected order items
    List<TradePriceCalculateRespBO.OrderItem> orderItems = filterList(result.getItems(), TradePriceCalculateRespBO.OrderItem::getSelected);
    List<Integer> dividePointPrices = TradePriceCalculatorHelper.dividePrice(orderItems, pointPrice);
    List<Integer> divideUsePoints = TradePriceCalculatorHelper.dividePrice(orderItems, result.getUsePoint());
    // 3.1 Record the promotion detail (user-facing strings intentionally left in Chinese)
    TradePriceCalculatorHelper.addPromotion(result, orderItems, param.getUserId(), "积分抵扣", PromotionTypeEnum.POINT.getType(),
        StrUtil.format("积分抵扣:省 {} 元", TradePriceCalculatorHelper.formatPrice(pointPrice)), dividePointPrices);
    // 3.2 Update the per-SKU discount amounts and recompute the prices
    for (int i = 0; i < orderItems.size(); i++) {
        TradePriceCalculateRespBO.OrderItem orderItem = orderItems.get(i);
        orderItem.setPointPrice(dividePointPrices.get(i));
        orderItem.setUsePoint(divideUsePoints.get(i));
        TradePriceCalculatorHelper.recountPayPrice(orderItem);
    }
    TradePriceCalculatorHelper.recountAllPrice(result);
}
// A user with a zero point balance must trigger the early return: no points used,
// no promotion recorded, prices untouched.
@Test
public void testCalculate_UserPointNotEnough() {
    // Prepare the request: two selected items use points, the unselected one does not
    TradePriceCalculateReqBO param = new TradePriceCalculateReqBO()
        .setUserId(233L).setPointStatus(true) // points usage requested
        .setItems(asList(
            new TradePriceCalculateReqBO.Item().setSkuId(10L).setCount(2).setSelected(true), // uses points
            new TradePriceCalculateReqBO.Item().setSkuId(20L).setCount(3).setSelected(true), // uses points
            new TradePriceCalculateReqBO.Item().setSkuId(30L).setCount(5).setSelected(false) // unselected, no points
        ));
    TradePriceCalculateRespBO result = new TradePriceCalculateRespBO()
        .setType(TradeOrderTypeEnum.NORMAL.getType())
        .setPrice(new TradePriceCalculateRespBO.Price())
        .setPromotions(new ArrayList<>())
        .setItems(asList(
            new TradePriceCalculateRespBO.OrderItem().setSkuId(10L).setCount(2).setSelected(true)
                .setPrice(100).setSpuId(1L),
            new TradePriceCalculateRespBO.OrderItem().setSkuId(20L).setCount(3).setSelected(true)
                .setPrice(50).setSpuId(2L),
            new TradePriceCalculateRespBO.OrderItem().setSkuId(30L).setCount(5).setSelected(false)
                .setPrice(30).setSpuId(3L)
        ));
    // Ensure the prices are initialized
    TradePriceCalculatorHelper.recountPayPrice(result.getItems());
    TradePriceCalculatorHelper.recountAllPrice(result);
    // Mock the member point configuration
    MemberConfigRespDTO memberConfig = randomPojo(MemberConfigRespDTO.class,
        o -> o.setPointTradeDeductEnable(true) // point deduction enabled
            .setPointTradeDeductUnitPrice(1) // discount per point (in cents)
            .setPointTradeDeductMaxPrice(100)); // maximum point deduction
    when(memberConfigApi.getConfig()).thenReturn(memberConfig);
    // Mock the member user with an empty point balance
    MemberUserRespDTO user = randomPojo(MemberUserRespDTO.class, o -> o.setId(param.getUserId()).setPoint(0));
    when(memberUserApi.getUser(user.getId())).thenReturn(user);
    // Invoke
    tradePointUsePriceCalculator.calculate(param, result);
    // Assert: no points were used
    assertNotUsePoint(result);
}
/**
 * Invokes {@code taskConfigConsumer} on the config of every non-final (i.e. still
 * running) task of the given type whose configured table name matches
 * {@code tableNameWithType}.
 */
public static void forRunningTasks(String tableNameWithType, String taskType,
    ClusterInfoAccessor clusterInfoAccessor, Consumer<Map<String, String>> taskConfigConsumer) {
  clusterInfoAccessor.getTaskStates(taskType).forEach((taskName, taskState) -> {
    // Tasks already in a terminal state are not "running" and are skipped.
    if (!TASK_FINAL_STATES.contains(taskState)) {
      for (PinotTaskConfig pinotTaskConfig : clusterInfoAccessor.getTaskConfigs(taskName)) {
        Map<String, String> configs = pinotTaskConfig.getConfigs();
        // Only configs targeting the requested table are passed to the consumer.
        if (tableNameWithType.equals(configs.get(MinionConstants.TABLE_NAME_KEY))) {
          taskConfigConsumer.accept(configs);
        }
      }
    }
  });
}
/**
 * {@code forRunningTasks} must visit the task configs for every non-final task
 * state, skip all final states, and match on both table name and task type.
 */
@Test
public void testForRunningTasks() {
    String tableName = "mytable_OFFLINE";
    String taskType = "myTaskType";
    ClusterInfoAccessor mockClusterInfoAccessor = createMockClusterInfoAccessor();
    Map<String, TaskState> taskStatesMap = new HashMap<>();
    String taskID = System.currentTimeMillis() + "_0";
    when(mockClusterInfoAccessor.getTaskStates(taskType)).thenReturn(taskStatesMap);
    when(mockClusterInfoAccessor.getTaskConfigs(taskID))
        .thenReturn(Collections.singletonList(createTaskConfig(taskType, tableName, taskID)));
    int[] count = new int[1];
    TaskState[] nonFinalTaskStates = new TaskState[]{
        TaskState.NOT_STARTED, TaskState.IN_PROGRESS, TaskState.FAILING, TaskState.STOPPING, TaskState.STOPPED,
        TaskState.TIMING_OUT
    };
    // Every non-final state counts as "running" and must be visited exactly once.
    for (TaskState taskState : nonFinalTaskStates) {
        taskStatesMap.put(taskID, taskState);
        TaskGeneratorUtils.forRunningTasks(tableName, taskType, mockClusterInfoAccessor, taskConfig -> {
            assertEquals(taskConfig.get(MinionConstants.TABLE_NAME_KEY), tableName);
            assertEquals(taskConfig.get("taskID"), taskID);
            count[0]++;
        });
    }
    assertEquals(count[0], nonFinalTaskStates.length);
    // Final states must be skipped entirely.
    for (TaskState taskState : new TaskState[]{
        TaskState.COMPLETED, TaskState.FAILED, TaskState.ABORTED, TaskState.TIMED_OUT
    }) {
        taskStatesMap.put(taskID, taskState);
        TaskGeneratorUtils.forRunningTasks(tableName, taskType, mockClusterInfoAccessor, taskConfig -> {
            fail("Task should be in final state");
        });
    }
    // A mismatching table name or task type must produce no callbacks.
    TaskGeneratorUtils.forRunningTasks("fooTable", taskType, mockClusterInfoAccessor, taskConfig -> {
        fail("Different table name");
    });
    TaskGeneratorUtils.forRunningTasks(tableName, "fooTask", mockClusterInfoAccessor, taskConfig -> {
        fail("Different task type");
    });
}
/**
 * Writes {@code data} as a length-prefixed byte string: a length header whose
 * width depends on {@code data.length}, followed by the raw bytes.
 *
 * <p>Header encoding (multi-byte lengths are little-endian):
 * <ul>
 *   <li>&lt; 252: one plain length byte</li>
 *   <li>&lt; 2^16: marker 252, then a 2-byte length</li>
 *   <li>&lt; 2^24: marker 253, then a 3-byte length</li>
 *   <li>otherwise: marker 254, then a 4-byte length</li>
 * </ul>
 *
 * <p>NOTE(review): the canonical MySQL wire-protocol encoding reserves 251 as
 * the NULL marker, so its single-byte range is &lt; 251; this implementation
 * allows 251 as a plain length byte — confirm this matches the peer's decoder.
 */
public static void writeBinaryCodedLengthBytes(byte[] data, ByteArrayOutputStream out) throws IOException {
    // 1. write length byte/bytes
    if (data.length < 252) {
        out.write((byte) data.length);
    } else if (data.length < (1 << 16L)) {
        out.write((byte) 252);
        writeUnsignedShortLittleEndian(data.length, out);
    } else if (data.length < (1 << 24L)) {
        out.write((byte) 253);
        writeUnsignedMediumLittleEndian(data.length, out);
    } else {
        out.write((byte) 254);
        writeUnsignedIntLittleEndian(data.length, out);
    }
    // 2. write real data followed length byte/bytes
    out.write(data);
}
/**
 * A 16 MiB (2^24) payload crosses the 3-byte-length threshold, so the header
 * must be the 254 marker followed by the length in little-endian order.
 */
@Test
public void testWriteBinaryCodedLengthBytes4() throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ByteHelper.writeBinaryCodedLengthBytes(new byte[16777216], out);
    // 1 marker byte + 4 length bytes + 16777216 data bytes.
    byte[] expected = new byte[16777221];
    expected[0] = -2; // 0xFE == 254: the 4-byte length marker
    expected[4] = 1;  // 16777216 == 0x01000000, little-endian: 00 00 00 01
    Assert.assertArrayEquals(expected, (out.toByteArray()));
}
/**
 * Returns whether this listener is interested in the given entity: true only
 * for a non-null entity assignable to the listener's parameterized type.
 */
public boolean shouldCareAbout(Object entity) {
    return entity != null && getParameterizedClass().isAssignableFrom(entity.getClass());
}
/**
 * A listener parameterized with type A must ignore entities of unrelated type B.
 */
@Test
public void shouldNotCareAboutEntityOfADifferentTypeFromTheOneTheListenerIsParameterizedWith() {
    EntityConfigChangedListener entityConfigChangedListenerForA = new EntityConfigChangedListener<A>() {
        @Override
        public void onEntityConfigChange(A entity) {
        }
    };
    assertThat(entityConfigChangedListenerForA.shouldCareAbout(new B()), is(false));
}
/**
 * No-op: this implementation holds no resources that need releasing.
 */
@Override
public void close() {
}
/**
 * When a gap is detected between local continuation tokens, the local node must
 * be retried: the plan is reset to the expected offset range and re-executed,
 * and rows from both executions are delivered.
 */
@Test
public void shouldSucceed_gapDetectedLocal_retry() throws ExecutionException, InterruptedException {
    // Given:
    final AtomicReference<Set<KsqlNode>> nodes = new AtomicReference<>(
        ImmutableSet.of(ksqlNodeLocal, ksqlNodeRemote));
    final PushRouting routing = new PushRouting(sqr -> nodes.get(), 50, true);
    AtomicReference<TestLocalPublisher> localPublisher = new AtomicReference<>();
    AtomicInteger localCount = new AtomicInteger(0);
    when(pushPhysicalPlanManager.execute()).thenAnswer(a -> {
        localPublisher.set(new TestLocalPublisher(context));
        localCount.incrementAndGet();
        // The retried (second) execution immediately emits the second row.
        if (localCount.get() == 2) {
            localPublisher.get().accept(LOCAL_ROW2);
        }
        return localPublisher.get();
    });
    doAnswer(a -> {
        // The retry must reset the plan to the offsets expected after the gap.
        final Optional<PushOffsetRange> newOffsetRange = a.getArgument(0);
        assertThat(newOffsetRange.isPresent(), is(true));
        assertThat(newOffsetRange.get().getEndOffsets(), is(ImmutableList.of(0L, 3L)));
        return null;
    }).when(pushPhysicalPlanManager).reset(any());

    // When:
    final PushConnectionsHandle handle = handlePushRouting(routing);
    context.runOnContext(v -> {
        localPublisher.get().accept(LOCAL_CONTINUATION_TOKEN1);
        localPublisher.get().accept(LOCAL_ROW1);
        // This token is discontinuous with the previous one, triggering the retry.
        localPublisher.get().accept(LOCAL_CONTINUATION_TOKEN_GAP);
    });
    Set<List<?>> rows = waitOnRows(2);
    handle.close();

    // Then: the plan ran twice and both rows arrived.
    verify(pushPhysicalPlanManager, times(2)).execute();
    assertThat(rows.contains(LOCAL_ROW1.value().values()), is(true));
    assertThat(rows.contains(LOCAL_ROW2.value().values()), is(true));
}
/**
 * Returns the progress of this task's best attempt, or 0 if no attempt has been
 * selected yet. Guarded by the read lock so attempt selection is consistent
 * with concurrent state transitions.
 */
@Override
public float getProgress() {
    readLock.lock();
    try {
        TaskAttempt bestAttempt = selectBestAttempt();
        if (bestAttempt == null) {
            // No attempt to report on yet.
            return 0f;
        }
        return bestAttempt.getProgress();
    } finally {
        readLock.unlock();
    }
}
/**
 * Verifies that task progress tracks the progress of its latest attempt, resets
 * to 0 when that attempt is killed, and then follows the replacement attempt.
 *
 * <p>NOTE(review): this test uses bare Java {@code assert} statements, which
 * are silently skipped unless the JVM runs with {@code -ea}; consider JUnit
 * assertions so failures are always reported.
 */
@Test
public void testTaskProgress() {
    LOG.info("--- START: testTaskProgress ---");
    mockTask = createMockTask(TaskType.MAP);

    // launch task
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
    float progress = 0f;
    assert(mockTask.getProgress() == progress);
    launchTaskAttempt(getLastAttempt().getAttemptId());

    // update attempt1
    progress = 50f;
    updateLastAttemptProgress(progress);
    assert(mockTask.getProgress() == progress);
    progress = 100f;
    updateLastAttemptProgress(progress);
    assert(mockTask.getProgress() == progress);

    progress = 0f;
    // mark first attempt as killed
    updateLastAttemptState(TaskAttemptState.KILLED);
    assert(mockTask.getProgress() == progress);

    // kill first attempt
    // should trigger a new attempt
    // as no successful attempts
    killRunningTaskAttempt(getLastAttempt().getAttemptId());
    assert(taskAttempts.size() == 2);
    assert(mockTask.getProgress() == 0f);

    launchTaskAttempt(getLastAttempt().getAttemptId());
    progress = 50f;
    updateLastAttemptProgress(progress);
    assert(mockTask.getProgress() == progress);
}
/**
 * Returns whether {@code path} matches the temporary-file naming pattern
 * defined by {@code TEMPORARY_FILE_NAME}.
 */
public static boolean isTemporaryFileName(String path) {
    return TEMPORARY_FILE_NAME.matcher(path).matches();
}
/**
 * The temporary-file pattern must accept names produced by
 * {@code temporaryFileName} and 16-hex-digit literals, and reject malformed hex
 * sections, wrong digit counts, and missing prefix/suffix parts.
 */
@Test
public void isTemporaryFileName() {
    // Round-trip: generated names must always match.
    assertTrue(PathUtils.isTemporaryFileName(PathUtils.temporaryFileName(0, "/")));
    assertTrue(
        PathUtils.isTemporaryFileName(PathUtils.temporaryFileName(0xFFFFFFFFFFFFFFFFL, "/")));
    assertTrue(PathUtils.isTemporaryFileName("foo.alluxio.0x0123456789ABCDEF.tmp"));
    assertFalse(PathUtils.isTemporaryFileName("foo.alluxio.0x 0123456789.tmp")); // embedded space
    assertFalse(PathUtils.isTemporaryFileName("foo.alluxio.0x0123456789ABCDEFG.tmp")); // 17 hex digits
    assertFalse(PathUtils.isTemporaryFileName("foo.alluxio.0x0123456789ABCDE.tmp")); // 15 hex digits
    assertFalse(PathUtils.isTemporaryFileName("foo.0x0123456789ABCDEFG.tmp")); // missing "alluxio" part
    assertFalse(PathUtils.isTemporaryFileName("alluxio.0x0123456789ABCDEFG")); // missing ".tmp" suffix
}
@Override public int compare(String indexName1, String indexName2) { int separatorPosition = indexName1.lastIndexOf(separator); int index1Number; final String indexPrefix1 = separatorPosition != -1 ? indexName1.substring(0, separatorPosition) : indexName1; try { index1Number = Integer.parseInt(indexName1.substring(separatorPosition + 1)); } catch (Exception e) { index1Number = Integer.MIN_VALUE; //wrongly formatted index names go last } separatorPosition = indexName2.lastIndexOf(separator); int index2Number; final String indexPrefix2 = separatorPosition != -1 ? indexName2.substring(0, separatorPosition) : indexName2; try { index2Number = Integer.parseInt(indexName2.substring(separatorPosition + 1)); } catch (NumberFormatException e) { index2Number = Integer.MIN_VALUE; //wrongly formatted index names go last } final int prefixComparisonResult = indexPrefix1.compareTo(indexPrefix2); if (prefixComparisonResult == 0) { return -Integer.compare(index1Number, index2Number); } else { return prefixComparisonResult; } }
/**
 * When prefixes differ, the comparator must order purely by prefix, regardless
 * of the numeric suffixes (or their absence).
 */
@Test
void indexPrefixIsMoreImportantThanNumberWhileSorting() {
    assertTrue(comparator.compare("abc_5", "bcd_3") < 0);
    assertTrue(comparator.compare("abc", "bcd_3") < 0);
    assertTrue(comparator.compare("zzz_1", "aaa") > 0);
    assertTrue(comparator.compare("zzz", "aaa") > 0);
}
/**
 * Returns an unmodifiable view of the known RPC configuration parameter names.
 */
public static Set<String> getRpcParams() {
    return Collections.unmodifiableSet(CONFIG_NAMES);
}
/**
 * The set returned by {@code getRpcParams} must contain exactly one entry per
 * String constant annotated with {@code RpcConfigLabel}.
 */
@Test
void testGetRpcParams() {
    Field[] declaredFields = RpcConstants.class.getDeclaredFields();
    int i = 0;
    // Count the annotated String constants via reflection.
    for (Field declaredField : declaredFields) {
        declaredField.setAccessible(true);
        if (declaredField.getType().equals(String.class) && null != declaredField.getAnnotation(
            RpcConstants.RpcConfigLabel.class)) {
            i++;
        }
    }
    assertEquals(i, RpcConstants.getRpcParams().size());
}
/**
 * Runs a punctuation callback on the given processor node.
 *
 * <p>The punctuation timestamp is preserved in the record context while the
 * remaining fields are dummies (null topic, -1 partition/offset, empty headers).
 * Failures are classified in a strict order: timeouts (task corruption under
 * EOS), already-wrapped processing failures, pass-through task errors, and
 * finally arbitrary runtime exceptions, which are routed through the configured
 * {@code ProcessingExceptionHandler}.
 *
 * @throws IllegalStateException if a node is already set on the context
 */
@SuppressWarnings("unchecked")
@Override
public void punctuate(final ProcessorNode<?, ?, ?, ?> node,
                      final long timestamp,
                      final PunctuationType type,
                      final Punctuator punctuator) {
    if (processorContext.currentNode() != null) {
        throw new IllegalStateException(String.format("%sCurrent node is not null", logPrefix));
    }

    // when punctuating, we need to preserve the timestamp (this can be either system time or event time)
    // while other record context are set as dummy: null topic, -1 partition, -1 offset and empty header
    final ProcessorRecordContext recordContext = new ProcessorRecordContext(
        timestamp,
        -1L,
        -1,
        null,
        new RecordHeaders()
    );
    updateProcessorContext(node, time.milliseconds(), recordContext);

    if (log.isTraceEnabled()) {
        log.trace("Punctuating processor {} with timestamp {} and punctuation type {}", node.name(), timestamp, type);
    }

    try {
        maybeMeasureLatency(() -> punctuator.punctuate(timestamp), time, punctuateLatencySensor);
    } catch (final TimeoutException timeoutException) {
        if (!eosEnabled) {
            throw timeoutException;
        } else {
            // Under EOS a timeout means the task state may be inconsistent.
            record = null;
            throw new TaskCorruptedException(Collections.singleton(id));
        }
    } catch (final FailedProcessingException e) {
        // Already classified: re-wrap only the original cause.
        throw createStreamsException(node.name(), e.getCause());
    } catch (final TaskCorruptedException | TaskMigratedException e) {
        // Task-level errors propagate untouched.
        throw e;
    } catch (final RuntimeException processingException) {
        // Delegate the decision (FAIL vs CONTINUE) to the user-configured handler.
        final ErrorHandlerContext errorHandlerContext = new DefaultErrorHandlerContext(
            null,
            recordContext.topic(),
            recordContext.partition(),
            recordContext.offset(),
            recordContext.headers(),
            node.name(),
            id()
        );

        final ProcessingExceptionHandler.ProcessingHandlerResponse response;
        try {
            response = Objects.requireNonNull(
                processingExceptionHandler.handle(errorHandlerContext, null, processingException),
                "Invalid ProcessingExceptionHandler response."
            );
        } catch (final RuntimeException fatalUserException) {
            // The handler itself failed: this is a fatal user-code error.
            log.error(
                "Processing error callback failed after processing error for record: {}",
                errorHandlerContext,
                processingException
            );
            throw new FailedProcessingException("Fatal user code error in processing error callback", fatalUserException);
        }

        if (response == ProcessingExceptionHandler.ProcessingHandlerResponse.FAIL) {
            log.error("Processing exception handler is set to fail upon" +
                " a processing error. If you would rather have the streaming pipeline" +
                " continue after a processing error, please set the " +
                PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG + " appropriately.");
            throw createStreamsException(node.name(), processingException);
        } else {
            // CONTINUE: swallow the error but count the dropped punctuation.
            droppedRecordsSensor.record();
        }
    } finally {
        // Always clear the node so subsequent punctuate/process calls start clean.
        processorContext.setCurrentNode(null);
    }
}
/**
 * If the processing-exception handler itself throws while handling a
 * punctuation failure, the task must surface a FailedProcessingException that
 * wraps the handler's failure message and cause.
 */
@Test
public void punctuateShouldThrowFailedProcessingExceptionWhenProcessingExceptionHandlerThrowsAnException() {
    when(stateManager.taskId()).thenReturn(taskId);
    when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
    task = createStatelessTask(createConfig(
        AT_LEAST_ONCE,
        "100",
        LogAndFailExceptionHandler.class.getName(),
        CrashingProcessingExceptionHandler.class.getName()
    ));

    // The punctuator fails, then the crashing handler fails while handling it.
    final FailedProcessingException streamsException = assertThrows(
        FailedProcessingException.class,
        () -> task.punctuate(processorStreamTime, 1, PunctuationType.STREAM_TIME, timestamp -> {
            throw new KafkaException("KABOOM!");
        })
    );

    assertEquals("Fatal user code error in processing error callback", streamsException.getMessage());
    assertEquals("KABOOM from ProcessingExceptionHandlerMock!", streamsException.getCause().getMessage());
}
/**
 * Reads all error observations from the buffer starting at offset 0, passing
 * each to {@code consumer}, and returns the number of distinct observations.
 */
public static int read(final AtomicBuffer buffer, final ErrorConsumer consumer) {
    return read(buffer, consumer, 0);
}
/**
 * Two identical errors recorded back-to-back must be summarised into a single
 * observation with a count of 2 and the first/last timestamps preserved.
 */
@Test
void shouldReadSummarisedObservation() {
    final ErrorConsumer consumer = mock(ErrorConsumer.class);
    final long timestampOne = 7;
    final long timestampTwo = 10;
    final RuntimeException error = new RuntimeException("Test Error");
    // Capture the stack trace text as the log stores it.
    final StringWriter stringWriter = new StringWriter();
    error.printStackTrace(new PrintWriter(stringWriter));
    final String errorAsString = stringWriter.toString();

    when(clock.time()).thenReturn(timestampOne).thenReturn(timestampTwo);

    log.record(error);
    log.record(error);

    // One distinct observation, observed twice.
    assertThat(ErrorLogReader.read(buffer, consumer), is(1));
    verify(consumer).accept(eq(2), eq(timestampOne), eq(timestampTwo), eq(errorAsString));
}
/**
 * Logically empties the text: the length is reset to 0 and the cached text
 * (character) length is invalidated so it is recomputed lazily. The underlying
 * byte array is not touched, so its capacity remains available for reuse.
 */
public void clear() {
    length = 0;
    textLength = -1; // -1 marks "not yet computed"
}
/**
 * {@code clear()} must reset both length measures to 0 while retaining the
 * underlying byte array's capacity.
 */
@Test
public void testClear() throws Exception {
    // Test lengths on an empty text object
    Text text = new Text();
    assertEquals(
        "Actual string on an empty text object must be an empty string", "", text.toString());
    assertEquals("Underlying byte array length must be zero", 0, text.getBytes().length);
    assertEquals("String's length must be zero", 0, text.getLength());
    assertEquals("String's text length must be zero", 0, text.getTextLength());

    // Test if clear works as intended
    text = new Text("abcd\u20acbdcd\u20ac");
    int len = text.getLength();
    text.clear();
    assertEquals("String must be empty after clear()", "", text.toString());
    // The backing array is retained, so its length may only stay equal or grow.
    assertTrue(
        "Length of the byte array must not decrease after clear()", text.getBytes().length >= len);
    assertEquals("Length of the string must be reset to 0 after clear()", 0, text.getLength());
    assertEquals("Text length of the string must be reset to 0 after clear()", 0, text.getTextLength());
}
/**
 * Creates the file-writing processor.
 *
 * @param directoryName directory the output files are written into
 * @param toStringFn    converts each item to its textual representation
 * @param charset       name of the charset used to encode the output
 * @param dateFormatter optional date pattern; when non-null it is resolved
 *                      against UTC so file naming is timezone-independent
 * @param maxFileSize   maximum file size — presumably triggers rolling to a new
 *                      file; see the sink's write logic
 * @param exactlyOnce   whether exactly-once semantics were requested
 * @param clock         source of the current time, injectable for tests
 */
public WriteFileP(
        @Nonnull String directoryName,
        @Nonnull FunctionEx<? super T, ? extends String> toStringFn,
        @Nonnull String charset,
        @Nullable String dateFormatter,
        long maxFileSize,
        boolean exactlyOnce,
        @Nonnull LongSupplier clock
) {
    this.directory = Paths.get(directoryName);
    this.toStringFn = toStringFn;
    this.charset = Charset.forName(charset);
    // Pin the formatter to UTC so rolling does not depend on the local timezone.
    this.dateFormatter = dateFormatter != null
            ? DateTimeFormatter.ofPattern(dateFormatter).withZone(ZoneId.from(ZoneOffset.UTC))
            : null;
    this.maxFileSize = maxFileSize;
    this.exactlyOnce = exactlyOnce;
    this.clock = clock;
}
/**
 * With a slow source, the sink must flush after every item so the output file
 * reflects all items emitted so far, long before the job completes.
 */
@Test
public void when_slowSource_then_fileFlushedAfterEachItem() {
    // Given
    int numItems = 10;

    DAG dag = new DAG();
    Vertex source = dag.newVertex("source", () -> new SlowSourceP(semaphore, numItems))
                       .localParallelism(1);
    Vertex sink = dag.newVertex("sink",
            writeFileP(directory.toString(), StandardCharsets.UTF_8, null, DISABLE_ROLLING, true, Object::toString))
                     .localParallelism(1);
    dag.edge(between(source, sink));

    Job job = instance().getJet().newJob(dag);
    for (int i = 0; i < numItems; i++) {
        // When: let the source emit exactly one more item.
        semaphore.release();
        int finalI = i;
        // Then: the flushed file must contain all items emitted so far.
        assertTrueEventually(() -> checkFileContents(0, finalI + 1, false, false, true), 5);
    }

    // wait for the job to finish
    job.join();
}
/**
 * Routes the given context: resolves its process type, delegates to the
 * matching {@code ProcessRouter}, and publishes the resulting instruction.
 *
 * @throws FrameworkException when the process type or its router cannot be
 *         resolved, or wrapping any unexpected failure as UnknownAppError
 */
@Override
public void route(ProcessContext context) throws FrameworkException {
    try {
        ProcessType processType = matchProcessType(context);
        if (processType == null) {
            if (LOGGER.isWarnEnabled()) {
                LOGGER.warn("Process type not found, context= {}", context);
            }
            throw new FrameworkException(FrameworkErrorCode.ProcessTypeNotFound);
        }
        ProcessRouter processRouter = processRouters.get(processType.getCode());
        if (processRouter == null) {
            LOGGER.error("Cannot find process router by type {}, context = {}", processType.getCode(), context);
            throw new FrameworkException(FrameworkErrorCode.ProcessRouterNotFound);
        }
        Instruction instruction = processRouter.route(context);
        if (instruction == null) {
            // A null instruction means the process reached its end; nothing to publish.
            LOGGER.info("route instruction is null, process end");
        } else {
            context.setInstruction(instruction);
            eventPublisher.publish(context);
        }
    } catch (FrameworkException e) {
        // Re-throw framework failures untouched so callers see the original error code.
        throw e;
    } catch (Exception ex) {
        // Wrap anything unexpected so callers only ever see FrameworkException.
        throw new FrameworkException(ex, ex.getMessage(), FrameworkErrorCode.UnknownAppError);
    }
}
/**
 * Routing a context that matches no process type must fail with a
 * FrameworkException.
 */
@Test
public void testRouteOfFrameworkException() {
    final ProcessContextImpl emptyContext = new ProcessContextImpl();
    final DefaultRouterHandler handler = new DefaultRouterHandler();
    Assertions.assertThrows(FrameworkException.class, () -> handler.route(emptyContext));
}
/**
 * Fills the remaining bytes of a heap-backed buffer from the stream, then
 * advances the buffer's position to its limit.
 *
 * @throws IOException if the stream fails or ends before the buffer is full
 */
static void readFullyHeapBuffer(InputStream f, ByteBuffer buf) throws IOException {
    // Read straight into the backing array; arrayOffset() accounts for sliced buffers.
    readFully(f, buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
    buf.position(buf.limit());
}
/**
 * Reading into a buffer larger than the stream's content must throw
 * EOFException and leave the buffer's position untouched.
 */
@Test
public void testHeapReadFullyLargeBuffer() throws Exception {
    final ByteBuffer readBuffer = ByteBuffer.allocate(20);

    final MockInputStream stream = new MockInputStream();

    TestUtils.assertThrows("Should throw EOFException",
        EOFException.class,
        () -> {
            DelegatingSeekableInputStream.readFullyHeapBuffer(stream, readBuffer);
            return null;
        });

    // Position must be unchanged after the failed read.
    Assert.assertEquals(0, readBuffer.position());
    Assert.assertEquals(20, readBuffer.limit());
}
/**
 * Returns the underlying actor system this RPC service runs on.
 */
public ActorSystem getActorSystem() {
    return actorSystem;
}
/**
 * Shutting down the RPC service while one endpoint fails its onStop must
 * surface the OnStopException, yet still terminate the actor system.
 */
@Test
void testRpcServiceShutDownWithFailingRpcEndpoints() throws Exception {
    final PekkoRpcService pekkoRpcService = startRpcService();

    final int numberActors = 5;

    final RpcServiceShutdownTestHelper rpcServiceShutdownTestHelper =
        startStopNCountingAsynchronousOnStopEndpoints(pekkoRpcService, numberActors);

    final Iterator<CompletableFuture<Void>> iterator =
        rpcServiceShutdownTestHelper.getStopFutures().iterator();
    // All but one endpoint stop cleanly; the last one fails its onStop.
    for (int i = 0; i < numberActors - 1; i++) {
        iterator.next().complete(null);
    }
    iterator.next().completeExceptionally(new OnStopException("onStop exception occurred."));

    assertThatThrownBy(rpcServiceShutdownTestHelper::waitForRpcServiceTermination)
        .satisfies(FlinkAssertions.anyCauseMatches(OnStopException.class));
    // Despite the failure, the actor system must still have terminated.
    assertThat(pekkoRpcService.getActorSystem().whenTerminated().isCompleted()).isTrue();
}
/**
 * Returns the last name element of this path.
 *
 * @throws UnsupportedOperationException for a bucket root, which has no file name
 */
@Override
public GcsPath getFileName() {
    final int nameCount = getNameCount();
    if (nameCount >= 2) {
        return getName(nameCount - 1);
    }
    throw new UnsupportedOperationException(
        "Can't get filename from root path in the bucket: " + this);
}
/**
 * {@code getFileName} must return the last path segment, and reject a bucket
 * root (which has no file name).
 *
 * <p>NOTE(review): the JUnit 4 {@code ExpectedException} rule is deprecated in
 * favour of {@code assertThrows}; also note the throwing call must stay last,
 * as no assertion after it would execute.
 */
@Test
public void testGetFileName() {
    assertEquals("foo", GcsPath.fromUri("gs://bucket/bar/foo").getFileName().toString());
    assertEquals("foo", GcsPath.fromUri("gs://bucket/foo").getFileName().toString());

    thrown.expect(UnsupportedOperationException.class);
    GcsPath.fromUri("gs://bucket/").getFileName();
}
/**
 * Rebuilds the Casdoor auth service from the plugin's JSON config and registers
 * it as a singleton, replacing any previously registered instance.
 */
@Override
public void handlerPlugin(final PluginData pluginData) {
    Map<String, String> configMap = GsonUtils.getInstance().toObjectMap(pluginData.getConfig(), String.class);
    // Missing keys fall back to "" rather than null.
    final String endpoint = Optional.ofNullable(configMap.get("endpoint")).orElse("");
    // NOTE(review): "client_secrect" is misspelled but is the established config
    // key; renaming it would break existing configurations.
    final String clientSecrect = Optional.ofNullable(configMap.get("client_secrect")).orElse("");
    final String clientId = Optional.ofNullable(configMap.get("client_id")).orElse("");
    String certificate = Optional.ofNullable(configMap.get("certificate")).orElse("");
    // JSON transports the PEM certificate with escaped newlines; restore them.
    certificate = certificate.replace("\\n", "\n");
    String organization = Optional.ofNullable(configMap.get("organization-name")).orElse("");
    String application = Optional.ofNullable(configMap.get("application-name")).orElse("");
    CasdoorConfig casdoorConfig = new CasdoorConfig(endpoint, clientId, clientSecrect, certificate, organization, application);
    CasdoorAuthService casdoorAuthService = new CasdoorAuthService(casdoorConfig);
    Singleton.INST.single(CasdoorAuthService.class, casdoorAuthService);
}
/**
 * After the plugin data is handled, the registered CasdoorAuthService must
 * build sign-in URLs from the configured endpoint, client id and application
 * name.
 */
@Test
public void handlerPlugin() {
    final PluginData pluginData = new PluginData("pluginId", "pluginName",
            "{\"organization-name\":\"test\",\"application-name\":\"app-test\",\"endpoint\":\"http://localhost:8000\",\"client_secrect\":\"a4209d412a33a842b7a9c05a3446e623cbb7262d\",\"client_id\":\"6e3a84154e73d1fb156a\",\"certificate\":\"-----BEGIN CERTIFICATE-----\\n\"}",
            "0", false, null);
    casdoorPluginDateHandlerTest.handlerPlugin(pluginData);
    // The handler registers the service as a singleton; fetch it back.
    CasdoorAuthService casdoorAuthService = Singleton.INST.get(CasdoorAuthService.class);
    String redirect = "http://localhost:9195/http/hi";
    assertEquals(casdoorAuthService.getSigninUrl(redirect),
            "http://localhost:8000/login/oauth/authorize?client_id=6e3a84154e73d1fb156a&response_type=code&redirect_uri=http%3A%2F%2Flocalhost%3A9195%2Fhttp%2Fhi&scope=read&state=app-test");
}
/**
 * Determines whether one of the two dependencies is a jar shading the other's
 * pom: a jar/pom pair is considered shaded when the jar's software identifiers
 * contain every identifier of the pom. Returns false for nulls, missing file
 * names, empty identifier sets, or any other file-name combination.
 */
protected boolean isShadedJar(Dependency dependency, Dependency nextDependency) {
    if (dependency == null || nextDependency == null
            || dependency.getFileName() == null || nextDependency.getFileName() == null
            || dependency.getSoftwareIdentifiers().isEmpty()
            || nextDependency.getSoftwareIdentifiers().isEmpty()) {
        return false;
    }
    final String firstName = dependency.getFileName().toLowerCase();
    final String secondName = nextDependency.getFileName().toLowerCase();
    if (firstName.endsWith(".jar") && secondName.endsWith("pom.xml")) {
        // The jar must cover every identifier declared by the pom.
        return dependency.getSoftwareIdentifiers().containsAll(nextDependency.getSoftwareIdentifiers());
    }
    if (secondName.endsWith(".jar") && firstName.endsWith("pom.xml")) {
        // Same rule with the roles reversed.
        return nextDependency.getSoftwareIdentifiers().containsAll(dependency.getSoftwareIdentifiers());
    }
    return false;
}
/**
 * Exercises every branch of {@code isShadedJar}: nulls, missing file names,
 * empty identifier sets, jar/pom pairs in both orders (with and without
 * identifier containment), and jar/jar pairs (never shaded).
 */
@Test
public void testIsShaded() throws MalformedPackageURLException {
    DependencyBundlingAnalyzer instance = new DependencyBundlingAnalyzer();
    // Null dependencies are never shaded.
    Dependency left = null;
    Dependency right = null;
    boolean expResult = false;
    boolean result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);

    // A dependency without a file name is never shaded.
    left = new Dependency();
    expResult = false;
    result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);

    left = new Dependency(new File("/path/jar.jar"), true);
    expResult = false;
    result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);

    right = new Dependency();
    expResult = false;
    result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);

    right = new Dependency(new File("/path/pom.xml"), true);
    expResult = false;
    result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);

    // Identifiers on only one side are not enough.
    left.addSoftwareIdentifier(new PurlIdentifier("maven", "test", "test", "1.0", Confidence.HIGHEST));
    expResult = false;
    result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);

    right.addSoftwareIdentifier(new PurlIdentifier("maven", "next", "next", "1.0", Confidence.HIGHEST));
    expResult = false;
    result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);

    // jar + pom: shaded once the jar's identifiers contain all of the pom's.
    left.addSoftwareIdentifier(new PurlIdentifier("maven", "next", "next", "1.0", Confidence.HIGHEST));
    expResult = true;
    result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);

    // pom + jar (reversed order): containment must hold the other way round.
    left = new Dependency(new File("/path/pom.xml"), true);
    left.addSoftwareIdentifier(new PurlIdentifier("maven", "test", "test", "1.0", Confidence.HIGHEST));
    right = new Dependency(new File("/path/jar.jar"), true);
    right.addSoftwareIdentifier(new PurlIdentifier("maven", "next", "next", "1.0", Confidence.HIGHEST));
    expResult = false;
    result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);

    right.addSoftwareIdentifier(new PurlIdentifier("maven", "test", "test", "1.0", Confidence.HIGHEST));
    expResult = true;
    result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);

    // jar + jar: never treated as shaded regardless of identifiers.
    left = new Dependency(new File("/path/other.jar"), true);
    left.addSoftwareIdentifier(new PurlIdentifier("maven", "test", "test", "1.0", Confidence.HIGHEST));
    right = new Dependency(new File("/path/jar.jar"), true);
    right.addSoftwareIdentifier(new PurlIdentifier("maven", "next", "next", "1.0", Confidence.HIGHEST));
    expResult = false;
    result = instance.isShadedJar(left, right);
    assertEquals(expResult, result);
}
/**
 * Removes the given object from the set using its own hash code.
 *
 * @throws NullPointerException if {@code objectToRemove} is null
 */
@Override
public boolean remove(Object objectToRemove) {
    return remove(objectToRemove, objectToRemove.hashCode());
}
/**
 * Removing a null object must fail fast with NullPointerException.
 */
@Test(expected = NullPointerException.class)
public void testRemoveWithHashNull() {
    final OAHashSet<Integer> set = new OAHashSet<>(8);

    set.remove(null, 1);
}
/**
 * Sends this request synchronously and returns the deserialized response.
 *
 * @throws IOException if the underlying service call fails
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
/**
 * {@code eth_uninstallFilter} must serialise the filter id as a hex quantity
 * in the JSON-RPC request payload.
 */
@Test
public void testEthUninstallFilter() throws Exception {
    web3j.ethUninstallFilter(Numeric.toBigInt("0xb")).send();

    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_uninstallFilter\","
            + "\"params\":[\"0xb\"],\"id\":1}");
}
/**
 * Lists the entries of {@code dir}, failing loudly instead of returning null.
 *
 * @throws AccessDeniedException if the directory cannot be read
 * @throws IOException if {@code dir} is not a valid directory or an I/O error occurs
 */
public static String[] list(File dir) throws IOException {
    if (!canRead(dir)) {
        throw new AccessDeniedException(dir.toString(), null,
            FSExceptionMessages.PERMISSION_DENIED);
    }
    final String[] entries = dir.list();
    if (entries == null) {
        // File#list returns null for non-directories and on I/O errors.
        throw new IOException("Invalid directory or I/O error occurred for dir: "
            + dir.toString());
    }
    return entries;
}
/**
 * {@code FileUtil.list} must enumerate existing files, return an empty array
 * for an empty directory, and throw IOException for a non-existent directory.
 */
@Test (timeout = 30000)
public void testListAPI() throws IOException {
    //Test existing files case
    String[] files = FileUtil.list(partitioned);
    Assert.assertEquals("Unexpected number of pre-existing files", 2, files.length);

    //Test existing directory with no files case
    File newDir = new File(tmp.getPath(),"test");
    Verify.mkdir(newDir);
    Assert.assertTrue("Failed to create test dir", newDir.exists());
    files = FileUtil.list(newDir);
    Assert.assertEquals("New directory unexpectedly contains files", 0, files.length);
    assertTrue(newDir.delete());
    Assert.assertFalse("Failed to delete test dir", newDir.exists());

    //Test non-existing directory case, this throws
    //IOException
    try {
        files = FileUtil.list(newDir);
        Assert.fail("IOException expected on list() for non-existent dir " + newDir.toString());
    } catch(IOException ioe) {
        //Expected an IOException
    }
}
/**
 * Removes the whitelist entry with the given id and persists the modified
 * whitelist (a no-op removal still triggers a save).
 */
public void removeEntry(String id) {
    UrlWhitelist modified = removeEntry(getWhitelist(), id);
    saveWhitelist(modified);
}
/**
 * {@code removeEntry} must leave the whitelist untouched for a null or unknown
 * id and drop the matching entry otherwise.
 */
@Test
public void removeEntry() {
    final WhitelistEntry entry = LiteralWhitelistEntry.create("a", "a", "a");
    final UrlWhitelist whitelist = UrlWhitelist.createEnabled(Collections.singletonList(entry));

    assertThat(urlWhitelistService.removeEntry(whitelist, null)).isEqualTo(whitelist);
    assertThat(urlWhitelistService.removeEntry(whitelist, "b")).isEqualTo(whitelist);
    assertThat(urlWhitelistService.removeEntry(whitelist, "a")).isEqualTo(
        UrlWhitelist.createEnabled(Collections.emptyList()));
}
/**
 * Builds the scheduler's increment-allocation {@code Resource} from
 * configuration.
 *
 * <p>Each declared resource type may define a "value [unit]" increment via its
 * per-type property; values are normalized to the type's default unit. For
 * memory and vcores, the per-type property takes precedence over the legacy
 * {@code RM_SCHEDULER_INCREMENT_ALLOCATION_MB}/{@code _VCORES} properties (a
 * warning is logged when both are set); otherwise the legacy property or its
 * default is used.
 *
 * @throws IllegalArgumentException if a configured increment does not match
 *         the "value [unit]" format
 */
public Resource getIncrementAllocation() {
    Long memory = null;
    Integer vCores = null;
    Map<String, Long> others = new HashMap<>();
    ResourceInformation[] resourceTypes = ResourceUtils.getResourceTypesArray();
    // Collect every per-type increment property, normalized to default units.
    for (int i=0; i < resourceTypes.length; ++i) {
        String name = resourceTypes[i].getName();
        String propertyKey = getAllocationIncrementPropKey(name);
        String propValue = get(propertyKey);
        if (propValue != null) {
            Matcher matcher = RESOURCE_REQUEST_VALUE_PATTERN.matcher(propValue);
            if (matcher.matches()) {
                long value = Long.parseLong(matcher.group(1));
                String unit = matcher.group(2);
                long valueInDefaultUnits = getValueInDefaultUnits(value, unit, name);
                others.put(name, valueInDefaultUnits);
            } else {
                throw new IllegalArgumentException("Property " + propertyKey +
                    " is not in \"value [unit]\" format: " + propValue);
            }
        }
    }
    // Memory: the per-type property wins over the legacy property.
    if (others.containsKey(ResourceInformation.MEMORY_MB.getName())) {
        memory = others.get(ResourceInformation.MEMORY_MB.getName());
        if (get(RM_SCHEDULER_INCREMENT_ALLOCATION_MB) != null) {
            String overridingKey = getAllocationIncrementPropKey(
                ResourceInformation.MEMORY_MB.getName());
            LOG.warn("Configuration " + overridingKey + "=" + get(overridingKey)
                + " is overriding the " + RM_SCHEDULER_INCREMENT_ALLOCATION_MB
                + "=" + get(RM_SCHEDULER_INCREMENT_ALLOCATION_MB) + " property");
        }
        // Memory is passed separately, not via the "others" map.
        others.remove(ResourceInformation.MEMORY_MB.getName());
    } else {
        memory = getLong(
            RM_SCHEDULER_INCREMENT_ALLOCATION_MB,
            DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB);
    }
    // VCores: same precedence as memory.
    if (others.containsKey(ResourceInformation.VCORES.getName())) {
        vCores = others.get(ResourceInformation.VCORES.getName()).intValue();
        if (get(RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES) != null) {
            String overridingKey = getAllocationIncrementPropKey(
                ResourceInformation.VCORES.getName());
            LOG.warn("Configuration " + overridingKey + "=" + get(overridingKey)
                + " is overriding the " + RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES
                + "=" + get(RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES) + " property");
        }
        others.remove(ResourceInformation.VCORES.getName());
    } else {
        vCores = getInt(
            RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES,
            DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES);
    }
    return Resource.newInstance(memory, vCores, others);
}
/**
 * When the legacy vcores increment property and the resource-types property
 * disagree, the resource-types property must win and a warning must be logged.
 */
@Test
public void testCpuIncrementConfiguredViaMultipleProperties() {
    TestAppender testAppender = new TestAppender();
    Logger logger = LogManager.getRootLogger();
    logger.addAppender(testAppender);
    try {
        Configuration conf = new Configuration();
        // Legacy property says 7; the per-resource-type property says 13.
        conf.set("yarn.scheduler.increment-allocation-vcores", "7");
        conf.set(YarnConfiguration.RESOURCE_TYPES + "." +
            ResourceInformation.VCORES.getName() +
            FairSchedulerConfiguration.INCREMENT_ALLOCATION, "13");
        FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
        Resource increment = fsc.getIncrementAllocation();
        Assert.assertEquals(13, increment.getVirtualCores());
        assertTrue("Warning message is not logged when specifying CPU vCores " +
            "increment via multiple properties",
            testAppender.getLogEvents().stream().anyMatch(
                e -> e.getLevel() == Level.WARN && ("Configuration " +
                    "yarn.resource-types.vcores.increment-allocation=13 is " +
                    "overriding the yarn.scheduler.increment-allocation-vcores=7 " +
                    "property").equals(e.getMessage())));
    } finally {
        // Always detach the appender so other tests don't capture our events.
        logger.removeAppender(testAppender);
    }
}
/**
 * Returns the cached loader for {@code service}, creating and caching a new one
 * on first use.
 *
 * @throws NullPointerException if {@code service} is null
 */
@SuppressWarnings("unchecked")
public static <T> AgentServiceLoader<T> getServiceLoader(final Class<T> service) {
    return (AgentServiceLoader<T>) LOADERS.computeIfAbsent(service, AgentServiceLoader::new);
}
/**
 * A null service class must be rejected with NullPointerException.
 */
@Test
void assertGetServiceLoaderWithNullValue() {
    assertThrows(NullPointerException.class, () -> AgentServiceLoader.getServiceLoader(null));
}
/**
 * Returns every element equal (per {@code elementKeysAreEqual}) to {@code key}.
 *
 * <p>Uses open addressing with linear probing: starting at the key's slot, it
 * scans forward (wrapping around) until an empty slot ends the probe run,
 * collecting every matching element along the way.
 *
 * @return the matches in probe order; empty for a null key or empty collection
 */
public final List<E> findAll(E key) {
    if (key == null || size() == 0) {
        return Collections.emptyList();
    }
    ArrayList<E> results = new ArrayList<>();
    int slot = slot(elements, key);
    // Bounded by the table size so a completely full table cannot loop forever.
    for (int seen = 0; seen < elements.length; seen++) {
        Element element = elements[slot];
        if (element == null) {
            // An empty slot terminates the probe run: no further matches possible.
            break;
        }
        if (key.elementKeysAreEqual(element)) {
            @SuppressWarnings("unchecked")
            E result = (E) elements[slot];
            results.add(result);
        }
        slot = (slot + 1) % elements.length;
    }
    return results;
}
/**
 * A multi-collection must keep equal elements as distinct entries, reject
 * re-adding the same instance, and remove by identity while equality-based
 * lookups still match remaining equal elements.
 */
@Test
public void testInsertDelete() {
    ImplicitLinkedHashMultiCollection<TestElement> multiSet = new ImplicitLinkedHashMultiCollection<>(100);
    TestElement e1 = new TestElement(1);
    TestElement e2 = new TestElement(1);
    TestElement e3 = new TestElement(2);
    multiSet.mustAdd(e1);
    multiSet.mustAdd(e2);
    multiSet.mustAdd(e3);
    // The very same instance cannot be added twice.
    assertFalse(multiSet.add(e3));
    assertEquals(3, multiSet.size());
    expectExactTraversal(multiSet.findAll(e1).iterator(), e1, e2);
    expectExactTraversal(multiSet.findAll(e3).iterator(), e3);
    multiSet.remove(e2);
    expectExactTraversal(multiSet.findAll(e1).iterator(), e1);
    // contains() is equality-based, so the remaining equal element e1 still matches e2.
    assertTrue(multiSet.contains(e2));
}
/**
 * Creates the schema command, logging through the given console logger.
 */
public SchemaCommand(Logger console) {
    super(console);
}
/**
 * Running the schema command against a parquet file must succeed (exit code 0).
 */
@Test
public void testSchemaCommand() throws IOException {
    File file = parquetFile();
    SchemaCommand command = new SchemaCommand(createLogger());
    command.targets = Arrays.asList(file.getAbsolutePath());
    command.setConf(new Configuration());
    Assert.assertEquals(0, command.run());
}
/**
 * Configures the client builder for the given storage account.
 *
 * <p>Credential precedence: an account-specific SAS token, then the shared
 * named key, then Azure's default credential chain. The endpoint is applied
 * last so a configured connection string's parameters (e.g. its SAS token)
 * take precedence over what was set before.
 */
public void applyClientConfiguration(String account, DataLakeFileSystemClientBuilder builder) {
    String sasToken = adlsSasTokens.get(account);
    if (sasToken != null && !sasToken.isEmpty()) {
        builder.sasToken(sasToken);
    } else if (namedKeyCreds != null) {
        builder.credential(
            new StorageSharedKeyCredential(namedKeyCreds.getKey(), namedKeyCreds.getValue()));
    } else {
        builder.credential(new DefaultAzureCredentialBuilder().build());
    }

    // apply connection string last so its parameters take precedence, e.g. SAS token
    String connectionString = adlsConnectionStrings.get(account);
    if (connectionString != null && !connectionString.isEmpty()) {
        builder.endpoint(connectionString);
    } else {
        builder.endpoint("https://" + account);
    }
}
// Verifies a configured per-account connection string is applied verbatim as the endpoint.
@Test public void testWithConnectionString() { AzureProperties props = new AzureProperties(ImmutableMap.of("adls.connection-string.account1", "http://endpoint")); DataLakeFileSystemClientBuilder clientBuilder = mock(DataLakeFileSystemClientBuilder.class); props.applyClientConfiguration("account1", clientBuilder); verify(clientBuilder).endpoint("http://endpoint"); }
/** Returns the maximum date value collected so far (0 when nothing was collected, per the tests). */
public long getMax() { return maxDate; }
// The collector's maximum defaults to zero before any date is recorded.
@Test public void max_is_zero_if_no_dates() { assertThat(collector.getMax()).isZero(); }
/**
 * Prepares fetch requests for all fetchable partitions and returns the poll result,
 * wiring the success/failure handlers into the internal poll.
 */
@Override public PollResult poll(long currentTimeMs) { return pollInternal( prepareFetchRequests(), this::handleFetchSuccess, this::handleFetchFailure ); }
// A fetch completed while the partition was paused must be discarded once the
// partition is resumed at a different seek offset: no records, position unchanged.
@Test public void testFetchDiscardedAfterPausedPartitionResumedAndSeekedToNewOffset() { buildFetcher(); assignFromUser(singleton(tp0)); subscriptions.seek(tp0, 0); assertEquals(1, sendFetches()); subscriptions.pause(tp0); client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0)); subscriptions.seek(tp0, 3); subscriptions.resume(tp0); networkClientDelegate.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches(), "Should have 1 entry in completed fetches"); Fetch<byte[], byte[]> fetch = collectFetch(); assertEquals(emptyMap(), fetch.records(), "Should not return any records because we seeked to a new offset"); assertFalse(fetch.positionAdvanced()); assertFalse(fetcher.hasCompletedFetches(), "Should have no completed fetches"); }
/** Returns a Read transform with defaults: max records set to Long.MAX_VALUE (effectively unbounded). */
public static Read read() { return new AutoValue_AmqpIO_Read.Builder().setMaxNumRecords(Long.MAX_VALUE).build(); }
// End-to-end read test: sends 100 AMQP messages to an embedded broker queue and
// asserts the pipeline reads exactly 100 of them.
@Test public void testRead() throws Exception { PCollection<Message> output = pipeline.apply( AmqpIO.read() .withMaxNumRecords(100) .withAddresses(Collections.singletonList(broker.getQueueUri("testRead")))); PAssert.thatSingleton(output.apply(Count.globally())).isEqualTo(100L); Messenger sender = Messenger.Factory.create(); sender.start(); for (int i = 0; i < 100; i++) { Message message = Message.Factory.create(); message.setAddress(broker.getQueueUri("testRead")); message.setBody(new AmqpValue("Test " + i)); sender.put(message); sender.send(); } sender.stop(); pipeline.run(); }
/**
 * Executes the configured command and returns its merged stdout/stderr output.
 *
 * @return the command output, one line per {@code \n}
 * @throws IOException if the process cannot start, the wait is interrupted
 *         (the interrupt flag is restored first), or — as a
 *         {@code ShellUtils.ExitCodeException} — the process exits non-zero
 */
public String run() throws IOException {
    Process process = new ProcessBuilder(mCommand).redirectErrorStream(true).start();
    BufferedReader inReader =
        new BufferedReader(new InputStreamReader(process.getInputStream(), Charset.defaultCharset()));
    try {
      // read the output of the command
      StringBuilder output = new StringBuilder();
      String line = inReader.readLine();
      while (line != null) {
        output.append(line);
        output.append("\n");
        line = inReader.readLine();
      }
      // wait for the process to finish and check the exit code
      int exitCode = process.waitFor();
      if (exitCode != 0) {
        throw new ShellUtils.ExitCodeException(exitCode, output.toString());
      }
      return output.toString();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IOException(e);
    } finally {
      // close the input stream
      try {
        // JDK 7 tries to automatically drain the input streams for us
        // when the process exits, but since close is not synchronized,
        // it creates a race if we close the stream first and the same
        // fd is recycled. the stream draining thread will attempt to
        // drain that fd!! it may block, OOM, or cause bizarre behavior
        // see: https://bugs.openjdk.java.net/browse/JDK-8024521
        // issue is fixed in build 7u60
        InputStream stdout = process.getInputStream();
        synchronized (stdout) {
          inReader.close();
        }
      } catch (IOException e) {
        LOG.warn(String.format("Error while closing the input stream of process %s: %s",
            process, e.getMessage()));
      }
      process.destroy();
    }
}
// Runs a trivial echo through ShellCommand and checks the captured stdout
// (including the trailing newline appended per line).
@Test public void execCommand() throws Exception {
    String testString = "alluxio";
    // Execute echo for testing command execution.
    String[] cmd = new String[]{"bash", "-c", "echo " + testString};
    String result = new ShellCommand(cmd).run();
    assertEquals(testString + "\n", result);
}
/**
 * Reads a (possibly password-protected) private key from {@code keyFile}.
 * Delegates to the three-argument overload with its boolean flag set to
 * {@code true} — presumably enabling a fallback decoder; confirm against the
 * overload's definition (not visible here).
 */
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeySpecException, InvalidAlgorithmParameterException, KeyException, IOException { return toPrivateKey(keyFile, keyPassword, true); }
// A PBES2-encrypted PKCS#8 key must decrypt successfully with its password
// (three-arg overload with the flag explicitly false).
@Test public void testPkcs8Pbes2() throws Exception { PrivateKey key = SslContext.toPrivateKey(new File(getClass().getResource("rsa_pbes2_enc_pkcs8.key") .getFile()), "12345678", false); assertNotNull(key); }
/** Always fails: a wrapped plugin manager forbids plugins from loading other plugins. */
@Override public String loadPlugin(Path pluginPath) { throw new IllegalAccessError(PLUGIN_PREFIX + currentPluginId + " tried to execute loadPlugin!"); }
// The wrapped manager must reject loadPlugin with an IllegalAccessError.
@Test public void loadPlugin() { assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.loadPlugin(thisPlugin.path())); }
/** Trains an RBF network on (x, y) with the given basis functions; the trailing flag is false (meaning defined by the 4-arg overload — confirm there). */
public static <T> RBFNetwork<T> fit(T[] x, double[] y, RBF<T>[] rbf) { return fit(x, y, rbf, false); }
// Regression test on the Longley dataset: fixed seed, standardized inputs,
// LOOCV RMSE pinned to a known value, plus a serialization round-trip.
@Test public void testLongley() throws Exception {
    System.out.println("longley");
    MathEx.setSeed(19650218); // to get repeatable results.
    double[][] x = MathEx.clone(Longley.x);
    MathEx.standardize(x);
    RegressionMetrics metrics = LOOCV.regression(x, Longley.y, (xi, yi) -> RBFNetwork.fit(xi, yi, RBF.fit(xi, 10, 5.0)));
    System.out.println(metrics);
    assertEquals(4.922188709128203, metrics.rmse, 1E-4);
    RBFNetwork<double[]> model = RBFNetwork.fit(Longley.x, Longley.y, RBF.fit(Longley.x, 10, 5.0));
    java.nio.file.Path temp = Write.object(model);
    Read.object(temp);
}
/**
 * Emits a DEBUG-level audit line for a failed logout, including the error message,
 * the remote address and all client IPs. No-op when DEBUG logging is disabled.
 * Note: errorMessage is rejected when null just above, so the emptyIfNull() wrapper
 * can never actually substitute — it is effectively dead code (left as-is here).
 */
@Override public void logoutFailure(HttpRequest request, String errorMessage) { checkRequest(request); requireNonNull(errorMessage, "error message can't be null"); if (!LOGGER.isDebugEnabled()) { return; } LOGGER.debug("logout failure [error|{}][IP|{}|{}]", emptyIfNull(errorMessage), request.getRemoteAddr(), getAllIps(request)); }
// The failure log line must contain the supplied error message in the expected format.
@Test public void logout_creates_DEBUG_log_with_error() { underTest.logoutFailure(mockRequest(), "bad token"); verifyLog("logout failure [error|bad token][IP||]", Set.of("login", "logout success")); }
/**
 * Returns the display name of this config's origin, falling back to a file
 * config origin's display name when no origin has been set.
 */
public String getOriginDisplayName() {
    if (getOrigin() == null) {
        // No origin recorded: report the default file-based origin.
        return new FileConfigOrigin().displayName();
    }
    return getOrigin().displayName();
}
// With no origin set, the display name falls back to the file origin ("cruise-config.xml").
@Test public void shouldReturnConfigRepoOriginDisplayNameWhenOriginIsNotSet() { PipelineConfig pipelineConfig = new PipelineConfig(); assertThat(pipelineConfig.getOriginDisplayName(), is("cruise-config.xml")); }
/** Looks up the meta-server address for the target environment from the in-memory map (null if absent). */
@Override public String getMetaServerAddress(Env targetEnv) { return addresses.get(targetEnv); }
// The provider serves the stored address and picks up changes after reload().
@Test public void testGetMetaServerAddress() { String address = databasePortalMetaServerProvider.getMetaServerAddress(Env.DEV); assertEquals("http://server.com:8080", address); String newMetaServerAddress = "http://another-server.com:8080"; metaServiceMap.put("dev", newMetaServerAddress); databasePortalMetaServerProvider.reload(); assertEquals(newMetaServerAddress, databasePortalMetaServerProvider.getMetaServerAddress(Env.DEV)); }
/**
 * Sends a request-reply message: the topic is rewritten with the producer's
 * namespace, then the call blocks up to {@code timeout} ms for the reply.
 */
@Override public Message request(final Message msg, final long timeout) throws RequestTimeoutException, MQClientException, RemotingException, MQBrokerException, InterruptedException { msg.setTopic(withNamespace(msg.getTopic())); return this.defaultMQProducerImpl.request(msg, timeout); }
// With no reply arriving, request() must time out with RequestTimeoutException.
@Test(expected = RequestTimeoutException.class) public void testRequestMessage_RequestTimeoutException() throws RemotingException, RequestTimeoutException, MQClientException, InterruptedException, MQBrokerException { when(mQClientAPIImpl.getTopicRouteInfoFromNameServer(anyString(), anyLong())).thenReturn(createTopicRoute()); Message result = producer.request(message, 3 * 1000L); }
/**
 * Deletes a file, or empties a directory. Note that for a directory only its
 * contents are removed (via cleanDirectory) — the directory itself is kept;
 * the accompanying test asserts the directory still exists afterwards.
 * A null argument is a no-op.
 *
 * @throws IOException if a file deletion fails
 */
public static void delete(File fileOrDir) throws IOException { if (fileOrDir == null) { return; } if (fileOrDir.isDirectory()) { cleanDirectory(fileOrDir); } else { if (fileOrDir.exists()) { boolean isDeleteOk = fileOrDir.delete(); if (!isDeleteOk) { throw new IOException("delete fail"); } } } }
// Files are removed; directories are only emptied (the directory itself survives).
@Test public void testDelete() throws IOException { File deleteFile = new File(tempDir.toFile(), "delete.txt"); deleteFile.createNewFile(); Assert.assertTrue(deleteFile.exists()); IoUtil.delete(deleteFile); Assert.assertFalse(deleteFile.exists()); File deleteDir = new File(tempDir.toFile(), "delete"); deleteDir.mkdirs(); Assert.assertTrue(deleteDir.exists()); File deleteDirFile = new File(deleteDir, "delete.txt"); deleteDirFile.createNewFile(); Assert.assertTrue(deleteDirFile.exists()); IoUtil.delete(deleteDir); Assert.assertTrue(deleteDir.exists()); Assert.assertFalse(deleteDirFile.exists()); }
/** Returns a Db bound to the globally configured default data source. */
public static Db use() { return use(DSFactory.get()); }
// Querying the user table with two range conditions returns the expected first row.
@Test public void findByTest() throws SQLException { List<Entity> find = Db.use().findBy("user", Condition.parse("age", "> 18"), Condition.parse("age", "< 100") ); for (Entity entity : find) { StaticLog.debug("{}", entity); } assertEquals("unitTestUser", find.get(0).get("name")); }
/**
 * FEEL list replace: returns a copy of {@code list} with the element at
 * {@code position} replaced by {@code newItem} (numbers are coerced first).
 * Positions are 1-based; negative positions count back from the end
 * (-1 is the last element). Null list/position or an out-of-range position
 * yields an error result.
 */
public FEELFnResult<List> invoke(@ParameterName("list") List list, @ParameterName("position") BigDecimal position, @ParameterName("newItem") Object newItem) {
    if (list == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", CANNOT_BE_NULL));
    }
    if (position == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", CANNOT_BE_NULL));
    }
    final int pos = position.intValue();
    final int size = list.size();
    if (pos == 0 || Math.abs(pos) > size) {
        String boundsProblem = String.format("%s outside valid boundaries (1-%s)", pos, size);
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", boundsProblem));
    }
    // Negative positions index from the end: size + pos == size - |pos|.
    final int index = pos > 0 ? pos - 1 : size + pos;
    final List result = new ArrayList(list);
    result.set(index, NumberEvalHelper.coerceNumber(newItem));
    return FEELFnResult.ofResult(result);
}
// Replacing position 2 (1-based) with a non-null item mirrors List.set(1, ...).
@Test void invokeReplaceByPositionWithNotNull() { List list = getList(); List expected = new ArrayList<>(list); expected.set(1, "test"); FunctionTestUtil.assertResult(listReplaceFunction.invoke(list, BigDecimal.valueOf(2), "test"), expected); }
/** Sets the time zone used when evaluating the recurring schedule; returns this builder for chaining. */
public RecurringJobBuilder withZoneId(ZoneId zoneId) { this.zoneId = zoneId; return this; }
// The built recurring job carries the configured zone id, an id and the cron expression.
@Test void testWithZoneId() { RecurringJob recurringJob = aRecurringJob() .withZoneId(ZoneId.of("Europe/Brussels")) .withCron(every5Seconds) .withDetails(() -> testService.doWork()) .build(jobDetailsGenerator); assertThat(recurringJob) .hasZoneId("Europe/Brussels") .hasId() .hasScheduleExpression(every5Seconds); }
/**
 * Parses raw FTP LIST/STAT reply lines into the children of {@code directory}.
 *
 * <p>Unparseable lines are skipped. In lenient mode, leading entries that merely
 * echo the directory itself (STAT quirks, see #2410/#2434) are dropped until the
 * first real entry is seen. "." and ".." are always skipped. Symbolic link
 * targets are resolved absolute or relative to {@code directory}; size, owner,
 * group, permissions and modification date are copied when present.
 *
 * @throws FTPInvalidListException when no entry could be parsed at all
 */
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
    final AttributedList<Path> children = new AttributedList<Path>();
    // At least one entry successfully parsed
    boolean success = false;
    // Call hook for those implementors which need to perform some action upon the list after it has been created
    // from the server stream, but before any clients see the list
    parser.preParse(replies);
    for(String line : replies) {
        final FTPFile f = parser.parseFTPEntry(line);
        if(null == f) {
            continue;
        }
        final String name = f.getName();
        if(!success) {
            if(lenient) {
                // Workaround for #2410. STAT only returns ls of directory itself
                // Workaround for #2434. STAT of symbolic link directory only lists the directory itself.
                if(directory.getName().equals(name)) {
                    log.warn(String.format("Skip %s matching parent directory name", f.getName()));
                    continue;
                }
                if(name.contains(String.valueOf(Path.DELIMITER))) {
                    if(!name.startsWith(directory.getAbsolute() + Path.DELIMITER)) {
                        // Workaround for #2434.
                        log.warn(String.format("Skip %s with delimiter in name", name));
                        continue;
                    }
                }
            }
        }
        success = true;
        if(name.equals(".") || name.equals("..")) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Skip %s", f.getName()));
            }
            continue;
        }
        final Path parsed = new Path(directory, PathNormalizer.name(name), f.getType() == FTPFile.DIRECTORY_TYPE ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file));
        switch(f.getType()) {
            case FTPFile.SYMBOLIC_LINK_TYPE:
                parsed.setType(EnumSet.of(Path.Type.file, Path.Type.symboliclink));
                // Symbolic link target may be an absolute or relative path
                final String target = f.getLink();
                if(StringUtils.isBlank(target)) {
                    // No target reported: demote to a plain file.
                    log.warn(String.format("Missing symbolic link target for %s", parsed));
                    final EnumSet<Path.Type> type = parsed.getType();
                    type.remove(Path.Type.symboliclink);
                }
                else if(StringUtils.startsWith(target, String.valueOf(Path.DELIMITER))) {
                    parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
                }
                else if(StringUtils.equals("..", target)) {
                    parsed.setSymlinkTarget(directory);
                }
                else if(StringUtils.equals(".", target)) {
                    parsed.setSymlinkTarget(parsed);
                }
                else {
                    parsed.setSymlinkTarget(new Path(directory, target, EnumSet.of(Path.Type.file)));
                }
                break;
        }
        if(parsed.isFile()) {
            parsed.attributes().setSize(f.getSize());
        }
        parsed.attributes().setOwner(f.getUser());
        parsed.attributes().setGroup(f.getGroup());
        // Translate the parser's user/group/world permission bits into a Permission.
        Permission.Action u = Permission.Action.none;
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.READ_PERMISSION)) {
            u = u.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.WRITE_PERMISSION)) {
            u = u.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            u = u.or(Permission.Action.execute);
        }
        Permission.Action g = Permission.Action.none;
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.READ_PERMISSION)) {
            g = g.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.WRITE_PERMISSION)) {
            g = g.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            g = g.or(Permission.Action.execute);
        }
        Permission.Action o = Permission.Action.none;
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.READ_PERMISSION)) {
            o = o.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.WRITE_PERMISSION)) {
            o = o.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            o = o.or(Permission.Action.execute);
        }
        final Permission permission = new Permission(u, g, o);
        if(f instanceof FTPExtendedFile) {
            permission.setSetuid(((FTPExtendedFile) f).isSetuid());
            permission.setSetgid(((FTPExtendedFile) f).isSetgid());
            permission.setSticky(((FTPExtendedFile) f).isSticky());
        }
        if(!Permission.EMPTY.equals(permission)) {
            parsed.attributes().setPermission(permission);
        }
        final Calendar timestamp = f.getTimestamp();
        if(timestamp != null) {
            parsed.attributes().setModificationDate(timestamp.getTimeInMillis());
        }
        children.add(parsed);
    }
    if(!success) {
        throw new FTPInvalidListException(children);
    }
    return children;
}
// A STAT reply that only echoes the directory itself (lenient mode) yields no
// entries and must raise FTPInvalidListException.
@Test(expected = FTPInvalidListException.class) public void testListNoRead() throws Exception { final Path directory = new Path("/sandbox/noread", EnumSet.of(Path.Type.directory)); final String[] lines = new String[]{ "213-Status follows:", "d-w--w---- 2 1003 1003 4096 Nov 06 2013 noread", "213 End of status"}; final AttributedList<Path> list = new FTPListResponseReader(new FTPParserSelector().getParser("UNIX"), true) .read(directory, Arrays.asList(lines)); assertEquals(0, list.size()); }
/** Returns a view transform that materializes KVs as a multimap; the {@code false} flag's meaning is defined by AsMultimap (not visible here) — confirm there. */
public static <K, V> AsMultimap<K, V> asMultimap() { return new AsMultimap<>(false); }
// A multimap side input must work even when the key coder is non-deterministic:
// each main-input word looks up all values under its first letter.
@Test @Category({ValidatesRunner.class}) public void testMultimapSideInputWithNonDeterministicKeyCoder() { final PCollectionView<Map<String, Iterable<Integer>>> view = pipeline .apply( "CreateSideInput", Create.of(KV.of("a", 1), KV.of("a", 1), KV.of("a", 2), KV.of("b", 3)) .withCoder(KvCoder.of(new NonDeterministicStringCoder(), VarIntCoder.of()))) .apply(View.asMultimap()); PCollection<KV<String, Integer>> output = pipeline .apply("CreateMainInput", Create.of("apple", "banana", "blackberry")) .apply( "OutputSideInputs", ParDo.of( new DoFn<String, KV<String, Integer>>() { @ProcessElement public void processElement(ProcessContext c) { for (Integer v : c.sideInput(view).get(c.element().substring(0, 1))) { c.output(KV.of(c.element(), v)); } } }) .withSideInputs(view)); PAssert.that(output) .containsInAnyOrder( KV.of("apple", 1), KV.of("apple", 1), KV.of("apple", 2), KV.of("banana", 3), KV.of("blackberry", 3)); pipeline.run(); }
/**
 * Closes the stream and wraps the produced state handle with the key-group offsets.
 * Returns null when the underlying stream yielded no handle (i.e. no state was written).
 */
@Override KeyGroupsStateHandle closeAndGetHandle() throws IOException { StreamStateHandle streamStateHandle = super.closeAndGetHandleAfterLeasesReleased(); return streamStateHandle != null ? new KeyGroupsStateHandle(keyGroupRangeOffsets, streamStateHandle) : null; }
// Closing a keyed stream that wrote no state closes the delegate and returns a null handle.
@Test void testEmptyKeyedStream() throws Exception { final KeyGroupRange keyRange = new KeyGroupRange(0, 2); KeyedStateCheckpointOutputStream stream = createStream(keyRange); TestMemoryCheckpointOutputStream innerStream = (TestMemoryCheckpointOutputStream) stream.getDelegate(); KeyGroupsStateHandle emptyHandle = stream.closeAndGetHandle(); assertThat(innerStream.isClosed()).isTrue(); assertThat(emptyHandle).isNull(); }
/**
 * Deletes a member group after validating that it exists and has no users attached.
 *
 * @param id the id of the group to delete
 */
@Override
public void deleteGroup(Long id) {
    // Validate the group exists
    validateGroupExists(id);
    // Validate the group has no users under it
    validateGroupHasUser(id);
    // Delete
    memberGroupMapper.deleteById(id);
}
// Deleting an unknown group id must raise the GROUP_NOT_EXISTS service exception.
@Test
public void testDeleteGroup_notExists() {
    // Prepare parameters
    Long id = randomLongId();
    // Invoke and assert the expected exception
    assertServiceException(() -> groupService.deleteGroup(id), GROUP_NOT_EXISTS);
}
/**
 * Builds and executes a CREATE MAPPING statement for the given table/columns/data
 * connection, closing the returned SQL result immediately (DDL yields no rows).
 */
public void createMapping( String mappingName, String tableName, List<SqlColumnMetadata> mappingColumns, String dataConnectionRef, String idColumn ) { sqlService.execute( createMappingQuery(mappingName, tableName, mappingColumns, dataConnectionRef, idColumn) ).close(); }
// The generated DDL must quote mapping/table/column/connection identifiers.
@Test @SuppressWarnings("OperatorWrap") public void when_createMappingWithTwoColumns_then_quoteParameters() { mappingHelper.createMapping( "myMapping", "myTable", asList( new SqlColumnMetadata("id", SqlColumnType.INTEGER, true), new SqlColumnMetadata("name", SqlColumnType.VARCHAR, true) ), "dataConnectionRef", "idColumn" ); verify(sqlService).execute( "CREATE MAPPING \"myMapping\" " + "EXTERNAL NAME \"myTable\" " + "( \"id\" INTEGER, \"name\" VARCHAR ) " + "DATA CONNECTION \"dataConnectionRef\" " + "OPTIONS (" + " 'idColumn' = 'idColumn' " + ")" ); }
/**
 * Formats a non-negative millisecond duration as
 * "D day(s), H hour(s), M minute(s), and S second(s)".
 *
 * @param millis duration in milliseconds; must be {@code >= 0}
 * @return the human-readable clock-time string
 * @throws IllegalArgumentException if {@code millis} is negative
 */
public static String convertMsToClockTime(long millis) {
    Preconditions.checkArgument(millis >= 0,
        "Negative values %s are not supported to convert to clock time.", millis);
    // Peel off each unit with successive remainders.
    long remainder = millis;
    long days = remainder / Constants.DAY_MS;
    remainder %= Constants.DAY_MS;
    long hours = remainder / Constants.HOUR_MS;
    remainder %= Constants.HOUR_MS;
    long mins = remainder / Constants.MINUTE_MS;
    remainder %= Constants.MINUTE_MS;
    long secs = remainder / Constants.SECOND_MS;
    return String.format("%d day(s), %d hour(s), %d minute(s), and %d second(s)",
        days, hours, mins, secs);
}
// Pins the clock-time formatting across sub-second, minute, hour and multi-day inputs.
@Test public void convertMsToClockTime() { assertEquals("0 day(s), 0 hour(s), 0 minute(s), and 0 second(s)", CommonUtils.convertMsToClockTime(10)); assertEquals("0 day(s), 0 hour(s), 0 minute(s), and 1 second(s)", CommonUtils.convertMsToClockTime(TimeUnit.SECONDS.toMillis(1))); assertEquals("0 day(s), 0 hour(s), 1 minute(s), and 0 second(s)", CommonUtils.convertMsToClockTime(TimeUnit.MINUTES.toMillis(1))); assertEquals("0 day(s), 0 hour(s), 1 minute(s), and 30 second(s)", CommonUtils.convertMsToClockTime(TimeUnit.MINUTES.toMillis(1) + TimeUnit.SECONDS.toMillis(30))); assertEquals("0 day(s), 1 hour(s), 0 minute(s), and 0 second(s)", CommonUtils.convertMsToClockTime(TimeUnit.HOURS.toMillis(1))); long time = TimeUnit.DAYS.toMillis(1) + TimeUnit.HOURS.toMillis(4) + TimeUnit.MINUTES.toMillis(10) + TimeUnit.SECONDS.toMillis(45); String out = CommonUtils.convertMsToClockTime(time); assertEquals("1 day(s), 4 hour(s), 10 minute(s), and 45 second(s)", out); }
/**
 * Queries a broker's consume queue starting at {@code index} for up to {@code count}
 * entries, synchronously over the (optionally VIP) broker channel.
 *
 * @throws MQClientException when the broker responds with a non-success code
 */
public QueryConsumeQueueResponseBody queryConsumeQueue(final String brokerAddr, final String topic, final int queueId, final long index, final int count, final String consumerGroup, final long timeoutMillis) throws InterruptedException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException, MQClientException { QueryConsumeQueueRequestHeader requestHeader = new QueryConsumeQueueRequestHeader(); requestHeader.setTopic(topic); requestHeader.setQueueId(queueId); requestHeader.setIndex(index); requestHeader.setCount(count); requestHeader.setConsumerGroup(consumerGroup); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_CONSUME_QUEUE, requestHeader); RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), brokerAddr), request, timeoutMillis); assert response != null; if (ResponseCode.SUCCESS == response.getCode()) { return QueryConsumeQueueResponseBody.decode(response.getBody(), QueryConsumeQueueResponseBody.class); } throw new MQClientException(response.getCode(), response.getRemark()); }
// A successful broker response body decodes into the expected single queue-data entry.
@Test public void assertQueryConsumeQueue() throws RemotingException, InterruptedException, MQClientException { mockInvokeSync(); QueryConsumeQueueResponseBody responseBody = new QueryConsumeQueueResponseBody(); responseBody.setQueueData(Collections.singletonList(new ConsumeQueueData())); setResponseBody(responseBody); QueryConsumeQueueResponseBody actual = mqClientAPI.queryConsumeQueue(defaultBrokerAddr, defaultTopic, 1, 1, 1, group, defaultTimeout); assertNotNull(actual); assertEquals(1, actual.getQueueData().size()); }
/**
 * Exports the user's media library, dispatching on the shape of
 * {@code exportInformation}: absent → begin album export (pre-listing photos
 * contained in albums so they are not exported twice); a photos/media container →
 * export only that container; otherwise continue album or media export based on
 * the pagination token and id-only container state (see block comment below).
 */
@Override
public ExportResult<MediaContainerResource> export(
    UUID jobId, TokensAndUrlAuthData authData, Optional<ExportInformation> exportInformation)
    throws UploadErrorException, FailedToListAlbumsException, InvalidTokenException,
    PermissionDeniedException, IOException, FailedToListMediaItemsException {
  if (!exportInformation.isPresent()) {
    // Make list of photos contained in albums so they are not exported twice later on
    populateContainedMediaList(jobId, authData);
    return exportAlbums(authData, Optional.empty(), jobId);
  } else if (exportInformation.get().getContainerResource() instanceof PhotosContainerResource) {
    // if ExportInformation is a photos container, this is a request to only export the contents
    // in that container instead of the whole user library
    return exportPhotosContainer(
        (PhotosContainerResource) exportInformation.get().getContainerResource(), authData, jobId);
  } else if (exportInformation.get().getContainerResource() instanceof MediaContainerResource) {
    // if ExportInformation is a media container, this is a request to only export the contents
    // in that container instead of the whole user library (this is to support backwards
    // compatibility with the GooglePhotosExporter)
    return exportMediaContainer(
        (MediaContainerResource) exportInformation.get().getContainerResource(), authData, jobId);
  }
  /*
   * Use the export information to determine whether this export call should export albums or
   * photos.
   *
   * Albums are exported if and only if the export information doesn't hold an album
   * already, and the pagination token begins with the album prefix. There must be a pagination
   * token for album export since this isn't the first export operation performed (if it was,
   * there wouldn't be any export information at all).
   *
   * Otherwise, photos are exported. If photos are exported, there may or may not be pagination
   * information, and there may or may not be album information. If there is no container
   * resource, that means that we're exporting albumless photos and a pagination token must be
   * present. The beginning step of exporting albumless photos is indicated by a pagination token
   * containing only MEDIA_TOKEN_PREFIX with no token attached, in order to differentiate this
   * case for the first step of export (no export information at all).
   */
  StringPaginationToken paginationToken =
      (StringPaginationToken) exportInformation.get().getPaginationData();
  IdOnlyContainerResource idOnlyContainerResource =
      (IdOnlyContainerResource) exportInformation.get().getContainerResource();
  boolean containerResourcePresent = idOnlyContainerResource != null;
  boolean paginationDataPresent = paginationToken != null;
  if (!containerResourcePresent
      && paginationDataPresent
      && paginationToken.getToken().startsWith(ALBUM_TOKEN_PREFIX)) {
    // were still listing out all of the albums since we have pagination data
    return exportAlbums(authData, Optional.of(paginationToken), jobId);
  } else {
    return exportMedia(
        authData,
        Optional.ofNullable(idOnlyContainerResource),
        Optional.ofNullable(paginationToken),
        jobId);
  }
}
// If fetching any album fails, the export must propagate the IOException
// rather than silently skipping the failed albums.
@Test
public void testExportAlbums_failureInterruptsTransfer() throws Exception {
    String albumIdToFail1 = "albumid3";
    String albumIdToFail2 = "albumid5";
    ImmutableList<PhotoModel> photos = ImmutableList.of();
    ImmutableList<PhotoAlbum> albums = ImmutableList.of(
        setUpSinglePhotoAlbum("albumid1", "album 1`", ""),
        setUpSinglePhotoAlbum("albumid2", "album 2", ""),
        setUpSinglePhotoAlbum(albumIdToFail1, "album 3", ""),
        setUpSinglePhotoAlbum("albumid4", "album 4", ""),
        setUpSinglePhotoAlbum(albumIdToFail2, "album 5", ""),
        setUpSinglePhotoAlbum("albumid6", "album 6", "")
    );
    PhotosContainerResource container = new PhotosContainerResource(albums, photos);
    ExportInformation exportInfo = new ExportInformation(null, container);
    MediaMetadata photoMediaMetadata = new MediaMetadata();
    photoMediaMetadata.setPhoto(new Photo());
    // For the album_id_to_fail albums, throw an exception.
    when(photosInterface.getAlbum(albumIdToFail1)).thenThrow(IOException.class);
    when(photosInterface.getAlbum(albumIdToFail2)).thenThrow(IOException.class);
    // For all other albums, return a GoogleMediaAlbum.
    for (PhotoAlbum photoAlbum: albums) {
        if (photoAlbum.getId().equals(albumIdToFail1) || photoAlbum.getId().equals(albumIdToFail2)) {
            continue;
        }
        when(photosInterface.getAlbum(photoAlbum.getId())).thenReturn(
            setUpGoogleAlbum(Optional.of(photoAlbum.getId()), Optional.of(photoAlbum.getName()))
        );
    }
    assertThrows(IOException.class, () -> googleMediaExporter.export( uuid, authData, Optional.of(exportInfo) ));
}
/**
 * Returns the total remaining capacity, summed over all sub-queues.
 * NOTE(review): accumulated in an int — could overflow if several sub-queues
 * report capacities near Integer.MAX_VALUE; confirm capacities are bounded upstream.
 */
@Override public int remainingCapacity() { int sum = 0; for (BlockingQueue<E> q : this.queues) { sum += q.remainingCapacity(); } return sum; }
// The aggregate remaining capacity equals the configured total, regardless of
// how many priority sub-queues (and what weights) the capacity is split across.
@Test public void testTotalCapacityOfSubQueues() { Configuration conf = new Configuration(); FairCallQueue<Schedulable> fairCallQueue; fairCallQueue = new FairCallQueue<Schedulable>(1, 1000, "ns", conf); assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1000); fairCallQueue = new FairCallQueue<Schedulable>(4, 1000, "ns", conf); assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1000); fairCallQueue = new FairCallQueue<Schedulable>(7, 1000, "ns", conf); assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1000); fairCallQueue = new FairCallQueue<Schedulable>(1, 1025, "ns", conf); assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1025); fairCallQueue = new FairCallQueue<Schedulable>(4, 1025, "ns", conf); assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1025); fairCallQueue = new FairCallQueue<Schedulable>(7, 1025, "ns", conf); assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1025); fairCallQueue = new FairCallQueue<Schedulable>(7, 1025, "ns", new int[]{7, 6, 5, 4, 3, 2, 1}, false, conf); assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1025); }
/**
 * Runs the search across all providers for the given type filter. When the request
 * asks for parallel execution, provider tasks are submitted to the common
 * ForkJoinPool and joined; otherwise they run sequentially on the caller thread.
 */
@Override public <T> List<SearchResult<T>> search(SearchRequest request, Class<T> typeFilter) { SearchSession<T> session = new SearchSession<>(request, Collections.singleton(typeFilter)); if (request.inParallel()) { ForkJoinPool commonPool = ForkJoinPool.commonPool(); getProviderTasks(request, session).stream().map(commonPool::submit).forEach(ForkJoinTask::join); } else { getProviderTasks(request, session).forEach(Runnable::run); } return session.getResults(); }
// Searching for a prefix, suffix or infix of a node label must all match the node.
@Test public void testFuzzyLabel() { GraphGenerator generator = GraphGenerator.build().generateTinyGraph(); Node node = generator.getGraph().getNode(GraphGenerator.FIRST_NODE); node.setLabel("foobar"); List<Element> r1 = toList(controller.search(buildRequest("foo", generator), Element.class)); List<Element> r2 = toList(controller.search(buildRequest("bar", generator), Element.class)); List<Element> r3 = toList(controller.search(buildRequest("oo", generator), Element.class)); Assert.assertEquals(List.of(node), r1); Assert.assertEquals(List.of(node), r2); Assert.assertEquals(List.of(node), r3); }
/** Configures bounded-memory RocksDB settings, supplying fresh Options plus the LRU cache and write-buffer-manager factories to the internal overload. */
@Override public void configure(final Map<String, ?> config) { configure( config, new Options(), org.rocksdb.LRUCache::new, org.rocksdb.WriteBufferManager::new ); }
// A second configure() — even from a different instance — must fail, since the
// shared RocksDB memory setup may only be initialized once per process.
@Test
public void shouldFailIfConfiguredTwiceFromDifferentInstances() {
    // Given:
    rocksDBConfig.configure(CONFIG_PROPS);
    // Expect:
    // When:
    final Exception e = assertThrows( IllegalStateException.class, () -> secondRocksDBConfig.configure(CONFIG_PROPS) );
    // Then:
    assertThat(e.getMessage(), containsString("KsqlBoundedMemoryRocksDBConfigSetter has already been configured. Cannot re-configure."));
}
/**
 * Returns the binary-protocol value codec registered for the given column type.
 *
 * @throws IllegalArgumentException if no codec is registered for the type
 */
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) { Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType); return BINARY_PROTOCOL_VALUES.get(binaryColumnType); }
// The SET column type maps to the length-encoded string codec.
@Test void assertGetBinaryProtocolValueWithMySQLTypeSet() { assertThat(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.SET), instanceOf(MySQLStringLenencBinaryProtocolValue.class)); }
/** A statement qualifies for primary-data-source routing iff the statement/hints force a primary route. */
@Override public boolean isQualified(final SQLStatementContext sqlStatementContext, final ReadwriteSplittingDataSourceGroupRule rule, final HintValueContext hintValueContext) { return isPrimaryRoute(sqlStatementContext, hintValueContext); }
// The write-route-only hint flips qualification for primary routing of a SELECT.
@Test void assertHintRouteWriteOnly() { when(sqlStatementContext.getSqlStatement()).thenReturn(mock(SelectStatement.class)); when(hintValueContext.isWriteRouteOnly()).thenReturn(false); assertFalse(new QualifiedReadwriteSplittingPrimaryDataSourceRouter().isQualified(sqlStatementContext, null, hintValueContext)); when(hintValueContext.isWriteRouteOnly()).thenReturn(true); assertTrue(new QualifiedReadwriteSplittingPrimaryDataSourceRouter().isQualified(sqlStatementContext, null, hintValueContext)); }
/** Strips test-infrastructure frames from the throwable's stack trace; the identity set presumably guards against revisiting linked throwables — confirm in StackTraceCleaner.clean. */
static void cleanStackTrace(Throwable throwable) { new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet()); }
// Cleaning an already-cleaned trace must not remove anything further (idempotence).
@Test public void cleaningTraceIsIdempotent() { Throwable throwable = createThrowableWithStackTrace("com.example.Foo", "org.junit.FilterMe"); StackTraceCleaner.cleanStackTrace(throwable); StackTraceCleaner.cleanStackTrace(throwable); assertThat(throwable.getStackTrace()).isEqualTo(createStackTrace("com.example.Foo")); }
@Override public String getFieldDefinition( ValueMetaInterface v, String tk, String pk, boolean useAutoinc, boolean addFieldName, boolean addCr ) { String retval = ""; String fieldname = v.getName(); int length = v.getLength(); int precision = v.getPrecision(); if ( addFieldName ) { if ( Const.indexOfString( fieldname, getReservedWords() ) >= 0 ) { retval += getStartQuote() + fieldname + getEndQuote(); } else { retval += fieldname + " "; } } int type = v.getType(); switch ( type ) { case ValueMetaInterface.TYPE_TIMESTAMP: case ValueMetaInterface.TYPE_DATE: retval += "TIMESTAMP"; break; case ValueMetaInterface.TYPE_BOOLEAN: retval += "CHAR(1)"; break; case ValueMetaInterface.TYPE_NUMBER: case ValueMetaInterface.TYPE_INTEGER: case ValueMetaInterface.TYPE_BIGNUMBER: if ( fieldname.equalsIgnoreCase( tk ) || // Technical key fieldname.equalsIgnoreCase( pk ) // Primary key ) { retval += "BIGINT NOT NULL PRIMARY KEY"; } else { if ( length > 0 ) { if ( precision > 0 || length > 18 ) { retval += "DECIMAL(" + length; if ( precision > 0 ) { retval += ", " + precision; } retval += ")"; } else { if ( length > 9 ) { retval += "INT64"; } else { if ( length < 5 ) { retval += "SMALLINT"; } else { retval += "INTEGER"; } } } } else { retval += "DOUBLE"; } } break; case ValueMetaInterface.TYPE_STRING: if ( length < 32720 ) { retval += "VARCHAR"; if ( length > 0 ) { retval += "(" + length + ")"; } else { retval += "(8000)"; // Maybe use some default DB String length? } } else { retval += "BLOB SUB_TYPE TEXT"; } break; default: retval += " UNKNOWN"; break; } if ( addCr ) { retval += Const.CR; } return retval; }
// Covers getFieldDefinition() for every value-meta type: date/timestamp, boolean,
// technical/primary keys, the DECIMAL vs. integer width thresholds, the string
// length cut-over to BLOB, and the UNKNOWN fallback with and without a trailing CR.
@Test
public void testGetFieldDefinition() {
  // Dates and timestamps both map to TIMESTAMP; the name is prefixed only when addFieldName is true.
  assertEquals( "FOO TIMESTAMP",
      nativeMeta.getFieldDefinition( new ValueMetaDate( "FOO" ), "", "", false, true, false ) );
  assertEquals( "TIMESTAMP",
      nativeMeta.getFieldDefinition( new ValueMetaTimestamp( "FOO" ), "", "", false, false, false ) );
  // Boolean is emulated as CHAR(1).
  assertEquals( "CHAR(1)",
      nativeMeta.getFieldDefinition( new ValueMetaBoolean( "FOO" ), "", "", false, false, false ) );
  // Technical key (tk) and primary key (pk) fields become BIGINT primary keys.
  assertEquals( "BIGINT NOT NULL PRIMARY KEY",
      nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 10, 0 ), "FOO", "", false, false, false ) );
  assertEquals( "BIGINT NOT NULL PRIMARY KEY",
      nativeMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 10, 0 ), "", "FOO", false, false, false ) );
  // Decimal Types
  assertEquals( "DECIMAL(10, 3)",
      nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 10, 3 ), "", "", false, false, false ) );
  assertEquals( "DECIMAL(19)",
      nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 19, 0 ), "", "", false, false, false ) );
  // Integers: length selects SMALLINT (<5), INTEGER (5-9) or INT64 (>9).
  assertEquals( "INT64",
      nativeMeta.getFieldDefinition( new ValueMetaInteger( "FOO", 10, 0 ), "", "", false, false, false ) );
  assertEquals( "SMALLINT",
      nativeMeta.getFieldDefinition( new ValueMetaInteger( "FOO", 4, 0 ), "", "", false, false, false ) );
  assertEquals( "INTEGER",
      nativeMeta.getFieldDefinition( new ValueMetaInteger( "FOO", 5, 0 ), "", "", false, false, false ) );
  // Negative length means "no length": numbers fall back to DOUBLE.
  assertEquals( "DOUBLE",
      nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", -7, -3 ), "", "", false, false, false ) );
  // Strings: VARCHAR up to 32719 chars, BLOB beyond, VARCHAR(8000) when no length is set.
  assertEquals( "VARCHAR(32719)",
      nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", 32719, 0 ), "", "", false, false, false ) );
  assertEquals( "BLOB SUB_TYPE TEXT",
      nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", 32720, 0 ), "", "", false, false, false ) );
  assertEquals( "VARCHAR(8000)",
      nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", -122, 0 ), "", "", false, false, false ) );
  // Unsupported types yield " UNKNOWN", optionally followed by the platform line separator.
  assertEquals( " UNKNOWN",
      nativeMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, false ) );
  assertEquals( " UNKNOWN" + System.getProperty( "line.separator" ),
      nativeMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, true ) );
}
/**
 * Tears down every mapped file in this queue, resets the flush position, and then
 * removes the (now empty) storage directories for both the writable and the
 * read-only commit-log paths.
 */
@Override
public void destroy() {
    // Release each mapped file, allowing up to 3 seconds for outstanding references.
    for (MappedFile mappedFile : this.mappedFiles) {
        mappedFile.destroy(3 * 1000);
    }
    this.mappedFiles.clear();
    this.setFlushedWhere(0);

    // Union of all configured store paths: writable plus read-only.
    Set<String> allStorePaths = getPaths();
    allStorePaths.addAll(getReadonlyPaths());

    // Delete the directories themselves now that their files are gone.
    for (String storePath : allStorePaths) {
        File dir = new File(storePath);
        if (dir.isDirectory()) {
            dir.delete();
        }
    }
}
// Verifies that MultiPathMappedFileQueue round-robins newly-created mapped files
// across all configured commit-log paths: file i must live under storePaths[i % pathCount].
@Test
public void testGetLastMappedFile() {
    final byte[] fixedMsg = new byte[1024];

    MessageStoreConfig config = new MessageStoreConfig();
    // Three store paths joined with the multi-path splitter.
    config.setStorePathCommitLog("target/unit_test_store/a/" + MixAll.MULTI_PATH_SPLITTER
            + "target/unit_test_store/b/" + MixAll.MULTI_PATH_SPLITTER
            + "target/unit_test_store/c/");
    MappedFileQueue mappedFileQueue = new MultiPathMappedFileQueue(config, 1024, null, null);
    String[] storePaths = config.getStorePathCommitLog().trim().split(MixAll.MULTI_PATH_SPLITTER);
    for (int i = 0; i < 1024; i++) {
        // Each 1 KiB message fills a whole 1024-byte mapped file, so every iteration
        // forces allocation of a new file.
        MappedFile mappedFile = mappedFileQueue.getLastMappedFile(fixedMsg.length * i);
        assertThat(mappedFile).isNotNull();
        assertThat(mappedFile.appendMessage(fixedMsg)).isTrue();
        // New files must rotate through the configured paths in order.
        int idx = i % storePaths.length;
        assertThat(mappedFile.getFileName().startsWith(storePaths[idx])).isTrue();
    }
    mappedFileQueue.shutdown(1000);
    mappedFileQueue.destroy();
}
/**
 * Creates a sentinel client backed by a JRT supervisor; empty buffers are dropped
 * to keep the transport lean.
 */
@Inject
public ConfigSentinelClient() {
    Transport transport = new Transport("sentinel-client");
    supervisor = new Supervisor(transport).setDropEmptyBuffers(true);
}
// End-to-end exercise of the sentinel client against a dummy sentinel: initial
// service statuses are read via updateServiceStatuses(), then a reconfiguration
// is simulated and both a targeted ping() and a full refresh are verified.
@Test
public void testConfigSentinelClient() {
    ConfigSentinelDummy configsentinel = new ConfigSentinelDummy();
    List<VespaService> services = new ArrayList<>();
    VespaService docproc = new VespaService("docprocservice", "docproc/cluster.x.indexing/0");
    VespaService searchnode4 = new VespaService("searchnode4", "search/cluster.x/g0/c1/r1");
    VespaService container = new VespaService("container", "container/default.0");
    services.add(searchnode4);
    services.add(container);
    services.add(docproc);
    try (MockConfigSentinelClient client = new MockConfigSentinelClient(configsentinel)) {
        client.updateServiceStatuses(services);

        // Initial state: container and searchnode4 are RUNNING, docproc FINISHED.
        assertEquals(6520, container.getPid());
        assertEquals("RUNNING", container.getState());
        assertTrue(container.isAlive());
        assertEquals(6534, searchnode4.getPid());
        assertEquals("RUNNING", searchnode4.getState());
        assertTrue(searchnode4.isAlive());
        assertEquals(-1, docproc.getPid());
        assertEquals("FINISHED", docproc.getState());
        assertFalse(docproc.isAlive());

        configsentinel.reConfigure();

        // ping() refreshes only the pinged service; docproc now appears RUNNING.
        client.ping(docproc);
        assertEquals(100, docproc.getPid());
        assertEquals("RUNNING", docproc.getState());
        assertTrue(docproc.isAlive());
        // container has yet not been checked
        assertTrue(container.isAlive());

        client.updateServiceStatuses(services);
        assertEquals(100, docproc.getPid());
        assertEquals("RUNNING", docproc.getState());
        assertTrue(docproc.isAlive());

        // container is no longer running on this node - so should be false
        assertFalse(container.isAlive());
    }
}
/**
 * Resolves the partition count for each of the given topics.
 *
 * Delegates topic metadata lookup to {@code getTopicPartitionInfo} (which records
 * topics it could not describe in {@code tempUnknownTopics}) and reduces each
 * topic's partition-info list to its size.
 *
 * @param topics            topics to look up
 * @param tempUnknownTopics out-parameter collecting topics whose metadata was unavailable
 * @return map from topic name to its number of partitions
 */
protected Map<String, Integer> getNumPartitions(final Set<String> topics,
                                                final Set<String> tempUnknownTopics) {
    log.debug("Trying to check if topics {} have been created with expected number of partitions.", topics);

    final Map<String, List<TopicPartitionInfo>> partitionInfoPerTopic =
        getTopicPartitionInfo(topics, tempUnknownTopics);

    return partitionInfoPerTopic.entrySet().stream()
        .collect(Collectors.toMap(
            entry -> entry.getKey(),
            entry -> entry.getValue().size()));
}
// A topic registered in the mock admin client with a single partition must be
// reported with a partition count of 1 (and the temp-unknown set stays empty).
@Test
public void shouldReturnCorrectPartitionCounts() {
    mockAdminClient.addTopic(
        false,
        topic1,
        Collections.singletonList(new TopicPartitionInfo(0, broker1, singleReplica, Collections.emptyList())),
        null);
    assertEquals(Collections.singletonMap(topic1, 1),
        internalTopicManager.getNumPartitions(Collections.singleton(topic1), Collections.emptySet()));
}
/**
 * Returns the directory used for global state ({@code <stateDir>/global}).
 *
 * For persistent stores the directory is created on demand; a
 * {@link ProcessorStateException} is thrown if creation fails or if a plain file
 * already occupies the path. With persistent stores disabled, nothing is created
 * and the (possibly non-existent) path is simply returned.
 */
File globalStateDir() {
    final File dir = new File(stateDir, "global");
    if (hasPersistentStores) {
        if (!dir.exists()) {
            // Directory missing: attempt to create it now.
            if (!dir.mkdir()) {
                throw new ProcessorStateException(
                    String.format("global state directory [%s] doesn't exist and couldn't be created", dir.getPath()));
            }
        } else if (!dir.isDirectory()) {
            // Path exists but is a regular file: cannot be used as a state directory.
            throw new ProcessorStateException(
                String.format("global state directory [%s] can't be created as there is an existing file with the same name", dir.getPath()));
        }
    }
    return dir;
}
// With persistent stores disabled, globalStateDir() must return the path without
// creating the directory on disk.
@Test
public void shouldNotCreateGlobalStateDirectory() throws IOException {
    initializeStateDirectory(false, false);
    final File globalStateDir = directory.globalStateDir();
    assertFalse(globalStateDir.exists());
}
/**
 * Two tuples are equal iff they are of the exact same class and all three
 * components are equal (null-safe). The strict {@code getClass()} comparison
 * keeps equals symmetric in the presence of subclasses.
 */
@Override
public boolean equals(Object o) {
    // Identity short-circuit.
    if (this == o) {
        return true;
    }
    // Strict class match: null or a different (sub)class is never equal.
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    final Tuple3<?, ?, ?> other = (Tuple3<?, ?, ?>) o;
    return Objects.equals(f0, other.f0)
            && Objects.equals(f1, other.f1)
            && Objects.equals(f2, other.f2);
}
// Tuples with identical component values must be equal and, per the equals/hashCode
// contract, must also share the same hash code.
@Test
public void testEquals() {
    assertEquals(Tuple3.of(1, "a", 1.1), Tuple3.of(1, "a", 1.1));
    assertEquals(Tuple3.of(1, "a", 1.1).hashCode(), Tuple3.of(1, "a", 1.1).hashCode());
}
/**
 * Multiplies all given integers together.
 *
 * @param nums factors to multiply; with no arguments the multiplicative
 *             identity (1) is returned
 * @return the product of all factors
 */
public int accumulateMul(int... nums) {
    LOGGER.info("Source module {}", VERSION);
    // Renamed from "sum": the accumulator holds a product, not a sum.
    var product = 1;
    for (final var factor : nums) {
        product *= factor;
    }
    return product;
}
// Any zero factor collapses the whole product to zero.
@Test
void testAccumulateMul() {
    assertEquals(0, source.accumulateMul(-1, 0, 1));
}
/**
 * Materializes the runtime configuration for this agent: channels, source runners
 * and sink runners are instantiated from the agent's FlumeConfiguration. Channels
 * that end up with no connected components are dropped (and evicted from the
 * channel reuse cache). Returns an empty configuration when no agent
 * configuration exists for this host.
 */
public MaterializedConfiguration getConfiguration() {
    MaterializedConfiguration conf = new SimpleMaterializedConfiguration();
    FlumeConfiguration fconfig = getFlumeConfiguration();
    AgentConfiguration agentConf = fconfig.getConfigurationFor(getAgentName());
    if (agentConf != null) {
        Map<String, ChannelComponent> channelComponentMap = Maps.newHashMap();
        Map<String, SourceRunner> sourceRunnerMap = Maps.newHashMap();
        Map<String, SinkRunner> sinkRunnerMap = Maps.newHashMap();
        try {
            // Channels must be loaded first so sources and sinks can attach to them.
            loadChannels(agentConf, channelComponentMap);
            loadSources(agentConf, channelComponentMap, sourceRunnerMap);
            loadSinks(agentConf, channelComponentMap, sinkRunnerMap);
            // Copy the key set: the loop below mutates channelComponentMap.
            Set<String> channelNames = new HashSet<String>(channelComponentMap.keySet());
            for (String channelName : channelNames) {
                ChannelComponent channelComponent = channelComponentMap.get(channelName);
                if (channelComponent.components.isEmpty()) {
                    // Orphan channel: nothing reads or writes it, so drop it and purge it
                    // from the reuse cache to avoid keeping the instance alive.
                    LOGGER.warn("Channel {} has no components connected" + " and has been removed.", channelName);
                    channelComponentMap.remove(channelName);
                    Map<String, Channel> nameChannelMap = channelCache.get(channelComponent.channel.getClass());
                    if (nameChannelMap != null) {
                        nameChannelMap.remove(channelName);
                    }
                } else {
                    LOGGER.info("Channel {} connected to {}", channelName, channelComponent.components.toString());
                    conf.addChannel(channelName, channelComponent.channel);
                }
            }
            for (Map.Entry<String, SourceRunner> entry : sourceRunnerMap.entrySet()) {
                conf.addSourceRunner(entry.getKey(), entry.getValue());
            }
            for (Map.Entry<String, SinkRunner> entry : sinkRunnerMap.entrySet()) {
                conf.addSinkRunner(entry.getKey(), entry.getValue());
            }
        } catch (InstantiationException ex) {
            // NOTE(review): a failed component instantiation is logged and results in a
            // partial/empty configuration rather than propagating the error.
            LOGGER.error("Failed to instantiate component", ex);
        } finally {
            channelComponentMap.clear();
            sourceRunnerMap.clear();
            sinkRunnerMap.clear();
        }
    } else {
        LOGGER.warn("No configuration found for this host:{}", getAgentName());
    }
    return conf;
}
// NOTE(review): "Dispoable" is a typo for "Disposable"; the method name is kept
// as-is since it is the test's public identifier.
// Verifies that channels of a disposable type are re-instantiated on every call to
// getConfiguration() instead of being reused from the provider's channel cache.
@Test
public void testDispoableChannel() throws Exception {
    String agentName = "agent1";
    Map<String, String> properties = getPropertiesForChannel(agentName, DisposableChannel.class.getName());
    MemoryConfigurationProvider provider = new MemoryConfigurationProvider(agentName, properties);
    MaterializedConfiguration config1 = provider.getConfiguration();
    Channel channel1 = config1.getChannels().values().iterator().next();
    assertTrue(channel1 instanceof DisposableChannel);
    MaterializedConfiguration config2 = provider.getConfiguration();
    Channel channel2 = config2.getChannels().values().iterator().next();
    assertTrue(channel2 instanceof DisposableChannel);
    // Two materializations must yield two distinct channel instances.
    assertNotSame(channel1, channel2);
}
/**
 * Creates a new {@code KTable} whose values are transformed by {@code mapper}.
 * The key-agnostic mapper is adapted via {@code withKey} and delegated to
 * {@code doMapValues} with no explicit name and no materialization
 * ({@code null} store).
 *
 * @param mapper the value transformation to apply; must not be null
 * @return a table with mapped values
 * @throws NullPointerException if {@code mapper} is null
 */
@Override
public <VR> KTable<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> mapper) {
    Objects.requireNonNull(mapper, "mapper can't be null");
    return doMapValues(withKey(mapper), NamedInternal.empty(), null);
}
// mapValues() must reject a null mapper with a NullPointerException
// (the null is cast to the ValueMapperWithKey overload).
@Test
public void shouldNotAllowNullMapperOnMapValueWithKey() {
    assertThrows(NullPointerException.class, () -> table.mapValues((ValueMapperWithKey) null));
}
public static DateTimeFormatter timeFormatterWithOptionalMilliseconds() { // This is the .SSS part DateTimeParser ms = new DateTimeFormatterBuilder() .appendLiteral(".") .appendFractionOfSecond(1, 3) .toParser(); return new DateTimeFormatterBuilder() .append(DateTimeFormat.forPattern(ES_DATE_FORMAT_NO_MS).withZoneUTC()) .appendOptional(ms) .toFormatter(); }
// The formatter must accept timestamps with 0-3 fractional-second digits and
// normalize missing digits to zero-padded milliseconds.
@Test
public void testTimeFormatterWithOptionalMilliseconds() {
    /*
     * We can actually consider this working if it does not throw parser exceptions.
     * Check the toString() representation to make sure though. (using startsWith()
     * to avoid problems on test systems in other time zones, that are not CEST and do
     * not end with a +02:00 or shit.)
     */
    assertTrue(DateTime.parse("2013-09-15 02:21:02", Tools.timeFormatterWithOptionalMilliseconds()).toString().startsWith("2013-09-15T02:21:02.000"));
    assertTrue(DateTime.parse("2013-09-15 02:21:02.123", Tools.timeFormatterWithOptionalMilliseconds()).toString().startsWith("2013-09-15T02:21:02.123"));
    assertTrue(DateTime.parse("2013-09-15 02:21:02.12", Tools.timeFormatterWithOptionalMilliseconds()).toString().startsWith("2013-09-15T02:21:02.120"));
    assertTrue(DateTime.parse("2013-09-15 02:21:02.1", Tools.timeFormatterWithOptionalMilliseconds()).toString().startsWith("2013-09-15T02:21:02.100"));
}
/**
 * Looks up a single organization by its name.
 *
 * @param name organization name taken from the URL path
 * @return the matching organization, serialized as JSON
 */
@Operation(summary = "Get single organization")
@GetMapping(value = "name/{name}", produces = "application/json")
@ResponseBody
public Organization getByName(@PathVariable("name") String name) {
    // Delegate the lookup to the service layer; an unknown name surfaces as the
    // service's not-found exception and is translated by the framework.
    final Organization organization = organizationService.getOrganizationByName(name);
    return organization;
}
// When the service throws NotFoundException for an unknown organization name,
// the controller must propagate it unchanged.
@Test
public void organizationNameNotFound() {
    when(organizationServiceMock.getOrganizationByName(anyString())).thenThrow(NotFoundException.class);
    assertThrows(NotFoundException.class, () -> {
        controllerMock.getByName("test");
    });
}
/**
 * Lists the children of {@code directory}, dispatching to the DeepBox REST
 * endpoint that matches the directory's depth in the hierarchy:
 * root -> deepboxes, deepbox -> boxes, box -> third-level virtual folders
 * (Inbox/Documents/Trash), and below that the per-node queue/file/trash listings.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    if (directory.isRoot()) {
        return new DeepBoxesListService().list(directory, listener);
    }
    if (containerService.isDeepbox(directory)) { // in DeepBox
        return new BoxesListService().list(directory, listener);
    }
    if (containerService.isBox(directory)) { // in Box
        return new BoxListService().list(directory, listener);
    }
    final String deepBoxNodeId = fileid.getDeepBoxNodeId(directory);
    final String boxNodeId = fileid.getBoxNodeId(directory);
    if (containerService.isThirdLevel(directory)) { // in Inbox/Documents/Trash
        // N.B. although Documents and Trash have a nodeId, calling the listFiles1/listTrash1 API with
        // parentNode may fail!
        if (containerService.isInInbox(directory)) {
            // Inbox listing uses the queue endpoint (no parent node id).
            return new NodeListService(new Contents() {
                @Override
                public NodeContent getNodes(final int offset) throws ApiException {
                    return new BoxRestControllerApi(session.getClient()).listQueue(deepBoxNodeId, boxNodeId, null, offset, chunksize, "displayName asc");
                }
            }).list(directory, listener);
        }
        if (containerService.isInDocuments(directory)) {
            // Top-level Documents: list box files without a parent node id.
            return new NodeListService(new Contents() {
                @Override
                public NodeContent getNodes(final int offset) throws ApiException {
                    return new BoxRestControllerApi(session.getClient()).listFiles(
                            deepBoxNodeId, boxNodeId, offset, chunksize, "displayName asc");
                }
            }).list(directory, listener);
        }
        if (containerService.isInTrash(directory)) {
            // Top-level Trash: list trashed box files without a parent node id.
            return new NodeListService(new Contents() {
                @Override
                public NodeContent getNodes(final int offset) throws ApiException {
                    return new BoxRestControllerApi(session.getClient()).listTrash(
                            deepBoxNodeId, boxNodeId, offset, chunksize, "displayName asc");
                }
            }).list(directory, listener);
        }
    }
    // in subfolder of Documents/Trash (Inbox has no subfolders)
    final String nodeId = fileid.getFileId(directory);
    if (containerService.isInTrash(directory)) {
        return new NodeListService(new Contents() {
            @Override
            public NodeContent getNodes(final int offset) throws ApiException {
                return new BoxRestControllerApi(session.getClient()).listTrash1(
                        deepBoxNodeId, boxNodeId, nodeId, offset, chunksize, "displayName asc");
            }
        }).list(directory, listener);
    }
    return new NodeListService(new Contents() {
        @Override
        public NodeContent getNodes(final int offset) throws ApiException {
            return new BoxRestControllerApi(session.getClient()).listFiles1(
                    deepBoxNodeId, boxNodeId, nodeId, offset, chunksize, "displayName asc");
        }
    }).list(directory, listener);
}
// In this test setting, we get a NodeId from Box REST API and have canListFilesRoot==true for the documents folder.
// When listing the files for this nodId or when we try to get its NodeInfo, we get 403.
// Still, subfolders of documents are accessible (by listing the boxes files, but without passing the nodeId).
// See also comment at {@link DeepboxIdProvider#lookupDocumentsNodeId}.
@Test
public void testNoListChildrenTrashInbox() throws Exception {
    final DeepboxIdProvider nodeid = new DeepboxIdProvider(session);
    final Path box = new Path("/ORG 1 - DeepBox Desktop App/ORG1:Box2", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final AttributedList<Path> list = new DeepboxListService(session, nodeid).list(box, new DisabledListProgressListener());
    final String deepBoxNodeId = nodeid.getDeepBoxNodeId(box);
    final String boxNodeId = nodeid.getBoxNodeId(box);
    // Box policy: listing the file root is allowed, trash and inbox (queue) access are denied.
    final BoxAccessPolicy boxPolicy = new BoxRestControllerApi(session.getClient()).getBox(deepBoxNodeId, boxNodeId).getBoxPolicy();
    assertTrue(boxPolicy.isCanListFilesRoot());
    assertFalse(boxPolicy.isCanAccessTrash());
    assertFalse(boxPolicy.isCanListQueue());
    final Path documents = new Path("/ORG 1 - DeepBox Desktop App/ORG1:Box2/Documents", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final String documentsNodeId = nodeid.getFileId(documents);
    // Direct NodeInfo lookup and listing by the Documents nodeId are both forbidden (403)...
    final ApiException apiExceptionGetNodeInfo = assertThrows(ApiException.class,
            () -> new CoreRestControllerApi(session.getClient()).getNodeInfo(documentsNodeId, null, null, null));
    assertEquals(403, apiExceptionGetNodeInfo.getCode());
    final ApiException apiExceptionListFilestWithDocumentsNodeId = assertThrows(ApiException.class,
            () -> new BoxRestControllerApi(session.getClient()).listFiles1(deepBoxNodeId, boxNodeId, documentsNodeId, null, null, null));
    assertEquals(403, apiExceptionListFilestWithDocumentsNodeId.getCode());
    // ...yet Documents still appears when listing the box, while Inbox and Trash are hidden by policy.
    assertNotNull(list.find(new SimplePathPredicate(documents)));
    assertNull(list.find(new SimplePathPredicate(new Path("/ORG 1 - DeepBox Desktop App/ORG1:Box2/Inbox", EnumSet.of(Path.Type.directory, Path.Type.volume)))));
    assertNull(list.find(new SimplePathPredicate(new Path("/ORG 1 - DeepBox Desktop App/ORG1:Box2/Trash", EnumSet.of(Path.Type.directory, Path.Type.volume)))));
}
public static IOException wrapException(final String path, final String methodName, final IOException exception) { if (exception instanceof InterruptedIOException || exception instanceof PathIOException) { return exception; } else { String msg = String .format("Failed with %s while processing file/directory :[%s] in " + "method:[%s]", exception.getClass().getName(), path, methodName); try { return wrapWithMessage(exception, msg); } catch (Exception ex) { // For subclasses which have no (String) constructor throw IOException // with wrapped message return new PathIOException(path, exception); } } }
// wrapException() must preserve the concrete exception type when it exposes a
// (String) constructor, and fall back to PathIOException (retaining the path)
// when it does not.
@Test
public void testWrapException() throws Exception {
    // Test for IOException with valid (String) constructor
    LambdaTestUtils.intercept(EOFException.class,
        "Failed with java.io.EOFException while processing file/directory "
            + ":[/tmp/abc.txt] in method:[testWrapException]", () -> {
          throw IOUtils.wrapException("/tmp/abc.txt", "testWrapException",
              new EOFException("EOFException "));
        });

    // Test for IOException with no (String) constructor
    PathIOException returnedEx = LambdaTestUtils
        .intercept(PathIOException.class, "Input/output error:", () -> {
          throw IOUtils.wrapException("/tmp/abc.txt", "testWrapEx",
              new CharacterCodingException());
        });
    assertEquals("/tmp/abc.txt", returnedEx.getPath().toString());
}
/**
 * Decodes a raw Palo Alto Networks syslog message into a Graylog {@link Message}.
 * The syslog header is parsed first (using the configured time zone, defaulting
 * to UTC for inputs created before the option existed); the payload fields are
 * then parsed with the template matching the PAN log type (THREAT/SYSTEM/TRAFFIC).
 *
 * @param rawMessage raw bytes received on the input, interpreted as UTF-8
 * @return the decoded message, or {@code null} if the syslog header could not be parsed
 */
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    String s = new String(rawMessage.getPayload(), StandardCharsets.UTF_8);
    LOG.trace("Received raw message: {}", s);

    String timezoneID = configuration.getString(CK_TIMEZONE);
    // previously existing PA inputs after updating will not have a Time Zone configured, default to UTC
    DateTimeZone timezone = timezoneID != null ? DateTimeZone.forID(timezoneID) : DateTimeZone.UTC;
    LOG.trace("Configured time zone: {}", timezone);
    PaloAltoMessageBase p = parser.parse(s, timezone);

    // Return when error occurs parsing syslog header.
    if (p == null) {
        return null;
    }

    Message message = messageFactory.createMessage(p.payload(), p.source(), p.timestamp());

    // Each PAN type has its own field template describing the payload columns.
    switch (p.panType()) {
        case "THREAT":
            final PaloAltoTypeParser parserThreat = new PaloAltoTypeParser(templates.getThreatMessageTemplate());
            message.addFields(parserThreat.parseFields(p.fields(), timezone));
            break;
        case "SYSTEM":
            final PaloAltoTypeParser parserSystem = new PaloAltoTypeParser(templates.getSystemMessageTemplate());
            message.addFields(parserSystem.parseFields(p.fields(), timezone));
            break;
        case "TRAFFIC":
            final PaloAltoTypeParser parserTraffic = new PaloAltoTypeParser(templates.getTrafficMessageTemplate());
            message.addFields(parserTraffic.parseFields(p.fields(), timezone));
            break;
        default:
            // Unknown type: keep the base message but add no template-parsed fields.
            LOG.error("Unsupported PAN type [{}]. Not adding any parsed fields.", p.panType());
    }

    LOG.trace("Successfully processed [{}] message with [{}] fields.", p.panType(), message.getFieldCount());
    return message;
}
// Decoding THREAT messages must succeed for all supported syslog header variants:
// standard, double-space date, missing hostname, and both quirks combined.
@Test
public void testAllSyslogFormats() {
    PaloAltoCodec codec = new PaloAltoCodec(Configuration.EMPTY_CONFIGURATION, messageFactory);
    Message message = codec.decode(new RawMessage(SYSLOG_THREAT_MESSAGE.getBytes(StandardCharsets.UTF_8)));
    assertEquals("THREAT", message.getField("type"));
    message = codec.decode(new RawMessage(SYSLOG_THREAT_MESSAGE_DOUBLE_SPACE_DATE.getBytes(StandardCharsets.UTF_8)));
    assertEquals("THREAT", message.getField("type"));
    message = codec.decode(new RawMessage(SYSLOG_THREAT_MESSAGE_NO_HOST.getBytes(StandardCharsets.UTF_8)));
    assertEquals("THREAT", message.getField("type"));
    message = codec.decode(new RawMessage(SYSLOG_THREAT_MESSAGE_NO_HOST_DOUBLE_SPACE_DATE.getBytes(StandardCharsets.UTF_8)));
    assertEquals("THREAT", message.getField("type"));
}