focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public static CellLocation toLocation(String locationRef) { final int x = colNameToIndex(locationRef); final int y = ReUtil.getFirstNumber(locationRef) - 1; return new CellLocation(x, y); }
@Test public void toLocationTest() { final CellLocation a11 = ExcelUtil.toLocation("A11"); assertEquals(0, a11.getX()); assertEquals(10, a11.getY()); }
public static SAXParserFactory newSecureSAXParserFactory() throws SAXException, ParserConfigurationException { SAXParserFactory spf = SAXParserFactory.newInstance(); spf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); spf.setFeature(DISALLOW_DOCTYPE_DECL, true); spf.setFeature(LOAD_EXTERNAL_DECL, false); spf.setFeature(EXTERNAL_GENERAL_ENTITIES, false); spf.setFeature(EXTERNAL_PARAMETER_ENTITIES, false); return spf; }
@Test(expected = SAXException.class) public void testEntityDtdWithSecureSAXParserFactory() throws Exception { SAXParser parser = XMLUtils.newSecureSAXParserFactory().newSAXParser(); try (InputStream stream = getResourceStream("/xml/entity-dtd.xml")) { parser.parse(stream, new DefaultHandler()); } }
@Override public void branchReport(String xid, long branchId, BranchStatus status, String applicationData) throws TransactionException { DefaultResourceManager.get().branchReport(BranchType.SAGA, xid, branchId, status, applicationData); }
@Test public void testBranchReport() { ResourceManager resourceManager = Mockito.mock(ResourceManager.class); Mockito.doNothing().when(resourceManager).registerResource(any(Resource.class)); DefaultResourceManager.get(); DefaultResourceManager.mockResourceManager(BranchType.SAGA, resourceManager); Assertions.assertDoesNotThrow(() -> sagaTransactionalTemplate.branchReport("", 0, BranchStatus.Unknown, "")); }
public static IdGenerator decrementingLongs() { AtomicLong longs = new AtomicLong(); return () -> Long.toString(longs.decrementAndGet()); }
@Test public void decrementingIndependent() { IdGenerator gen = IdGenerators.decrementingLongs(); IdGenerator otherGen = IdGenerators.decrementingLongs(); assertThat(gen.getId(), equalTo("-1")); assertThat(gen.getId(), equalTo("-2")); assertThat(otherGen.getId(), equalTo("-1")); }
public static Permission getPermission(String name, String serviceName, String... actions) { PermissionFactory permissionFactory = PERMISSION_FACTORY_MAP.get(serviceName); if (permissionFactory == null) { throw new IllegalArgumentException("No permissions found for service: " + serviceName); } return permissionFactory.create(name, actions); }
@Test public void getPermission_Topic() { Permission permission = ActionConstants.getPermission("foo", TopicService.SERVICE_NAME); assertNotNull(permission); assertTrue(permission instanceof TopicPermission); }
public <T> List<Class<? extends T>> scanForSubClassesInPackage(String packageName, Class<T> parentClass) { return scanForClassesInPackage(packageName, isSubClassOf(parentClass)) .stream() .map(aClass -> (Class<? extends T>) aClass.asSubclass(parentClass)) .collect(toList()); }
@Test void scanForSubClassesInNonExistingPackage() { List<Class<? extends ExampleInterface>> classes = scanner .scanForSubClassesInPackage("io.cucumber.core.resource.does.not.exist", ExampleInterface.class); assertThat(classes, empty()); }
public void applyConfig(ClientBwListDTO configDTO) { requireNonNull(configDTO, "Client filtering config must not be null"); requireNonNull(configDTO.mode, "Config mode must not be null"); requireNonNull(configDTO.entries, "Config entries must not be null"); ClientSelector selector; switch (configDTO.mode) { case DISABLED: selector = ClientSelectors.any(); break; case WHITELIST: selector = createSelector(configDTO.entries); break; case BLACKLIST: selector = ClientSelectors.inverse(createSelector(configDTO.entries)); break; default: throw new IllegalArgumentException("Unknown client B/W list mode: " + configDTO.mode); } clientEngine.applySelector(selector); }
@Test public void testApplyConfig_nullMode_throws() { assertThrows(NullPointerException.class, () -> handler.applyConfig(createConfig(null))); }
@Override public byte[] evaluateResponse(byte[] responseBytes) throws SaslAuthenticationException { /* * Message format (from https://tools.ietf.org/html/rfc4616): * * message = [authzid] UTF8NUL authcid UTF8NUL passwd * authcid = 1*SAFE ; MUST accept up to 255 octets * authzid = 1*SAFE ; MUST accept up to 255 octets * passwd = 1*SAFE ; MUST accept up to 255 octets * UTF8NUL = %x00 ; UTF-8 encoded NUL character * * SAFE = UTF1 / UTF2 / UTF3 / UTF4 * ;; any UTF-8 encoded Unicode character except NUL */ String response = new String(responseBytes, StandardCharsets.UTF_8); List<String> tokens = extractTokens(response); String authorizationIdFromClient = tokens.get(0); String username = tokens.get(1); String password = tokens.get(2); if (username.isEmpty()) { throw new SaslAuthenticationException("Authentication failed: username not specified"); } if (password.isEmpty()) { throw new SaslAuthenticationException("Authentication failed: password not specified"); } NameCallback nameCallback = new NameCallback("username", username); PlainAuthenticateCallback authenticateCallback = new PlainAuthenticateCallback(password.toCharArray()); try { callbackHandler.handle(new Callback[]{nameCallback, authenticateCallback}); } catch (Throwable e) { throw new SaslAuthenticationException("Authentication failed: credentials for user could not be verified", e); } if (!authenticateCallback.authenticated()) throw new SaslAuthenticationException("Authentication failed: Invalid username or password"); if (!authorizationIdFromClient.isEmpty() && !authorizationIdFromClient.equals(username)) throw new SaslAuthenticationException("Authentication failed: Client requested an authorization id that is different from username"); this.authorizationId = username; complete = true; return new byte[0]; }
@Test public void authorizationIdEqualsAuthenticationId() { byte[] nextChallenge = saslServer.evaluateResponse(saslMessage(USER_A, USER_A, PASSWORD_A)); assertEquals(0, nextChallenge.length); }
@PublicAPI(usage = ACCESS) public static <T extends Comparable<T>> DescribedPredicate<T> lessThan(T value) { return new LessThanPredicate<>(value); }
@Test public void lessThan_works() { assertThat(lessThan(4)) .accepts(3) .hasDescription("less than '4'") .rejects(4) .rejects(5); assertThat(lessThan(Foo.SECOND)) .accepts(Foo.FIRST) .rejects(Foo.SECOND) .rejects(Foo.THIRD); }
@Override protected SchemaTransform from(FileWriteSchemaTransformConfiguration configuration) { return new FileWriteSchemaTransform(configuration); }
@Test public void formatMapsToFileWriteSchemaFormatTransform() { FileWriteSchemaTransformFormatProvider avroFormatProvider = ((FileWriteSchemaTransform) PROVIDER.from(defaultConfiguration().setFormat(AVRO).build())) .getProvider(); assertTrue(avroFormatProvider instanceof AvroWriteSchemaTransformFormatProvider); FileWriteSchemaTransformFormatProvider jsonFormatProvider = ((FileWriteSchemaTransform) PROVIDER.from(defaultConfiguration().setFormat(JSON).build())) .getProvider(); assertTrue(jsonFormatProvider instanceof JsonWriteSchemaTransformFormatProvider); FileWriteSchemaTransformFormatProvider parquetFormatProvider = ((FileWriteSchemaTransform) PROVIDER.from(defaultConfiguration().setFormat(PARQUET).build())) .getProvider(); assertTrue(parquetFormatProvider instanceof ParquetWriteSchemaTransformFormatProvider); FileWriteSchemaTransformFormatProvider xmlFormatProvider = ((FileWriteSchemaTransform) PROVIDER.from(defaultConfiguration().setFormat(XML).build())) .getProvider(); assertTrue(xmlFormatProvider instanceof XmlWriteSchemaTransformFormatProvider); }
@Override public Set<V> readUnion(String... names) { return get(readUnionAsync(names)); }
@Test public void testReadUnion() { RSet<Integer> set = redisson.getSet("set"); set.add(5); set.add(6); RSet<Integer> set1 = redisson.getSet("set1"); set1.add(1); set1.add(2); RSet<Integer> set2 = redisson.getSet("set2"); set2.add(3); set2.add(4); assertThat(set.readUnion("set1", "set2")).containsOnly(1, 2, 3, 4, 5, 6); assertThat(set).containsOnly(5, 6); }
public static String getStackTrace(Throwable throwable, long maxLines) { StringWriter writer = new StringWriter(); PrintWriter pw = new PrintWriter(writer, true); throwable.printStackTrace(pw); String[] lines = writer.toString().split(NEWLINE); StringBuilder sb = new StringBuilder(); for (int i = 0; i < Math.min(lines.length, maxLines); i++) { if (i > 0) { sb.append(NEWLINE); } sb.append(lines[i]); } return sb.toString(); }
@Test public void testExceptionStackTrace() { try { throw new Exception("error happened!!"); } catch (Exception e) { assertEquals(0, ExceptionHelper.getStackTrace(e, 0).length()); assertTrue( ExceptionHelper.getStackTrace(e, 1).startsWith("java.lang.Exception: error happened!!")); assertTrue(ExceptionHelper.getStackTrace(e, 2000).length() > 100); } }
public void removeSCM(String id) { SCM scmToBeDeleted = this.find(id); if (scmToBeDeleted == null) { throw new RuntimeException(String.format("Could not find SCM with id '%s'", id)); } this.remove(scmToBeDeleted); }
@Test void shouldRemoveSCMById() { SCM scm1 = SCMMother.create("id1"); SCM scm2 = SCMMother.create("id2"); SCMs scms = new SCMs(scm1, scm2); scms.removeSCM("id1"); assertThat(scms).containsExactly(scm2); }
public void addCve(String cve) { this.cve.add(cve); }
@Test @SuppressWarnings("squid:S2699") public void testAddCve() { //already tested, this is just left so the IDE doesn't recreate it. }
@Override public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException { if(file.isRoot()) { return true; } try { new SFTPAttributesFinderFeature(session).find(file, listener); return true; } catch(NotfoundException e) { // We expect SSH_FXP_STATUS if the file is not found return false; } }
@Test public void testFindRoot() throws Exception { assertTrue(new SFTPFindFeature(session).find(new Path("/", EnumSet.of(Path.Type.directory)))); }
@Override public boolean retainAll(Collection<?> collection) { // Using this implementation from the Android ArrayList since the Java 1.8 ArrayList // doesn't call through to remove. Calling through to remove lets us leverage the notification // done there boolean result = false; Iterator<?> it = iterator(); while (it.hasNext()) { if (!collection.contains(it.next())) { it.remove(); result = true; } } return result; }
@Test public void testRetainAll() { List<EpoxyModel<?>> modelsToRetain = new ArrayList<>(); modelsToRetain.add(modelList.get(0)); modelList.retainAll(modelsToRetain); verify(observer, times(2)).onItemRangeRemoved(1, 1); }
public boolean poll(Timer timer, boolean waitForJoinGroup) { maybeUpdateSubscriptionMetadata(); invokeCompletedOffsetCommitCallbacks(); if (subscriptions.hasAutoAssignedPartitions()) { if (protocol == null) { throw new IllegalStateException("User configured " + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG + " to empty while trying to subscribe for group protocol to auto assign partitions"); } // Always update the heartbeat last poll time so that the heartbeat thread does not leave the // group proactively due to application inactivity even if (say) the coordinator cannot be found. pollHeartbeat(timer.currentTimeMs()); if (coordinatorUnknownAndUnreadySync(timer)) { return false; } if (rejoinNeededOrPending()) { // due to a race condition between the initial metadata fetch and the initial rebalance, // we need to ensure that the metadata is fresh before joining initially. This ensures // that we have matched the pattern against the cluster's topics at least once before joining. if (subscriptions.hasPatternSubscription()) { // For consumer group that uses pattern-based subscription, after a topic is created, // any consumer that discovers the topic after metadata refresh can trigger rebalance // across the entire consumer group. Multiple rebalances can be triggered after one topic // creation if consumers refresh metadata at vastly different times. We can significantly // reduce the number of rebalances caused by single topic creation by asking consumer to // refresh metadata before re-joining the group as long as the refresh backoff time has // passed. if (this.metadata.timeToAllowUpdate(timer.currentTimeMs()) == 0) { this.metadata.requestUpdate(true); } if (!client.ensureFreshMetadata(timer)) { return false; } maybeUpdateSubscriptionMetadata(); } // if not wait for join group, we would just use a timer of 0 if (!ensureActiveGroup(waitForJoinGroup ? 
timer : time.timer(0L))) { // since we may use a different timer in the callee, we'd still need // to update the original timer's current time after the call timer.update(time.milliseconds()); return false; } } } else { // For manually assigned partitions, we do not try to pro-actively lookup coordinator; // instead we only try to refresh metadata when necessary. // If connections to all nodes fail, wakeups triggered while attempting to send fetch // requests result in polls returning immediately, causing a tight loop of polls. Without // the wakeup, poll() with no channels would block for the timeout, delaying re-connection. // awaitMetadataUpdate() in ensureCoordinatorReady initiates new connections with configured backoff and avoids the busy loop. if (metadata.updateRequested() && !client.hasReadyNodes(timer.currentTimeMs())) { client.awaitMetadataUpdate(timer); } // if there is pending coordinator requests, ensure they have a chance to be transmitted. client.pollNoWakeup(); } maybeAutoCommitOffsetsAsync(timer.currentTimeMs()); return true; }
@Test public void testJoinGroupInvalidGroupId() { final String consumerId = "leader"; subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener)); // ensure metadata is up-to-date for leader client.updateMetadata(metadataResponse); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); client.prepareResponse(joinGroupLeaderResponse(0, consumerId, Collections.emptyMap(), Errors.INVALID_GROUP_ID)); assertThrows(ApiException.class, () -> coordinator.poll(time.timer(Long.MAX_VALUE))); }
File putIfAbsent(String userId, boolean saveToDisk) throws IOException { String idKey = getIdStrategy().keyFor(userId); String directoryName = idToDirectoryNameMap.get(idKey); File directory = null; if (directoryName == null) { synchronized (this) { directoryName = idToDirectoryNameMap.get(idKey); if (directoryName == null) { directory = createDirectoryForNewUser(userId); directoryName = directory.getName(); idToDirectoryNameMap.put(idKey, directoryName); if (saveToDisk) { save(); } } } } return directory == null ? new File(usersDirectory, directoryName) : directory; }
@Test public void testDirectoryFormatLongerUserId() throws IOException { UserIdMapper mapper = createUserIdMapper(IdStrategy.CASE_INSENSITIVE); String user1 = "muchlongeruserid"; File directory1 = mapper.putIfAbsent(user1, true); assertThat(directory1.getName(), startsWith("muchlongeruser_")); }
@Override public TapiNepRef getNepRef(TapiNepRef nepRef) throws NoSuchElementException { updateCache(); TapiNepRef ret = null; try { ret = tapiNepRefList.stream() .filter(nepRef::equals) .findFirst().get(); } catch (NoSuchElementException e) { log.error("Nep not found of {}", nepRef); throw e; } return ret; }
@Test(expected = NoSuchElementException.class) public void testGetNepRefWithConnectPointWhenEmpty() { tapiResolver.getNepRef(cp); }
public static void assertThatClassIsUtility(Class<?> clazz) { final UtilityClassChecker checker = new UtilityClassChecker(); if (!checker.isProperlyDefinedUtilityClass(clazz)) { final Description toDescription = new StringDescription(); final Description mismatchDescription = new StringDescription(); checker.describeTo(toDescription); checker.describeMismatch(mismatchDescription); final String reason = "\n" + "Expected: is \"" + toDescription.toString() + "\"\n" + " but : was \"" + mismatchDescription.toString() + "\""; throw new AssertionError(reason); } }
@Test public void testNonFinalClass() throws Exception { boolean gotException = false; try { assertThatClassIsUtility(NonFinal.class); } catch (AssertionError assertion) { assertThat(assertion.getMessage(), containsString("is not final")); gotException = true; } assertThat(gotException, is(true)); }
public void findIntersections(Rectangle query, Consumer<T> consumer) { IntArrayList todoNodes = new IntArrayList(levelOffsets.length * degree); IntArrayList todoLevels = new IntArrayList(levelOffsets.length * degree); int rootLevel = levelOffsets.length - 1; int rootIndex = levelOffsets[rootLevel]; if (doesIntersect(query, rootIndex)) { todoNodes.push(rootIndex); todoLevels.push(rootLevel); } while (!todoNodes.isEmpty()) { int nodeIndex = todoNodes.popInt(); int level = todoLevels.popInt(); if (level == 0) { // This is a leaf node consumer.accept(items[nodeIndex / ENVELOPE_SIZE]); } else { int childrenOffset = getChildrenOffset(nodeIndex, level); for (int i = 0; i < degree; i++) { int childIndex = childrenOffset + ENVELOPE_SIZE * i; if (doesIntersect(query, childIndex)) { todoNodes.push(childIndex); todoLevels.push(level - 1); } } } } }
@Test public void testOctagonQuery() { OGCGeometryWrapper octagonA = new OGCGeometryWrapper(POLYGON_A); OGCGeometryWrapper octagonB = new OGCGeometryWrapper(POLYGON_B); OGCGeometryWrapper pointX = new OGCGeometryWrapper(POINT_X); OGCGeometryWrapper pointY = new OGCGeometryWrapper(POINT_Y); OGCGeometryWrapper pointZ = new OGCGeometryWrapper(POINT_Z); OGCGeometryWrapper pointW = new OGCGeometryWrapper(POINT_W); Flatbush<OGCGeometryWrapper> rtree = new Flatbush<>(new OGCGeometryWrapper[] {pointX, pointY, pointZ, pointW}); List<OGCGeometryWrapper> resultsA = findIntersections(rtree, octagonA.getExtent()); assertEqualsSorted(resultsA, ImmutableList.of(pointX, pointY), Comparator.naturalOrder()); List<OGCGeometryWrapper> resultsB = findIntersections(rtree, octagonB.getExtent()); assertEqualsSorted(resultsB, ImmutableList.of(pointY, pointZ), Comparator.naturalOrder()); }
public static String partitionsToLogString(Collection<TopicIdPartition> partitions, Boolean traceEnabled) { if (traceEnabled) { return String.format("( %s )", String.join(", ", partitions.toString())); } return String.format("%s partition(s)", partitions.size()); }
@Test public void testPartitionsToLogStringEmpty() { String response = ShareSession.partitionsToLogString(Collections.emptyList(), false); assertEquals("0 partition(s)", response); response = ShareSession.partitionsToLogString(Collections.emptyList(), true); assertEquals("( [] )", response); }
public static Permission getPermission(String name, String serviceName, String... actions) { PermissionFactory permissionFactory = PERMISSION_FACTORY_MAP.get(serviceName); if (permissionFactory == null) { throw new IllegalArgumentException("No permissions found for service: " + serviceName); } return permissionFactory.create(name, actions); }
@Test public void getPermission_Lock() { Permission permission = ActionConstants.getPermission("foo", LockSupportService.SERVICE_NAME); assertNotNull(permission); assertTrue(permission instanceof LockPermission); }
@Override public List<String> getServerList() { return serverList.isEmpty() ? serversFromEndpoint : serverList; }
@Test void testConstructWithEndpointAndTimedNotNeedRefresh() throws Exception { Properties properties = new Properties(); properties.put(PropertyKeyConst.ENDPOINT, "127.0.0.1"); serverListManager = new ServerListManager(properties); List<String> serverList = serverListManager.getServerList(); assertEquals(1, serverList.size()); assertEquals("127.0.0.1:8848", serverList.get(0)); httpRestResult.setData("127.0.0.1:8848\n127.0.0.1:8948"); mockThreadInvoke(serverListManager, false); serverList = serverListManager.getServerList(); assertEquals(1, serverList.size()); assertEquals("127.0.0.1:8848", serverList.get(0)); }
public void updateInstanceMetadata(Service service, String metadataId, InstanceMetadata instanceMetadata) { MetadataOperation<InstanceMetadata> operation = buildMetadataOperation(service); operation.setTag(metadataId); operation.setMetadata(instanceMetadata); WriteRequest operationLog = WriteRequest.newBuilder().setGroup(Constants.INSTANCE_METADATA) .setOperation(DataOperation.CHANGE.name()).setData(ByteString.copyFrom(serializer.serialize(operation))) .build(); submitMetadataOperation(operationLog); }
@Test void testUpdateInstanceMetadata() { assertThrows(NacosRuntimeException.class, () -> { String metadataId = "metadataId"; InstanceMetadata instanceMetadata = new InstanceMetadata(); namingMetadataOperateService.updateInstanceMetadata(service, metadataId, instanceMetadata); Mockito.verify(service).getNamespace(); Mockito.verify(service).getGroup(); Mockito.verify(service).getName(); }); }
public static Double interpolateCourse(Double c1, Double c2, double fraction) { if (c1 == null || c2 == null) { return null; } checkArgument(VALID_COURSE_RANGE.contains(c1), "The 1st course: " + c1 + " is not in range"); checkArgument(VALID_COURSE_RANGE.contains(c2), "The 2nd course: " + c2 + " is not in range"); checkArgument(VALID_FRACTION_RANGE.contains(fraction), "The fraction: " + fraction + " is not in range"); double angleDelta = Spherical.angleDifference(c2, c1); Double course = c1 + interpolate(0.0, angleDelta, fraction); return Spherical.mod(course, 360.0d); }
@Test public void testInterpolateCourseOnBadInput2() { try { interpolateCourse(5.0, 10.0, -0.1); fail("Should not work because fraction -0.1 is out of range"); } catch (IllegalArgumentException iae) { assertTrue(iae.getMessage().contains("The fraction: ")); } }
public Materialization create( final StreamsMaterialization delegate, final MaterializationInfo info, final QueryId queryId, final QueryContext.Stacker contextStacker ) { final TransformVisitor transformVisitor = new TransformVisitor(queryId, contextStacker); final List<Transform> transforms = info .getTransforms() .stream() .map(xform -> xform.visit(transformVisitor)) .collect(Collectors.toList()); return materializationFactory.create( delegate, info.getSchema(), transforms ); }
@Test public void shouldReturnMaterialization() { // Given: final KsqlMaterialization ksqlMaterialization = mock(KsqlMaterialization.class); when(materializationFactory.create(any(), any(), any())) .thenReturn(ksqlMaterialization); // When: final Materialization result = factory .create(materialization, info, queryId, contextStacker); // Then: assertThat(result, is(ksqlMaterialization)); }
@Override public void publish() { checkNotPublished(); if (!metadataWritten.get()) { throw new IllegalStateException("Metadata is missing"); } File zip = tempFolder.newFile(); FILES2.zipDir(rootDir, zip); File targetZip = projectExportDumpFS.exportDumpOf(descriptor); FILES2.deleteIfExists(targetZip); FILES2.moveFile(zip, targetZip); FILES2.deleteIfExists(rootDir); LoggerFactory.getLogger(getClass()).info("Dump file published | size={} | path={}", humanReadableByteCountSI(sizeOf(targetZip)), targetZip.getAbsolutePath()); published.set(true); }
@Test public void publish_fails_if_metadata_is_missing() { assertThatThrownBy(() -> underTest.publish()) .isInstanceOf(IllegalStateException.class) .hasMessage("Metadata is missing"); }
@Override Map<KeyValueSegment, WriteBatch> getWriteBatches(final Collection<ConsumerRecord<byte[], byte[]>> records) { final Map<KeyValueSegment, WriteBatch> writeBatchMap = new HashMap<>(); for (final ConsumerRecord<byte[], byte[]> record : records) { final long timestamp = WindowKeySchema.extractStoreTimestamp(record.key()); observedStreamTime = Math.max(observedStreamTime, timestamp); minTimestamp = Math.min(minTimestamp, timestamp); final long segmentId = segments.segmentId(timestamp); final KeyValueSegment segment = segments.getOrCreateSegmentIfLive(segmentId, context, observedStreamTime); if (segment != null) { //null segment is if it has expired, so we don't want those records ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition( record, consistencyEnabled, position ); try { final WriteBatch batch = writeBatchMap.computeIfAbsent(segment, s -> new WriteBatch()); final byte[] baseKey = TimeFirstWindowKeySchema.fromNonPrefixWindowKey(record.key()); segment.addToBatch(new KeyValue<>(baseKey, record.value()), batch); } catch (final RocksDBException e) { throw new ProcessorStateException("Error restoring batch to store " + name(), e); } } } return writeBatchMap; }
@Test public void shouldCreateWriteBatches() { final String key = "a"; final Collection<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>(); records.add(new ConsumerRecord<>("", 0, 0L, serializeKey(key, 0, 0L).get(), serializeValue(50L))); records.add(new ConsumerRecord<>("", 0, 0L, serializeKey(key, 1, 1L).get(), serializeValue(100L))); final Map<KeyValueSegment, WriteBatch> writeBatchMap = bytesStore.getWriteBatches(records); assertEquals(1, writeBatchMap.size()); for (final WriteBatch batch : writeBatchMap.values()) { assertEquals(2, batch.count()); } }
@Override public List<ControllerInfo> getControllers() { DriverHandler handler = handler(); OvsdbClientService clientService = getOvsdbClientService(handler); Set<ControllerInfo> controllers = clientService.getControllers( handler().data().deviceId()); return new ArrayList<>(controllers); }
@Test public void testGetControllers() throws Exception { // DriverService driverService = new Driv // AbstractBehaviour ab = new AbstractBehaviour(); // DriverHandler handler = handler(); // List<ControllerInfo> controllersList = // controllerConfig.getControllers(DeviceId.deviceId("0000000000000018")); // log.info("controllers " + controllersList); }
public static Map<String, String> resolveAttachments(Object invocation, boolean isApache) { if (invocation == null) { return Collections.emptyMap(); } final Map<String, String> attachments = new HashMap<>(); if (isApache) { attachments.putAll(getAttachmentsFromContext(APACHE_RPC_CONTEXT)); } else { attachments.putAll(getAttachmentsFromContext(ALIBABA_RPC_CONTEXT)); } final Optional<Object> fieldValue = ReflectUtils.getFieldValue(invocation, ATTACHMENTS_FIELD); if (fieldValue.isPresent() && fieldValue.get() instanceof Map) { attachments.putAll((Map<String, String>) fieldValue.get()); } return Collections.unmodifiableMap(attachments); }
@Test public void testStringAttachments() { final TestStringInvocation testStringInvocation = new TestStringInvocation(buildAttachments()); final Map<String, String> attachmentsByString = DubboAttachmentsHelper .resolveAttachments(testStringInvocation, false); Assert.assertEquals(attachmentsByString, testStringInvocation.attachments); final Map<String, String> attachmentsByString2 = DubboAttachmentsHelper .resolveAttachments(testStringInvocation, true); Assert.assertEquals(attachmentsByString2, testStringInvocation.attachments); }
public <T> HttpResponse<T> httpRequest(String url, String method, HttpHeaders headers, Object requestBodyData, TypeReference<T> responseFormat) { return httpRequest(url, method, headers, requestBodyData, responseFormat, null, null); }
@Test public void testUseSslConfigsOnlyWhenNecessary() throws Exception { int statusCode = Response.Status.OK.getStatusCode(); Request req = mock(Request.class); ContentResponse resp = mock(ContentResponse.class); when(resp.getContentAsString()).thenReturn(toJsonString(TEST_DTO)); setupHttpClient(statusCode, req, resp); assertDoesNotThrow(() -> httpRequest( httpClient, MOCK_URL, TEST_METHOD, TEST_TYPE, TEST_SIGNATURE_ALGORITHM )); String httpsUrl = "https://localhost:1234/api/endpoint"; RestClient client = spy(new RestClient(null)); assertThrows(RuntimeException.class, () -> client.httpRequest( httpsUrl, TEST_METHOD, null, TEST_DTO, TEST_TYPE, MOCK_SECRET_KEY, TEST_SIGNATURE_ALGORITHM )); }
public Map<Integer, Slice> toKeyMap(List<OrcType> types, List<HiveColumnHandle> physicalColumnHandles) { Map<String, Integer> columnIndexMap = physicalColumnHandles.stream() .collect(toImmutableMap(HiveColumnHandle::getName, HiveColumnHandle::getHiveColumnIndex)); return toKeyMap(types, columnIndexMap); }
@Test(expectedExceptions = PrestoException.class) public void testInvalidKeyMap() { DwrfEncryptionMetadata dwrfEncryptionMetadata = new DwrfEncryptionMetadata(ImmutableMap.of("c1", "abcd".getBytes()), ImmutableMap.of(), "test_algo", "test_provider"); List<HiveColumnHandle> columnHandleList = ImmutableList.of( new HiveColumnHandle("column1", HIVE_INT, TypeSignature.parseTypeSignature(BIGINT), 0, HiveColumnHandle.ColumnType.REGULAR, Optional.empty(), Optional.empty())); List<OrcType> orcTypes = ImmutableList.of( new OrcType(OrcType.OrcTypeKind.STRUCT, ImmutableList.of(1), ImmutableList.of("column1"), Optional.empty(), Optional.empty(), Optional.empty()), new OrcType(OrcType.OrcTypeKind.INT, ImmutableList.of(), ImmutableList.of(), Optional.empty(), Optional.empty(), Optional.empty())); dwrfEncryptionMetadata.toKeyMap(orcTypes, columnHandleList); }
public static <T> boolean isEmpty(T[] array) { return array == null || array.length == 0; }
@Test public void isEmptyTest() { int[] a = {}; assertTrue(ArrayUtil.isEmpty(a)); assertTrue(ArrayUtil.isEmpty((Object) a)); int[] b = null; //noinspection ConstantConditions assertTrue(ArrayUtil.isEmpty(b)); Object c = null; //noinspection ConstantConditions assertTrue(ArrayUtil.isEmpty(c)); Object d = new Object[]{"1", "2", 3, 4D}; boolean isEmpty = ArrayUtil.isEmpty(d); assertFalse(isEmpty); d = new Object[0]; isEmpty = ArrayUtil.isEmpty(d); assertTrue(isEmpty); d = null; //noinspection ConstantConditions isEmpty = ArrayUtil.isEmpty(d); //noinspection ConstantConditions assertTrue(isEmpty); // Object数组 Object[] e = new Object[]{"1", "2", 3, 4D}; final boolean empty = ArrayUtil.isEmpty(e); assertFalse(empty); }
public ClusterState getState() { LockGuard stateLock = getStateLock(); return stateLock.isLocked() ? ClusterState.IN_TRANSITION : state; }
@Test public void test_defaultState() { assertEquals(ACTIVE, clusterStateManager.getState()); }
@Override public final int hashCode() { return toURI().hashCode(); }
@Test public void testHashCode() throws IOException { String parentFolder = "parentFolder"; File parentFile = tmp.newFolder(parentFolder); String child = "child"; File childFile = new File(parentFile, child); VirtualFile vf = new VirtualFileMinimalImplementation(childFile); assertThat(vf.hashCode(), is(childFile.toURI().hashCode())); }
@SuppressWarnings("unchecked") public static <S, F> S visit(final Schema schema, final Visitor<S, F> visitor) { final BiFunction<Visitor<?, ?>, Schema, Object> handler = HANDLER.get(schema.type()); if (handler == null) { throw new UnsupportedOperationException("Unsupported schema type: " + schema.type()); } return (S) handler.apply(visitor, schema); }
@Test public void shouldVisitInt64() { // Given: final Schema schema = Schema.OPTIONAL_INT64_SCHEMA; when(visitor.visitInt64(any())).thenReturn("Expected"); // When: final String result = SchemaWalker.visit(schema, visitor); // Then: verify(visitor).visitInt64(same(schema)); assertThat(result, is("Expected")); }
@Override public void touch(final Local file) throws AccessDeniedException { try { try { Files.createFile(Paths.get(file.getAbsolute())); } catch(NoSuchFileException e) { final Local parent = file.getParent(); new DefaultLocalDirectoryFeature().mkdir(parent); if(log.isDebugEnabled()) { log.debug(String.format("Created folder %s", parent)); } Files.createFile(Paths.get(file.getAbsolute())); } catch(FileAlreadyExistsException e) { log.warn(String.format("File %s already exists", file)); throw new LocalAccessDeniedException(MessageFormat.format( LocaleFactory.localizedString("Cannot create {0}", "Error"), file.getAbsolute()), e); } } catch(IOException e) { throw new LocalAccessDeniedException(MessageFormat.format( LocaleFactory.localizedString("Cannot create {0}", "Error"), file.getAbsolute()), e); } if(log.isDebugEnabled()) { log.debug(String.format("Created file %s", file)); } }
@Test
public void testFailure() {
    // Touching a file directly under the filesystem root should be denied for a regular user.
    Local l = new Local("/" + UUID.randomUUID().toString());
    final DefaultLocalTouchFeature f = new DefaultLocalTouchFeature();
    try {
        f.touch(l);
        // Bug fix: previously the test passed silently when no exception was thrown,
        // so the failure path it is meant to verify was never enforced.
        throw new AssertionError("Expected AccessDeniedException when creating a file under /");
    }
    catch(AccessDeniedException e) {
        final String s = l.getName();
        assertEquals("Cannot create " + PreferencesFactory.get().getProperty("local.delimiter") + s + ". Please verify disk permissions.", e.getDetail());
        assertEquals("Access denied", e.getMessage());
    }
}
/** Value equality over sender, subject and payload bytes; also accepts identity fast-path. */
@Override
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    // The instanceof guard also rejects null.
    if (!(o instanceof ClusterMessage)) {
        return false;
    }
    final ClusterMessage other = (ClusterMessage) o;
    return Objects.equals(sender, other.sender)
            && Objects.equals(subject, other.subject)
            && Arrays.equals(payload, other.payload);
}
@Test
public void testEquals() {
    // message1 and sameAsMessage1 form one equality group; the others must differ.
    new EqualsTester()
        .addEqualityGroup(message1, sameAsMessage1)
        .addEqualityGroup(message2)
        .addEqualityGroup(message3)
        .testEquals();
}
/**
 * Fetches a single file's content from the GitHub contents API.
 * Requires the requesting user to have push permission on the repository.
 *
 * @return a {@link GithubFile} carrying sha, name and base64 content
 * @throws ServiceException.UnexpectedErrorException if the API call fails or returns no body
 */
@Override
protected Object getContent(ScmGetRequest request) {
    // Authorisation gate: content is only served to users who can push.
    GithubScm.validateUserHasPushPermission(request.getApiUrl(), request.getCredentials().getPassword().getPlainText(), request.getOwner(), request.getRepo());
    String url = String.format("%s/repos/%s/%s/contents/%s",
            request.getApiUrl(),
            request.getOwner(),
            request.getRepo(),
            request.getPath());
    if(request.getBranch() != null){ //if branch is present fetch this file from branch
        url += "?ref="+request.getBranch();
    }
    try {
        Map ghContent = HttpRequest.get(url)
                .withAuthorizationToken(request.getCredentials().getPassword().getPlainText())
                .to(Map.class);
        if(ghContent == null){
            throw new ServiceException.UnexpectedErrorException("Failed to load file: "+request.getPath());
        }
        String base64Data = (String)ghContent.get("content");
        // JENKINS-47887 - this content contains \n which breaks IE11
        base64Data = base64Data == null ? null : base64Data.replace("\n", "");
        return new GithubFile(new GitContent.Builder()
                .sha((String)ghContent.get("sha"))
                .name((String)ghContent.get("name"))
                .repo(request.getRepo())
                .owner(request.getOwner())
                .path(request.getPath())
                .base64Data(base64Data)
                .build());
    } catch (IOException e) {
        throw new ServiceException.UnexpectedErrorException(String.format("Failed to load file %s: %s", request.getPath(),e.getMessage()), e);
    }
}
@Test
public void getContentForMbp() throws UnirestException {
    // Fetch the Jenkinsfile of a multi-branch project through the GitHub SCM content provider.
    String credentialId = createGithubCredential(user);
    StaplerRequest staplerRequest = mockStapler();
    MultiBranchProject mbp = mockMbp(credentialId, user, GithubScm.DOMAIN_NAME);

    GithubFile content = (GithubFile) new GithubScmContentProvider().getContent(staplerRequest, mbp);
    // Metadata comes straight from the mocked GitHub contents API response.
    assertEquals("Jenkinsfile", content.getContent().getName());
    assertEquals("e23b8ef5c2c4244889bf94db6c05cc08ea138aef", content.getContent().getSha());
    assertEquals("PR-demo", content.getContent().getRepo());
    assertEquals("cloudbeers", content.getContent().getOwner());
}
/**
 * Validates a CREATE CONNECTOR statement against the Connect cluster without creating it.
 * Rejects statements whose connector name already exists (HTTP 409) or whose
 * configuration fails Connect-side validation.
 *
 * @return a handled response carrying a placeholder create-response entity
 * @throws KsqlRestException with a 409 status when the connector name is taken
 * @throws KsqlException when the connector configuration is invalid
 */
public static StatementExecutorResponse validate(
    final ConfiguredStatement<CreateConnector> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
  final CreateConnector createConnector = statement.getStatement();
  final ConnectClient client = serviceContext.getConnectClient();

  // A name clash is reported as a REST-level conflict rather than a generic KsqlException.
  if (checkForExistingConnector(statement, createConnector, client)) {
    final String errorMsg = String.format(
        "Connector %s already exists", createConnector.getName());
    throw new KsqlRestException(EndpointResponse.create()
        .status(HttpStatus.SC_CONFLICT)
        .entity(new KsqlErrorMessage(Errors.toErrorCode(HttpStatus.SC_CONFLICT), errorMsg))
        .build()
    );
  }

  // All config problems reported by the Connect validation endpoint are folded into one message.
  final List<String> errors = validateConfigs(createConnector, client);
  if (!errors.isEmpty()) {
    final String errorMessage = "Validation error: " + String.join("\n", errors);
    throw new KsqlException(errorMessage);
  }

  // Validation only: the returned entity carries a dummy create response.
  return StatementExecutorResponse.handled(Optional.of(new CreateConnectorEntity(
      statement.getMaskedStatementText(),
      DUMMY_CREATE_RESPONSE
  )));
}
@Test
public void shouldThrowIfConnectorTypeIsMissing() {
    // Given: a CREATE CONNECTOR statement whose config lacks the connector type entry.
    final CreateConnector createConnectorMissingType = new CreateConnector(
        "connector-name", ImmutableMap.of("foo", new StringLiteral("bar")),
        Type.SOURCE, false);
    final ConfiguredStatement<CreateConnector> createConnectorMissingTypeConfigured =
        ConfiguredStatement.of(PreparedStatement.of(
            "CREATE SOURCE CONNECTOR foo WITH ('foo'='bar');",
            createConnectorMissingType), SessionConfig.of(CONFIG, ImmutableMap.of()));

    // When:
    final KsqlException e = assertThrows(
        KsqlException.class,
        () -> ConnectExecutor.validate(createConnectorMissingTypeConfigured,
            mock(SessionProperties.class), null, serviceContext));

    // Then: the aggregated validation message names the missing connector type.
    assertThat(e.getMessage(), is("Validation error: "
        + "Connector config {name=connector-name, foo=bar} contains no connector type"));
}
/** Renders the VXLAN header fields (VNI and flags) as decimal strings for logging/debugging. */
@Override
public String toString() {
    return toStringHelper(getClass())
            .add("vni", Integer.toString(getVni()))
            .add("flags", Byte.toString(getFlag()))
            .toString();
}
@Test
public void testToStringVXLAN() throws Exception {
    // toString() must surface both the flags and the VNI of a deserialized packet.
    VXLAN vxlan = deserializer.deserialize(BYTE_PACKET_VXLAN, 0, BYTE_PACKET_VXLAN.length);
    String rendered = vxlan.toString();
    assertTrue(StringUtils.contains(rendered, "flags=" + TEST_FLAGS));
    assertTrue(StringUtils.contains(rendered, "vni=" + TEST_VNI1));
}
@Override
public void check(Collection<? extends T> collection, ConditionEvents events) {
    // Evaluate the element condition against every item, collecting the sub-results.
    ViolatedAndSatisfiedConditionEvents itemEvents = new ViolatedAndSatisfiedConditionEvents();
    for (T element : collection) {
        condition.check(element, itemEvents);
    }
    // Only report when at least one element produced a result; an empty collection stays silent.
    boolean anyResult = !itemEvents.getAllowed().isEmpty() || !itemEvents.getViolating().isEmpty();
    if (anyResult) {
        events.add(new OnlyConditionEvent(collection, itemEvents));
    }
}
@Test
public void satisfied_works_and_description_contains_mismatches() {
    // A mixed collection reports the non-serializable element as a violation...
    ConditionEvents mixedEvents = ConditionEvents.Factory.create();
    containOnlyElementsThat(IS_SERIALIZABLE)
            .check(ONE_SERIALIZABLE_AND_ONE_NON_SERIALIZABLE_OBJECT, mixedEvents);
    assertThat(mixedEvents).containViolations(isSerializableMessageFor(Object.class));

    // ...while an all-serializable collection passes cleanly.
    ConditionEvents satisfiedEvents = ConditionEvents.Factory.create();
    containOnlyElementsThat(IS_SERIALIZABLE).check(TWO_SERIALIZABLE_OBJECTS, satisfiedEvents);
    assertThat(satisfiedEvents).containNoViolation();
}
@Override public String authenticate(AuthenticationDataSource authData) throws AuthenticationException { String token; try { // Get Token token = getToken(authData); } catch (AuthenticationException exception) { incrementFailureMetric(ErrorCode.INVALID_AUTH_DATA); throw exception; } // Parse Token by validating String role = getPrincipal(authenticateToken(token)); AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName()); return role; }
@Test(expectedExceptions = AuthenticationException.class)
public void testAuthenticateWhenAuthHeaderValuePrefixIsInvalid() throws AuthenticationException {
    AuthenticationProviderToken provider = new AuthenticationProviderToken();
    // "MyBearer " is not a valid authorization prefix, so token extraction must fail.
    provider.authenticate(new AuthenticationDataSource() {
        @Override
        public String getHttpHeader(String name) {
            return "MyBearer ";
        }

        @Override
        public boolean hasDataFromHttp() {
            return true;
        }
    });
}
/**
 * Builds a {@link Type#USERNAME_PASSWORD} device key whose credentials are carried
 * as username/password annotations.
 */
public static DeviceKey createDeviceKeyUsingUsernamePassword(DeviceKeyId id, String label,
                                                             String username, String password) {
    final DefaultAnnotations credentials = builder()
            .set(AnnotationKeys.USERNAME, username)
            .set(AnnotationKeys.PASSWORD, password)
            .build();
    return new DeviceKey(id, label, Type.USERNAME_PASSWORD, credentials);
}
@Test(expected = NullPointerException.class)
public void testCreateDeviceKeyUsingUsernamePasswordWithNull() {
    // All-null arguments must be rejected eagerly (no need to keep the result).
    DeviceKey.createDeviceKeyUsingUsernamePassword(null, null, null, null);
}
/**
 * Prints, one per line, the clusters hosting the given topic.
 * The admin client is always shut down, even on failure.
 */
@Override
public void execute(final CommandLine commandLine, final Options options, RPCHook rpcHook)
        throws SubCommandException {
    final DefaultMQAdminExt adminExt = new DefaultMQAdminExt(rpcHook);
    // A unique instance name avoids clashing with other admin clients in the same JVM.
    adminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
    final String topic = commandLine.getOptionValue('t').trim();
    try {
        adminExt.start();
        for (String cluster : adminExt.getTopicClusterList(topic)) {
            System.out.printf("%s%n", cluster);
        }
    } catch (Exception e) {
        throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
    } finally {
        adminExt.shutdown();
    }
}
@Test
public void testExecute() {
    TopicClusterSubCommand command = new TopicClusterSubCommand();
    Options options = ServerUtil.buildCommandlineOptions(new Options());
    String[] args = {"-t unit-test"};
    CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin " + command.commandName(),
        args, command.buildCommandlineOptions(options), new DefaultParser());
    // The topic option must round-trip through the parser (trimmed of the leading space).
    assertThat(commandLine.getOptionValue('t').trim()).isEqualTo("unit-test");
}
/** Creates the runtime {@code ZipFile} step instance bound to this metadata. */
public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta transMeta, Trans trans ) {
  return new ZipFile( stepMeta, stepDataInterface, cnr, transMeta, trans );
}
@Test
public void testGetStep() throws Exception {
    // Minimal mocks: the trans meta resolves the step by its name.
    StepMeta stepMeta = mock( StepMeta.class );
    when( stepMeta.getName() ).thenReturn( "Zip Step Name" );
    StepDataInterface stepData = mock( StepDataInterface.class );
    TransMeta transMeta = mock( TransMeta.class );
    when( transMeta.findStep( "Zip Step Name" ) ).thenReturn( stepMeta );
    Trans trans = mock( Trans.class );

    ZipFileMeta zipFileMeta = new ZipFileMeta();
    ZipFile zipFile = (ZipFile) zipFileMeta.getStep( stepMeta, stepData, 0, transMeta, trans );

    // The created step must carry through every constructor argument.
    assertEquals( stepMeta, zipFile.getStepMeta() );
    assertEquals( stepData, zipFile.getStepDataInterface() );
    assertEquals( transMeta, zipFile.getTransMeta() );
    assertEquals( trans, zipFile.getTrans() );
    assertEquals( 0, zipFile.getCopy() );
}
/**
 * Generates the full documentation set for a plugin: index page first, then one
 * section per plugin element type, then the guides.
 */
public List<Document> generate(RegisteredPlugin registeredPlugin) throws Exception {
    List<Document> documents = new ArrayList<>();
    documents.addAll(index(registeredPlugin));
    documents.addAll(generate(registeredPlugin, registeredPlugin.getTasks(), Task.class, "tasks"));
    documents.addAll(generate(registeredPlugin, registeredPlugin.getTriggers(), AbstractTrigger.class, "triggers"));
    documents.addAll(generate(registeredPlugin, registeredPlugin.getConditions(), Condition.class, "conditions"));
    documents.addAll(generate(registeredPlugin, registeredPlugin.getTaskRunners(), TaskRunner.class, "task-runners"));
    documents.addAll(guides(registeredPlugin));
    return documents;
}
@Test
void pluginDoc() throws Exception {
    // Scan the core plugin from the test classpath and generate its documentation.
    PluginScanner pluginScanner = new PluginScanner(ClassPluginDocumentationTest.class.getClassLoader());
    RegisteredPlugin core = pluginScanner.scan();

    List<Document> docs = documentationGenerator.generate(core);
    // The first generated document is the index page; it must embed the plugin icon inline.
    Document doc = docs.getFirst();
    assertThat(doc.getIcon(), is(notNullValue()));
    assertThat(doc.getBody(), containsString("## <img width=\"25\" src=\"data:image/svg+xml;base64,"));
}
/**
 * Maps a topic's cleanup.policy config to the corresponding enum value.
 * A combined policy such as "compact,delete" maps to COMPACT_DELETE.
 *
 * @throws KsqlException if the policy is absent or unrecognised
 */
@Override
public TopicCleanupPolicy getTopicCleanupPolicy(final String topicName) {
    // Normalise to lower case; an absent config yields the empty string.
    final String policy = getTopicConfig(topicName)
        .getOrDefault(TopicConfig.CLEANUP_POLICY_CONFIG, "")
        .toLowerCase();
    switch (policy) {
      case "compact":
        return TopicCleanupPolicy.COMPACT;
      case "delete":
        return TopicCleanupPolicy.DELETE;
      default:
        // Anything else mentioning both keywords is a combined policy.
        if (policy.contains("compact") && policy.contains("delete")) {
            return TopicCleanupPolicy.COMPACT_DELETE;
        }
        throw new KsqlException("Could not get the topic configs for : " + topicName);
    }
}
@Test
public void shouldGetTopicCleanUpPolicyDelete() {
    // Given: a topic whose cleanup.policy is overridden to "delete".
    givenTopicConfigs(
        "foo",
        overriddenConfigEntry(CLEANUP_POLICY_CONFIG, CLEANUP_POLICY_DELETE)
    );

    // When / Then: the client reports the DELETE policy.
    assertThat(kafkaTopicClient.getTopicCleanupPolicy("foo"), is(TopicCleanupPolicy.DELETE));
}
/**
 * Looks up a single row mapped to {@code cls}, delegating to the shared
 * template-based implementation with this operator's JdbcTemplate.
 */
@Override
public <R> R queryOne(String sql, Class<R> cls) {
    return queryOne(jdbcTemplate, sql, cls);
}
@Test
void testQueryOne2() {
    final String sql = "SELECT * FROM config_info WHERE id = ? AND data_id = ? AND group_id = ?";
    MockConfigInfo expected = new MockConfigInfo();
    expected.setId(1L);
    expected.setDataId("test");
    expected.setGroup("test");
    Object[] args = {expected.getId(), expected.getDataId(), expected.getGroup()};
    // Stub the template so the operator's delegation is what's exercised.
    when(jdbcTemplate.queryForObject(sql, args, MockConfigInfo.class)).thenReturn(expected);
    assertEquals(operate.queryOne(sql, args, MockConfigInfo.class), expected);
}
/** Returns where this extension attaches on the keyboard (e.g. top row, bottom row). */
@KeyboardExtension.KeyboardExtensionType
public int getExtensionType() {
    return mExtensionType;
}
@Test
public void testGetCurrentKeyboardExtensionTopDefault() throws Exception {
    // The default enabled top-row add-on should be the built-in "small" top row.
    KeyboardExtension extension = AnyApplication.getTopRowFactory(getApplicationContext()).getEnabledAddOn();
    Assert.assertNotNull(extension);
    // Stable add-on id declared in the add-on XML; changing it would break user settings.
    Assert.assertEquals("5d945f40-ded5-11e0-9572-0800200c9a66", extension.getId());
    Assert.assertEquals(KeyboardExtension.TYPE_TOP, extension.getExtensionType());
    Assert.assertEquals(R.xml.ext_kbd_top_row_small, extension.getKeyboardResId());
}
/** Formats the event via the converter chain; an unstarted layout emits nothing. */
public String doLayout(ILoggingEvent event) {
    return isStarted() ? writeLoopOnConverters(event) : CoreConstants.EMPTY_STRING;
}
@Test
public void testOK() {
    pl.setPattern("%d %le [%t] %lo{30} - %m%n");
    pl.start();
    String val = pl.doLayout(getEventObject());

    // Expected shape, e.g.:
    // 2006-02-01 22:38:06,212 INFO [main] c.q.l.pattern.ConverterTest - Some message
    // 2010-12-29 19:04:26,137 INFO [pool-1-thread-47] c.q.l.c.pattern.ConverterTest - Some message
    String regex = ISO_REGEX + " INFO " + MAIN_REGEX + " c.q.l.c.pattern.ConverterTest - Some message\\s*";
    assertThat(val, matchesPattern(regex));
}
/** Persists {@code data} at {@code path} as a persistent node, creating it if absent. */
@Override
public void createOrUpdate(final String path, final Object data) {
    zkClient.createOrUpdate(path, data, CreateMode.PERSISTENT);
}
@Test
public void testOnPluginChangedCreate() {
    PluginData pluginData = PluginData.builder().id(MOCK_ID).name(MOCK_NAME).config(MOCK_CONFIG).build();
    String expectedPath = DefaultPathConstants.buildPluginPath(pluginData.getName());
    zookeeperDataChangedListener.onPluginChanged(ImmutableList.of(pluginData), DataEventTypeEnum.CREATE);
    // A CREATE event must persist the plugin node exactly once.
    verify(zkClient, times(1)).createOrUpdate(expectedPath, pluginData, CreateMode.PERSISTENT);
}
/**
 * Resolves a property name to its config definition.
 * Request-scoped properties take precedence over plain ksql properties;
 * ksql.streams.* intentionally falls through to the streams resolver.
 */
@Override
public Optional<ConfigItem> resolve(final String propertyName, final boolean strict) {
    if (propertyName.startsWith(KSQL_REQUEST_CONFIG_PROPERTY_PREFIX)) {
        return resolveRequestConfig(propertyName);
    }
    final boolean plainKsqlProperty = propertyName.startsWith(KSQL_CONFIG_PROPERTY_PREFIX)
        && !propertyName.startsWith(KSQL_STREAMS_PREFIX);
    if (plainKsqlProperty) {
        return resolveKsqlConfig(propertyName);
    }
    return resolveStreamsConfig(propertyName, strict);
}
@Test
public void shouldResolveConsumerPrefixedConsumerConfig() {
    // A consumer-prefixed streams property resolves to the unprefixed consumer config def.
    final String prefixed = StreamsConfig.CONSUMER_PREFIX + ConsumerConfig.FETCH_MIN_BYTES_CONFIG;
    assertThat(resolver.resolve(prefixed, true),
        is(resolvedItem(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, CONSUMER_CONFIG_DEF)));
}
/**
 * Blocks until the most recently distributed command (if any) in {@code previousCommands}
 * has been consumed, but only for statement classes the sync predicate selects.
 */
@Override
public void waitFor(
    final KsqlEntityList previousCommands,
    final Class<? extends Statement> statementClass) {
    if (!mustSync.test(statementClass)) {
        return;
    }
    // Scan newest-first for the last distributed command and wait on its sequence number only.
    for (int i = previousCommands.size() - 1; i >= 0; i--) {
        final KsqlEntity candidate = previousCommands.get(i);
        if (!(candidate instanceof CommandStatusEntity)) {
            continue;
        }
        final long seqNum = ((CommandStatusEntity) candidate).getCommandSequenceNumber();
        try {
            commandQueue.ensureConsumedPast(seqNum, timeout);
        } catch (final InterruptedException e) {
            throw new KsqlRestException(Errors.serverShuttingDown());
        } catch (final TimeoutException e) {
            throw new KsqlRestException(Errors.commandQueueCatchUpTimeout(seqNum));
        }
        return;
    }
}
@Test
public void shouldOnlyWaitForMostRecentDistributedStatements() throws Exception {
    // Given: syncing enabled for every statement type, and two command status entities.
    givenSyncWithPredicate(clazz -> true);
    givenEntities(entity1, commandStatusEntity1, entity2, commandStatusEntity2);

    // When:
    commandQueueSync.waitFor(entities, CreateStreamAsSelect.class);

    // Then: only the most recent command's sequence number (2) is awaited, never the older one.
    verify(commandQueue, times(1)).ensureConsumedPast(2L, Duration.ZERO);
    verify(commandQueue, never()).ensureConsumedPast(1L, Duration.ZERO);
}
/**
 * Fills {@code buf} from {@code f}, staging bytes through {@code temp} because a direct
 * buffer cannot be written by InputStream.read directly.
 *
 * @throws EOFException if the stream ends before the buffer is full
 */
static void readFullyDirectBuffer(InputStream f, ByteBuffer buf, byte[] temp) throws IOException {
    int lastRead = 0;
    // Each chunk is bounded both by the buffer's remaining space and the staging array size.
    for (int chunk = Math.min(buf.remaining(), temp.length);
         chunk > 0;
         chunk = Math.min(buf.remaining(), temp.length)) {
        lastRead = f.read(temp, 0, chunk);
        if (lastRead < 0) {
            break;
        }
        buf.put(temp, 0, lastRead);
    }
    if (lastRead < 0 && buf.remaining() > 0) {
        throw new EOFException("Reached the end of stream with " + buf.remaining() + " bytes left to read");
    }
}
@Test
public void testDirectReadFullyPosition() throws Exception {
    // Start filling the direct buffer at position 3 and remember that position via mark().
    final ByteBuffer readBuffer = ByteBuffer.allocateDirect(10);
    readBuffer.position(3);
    readBuffer.mark();

    // Stream yields its data in chunks of 2, 3 and 3 bytes.
    MockInputStream stream = new MockInputStream(2, 3, 3);

    DelegatingSeekableInputStream.readFullyDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(10, readBuffer.position());
    Assert.assertEquals(10, readBuffer.limit());

    // A second call on an already-full buffer must be a no-op.
    DelegatingSeekableInputStream.readFullyDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(10, readBuffer.position());
    Assert.assertEquals(10, readBuffer.limit());

    // Rewind to the mark and compare the 7 bytes that were actually read.
    readBuffer.reset();
    Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY, 0, 7), readBuffer);
}
/**
 * Stops and cleans up the Kubernetes cluster with the given id.
 *
 * @throws FlinkException if the stop/cleanup call fails for any reason
 */
@Override
public void killCluster(String clusterId) throws FlinkException {
    try {
        client.stopAndCleanupCluster(clusterId);
    } catch (Exception e) {
        // Bug fix: preserve the underlying failure as the cause instead of discarding it,
        // so operators can diagnose why the cluster could not be killed.
        throw new FlinkException("Could not kill Kubernetes cluster " + clusterId, e);
    }
}
@Test
void testKillCluster() throws Exception {
    // Deploy a session cluster, then kill it and check which resources disappear.
    flinkConfig.set(DeploymentOptions.TARGET, KubernetesDeploymentTarget.SESSION.getName());
    deploySessionCluster();
    assertThat(kubeClient.services().list().getItems()).hasSize(2);

    descriptor.killCluster(CLUSTER_ID);

    // Mock kubernetes server do not delete the accompanying resources by gc.
    assertThat(kubeClient.apps().deployments().list().getItems()).isEmpty();
    assertThat(kubeClient.services().list().getItems()).hasSize(2);
    assertThat(kubeClient.configMaps().list().getItems()).hasSize(1);
}
/**
 * Extracts the major version number from a Linux kernel version string
 * (e.g. "5.16.12-200.fc35.x86_64" yields 5).
 *
 * @return the major version, or -1 when not on Linux or when it cannot be parsed
 */
static int linuxMajorVersion0(String version, boolean isLinux) {
    if (!isLinux) {
        return -1;
    }
    // Everything before the first dot is the major component (the whole string if no dot).
    int firstDot = version.indexOf('.');
    String major = firstDot < 0 ? version : version.substring(0, firstDot);
    try {
        return Integer.parseInt(major);
    } catch (NumberFormatException e) {
        return -1;
    }
}
@Test
public void test_linuxMajorVersion0_whenNotIsLinux() {
    // When the OS is not Linux, the version string must be ignored entirely.
    int major = OS.linuxMajorVersion0("5.16.12-200.fc35.x86_64", false);
    assertEquals(-1, major);
}
/**
 * Reads the controller list from the device's running datastore over NETCONF.
 * Only the local mastership owner talks to the device; non-masters log a warning
 * and return an empty list.
 *
 * @return controllers parsed from the device config (empty on error or when not master)
 */
@Override
public List<ControllerInfo> getControllers() {
    DriverHandler handler = handler();
    NetconfController controller = handler.get(NetconfController.class);
    MastershipService mastershipService = handler.get(MastershipService.class);
    DeviceId deviceId = handler.data().deviceId();
    Preconditions.checkNotNull(controller, "Netconf controller is null");
    List<ControllerInfo> controllers = new ArrayList<>();
    if (mastershipService.isLocalMaster(deviceId)) {
        try {
            String reply = controller.getNetconfDevice(deviceId).getSession().
                    getConfig(DatastoreId.RUNNING);
            log.debug("Reply XML {}", reply);
            controllers.addAll(XmlConfigParser.parseStreamControllers(XmlConfigParser.
                    loadXml(new ByteArrayInputStream(reply.getBytes(StandardCharsets.UTF_8)))));
        } catch (NetconfException e) {
            // Best effort: a communication failure yields an empty list rather than propagating.
            log.error("Cannot communicate with device {} ", deviceId, e);
        }
    } else {
        log.warn("I'm not master for {} please use master, {} to execute command",
                 deviceId,
                 mastershipService.getMasterFor(deviceId));
    }
    return controllers;
}
@Test
public void testGetControllers() {
    // The driver must always return a (possibly empty) controller list, never null.
    assertNotNull(netconfCtlConfig.getControllers());
}
/**
 * Lazily creates the partner connection; once created, subsequent calls return the
 * cached binding regardless of the config passed in.
 */
public PartnerConnection createBinding( ConnectorConfig config ) throws ConnectionException {
  if ( this.binding != null ) {
    return this.binding;
  }
  this.binding = new PartnerConnection( config );
  return this.binding;
}
@Test
public void testCreateBinding() throws KettleException, ConnectionException {
    SalesforceConnection conn = new SalesforceConnection( null, "http://localhost:1234", "aUser", "aPass" );
    ConnectorConfig config = new ConnectorConfig();
    config.setAuthEndpoint( Connector.END_POINT );
    config.setManualLogin( true ); // Required to prevent connection attempt during test

    // The binding is created lazily on the first call...
    assertNull( conn.getBinding() );
    conn.createBinding( config );
    PartnerConnection binding1 = conn.getBinding();
    // ...and cached: a second call must return the very same instance.
    conn.createBinding( config );
    PartnerConnection binding2 = conn.getBinding();
    assertSame( binding1, binding2 );
}
/**
 * Registers a user-defined function and persists the change to the edit log.
 *
 * @param f the function to register
 * @param allowExists whether an already-registered identical function is tolerated
 * @throws UserException if the function cannot be added (e.g. duplicate with allowExists=false)
 */
public synchronized void userAddFunction(Function f, boolean allowExists) throws UserException {
    // NOTE(review): the hard-coded 'false' is a separate flag on addFunction (allowExists is
    // passed as the third argument) — confirm its meaning against addFunction's signature.
    addFunction(f, false, allowExists);
    // Log only after the in-memory registration succeeded.
    GlobalStateMgr.getCurrentState().getEditLog().logAddFunction(f);
}
@Test public void testUserAddFunctionGivenFunctionAlreadyExists() throws UserException { FunctionName name = new FunctionName(null, "addIntInt"); name.setAsGlobalFunction(); final Type[] argTypes = {Type.INT, Type.INT}; Function f = new Function(name, argTypes, Type.INT, false); // Add the UDF for the first time globalFunctionMgr.userAddFunction(f, false); // Attempt to add the same UDF again, expecting an exception Assert.assertThrows(UserException.class, () -> globalFunctionMgr.userAddFunction(f, false)); }
/**
 * Builds the plugin info for a package-repository plugin, combining its repository-level
 * and package-level configuration with the generic plugin settings/view.
 *
 * @throws RuntimeException if the plugin returns a null repository or package configuration
 */
@Override
public PackageMaterialPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
    RepositoryConfiguration repositoryConfiguration = extension.getRepositoryConfiguration(descriptor.id());
    com.thoughtworks.go.plugin.api.material.packagerepository.PackageConfiguration packageConfiguration = extension.getPackageConfiguration(descriptor.id());

    if (repositoryConfiguration == null) {
        throw new RuntimeException(format("Plugin[%s] returned null repository configuration", descriptor.id()));
    }
    if (packageConfiguration == null) {
        throw new RuntimeException(format("Plugin[%s] returned null package configuration", descriptor.id()));
    }
    PluggableInstanceSettings pluginSettingsAndView = getPluginSettingsAndView(descriptor, extension);
    return new PackageMaterialPluginInfo(descriptor,
            new PluggableInstanceSettings(packageRepoConfigurations(repositoryConfiguration)),
            new PluggableInstanceSettings(packageRepoConfigurations(packageConfiguration)),
            pluginSettingsAndView);
}
@Test
public void shouldBuildPluginInfo() {
    GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();

    PackageMaterialPluginInfo pluginInfo = new PackageMaterialPluginInfoBuilder(extension).pluginInfoFor(descriptor);

    // Package-level settings expected from the stubbed extension.
    List<PluginConfiguration> packageSettings = List.of(
            new PluginConfiguration("username", new PackageMaterialMetadata(true, false, false, "foo", 1)),
            new PluginConfiguration("password", new PackageMaterialMetadata(true, true, true, "", 2))
    );
    // Repository-level settings.
    List<PluginConfiguration> repoSettings = List.of(
            new PluginConfiguration("foo", new PackageMaterialMetadata(true, false, true, "", 1)),
            new PluginConfiguration("bar", new PackageMaterialMetadata(true, true, true, "", 2))
    );
    List<PluginConfiguration> pluginSettings = List.of(new PluginConfiguration("k1", new Metadata(true, false)));

    assertThat(pluginInfo.getDescriptor(), is(descriptor));
    assertThat(pluginInfo.getExtensionName(), is("package-repository"));
    assertThat(pluginInfo.getPackageSettings(), is(new PluggableInstanceSettings(packageSettings, null)));
    assertThat(pluginInfo.getRepositorySettings(), is(new PluggableInstanceSettings(repoSettings, null)));
    assertThat(pluginInfo.getPluginSettings(), is(new PluggableInstanceSettings(pluginSettings, new PluginView("some-html"))));
}
/**
 * Normalises raw discovery-strategy properties against their definitions:
 * re-keys dash-less aliases to their canonical dashed key, converts each value to the
 * declared type, runs the optional validator, and rejects missing mandatory or
 * unknown properties.
 * NOTE: mutates the passed-in {@code properties} map while re-keying aliases.
 *
 * @throws InvalidConfigurationException on a missing mandatory or an unknown property
 */
static Map<String, Comparable> prepareProperties(Map<String, Comparable> properties, Collection<PropertyDefinition> propertyDefinitions) {
    Map<String, Comparable> mappedProperties = createHashMap(propertyDefinitions.size());
    for (PropertyDefinition propertyDefinition : propertyDefinitions) {
        String propertyKey = propertyDefinition.key();
        // Accept "some-key" supplied as "somekey": rewrite it under the canonical dashed key.
        if (properties.containsKey(propertyKey.replace("-", ""))) {
            properties.put(propertyKey, properties.remove(propertyKey.replace("-", "")));
        }
        if (!properties.containsKey(propertyKey)) {
            if (!propertyDefinition.optional()) {
                throw new InvalidConfigurationException(
                        String.format("Missing property '%s' on discovery strategy", propertyKey));
            }
            continue;
        }
        Comparable value = properties.get(propertyKey);
        TypeConverter typeConverter = propertyDefinition.typeConverter();
        Comparable mappedValue = typeConverter.convert(value);

        ValueValidator validator = propertyDefinition.validator();
        if (validator != null) {
            validator.validate(mappedValue);
        }
        mappedProperties.put(propertyKey, mappedValue);
    }
    // Anything left in properties that no definition claimed is an error.
    verifyNoUnknownProperties(mappedProperties, properties);
    return mappedProperties;
}
@Test(expected = InvalidConfigurationException.class) public void unknownProperty() { // given Map<String, Comparable> properties = new HashMap<>(singletonMap(PROPERTY_KEY_1, (Comparable) PROPERTY_VALUE_1)); Collection<PropertyDefinition> propertyDefinitions = emptyList(); // when prepareProperties(properties, propertyDefinitions); // then // throw exception }
/** This transformer unconditionally reports that it produced a resource. */
@Override
public boolean hasTransformedResource() {
    return true;
}
@Test
public void testHasTransformedResource() {
    // The transformer always claims to have produced a resource.
    assertTrue(transformer.hasTransformedResource());
}
/**
 * Invokes {@code call}, rethrowing any raised IOException as an unchecked exception
 * (delegates to the callable's own {@code unchecked()} wrapper).
 */
public static <T> T uncheckIOExceptions(CallableRaisingIOE<T> call) {
    return call.unchecked();
}
@Test
public void testUncheckIOExceptions() throws Throwable {
    final IOException raised = new IOException("text");
    // The checked IOException must surface as an UncheckedIOException...
    final UncheckedIOException ex = intercept(UncheckedIOException.class, "text",
        () -> uncheckIOExceptions(() -> {
            throw raised;
        }));
    // ...whose cause is the very same exception instance, not a copy.
    Assertions.assertThat(ex.getCause())
        .describedAs("Cause of %s", ex)
        .isSameAs(raised);
}
/**
 * Reads the watched attribute via JMX.
 * Returns null on any IO/JMX failure (e.g. an ambiguous ObjectName pattern) —
 * a deliberate best-effort so one broken MBean does not break metric collection.
 */
@Override
public Object getValue() {
    try {
        return mBeanServerConn.getAttribute(getObjectName(), attributeName);
    } catch (IOException | JMException e) {
        return null;
    }
}
@Test
public void returnsNullIfObjectNamePatternAmbiguous() throws Exception {
    // A wildcard pattern can match several MBeans; the gauge degrades to null instead of failing.
    ObjectName ambiguousPattern = new ObjectName("JmxAttributeGaugeTest:type=test,*");
    JmxAttributeGauge gauge = new JmxAttributeGauge(mBeanServer, ambiguousPattern, "Value");
    assertThat(gauge.getValue()).isNull();
}
/**
 * Metered reverse-range query: serialises both bounds, delegates to the wrapped store,
 * and wraps the iterator so range-sensor metrics are recorded on iteration.
 * NOTE(review): a null bound serialises to null and is then passed to Bytes.wrap —
 * presumably this is what produces the NullPointerException callers observe; confirm.
 */
@Override
public KeyValueIterator<K, V> reverseRange(final K from, final K to) {
    final byte[] serFrom = from == null ? null : serdes.rawKey(from);
    final byte[] serTo = to == null ? null : serdes.rawKey(to);
    return new MeteredKeyValueIterator(
        wrapped().reverseRange(Bytes.wrap(serFrom), Bytes.wrap(serTo)),
        rangeSensor
    );
}
@Test
public void shouldThrowNullPointerOnReverseRangeIfFromIsNull() {
    setUpWithoutContext();
    // A null lower bound is rejected with a NullPointerException.
    assertThrows(NullPointerException.class, () -> metered.reverseRange(null, "to"));
}
/**
 * Fetches or creates the live segment for {@code segmentId}, then expires segments
 * that have fallen out of the retention window at {@code streamTime}.
 * The returned segment is obtained before cleanup, so it is guaranteed live.
 */
@Override
public KeyValueSegment getOrCreateSegmentIfLive(final long segmentId,
                                                final ProcessorContext context,
                                                final long streamTime) {
    final KeyValueSegment liveSegment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
    cleanupExpiredSegments(streamTime);
    return liveSegment;
}
@Test
public void shouldGetSegmentsWithinBackwardTimeRange() {
    // Advance stream time while creating five consecutive segments (ids 0..4).
    updateStreamTimeAndCreateSegment(0);
    updateStreamTimeAndCreateSegment(1);
    updateStreamTimeAndCreateSegment(2);
    updateStreamTimeAndCreateSegment(3);
    final long streamTime = updateStreamTimeAndCreateSegment(4);
    segments.getOrCreateSegmentIfLive(0, context, streamTime);
    segments.getOrCreateSegmentIfLive(1, context, streamTime);
    segments.getOrCreateSegmentIfLive(2, context, streamTime);
    segments.getOrCreateSegmentIfLive(3, context, streamTime);
    segments.getOrCreateSegmentIfLive(4, context, streamTime);

    // Querying [0, 2*SEGMENT_INTERVAL] backwards (forward=false) yields ids 2,1,0 newest-first.
    final List<KeyValueSegment> segments = this.segments.segments(0, 2 * SEGMENT_INTERVAL, false);
    assertEquals(3, segments.size());
    assertEquals(0, segments.get(2).id);
    assertEquals(1, segments.get(1).id);
    assertEquals(2, segments.get(0).id);
}
/**
 * Detects the stream's charset from the given lookahead buffer.
 * A detected BOM wins: the charset is taken from it and the BOM bytes are skipped
 * from the stream. Otherwise a heuristic detection is attempted (seeded with userEncoding).
 * Side effects: sets {@code detectedCharset} and may advance {@code stream}.
 *
 * @return true if a charset was determined
 */
private boolean detectCharset(byte[] buf) throws IOException {
    ByteCharsetDetector detector = new ByteCharsetDetector(new CharsetValidation(), userEncoding);
    ByteOrderMark bom = detector.detectBOM(buf);
    if (bom != null) {
        detectedCharset = Charset.forName(bom.getCharsetName());
        // Consume the BOM so callers read pure content.
        stream.skip(bom.length());
        return true;
    }

    detectedCharset = detector.detect(buf);
    return detectedCharset != null;
}
@Test
public void should_detect_charset_from_BOM() {
    Path basedir = Paths.get("src/test/resources/org/sonar/scanner/scan/filesystem/");
    // A file without a BOM keeps the declared charset (US_ASCII here);
    // each BOM variant must override that declaration.
    assertThat(detectCharset(basedir.resolve("without_BOM.txt"), US_ASCII)).isEqualTo(US_ASCII);
    assertThat(detectCharset(basedir.resolve("UTF-8.txt"), US_ASCII)).isEqualTo(UTF_8);
    assertThat(detectCharset(basedir.resolve("UTF-16BE.txt"), US_ASCII)).isEqualTo(UTF_16BE);
    assertThat(detectCharset(basedir.resolve("UTF-16LE.txt"), US_ASCII)).isEqualTo(UTF_16LE);
    assertThat(detectCharset(basedir.resolve("UTF-32BE.txt"), US_ASCII)).isEqualTo(MetadataGenerator.UTF_32BE);
    assertThat(detectCharset(basedir.resolve("UTF-32LE.txt"), US_ASCII)).isEqualTo(MetadataGenerator.UTF_32LE);
}
/**
 * Converts a header value to its HTTP string form.
 * Date and Locale values get dedicated HTTP formatting when local conversion is enabled
 * on the exchange; everything else goes through Camel's generic type converter.
 */
protected String convertHeaderValueToString(Exchange exchange, Object headerValue) {
    final boolean localConversion =
            (headerValue instanceof Date || headerValue instanceof Locale)
            && convertDateAndLocaleLocally(exchange);
    if (localConversion) {
        return headerValue instanceof Date
                ? toHttpDate((Date) headerValue)
                : toHttpLanguage((Locale) headerValue);
    }
    return exchange.getContext().getTypeConverter().convertTo(String.class, headerValue);
}
@Test
public void testConvertDateTypeConverter() {
    DefaultHttpBinding binding = new DefaultHttpBinding();
    Date headerDate = new Date();
    Exchange exchange = super.createExchangeWithBody(null);
    // With locale conversion disabled the generic converter (Date.toString) is used.
    exchange.setProperty(DefaultHttpBinding.DATE_LOCALE_CONVERSION, false);
    String converted = binding.convertHeaderValueToString(exchange, headerDate);
    assertEquals(converted, headerDate.toString());
}
/** Converts the table to {@code type} without implying header entries. */
@Override
public <T> T convert(DataTable dataTable, Type type) {
    return convert(dataTable, type, false);
}
@Test
void to_map_of_object_to_object__without_implied_entries__prefers__default_table_entry_converter() {
    // Two-column table of airport codes, with no implied header row.
    DataTable table = parse("",
        "| KMSY | KSFO |",
        "| KSFO | KSEA |",
        "| KSEA | KJFK |",
        "| KJFK | AMS |");

    Map<AirPortCode, AirPortCode> expected = new HashMap<AirPortCode, AirPortCode>() {
        {
            put(new AirPortCode("KMSY"), new AirPortCode("KSFO"));
            put(new AirPortCode("KSFO"), new AirPortCode("KSEA"));
            put(new AirPortCode("KSEA"), new AirPortCode("KJFK"));
            put(new AirPortCode("KJFK"), new AirPortCode("AMS"));
        }
    };

    // With a default cell transformer registered, cell-by-cell conversion must be preferred.
    registry.setDefaultDataTableCellTransformer(JACKSON_TABLE_CELL_BY_TYPE_CONVERTER);
    assertEquals(expected, converter.convert(table, MAP_OF_AIR_PORT_CODE_TO_AIR_PORT_CODE));
}
/**
 * Returns the metric keys deprecated in SonarQube 10.5 as a single-quoted,
 * comma-separated list, e.g. {@code 'a', 'b'}.
 */
public static String getDeprecatedMetricsInSonarQube105() {
    final String[] deprecatedKeys = {
        "new_blocker_violations",
        "new_critical_violations",
        "new_major_violations",
        "new_minor_violations",
        "new_info_violations",
        "blocker_violations",
        "critical_violations",
        "major_violations",
        "minor_violations",
        "info_violations"
    };
    // Quote each key and separate with ", " — equivalent to joining on "', '".
    final StringBuilder joined = new StringBuilder("'");
    for (int i = 0; i < deprecatedKeys.length; i++) {
        if (i > 0) {
            joined.append("', '");
        }
        joined.append(deprecatedKeys[i]);
    }
    return joined.append("'").toString();
}
@Test
public void getDeprecatedMetricsInSonarQube105_shouldReturnExactString() {
    // Pin the exact quoted, comma-separated list exposed to API consumers.
    String expected = "'new_blocker_violations', 'new_critical_violations', 'new_major_violations', 'new_minor_violations', "
        + "'new_info_violations', 'blocker_violations', 'critical_violations', 'major_violations', 'minor_violations', 'info_violations'";
    assertThat(MeasuresWsModule.getDeprecatedMetricsInSonarQube105()).isEqualTo(expected);
}
/**
 * Attempts to satisfy each job's missing resource requirements, first against
 * registered task managers, then against pending ones (possibly allocating new
 * pending task managers), and finally tops up to the required total resources.
 *
 * @return the allocation result (assignments, new pending TMs, unfulfillable jobs)
 */
@Override
public ResourceAllocationResult tryFulfillRequirements(
        Map<JobID, Collection<ResourceRequirement>> missingResources,
        TaskManagerResourceInfoProvider taskManagerResourceInfoProvider,
        BlockedTaskManagerChecker blockedTaskManagerChecker) {
    final ResourceAllocationResult.Builder resultBuilder = ResourceAllocationResult.builder();

    // Snapshot of currently available resources, excluding blocked task managers.
    final List<InternalResourceInfo> registeredResources =
            getAvailableResources(
                    taskManagerResourceInfoProvider, resultBuilder, blockedTaskManagerChecker);
    final List<InternalResourceInfo> pendingResources =
            getPendingResources(taskManagerResourceInfoProvider, resultBuilder);

    // Total of everything registered or pending; grows as new pending TMs are allocated below.
    ResourceProfile totalCurrentResources =
            Stream.concat(registeredResources.stream(), pendingResources.stream())
                    .map(internalResourceInfo -> internalResourceInfo.totalProfile)
                    .reduce(ResourceProfile.ZERO, ResourceProfile::merge);

    for (Map.Entry<JobID, Collection<ResourceRequirement>> resourceRequirements :
            missingResources.entrySet()) {
        final JobID jobId = resourceRequirements.getKey();

        // Try registered TMs first; whatever is left over falls through to pending resources.
        final Collection<ResourceRequirement> unfulfilledJobRequirements =
                tryFulfillRequirementsForJobWithResources(
                        jobId, resourceRequirements.getValue(), registeredResources);

        if (!unfulfilledJobRequirements.isEmpty()) {
            totalCurrentResources =
                    totalCurrentResources.merge(
                            tryFulfillRequirementsForJobWithPendingResources(
                                    jobId,
                                    unfulfilledJobRequirements,
                                    pendingResources,
                                    resultBuilder));
        }
    }

    // Unlike tryFulfillRequirementsForJobWithPendingResources, which updates pendingResources
    // to the latest state after a new PendingTaskManager is created,
    // tryFulFillRequiredResources will not update pendingResources even after new
    // PendingTaskManagers are created.
    // This is because the pendingResources are no longer needed afterward.
    tryFulFillRequiredResources(
            registeredResources, pendingResources, totalCurrentResources, resultBuilder);
    return resultBuilder.build();
}
@Test
void testFulfillRequirementWithRegisteredResources() {
    // A single registered task manager with 10 default slots should be able to
    // satisfy one large (8x) requirement plus two UNKNOWN-profile requirements
    // without allocating anything pending.
    final TaskManagerInfo taskManager =
            new TestingTaskManagerInfo(
                    DEFAULT_SLOT_RESOURCE.multiply(10),
                    DEFAULT_SLOT_RESOURCE.multiply(10),
                    DEFAULT_SLOT_RESOURCE);
    final JobID jobId = new JobID();
    final List<ResourceRequirement> requirements = new ArrayList<>();
    final ResourceProfile largeResource = DEFAULT_SLOT_RESOURCE.multiply(8);
    final TaskManagerResourceInfoProvider taskManagerResourceInfoProvider =
            TestingTaskManagerResourceInfoProvider.newBuilder()
                    .setRegisteredTaskManagersSupplier(() -> Collections.singleton(taskManager))
                    .build();
    requirements.add(ResourceRequirement.create(largeResource, 1));
    requirements.add(ResourceRequirement.create(ResourceProfile.UNKNOWN, 2));

    final ResourceAllocationResult result =
            ANY_MATCHING_STRATEGY.tryFulfillRequirements(
                    Collections.singletonMap(jobId, requirements),
                    taskManagerResourceInfoProvider,
                    resourceID -> false);

    // Everything must land on the registered task manager: no unfulfillable jobs,
    // no pending allocations, and the expected split of 2 default + 1 large slot.
    assertThat(result.getUnfulfillableJobs()).isEmpty();
    assertThat(result.getAllocationsOnPendingResources()).isEmpty();
    assertThat(result.getPendingTaskManagersToAllocate()).isEmpty();
    assertThat(
                    result.getAllocationsOnRegisteredResources()
                            .get(jobId)
                            .get(taskManager.getInstanceId())
                            .getResourceCount(DEFAULT_SLOT_RESOURCE))
            .isEqualTo(2);
    assertThat(
                    result.getAllocationsOnRegisteredResources()
                            .get(jobId)
                            .get(taskManager.getInstanceId())
                            .getResourceCount(largeResource))
            .isEqualTo(1);
}
/**
 * Downloads the given binary from the server and writes it to its local file.
 * Synchronized so only one download per instance runs at a time.
 *
 * @param downloadableFile describes the remote URL and the local target file
 * @return {@code true} when the body was written; {@code false} when the response had no entity
 * @throws IOException              on network/IO failures or invalid server responses
 * @throws GeneralSecurityException on TLS setup failures from the underlying client
 */
protected synchronized boolean download(final DownloadableFile downloadableFile) throws IOException, GeneralSecurityException {
    File toDownload = downloadableFile.getLocalFile();
    LOG.info("Downloading {}", toDownload);
    String url = downloadableFile.url(urlGenerator);
    final HttpRequestBase request = new HttpGet(url);
    request.setConfig(RequestConfig.custom().setConnectTimeout(HTTP_TIMEOUT_IN_MILLISECONDS).build());
    // Client and response are both closed by try-with-resources, even on failure.
    try (CloseableHttpClient httpClient = httpClientBuilder.build();
         CloseableHttpResponse response = httpClient.execute(request)) {
        LOG.info("Got server response");
        if (response.getEntity() == null) {
            // No body at all: report failure without raising.
            LOG.error("Unable to read file from the server response");
            return false;
        }
        // Presumably raises on non-2xx statuses before any bytes are written
        // (the 404 test expects an IOException here) -- behavior defined elsewhere.
        handleInvalidResponse(response, url);
        try (BufferedOutputStream outStream = new BufferedOutputStream(new FileOutputStream(downloadableFile.getLocalFile()))) {
            response.getEntity().writeTo(outStream);
            LOG.info("Piped the stream to {}", downloadableFile);
        }
    }
    return true;
}
@Test
public void shouldThrowExceptionInCaseOf404() {
    // A 404 from the server must surface as an IOException that explains the
    // likely server/agent version mismatch, rather than failing silently.
    ServerBinaryDownloader downloader = new ServerBinaryDownloader(
            new GoAgentServerHttpClientBuilder(null, SslVerificationMode.NONE, null, null, null),
            ServerUrlGeneratorMother.generatorWithoutSubPathFor("https://localhost:" + server.getSecurePort() + "/go/not-found"));
    assertThatThrownBy(() -> downloader.download(DownloadableFile.AGENT))
            .isInstanceOf(IOException.class)
            .hasMessageContaining("This agent might be incompatible with your GoCD Server. Please fix the version mismatch between GoCD Server and GoCD Agent.");
}
/**
 * Creates one records iterator per checkpointed shard and starts reading them.
 * The records queue is sized proportionally to the number of shards.
 *
 * @throws TransientKinesisException if a shard iterator cannot be created
 */
void start() throws TransientKinesisException {
    ImmutableMap.Builder<String, ShardRecordsIterator> shardsMap = ImmutableMap.builder();
    for (ShardCheckpoint checkpoint : initialCheckpoint) {
        shardsMap.put(checkpoint.getShardId(), createShardIterator(kinesis, checkpoint));
    }
    shardIteratorsMap.set(shardsMap.build());
    if (!shardIteratorsMap.get().isEmpty()) {
        // Capacity scales with shard count so one slow shard cannot starve others.
        recordsQueue = new ArrayBlockingQueue<>(queueCapacityPerShard * shardIteratorsMap.get().size());
        String streamName = initialCheckpoint.getStreamName();
        startReadingShards(shardIteratorsMap.get().values(), streamName);
    } else {
        // There are no shards to handle when restoring from an empty checkpoint. Empty checkpoints
        // are generated when the last shard handled by this pool was closed
        recordsQueue = new ArrayBlockingQueue<>(1);
    }
}
@Test
public void shouldStopReadingShardAfterReceivingShardClosedException() throws Exception {
    // First shard is closed and has no successors: it must be read exactly once
    // and then abandoned, while the second shard keeps being polled.
    when(firstIterator.readNextBatch()).thenThrow(KinesisShardClosedException.class);
    when(firstIterator.findSuccessiveShardRecordIterators()).thenReturn(Collections.emptyList());
    shardReadersPool.start();
    verify(firstIterator, timeout(TIMEOUT_IN_MILLIS).times(1)).readNextBatch();
    verify(secondIterator, timeout(TIMEOUT_IN_MILLIS).atLeast(2)).readNextBatch();
}
/**
 * Implements Spring Data's ZSCAN over a Redisson connection by issuing raw
 * ZSCAN commands and wrapping the paging state in a {@code KeyBoundCursor}.
 * Not usable in pipeline/transaction mode.
 */
@Override
public Cursor<Tuple> zScan(byte[] key, ScanOptions options) {
    return new KeyBoundCursor<Tuple>(key, 0, options) {

        // Redis requires follow-up SCAN pages to hit the same node that served
        // the first page, so remember the client of the previous iteration.
        private RedisClient client;

        @Override
        protected ScanIteration<Tuple> doScan(byte[] key, long cursorId, ScanOptions options) {
            if (isQueueing() || isPipelined()) {
                throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline / transaction mode.");
            }

            // Build ZSCAN <key> <cursor> [MATCH pattern] [COUNT n]; the cursor is
            // an unsigned 64-bit value, hence the unsigned string conversions.
            List<Object> args = new ArrayList<Object>();
            args.add(key);
            args.add(Long.toUnsignedString(cursorId));
            if (options.getPattern() != null) {
                args.add("MATCH");
                args.add(options.getPattern());
            }
            if (options.getCount() != null) {
                args.add("COUNT");
                args.add(options.getCount());
            }

            RFuture<ListScanResult<Tuple>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, ZSCAN, args.toArray());
            ListScanResult<Tuple> res = syncFuture(f);
            client = res.getRedisClient();
            return new ScanIteration<Tuple>(Long.parseUnsignedLong(res.getPos()), res.getValues());
        }
    }.open();
}
@Test
public void testZScan() {
    // Two members inserted with increasing scores should come back from the scan
    // cursor in score order.
    connection.zAdd("key".getBytes(), 1, "value1".getBytes());
    connection.zAdd("key".getBytes(), 2, "value2".getBytes());

    Cursor<RedisZSetCommands.Tuple> t = connection.zScan("key".getBytes(), ScanOptions.scanOptions().build());
    assertThat(t.hasNext()).isTrue();
    assertThat(t.next().getValue()).isEqualTo("value1".getBytes());
    assertThat(t.hasNext()).isTrue();
    assertThat(t.next().getValue()).isEqualTo("value2".getBytes());
}
@Override public Optional<ProfileDescription> compare(final ProfileDescription next) { // Filter out profiles with matching checksum final Optional<ProfileDescription> found = repository.stream() .filter(description -> Objects.equals(description.getChecksum(), next.getChecksum())) .findFirst(); if(found.isPresent()) { // Found matching checksum. Determine if latest version if(found.get().isLatest()) { // Latest version already installed return Optional.empty(); } else { // Read last profile version from server as we found matching checksum for previous version return found; } } log.warn(String.format("Local only profile %s", next)); return Optional.empty(); }
@Test
public void testNewerVersionFound() throws Exception {
    // Local and remote profiles share the same checksum, but the remote one is
    // flagged as not-latest: the matcher must return the remote description so
    // the caller can refresh the local copy.
    final ProfileDescription local = new ProfileDescription(ProtocolFactory.get(),
            new Checksum(HashAlgorithm.md5, "d41d8cd98f00b204e9800998ecf8427e"), null);
    final ProfileDescription remote = new ProfileDescription(
            ProtocolFactory.get(), new Checksum(HashAlgorithm.md5, "d41d8cd98f00b204e9800998ecf8427e"), null) {
        @Override
        public boolean isLatest() {
            return false;
        }

        @Override
        public Optional<Local> getFile() {
            return Optional.of(new NullLocal("Profile.cyberduckprofile"));
        }
    };
    assertTrue(new ChecksumProfileMatcher(Stream.of(remote).collect(Collectors.toSet())).compare(local).isPresent());
}
/**
 * Parses DistCp command-line arguments into a {@link DistCpOptions} instance.
 * <p>
 * Boolean switches are mapped straight onto the builder; valued switches are
 * validated (numeric parsing, snapshot pairs) and invalid values are reported
 * as {@link IllegalArgumentException} with the offending input in the message.
 * <p>
 * Fix: {@code .trim()} was previously applied to the constant switch name
 * ({@code getSwitch().trim()}) for -blocksperchunk and -copybuffersize instead
 * of the user-supplied value -- a misleading no-op that has been removed
 * (getVal is responsible for normalizing the value).
 *
 * @param args raw command-line arguments
 * @return fully populated options object
 * @throws IllegalArgumentException if arguments cannot be parsed or are invalid
 */
public static DistCpOptions parse(String[] args)
    throws IllegalArgumentException {
  CommandLineParser parser = new CustomParser();

  CommandLine command;
  try {
    command = parser.parse(cliOptions, args, true);
  } catch (ParseException e) {
    throw new IllegalArgumentException("Unable to parse arguments. "
        + Arrays.toString(args), e);
  }

  DistCpOptions.Builder builder = parseSourceAndTargetPaths(command);

  // Plain boolean switches: presence of the flag toggles the behavior.
  // Note BLOCKING is inverted: the flag requests non-blocking execution.
  builder
      .withAtomicCommit(
          command.hasOption(DistCpOptionSwitch.ATOMIC_COMMIT.getSwitch()))
      .withSyncFolder(
          command.hasOption(DistCpOptionSwitch.SYNC_FOLDERS.getSwitch()))
      .withDeleteMissing(
          command.hasOption(DistCpOptionSwitch.DELETE_MISSING.getSwitch()))
      .withIgnoreFailures(
          command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch()))
      .withOverwrite(
          command.hasOption(DistCpOptionSwitch.OVERWRITE.getSwitch()))
      .withAppend(
          command.hasOption(DistCpOptionSwitch.APPEND.getSwitch()))
      .withSkipCRC(
          command.hasOption(DistCpOptionSwitch.SKIP_CRC.getSwitch()))
      .withBlocking(
          !command.hasOption(DistCpOptionSwitch.BLOCKING.getSwitch()))
      .withVerboseLog(
          command.hasOption(DistCpOptionSwitch.VERBOSE_LOG.getSwitch()))
      .withDirectWrite(
          command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch()))
      .withUseIterator(
          command.hasOption(DistCpOptionSwitch.USE_ITERATOR.getSwitch()))
      .withUpdateRoot(
          command.hasOption(DistCpOptionSwitch.UPDATE_ROOT.getSwitch()));

  // Snapshot-diff based copies require exactly a (from, to) snapshot pair.
  if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) {
    String[] snapshots = getVals(command,
        DistCpOptionSwitch.DIFF.getSwitch());
    checkSnapshotsArgs(snapshots);
    builder.withUseDiff(snapshots[0], snapshots[1]);
  }
  if (command.hasOption(DistCpOptionSwitch.RDIFF.getSwitch())) {
    String[] snapshots = getVals(command,
        DistCpOptionSwitch.RDIFF.getSwitch());
    checkSnapshotsArgs(snapshots);
    builder.withUseRdiff(snapshots[0], snapshots[1]);
  }

  if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) {
    builder.withFiltersFile(
        getVal(command, DistCpOptionSwitch.FILTERS.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) {
    builder.withLogPath(
        new Path(getVal(command, DistCpOptionSwitch.LOG_PATH.getSwitch())));
  }

  // The work path is only meaningful with -atomic; the builder validates that.
  if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) {
    final String workPath = getVal(command,
        DistCpOptionSwitch.WORK_PATH.getSwitch());
    if (workPath != null && !workPath.isEmpty()) {
      builder.withAtomicWorkPath(new Path(workPath));
    }
  }

  if (command.hasOption(DistCpOptionSwitch.TRACK_MISSING.getSwitch())) {
    builder.withTrackMissing(
        new Path(getVal(
            command,
            DistCpOptionSwitch.TRACK_MISSING.getSwitch())));
  }

  if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) {
    try {
      final Float mapBandwidth = Float.parseFloat(
          getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()));
      builder.withMapBandwidth(mapBandwidth);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Bandwidth specified is invalid: " +
          getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e);
    }
  }

  if (command.hasOption(
      DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) {
    try {
      final Integer numThreads = Integer.parseInt(getVal(command,
          DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()));
      builder.withNumListstatusThreads(numThreads);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException(
          "Number of liststatus threads is invalid: " + getVal(command,
              DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) {
    try {
      final Integer maps = Integer.parseInt(
          getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()));
      builder.maxMaps(maps);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Number of maps is invalid: " +
          getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) {
    builder.withCopyStrategy(
        getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
    builder.preserve(
        getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch()));
  }

  // Legacy switches kept for command-line compatibility; they have no effect.
  if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {
    LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated" +
        " option. Ignoring.");
  }
  if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) {
    LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated" +
        " option. Ignoring.");
  }

  if (command.hasOption(DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch())) {
    // Previously the code trimmed the switch constant instead of the value;
    // the no-op trim has been dropped (getVal normalizes the value).
    final String chunkSizeStr = getVal(command,
        DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch());
    try {
      int csize = Integer.parseInt(chunkSizeStr);
      // Negative values are clamped to 0 (chunking disabled).
      csize = csize > 0 ? csize : 0;
      LOG.info("Set distcp blocksPerChunk to " + csize);
      builder.withBlocksPerChunk(csize);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("blocksPerChunk is invalid: " +
          chunkSizeStr, e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch())) {
    final String copyBufferSizeStr = getVal(command,
        DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch());
    try {
      int copyBufferSize = Integer.parseInt(copyBufferSizeStr);
      builder.withCopyBufferSize(copyBufferSize);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("copyBufferSize is invalid: " +
          copyBufferSizeStr, e);
    }
  }

  return builder.build();
}
@Test
public void testParseWorkPath() {
    // No -tmp: work path stays unset.
    DistCpOptions options = OptionsParser.parse(new String[] {
        "hdfs://localhost:8020/source/first",
        "hdfs://localhost:8020/target/"});
    Assert.assertNull(options.getAtomicWorkPath());

    // -atomic without -tmp: still unset (a default is chosen elsewhere).
    options = OptionsParser.parse(new String[] {
        "-atomic",
        "hdfs://localhost:8020/source/first",
        "hdfs://localhost:8020/target/"});
    Assert.assertNull(options.getAtomicWorkPath());

    // -atomic with -tmp: the given work path is used verbatim.
    options = OptionsParser.parse(new String[] {
        "-atomic",
        "-tmp",
        "hdfs://localhost:8020/work",
        "hdfs://localhost:8020/source/first",
        "hdfs://localhost:8020/target/"});
    Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:8020/work"));

    // -tmp without -atomic is invalid and must be rejected.
    try {
        OptionsParser.parse(new String[] {
            "-tmp",
            "hdfs://localhost:8020/work",
            "hdfs://localhost:8020/source/first",
            "hdfs://localhost:8020/target/"});
        Assert.fail("work path was allowed without -atomic switch");
    } catch (IllegalArgumentException ignore) {}
}
/**
 * Resolves the Kafka cluster id by creating a short-lived admin client from the
 * worker's configuration and delegating to the admin-client overload. The
 * client is closed before returning.
 */
static String lookupKafkaClusterId(WorkerConfig config) {
    log.info("Creating Kafka admin client");
    try (Admin admin = Admin.create(config.originals())) {
        return lookupKafkaClusterId(admin);
    }
}
@Test
public void testLookupKafkaClusterIdTimeout() {
    // Force the next admin request to time out; the lookup must translate that
    // into a ConnectException rather than leaking the client-level failure.
    final Node broker1 = new Node(0, "dummyHost-1", 1234);
    final Node broker2 = new Node(1, "dummyHost-2", 1234);
    List<Node> cluster = Arrays.asList(broker1, broker2);
    MockAdminClient adminClient = new MockAdminClient.Builder().
        brokers(cluster).build();
    adminClient.timeoutNextRequest(1);

    assertThrows(ConnectException.class, () -> WorkerConfig.lookupKafkaClusterId(adminClient));
}
/**
 * Sends one or more SubmitMulti PDUs built from the exchange and publishes the
 * resulting message ids (and any per-destination delivery errors) as headers on
 * the out message.
 *
 * @param exchange the Camel exchange carrying the message(s) to send
 * @throws SmppException if any submit fails at the SMPP session level
 */
@Override
public void execute(Exchange exchange) throws SmppException {
    SubmitMulti[] submitMulties = createSubmitMulti(exchange);
    List<SubmitMultiResult> results = new ArrayList<>(submitMulties.length);

    for (SubmitMulti submitMulti : submitMulties) {
        SubmitMultiResult result;
        if (log.isDebugEnabled()) {
            log.debug("Sending multiple short messages for exchange id '{}'...", exchange.getExchangeId());
        }

        try {
            result = session.submitMultiple(
                    submitMulti.getServiceType(),
                    TypeOfNumber.valueOf(submitMulti.getSourceAddrTon()),
                    NumberingPlanIndicator.valueOf(submitMulti.getSourceAddrNpi()),
                    submitMulti.getSourceAddr(),
                    (Address[]) submitMulti.getDestAddresses(),
                    new ESMClass(submitMulti.getEsmClass()),
                    submitMulti.getProtocolId(),
                    submitMulti.getPriorityFlag(),
                    submitMulti.getScheduleDeliveryTime(),
                    submitMulti.getValidityPeriod(),
                    new RegisteredDelivery(submitMulti.getRegisteredDelivery()),
                    new ReplaceIfPresentFlag(submitMulti.getReplaceIfPresentFlag()),
                    DataCodings.newInstance(submitMulti.getDataCoding()),
                    submitMulti.getSmDefaultMsgId(),
                    submitMulti.getShortMessage(),
                    submitMulti.getOptionalParameters());
            results.add(result);
        } catch (Exception e) {
            // Wrap any session-level failure in the component's exception type.
            throw new SmppException(e);
        }
    }

    if (log.isDebugEnabled()) {
        log.debug("Sent multiple short messages for exchange id '{}' and received results '{}'",
                exchange.getExchangeId(), results);
    }

    List<String> messageIDs = new ArrayList<>(results.size());
    // {messageID : [{destAddr : address, error : errorCode}]}
    Map<String, List<Map<String, Object>>> errors = new HashMap<>();
    for (SubmitMultiResult result : results) {
        UnsuccessDelivery[] deliveries = result.getUnsuccessDeliveries();

        if (deliveries != null) {
            // Collect each failed destination with its error status code.
            List<Map<String, Object>> undelivered = new ArrayList<>();
            for (UnsuccessDelivery delivery : deliveries) {
                Map<String, Object> error = new HashMap<>();
                error.put(SmppConstants.DEST_ADDR, delivery.getDestinationAddress().getAddress());
                error.put(SmppConstants.ERROR, delivery.getErrorStatusCode());
                undelivered.add(error);
            }
            if (!undelivered.isEmpty()) {
                errors.put(result.getMessageId(), undelivered);
            }
        }

        messageIDs.add(result.getMessageId());
    }

    Message message = ExchangeHelper.getResultMessage(exchange);
    message.setHeader(SmppConstants.ID, messageIDs);
    message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
    // Only attach the error header when there is at least one failed delivery.
    if (!errors.isEmpty()) {
        message.setHeader(SmppConstants.ERROR, errors);
    }
}
@Test
public void bodyWithGSM8bitDataCodingNotModified() throws Exception {
    // With an 8-bit data coding the raw body bytes must be passed through to the
    // session unmodified (no charset conversion or byte mangling).
    final byte dataCoding = (byte) 0xF7; /* GSM 8-bit class 3 */
    byte[] body = { (byte) 0xFF, 'A', 'B', (byte) 0x00, (byte) 0xFF, (byte) 0x7F, 'C', (byte) 0xFF };

    Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
    exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitMulti");
    exchange.getIn().setHeader(SmppConstants.DATA_CODING, dataCoding);
    exchange.getIn().setBody(body);

    Address[] destAddrs = new Address[] {
        new Address(
            TypeOfNumber.UNKNOWN,
            NumberingPlanIndicator.UNKNOWN,
            "1717") };

    // Mock matches on the exact body array -- this is what asserts pass-through.
    when(session.submitMultiple(eq("CMT"), eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"),
            eq(destAddrs), eq(new ESMClass()), eq((byte) 0), eq((byte) 1), (String) isNull(), (String) isNull(),
            eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)), eq(ReplaceIfPresentFlag.DEFAULT),
            eq(DataCodings.newInstance(dataCoding)), eq((byte) 0), eq(body)))
        .thenReturn(new SubmitMultiResult("1", null, null));

    command.execute(exchange);

    assertEquals(Collections.singletonList("1"), exchange.getMessage().getHeader(SmppConstants.ID));
}
/**
 * Queries a single project by its code on behalf of the session user and wraps
 * the service result in the V2 response type.
 */
@Operation(summary = "queryProjectByCode", description = "QUERY_PROJECT_BY_ID_NOTES")
@Parameters({
        @Parameter(name = "code", description = "PROJECT_CODE", schema = @Schema(implementation = long.class, example = "123456", required = true))
})
@GetMapping(value = "/{code}")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_PROJECT_DETAILS_BY_CODE_ERROR)
public ProjectQueryResponse queryProjectByCode(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                               @PathVariable("code") long code) {
    // Delegate to the service layer; authorization is handled there.
    final Result queryResult = projectService.queryByCode(loginUser, code);
    return new ProjectQueryResponse(queryResult);
}
@Test
public void testQueryProjectByCode() {
    // Stub the service to succeed and verify the controller relays the status code.
    Result result = new Result();
    putMsg(result, Status.SUCCESS);
    long projectCode = 1L;
    Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject());
    Mockito.when(projectService.queryByCode(user, projectCode)).thenReturn(result);
    ProjectQueryResponse response = projectV2Controller.queryProjectByCode(user, projectCode);
    Assertions.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
/**
 * Returns the recording proxy; method calls made on it are captured by this
 * recorder rather than executed.
 */
public T getRecordingProxy() {
    return _templateProxy;
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testPropagateIntoUnsupportedSimpleType() {
    // Recording a getter for a simple (non-record) field is unsupported and must
    // be rejected immediately with IllegalArgumentException.
    PatchTreeRecorder<PatchTreeTestModel> pc = makeOne();
    pc.getRecordingProxy().getFooRequired();
}
/**
 * Encodes a flow rule into its JSON representation, including the resolved
 * application name and, when present, the rule's treatment and selector.
 */
@Override
public ObjectNode encode(FlowRule flowRule, CodecContext context) {
    checkNotNull(flowRule, "Flow rule cannot be null");

    // Resolve the short application id back to a human-readable name.
    CoreService coreService = context.getService(CoreService.class);
    ApplicationId applicationId = coreService.getAppId(flowRule.appId());
    String appName = applicationId != null ? applicationId.name() : "<none>";

    ObjectNode json = context.mapper().createObjectNode()
            .put(ID, Long.toString(flowRule.id().value()))
            .put(APP_ID, appName)
            .put(PRIORITY, flowRule.priority())
            .put(TIMEOUT, flowRule.timeout())
            .put(IS_PERMANENT, flowRule.isPermanent())
            .put(DEVICE_ID, flowRule.deviceId().toString())
            .put(TABLE_ID, flowRule.tableId())
            .put(TABLE_NAME, flowRule.table().toString());

    // Treatment and selector are optional; encode each via its own codec.
    TrafficTreatment treatment = flowRule.treatment();
    if (treatment != null) {
        JsonCodec<TrafficTreatment> treatmentCodec = context.codec(TrafficTreatment.class);
        json.set(TREATMENT, treatmentCodec.encode(treatment, context));
    }

    TrafficSelector selector = flowRule.selector();
    if (selector != null) {
        JsonCodec<TrafficSelector> selectorCodec = context.codec(TrafficSelector.class);
        json.set(SELECTOR, selectorCodec.encode(selector, context));
    }

    return json;
}
@Test
public void testFlowRuleEncode() {
    DeviceId deviceId = DeviceId.deviceId("of:000000000000000a");

    // Treatment: forward to port 0 after rewriting src/dst MACs.
    Instruction output = Instructions.createOutput(PortNumber.portNumber(0));
    Instruction modL2Src = Instructions.modL2Src(MacAddress.valueOf("11:22:33:44:55:66"));
    Instruction modL2Dst = Instructions.modL2Dst(MacAddress.valueOf("44:55:66:77:88:99"));
    TrafficTreatment.Builder tBuilder = DefaultTrafficTreatment.builder();
    TrafficTreatment treatment = tBuilder
            .add(output)
            .add(modL2Src)
            .add(modL2Dst)
            .build();

    // Selector: match on in-port, both MACs and the IPv4 ethertype.
    Criterion inPort = Criteria.matchInPort(PortNumber.portNumber(0));
    Criterion ethSrc = Criteria.matchEthSrc(MacAddress.valueOf("11:22:33:44:55:66"));
    Criterion ethDst = Criteria.matchEthDst(MacAddress.valueOf("44:55:66:77:88:99"));
    Criterion ethType = Criteria.matchEthType(Ethernet.TYPE_IPV4);
    TrafficSelector.Builder sBuilder = DefaultTrafficSelector.builder();
    TrafficSelector selector = sBuilder
            .add(inPort)
            .add(ethSrc)
            .add(ethDst)
            .add(ethType)
            .build();

    // Encode both a permanent and a temporary rule to cover both timeout modes.
    FlowRule permFlowRule = DefaultFlowRule.builder()
            .withCookie(1)
            .forTable(1)
            .withPriority(1)
            .makePermanent()
            .withTreatment(treatment)
            .withSelector(selector)
            .forDevice(deviceId).build();
    FlowRule tempFlowRule = DefaultFlowRule.builder()
            .withCookie(1)
            .forTable(1)
            .withPriority(1)
            .makeTemporary(1000)
            .withTreatment(treatment)
            .withSelector(selector)
            .forDevice(deviceId).build();

    ObjectNode permFlowRuleJson = flowRuleCodec.encode(permFlowRule, context);
    ObjectNode tempFlowRuleJson = flowRuleCodec.encode(tempFlowRule, context);

    assertThat(permFlowRuleJson, FlowRuleJsonMatcher.matchesFlowRule(permFlowRule));
    assertThat(tempFlowRuleJson, FlowRuleJsonMatcher.matchesFlowRule(tempFlowRule));
}
/**
 * Reacts to compost-related chat messages by attributing the compost type to
 * the first pending action whose patch the player is standing beside.
 */
@Subscribe
public void onChatMessage(ChatMessage e) {
    // Compost notifications arrive as either GAMEMESSAGE or SPAM; ignore the rest.
    ChatMessageType type = e.getType();
    if (type != ChatMessageType.GAMEMESSAGE && type != ChatMessageType.SPAM) {
        return;
    }

    CompostState compostUsed = determineCompostUsed(e.getMessage());
    if (compostUsed == null) {
        return;
    }

    // Drop stale pending actions before matching this message against them.
    this.expirePendingActions();

    for (PendingCompost pending : pendingCompostActions.values()) {
        if (playerIsBesidePatch(pending)) {
            setCompostState(pending.getFarmingPatch(), compostUsed);
            pendingCompostActions.remove(pending.getFarmingPatch());
            break;
        }
    }
}
@Test
public void onChatMessage_handlesFertileSoilMessages() {
    // A SPAM-typed treatment message beside a patch with a pending action must
    // persist the detected compost type for that patch.
    ChatMessage chatEvent = mock(ChatMessage.class);
    when(chatEvent.getType()).thenReturn(ChatMessageType.SPAM);
    when(chatEvent.getMessage()).thenReturn("The allotment has been treated with supercompost.");
    compostTracker.pendingCompostActions.put(farmingPatch, new CompostTracker.PendingCompost(Instant.MAX, worldPoint, farmingPatch));

    compostTracker.onChatMessage(chatEvent);

    verify(configManager).setRSProfileConfiguration("timetracking", "MOCK.compost", CompostState.SUPERCOMPOST);
}
/**
 * Performs one fetch round: builds the fetch requests and hands them to the
 * shared polling machinery together with this class's success/failure handlers.
 */
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(prepareFetchRequests(), this::handleFetchSuccess, this::handleFetchFailure);
}
@Test
public void testFetchForgetTopicIdWhenUnassigned() {
    buildFetcher();

    TopicIdPartition foo = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
    TopicIdPartition bar = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0));

    // Assign foo and bar.
    subscriptions.assignFromUser(singleton(foo.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(foo), tp -> validLeaderEpoch));
    subscriptions.seek(foo.topicPartition(), 0);
    assertEquals(1, sendFetches());

    // Fetch should use latest version.
    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
            singletonMap(foo, new PartitionData(
                foo.topicId(),
                0,
                FetchRequest.INVALID_LOG_START_OFFSET,
                fetchSize,
                Optional.of(validLeaderEpoch))
            ),
            emptyList()
        ),
        fullFetchResponse(1, foo, records, Errors.NONE, 100L, 0)
    );

    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchRecords();

    // Assign bar and un-assign foo.
    subscriptions.assignFromUser(singleton(bar.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(bar), tp -> validLeaderEpoch));
    subscriptions.seek(bar.topicPartition(), 0);

    // Fetch should use latest version.
    // The request must list foo in its "forgotten topics" section now that it
    // is no longer assigned.
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
            singletonMap(bar, new PartitionData(
                bar.topicId(),
                0,
                FetchRequest.INVALID_LOG_START_OFFSET,
                fetchSize,
                Optional.of(validLeaderEpoch))
            ),
            singletonList(foo)
        ),
        fullFetchResponse(1, bar, records, Errors.NONE, 100L, 0)
    );

    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchRecords();
}
/**
 * Asserts that the subject is equal to at least one element of the given
 * iterable, failing with the standard "expected any of" message otherwise.
 */
public void isIn(@Nullable Iterable<?> iterable) {
    checkNotNull(iterable);
    boolean present = contains(iterable, actual);
    if (!present) {
        failWithActual("expected any of", iterable);
    }
}
@Test
public void isIn() {
    // One-shot iterable proves the assertion iterates the source only once.
    assertThat("b").isIn(oneShotIterable("a", "b", "c"));
}
/**
 * Requests the (encrypted) PUK code for a document from the RDW backend.
 * The most recent matching PEN request must still be valid, and the RDW reply
 * must contain exactly one status entry whose sequence number matches the
 * request; otherwise the request is rejected with error code DWS8.
 *
 * @param request bsn, document type and sequence number identifying the document
 * @return map with "status" = "OK" and "vpuk" = Base64-encoded encrypted PUK
 * @throws PukRequestException    on an invalid/mismatching RDW response (DWS8)
 *                                or an expired PEN request
 * @throws SoapValidationException if the RDW SOAP exchange fails validation
 */
public Map<String, String> requestPuk(PukRequest request) throws PukRequestException, SoapValidationException {
    // Latest PEN request for this document; its validity window gates the PUK request.
    final PenRequestStatus result = repository.findFirstByBsnAndDocTypeAndSequenceNoOrderByRequestDatetimeDesc(request.getBsn(), request.getDocType(), request.getSequenceNo());
    checkExpirationDatePen(result);

    final Map<String, String> response = new HashMap<>();
    final OpvragenPUKCodeEIDRequest rdwRequest = new OpvragenPUKCodeEIDRequest();
    rdwRequest.setEIDSTATINFO(eidstatinfoBuilder(request.getBsn(), request.getSequenceNo()));

    final OpvragenPUKCodeEIDResponse rdwResponse = rdwClient.pukRequest(rdwRequest);
    final List<EIDSTATUSGEG> eidstatusgegs = rdwResponse.getEIDSTATINFO().getEIDSTATUSTAB().getEIDSTATUSGEG();

    // Accept only a single status entry whose sequence number echoes the one we
    // sent and which actually carries an encrypted PUK.
    if (eidstatusgegs.size() == 1
            && eidstatusgegs.get(0).getEIDVOLGNR().equals(rdwRequest.getEIDSTATINFO().getEIDSTATAGEG().getEIDVOLGNRA())
            && eidstatusgegs.get(0).getCRYPTRDEPUK() != null) {
        final String base64Puk = Base64.encodeBase64String(eidstatusgegs.get(0).getCRYPTRDEPUK());
        response.put("status", "OK");
        response.put("vpuk", base64Puk);
    } else {
        final String errormessage = "multiple EIDSTATUSGEG entries in rdw response or sequenceNo does not match";
        logger.error(errormessage);
        throw new PukRequestException("DWS8", errormessage);
    }
    return response;
}
@Test
public void rdwResponseContainsWrongSequenceNoThrowsError() throws PukRequestException, SoapValidationException {
    // set valid date of penrequest in repo
    status.setPinResetValidDate(LocalDateTime.of(2019, 1, 2, 12, 33));

    // RDW echoes back a different sequence number: the service must reject with DWS8.
    OpvragenPUKCodeEIDResponse rdwResponse = buildRdwResponse("SSSSSSSSSSSS");
    Mockito.when(mockRdwClient.pukRequest(Mockito.any(OpvragenPUKCodeEIDRequest.class))).thenReturn(rdwResponse);

    Exception exception = assertThrows(PukRequestException.class, () -> {
        service.requestPuk(request);
    });
    assertEquals("DWS8", exception.getMessage());
}
/**
 * Scans the XML stream just far enough to describe it: root element name and
 * namespace, root-element attributes, namespace declarations, and any
 * {@code camel-k:} modelines found in comments before the root element.
 * Parsing problems are recorded on the returned info instead of being thrown.
 *
 * @return the collected stream information (check {@code problem} for errors)
 * @throws IOException on underlying stream failures
 */
public XmlStreamInfo information() throws IOException {
    // A previous scan may already have recorded a failure; don't rescan.
    if (information.problem != null) {
        return information;
    }
    if (XMLStreamConstants.START_DOCUMENT != reader.getEventType()) {
        information.problem = new IllegalStateException("Expected START_DOCUMENT");
        return information;
    }

    // Modelines are only honored in comments that precede the root element.
    boolean skipComments = false;
    try {
        while (reader.hasNext()) {
            int ev = reader.next();
            switch (ev) {
                case XMLStreamConstants.COMMENT:
                    if (!skipComments) {
                        // search for modelines
                        String comment = reader.getText();
                        if (comment != null) {
                            comment.lines().map(String::trim).forEach(l -> {
                                if (l.startsWith("camel-k:")) {
                                    information.modelines.add(l);
                                }
                            });
                        }
                    }
                    break;
                case XMLStreamConstants.START_ELEMENT:
                    if (information.rootElementName != null) {
                        // only root element is checked. No need to parse more
                        return information;
                    }
                    skipComments = true;
                    information.rootElementName = reader.getLocalName();
                    information.rootElementNamespace = reader.getNamespaceURI();

                    // Record the namespace declarations on the root element,
                    // mapping the default namespace under the empty prefix.
                    for (int ns = 0; ns < reader.getNamespaceCount(); ns++) {
                        String prefix = reader.getNamespacePrefix(ns);
                        information.namespaceMapping.put(prefix == null ? "" : prefix, reader.getNamespaceURI(ns));
                    }

                    // Record root-element attributes; namespaced attributes are
                    // stored under both Clark notation and prefix:local form.
                    for (int at = 0; at < reader.getAttributeCount(); at++) {
                        QName qn = reader.getAttributeName(at);
                        String prefix = qn.getPrefix() == null ? "" : qn.getPrefix().trim();
                        String nsURI = qn.getNamespaceURI() == null ? "" : qn.getNamespaceURI().trim();
                        String value = reader.getAttributeValue(at);
                        String localPart = qn.getLocalPart();
                        if (nsURI.isEmpty() || prefix.isEmpty()) {
                            // according to XML spec, this attribut is not namespaced, not in default namespace
                            // https://www.w3.org/TR/xml-names/#defaulting
                            // > The namespace name for an unprefixed attribute name always has no value.
                            information.attributes.put(localPart, value);
                        } else {
                            information.attributes.put("{" + nsURI + "}" + localPart, value);
                            information.attributes.put(prefix + ":" + localPart, value);
                        }
                    }
                    break;
                case XMLStreamConstants.END_ELEMENT:
                case XMLStreamConstants.END_DOCUMENT:
                    // Reaching the end without ever seeing a root element means
                    // the document was effectively empty.
                    if (information.rootElementName == null) {
                        information.problem = new IllegalArgumentException("XML Stream is empty");
                        return information;
                    }
                    break;
                default:
                    break;
            }
        }
    } catch (XMLStreamException e) {
        information.problem = e;
        return information;
    }
    return information;
}
@Test
public void documentWithModeline() throws IOException {
    // A routes document carrying three camel-k modeline comments: all three must
    // be extracted, in order, alongside the root element/namespace info.
    String xml = readAllFromFile("documentWithModeline.xml");
    XmlStreamDetector detector = new XmlStreamDetector(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
    XmlStreamInfo info = detector.information();
    assertTrue(info.isValid());
    assertEquals("routes", info.getRootElementName());
    assertEquals("http://camel.apache.org/schema/spring", info.getRootElementNamespace());
    assertEquals(0, info.getAttributes().size());
    assertEquals(1, info.getNamespaces().size());
    assertEquals("http://camel.apache.org/schema/spring", info.getNamespaces().get(""));
    assertEquals(3, info.getModelines().size());
    assertEquals("camel-k: dependency=mvn:com.i-heart-camel:best-routes-ever:1.0.0", info.getModelines().get(0));
    assertEquals("camel-k: env=HELLO=world", info.getModelines().get(1));
    assertEquals("camel-k: name=MyApplication", info.getModelines().get(2));
}
/**
 * Wraps the HTML-highlighted form of the given Java source in a complete HTML
 * document with an inline stylesheet for strings, comments and keywords.
 */
static String htmlizeFull(final String javaSource) {
    final StringBuilder page = new StringBuilder();
    page.append("<html><body><style>")
        .append("code { font-size: 12px; } ")
        .append("code .string { color: blue; } ")
        .append("code .comment { font-style: italic; color: green; } ")
        .append("code .keyword { font-weight: bold; color: purple; } ")
        .append("code .comment .keyword { color: green; font-weight: normal; } ")
        .append("code .comment .string { color: green; } ")
        .append("</style><code>")
        .append(htmlize(javaSource))
        .append("</code></body></html>");
    return page.toString();
}
@Test
public void test() {
    // Smoke test: full htmlization of a simple class must yield non-empty output.
    final String javaSource = "public class Test { static final String TEST = \"test\"; }";
    final String html = JavaHTMLizer.htmlizeFull(javaSource);
    assertTrue("htmlizeFull", html != null && !html.isEmpty());
}
/**
 * Initializes the operator's timer service and key-tracking set before
 * delegating to the superclass initialization (order matters: super.open()
 * may rely on this state being present).
 */
@Override
public void open() throws Exception {
    this.timerService =
            getInternalTimerService("processing timer", VoidNamespaceSerializer.INSTANCE, this);
    this.keySet = new HashSet<>();
    super.open();
}
@Test
void testProcessRecord() throws Exception {
    // A pass-through two-input function: first input widens ints to longs,
    // second input forwards longs unchanged.
    KeyedTwoInputNonBroadcastProcessOperator<Long, Integer, Long, Long> processOperator =
            new KeyedTwoInputNonBroadcastProcessOperator<>(
                    new TwoInputNonBroadcastStreamProcessFunction<Integer, Long, Long>() {
                        @Override
                        public void processRecordFromFirstInput(
                                Integer record, Collector<Long> output, PartitionedContext ctx) {
                            output.collect(Long.valueOf(record));
                        }

                        @Override
                        public void processRecordFromSecondInput(
                                Long record, Collector<Long> output, PartitionedContext ctx) {
                            output.collect(record);
                        }
                    });

    try (KeyedTwoInputStreamOperatorTestHarness<Long, Integer, Long, Long> testHarness =
            new KeyedTwoInputStreamOperatorTestHarness<>(
                    processOperator,
                    (KeySelector<Integer, Long>) (data) -> (long) (data + 1),
                    (KeySelector<Long, Long>) value -> value + 1,
                    Types.LONG)) {
        testHarness.open();
        // Interleave both inputs; emission order must follow arrival order.
        testHarness.processElement1(new StreamRecord<>(1));
        testHarness.processElement2(new StreamRecord<>(2L));
        testHarness.processElement2(new StreamRecord<>(4L));
        testHarness.processElement1(new StreamRecord<>(3));

        Collection<StreamRecord<Long>> recordOutput = testHarness.getRecordOutput();
        assertThat(recordOutput)
                .containsExactly(
                        new StreamRecord<>(1L),
                        new StreamRecord<>(2L),
                        new StreamRecord<>(4L),
                        new StreamRecord<>(3L));
    }
}
/**
 * Returns the release id of the parent POM, as resolved at construction time.
 */
@Override
public ReleaseId getParentReleaseId() {
    return parentReleaseId;
}
@Test
public void getParentReleaseId() {
    // Build the model from a mocked session that includes a parent POM.
    final MavenSession session = mockMavenSession(true);
    final ProjectPomModel model = new ProjectPomModel(session);
    assertReleaseId(model.getParentReleaseId(), PARENT_ARTIFACT_ID);
}
@Override public byte getStoragePolicyID() { byte id = getLocalStoragePolicyID(); if (id == BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { id = this.getParent() != null ? this.getParent().getStoragePolicyID() : id; } // For Striped EC files, we support only suitable policies. Current // supported policies are HOT, COLD, ALL_SSD. // If the file was set with any other policies, then we just treat policy as // BLOCK_STORAGE_POLICY_ID_UNSPECIFIED. if (isStriped() && id != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED && !ErasureCodingPolicyManager .checkStoragePolicySuitableForECStripedMode(id)) { id = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; if (LOG.isDebugEnabled()) { LOG.debug("The current effective storage policy id : " + id + " is not suitable for striped mode EC file : " + getName() + ". So, just returning unspecified storage policy id"); } } return id; }
@Test
public void testStoragePolicyID() {
    // Every explicitly-set local policy id must be reported back unchanged.
    for (byte policy = 0; policy < 16; policy++) {
        assertEquals(policy, createINodeFile(policy).getStoragePolicyID());
    }
}
/**
 * Renders the page for a single counter of the task identified by the request
 * parameters. If no valid task is available, responds with a plain-text error
 * message instead of rendering the page.
 *
 * @throws IOException if the counter group/name parameters cannot be URL-decoded
 */
public void singleTaskCounter() throws IOException{
  try {
    requireTask();
  } catch (Exception e) {
    // No (valid) task in the request: surface the problem as plain text and bail out.
    renderText(e.getMessage());
    return;
  }
  // Counter group and name arrive URL-encoded; decode them before use.
  set(COUNTER_GROUP, URLDecoder.decode($(COUNTER_GROUP), "UTF-8"));
  set(COUNTER_NAME, URLDecoder.decode($(COUNTER_NAME), "UTF-8"));
  if (app.getTask() != null) {
    setTitle(StringHelper.join($(COUNTER_GROUP)," ",$(COUNTER_NAME),
        " for ", $(TASK_ID)));
  }
  render(singleCounterPage());
}
@Test
public void testGetSingleTaskCounter() throws IOException {
    appController.singleTaskCounter();
    // The controller must select the single-counter page and have decoded
    // both counter parameters into its property set.
    assertEquals(SingleCounterPage.class, appController.getClazz());
    assertNotNull(appController.getProperty().get(AppController.COUNTER_GROUP));
    assertNotNull(appController.getProperty().get(AppController.COUNTER_NAME));
}
/**
 * Merges the per-bucket-space stats of {@code stats} into this instance, scaled
 * by {@code factor} (passed through to {@link BucketSpaceStats#merge} — presumably
 * +1 to add and -1 to subtract; confirm against callers).
 */
private void merge(ContentNodeStats stats, int factor) {
    for (Map.Entry<String, BucketSpaceStats> entry : stats.bucketSpaces.entrySet()) {
        BucketSpaceStats statsToUpdate = bucketSpaces.get(entry.getKey());
        // Only create a missing bucket space when adding (factor == 1); when
        // subtracting, an absent entry has nothing to subtract from and is skipped.
        if (statsToUpdate == null && factor == 1) {
            statsToUpdate = BucketSpaceStats.empty();
            bucketSpaces.put(entry.getKey(), statsToUpdate);
        }
        if (statsToUpdate != null) {
            statsToUpdate.merge(entry.getValue(), factor);
        }
    }
}
@Test
void bucket_space_stats_can_transition_from_valid_to_invalid() {
    BucketSpaceStats stats = BucketSpaceStats.of(5, 1);
    assertTrue(stats.valid());
    // Merging in invalid stats taints the aggregate: it becomes invalid while
    // still carrying the previously accumulated counts.
    stats.merge(BucketSpaceStats.invalid(), 1);
    assertFalse(stats.valid());
    assertEquals(BucketSpaceStats.invalid(5, 1), stats);
}
/**
 * Determines the concrete {@code Configuration} subclass {@code T} this
 * instance was parameterized with, by reflecting on the generic type
 * parameter of the runtime class.
 */
public Class<T> getConfigurationClass() {
    return Generics.getTypeParameter(getClass(), Configuration.class);
}
@Test
void canDetermineWrappedConfiguration() throws Exception {
    // The wrapper must report the configuration class of the wrapped application.
    assertThat(new WrapperApplication<>(new PoserApplication()).getConfigurationClass())
            .isSameAs(FakeConfiguration.class);
}
/**
 * Parses a glue path into a classpath URI. The guards below are order-sensitive:
 * empty / bare-scheme inputs map to the root package, then separators are
 * normalized, then package notation is converted to a resource path.
 */
public static URI parse(String gluePath) {
    requireNonNull(gluePath, "gluePath may not be null");
    // Empty path means "everything from the root package".
    if (gluePath.isEmpty()) {
        return rootPackageUri();
    }

    // Legacy from the Cucumber Eclipse plugin
    // Older versions of Cucumber allowed it.
    if (CLASSPATH_SCHEME_PREFIX.equals(gluePath)) {
        return rootPackageUri();
    }

    // Normalize non-standard path separators before parsing.
    if (nonStandardPathSeparatorInUse(gluePath)) {
        String standardized = replaceNonStandardPathSeparator(gluePath);
        return parseAssumeClasspathScheme(standardized);
    }

    // Package notation (e.g. com.example.app) is converted to a resource path.
    if (isProbablyPackage(gluePath)) {
        String path = resourceNameOfPackageName(gluePath);
        return parseAssumeClasspathScheme(path);
    }

    return parseAssumeClasspathScheme(gluePath);
}
@Test
@EnabledOnOs(OS.WINDOWS)
void absolute_windows_path_form_is_not_valid() {
    // An absolute Windows path (drive letter + backslashes) must be rejected;
    // the error message shows the separator-normalized form of the input.
    Executable testMethod = () -> GluePath.parse("C:\\com\\example\\app");
    IllegalArgumentException actualThrown = assertThrows(IllegalArgumentException.class, testMethod);
    assertThat("Unexpected exception message", actualThrown.getMessage(), is(equalTo(
        "The glue path must have a classpath scheme C:/com/example/app")));
}
/**
 * Registers the internal "sonarlint_events" endpoint on the given controller,
 * declaring its two required comma-separated parameters (project keys and
 * languages) and this handler as the request handler.
 */
@Override
public void define(WebService.NewController controller) {
    WebService.NewAction action = controller
        .createAction("sonarlint_events")
        .setInternal(true)
        .setDescription("Endpoint for listening to server side events. Currently it notifies listener about change to activation of a rule")
        .setSince("9.4")
        // Server-sent events: the response body is a stream, not regular content.
        .setContentType(Response.ContentType.NO_CONTENT)
        .setHandler(this);

    action
        .createParam(PROJECT_PARAM_KEY)
        .setDescription("Comma-separated list of projects keys for which events will be delivered")
        .setRequired(true)
        .setExampleValue("example-project-key,example-project-key2");

    action
        .createParam(LANGUAGE_PARAM_KEY)
        .setDescription("Comma-separated list of languages for which events will be delivered")
        .setRequired(true)
        .setExampleValue("java,cobol");
}
@Test
public void defineTest() {
    final WebService.Action definition = ws.getDef();

    // Endpoint metadata.
    assertThat(definition.isInternal()).isTrue();
    assertThat(definition.since()).isEqualTo("9.4");

    // Both parameters are declared and mandatory.
    assertThat(definition.params())
        .extracting(WebService.Param::key, WebService.Param::isRequired)
        .containsExactlyInAnyOrder(tuple("languages", true), tuple("projectKeys", true));
}
@Udf(schema = "ARRAY<STRUCT<K STRING, V BOOLEAN>>") public List<Struct> entriesBoolean( @UdfParameter(description = "The map to create entries from") final Map<String, Boolean> map, @UdfParameter(description = "If true then the resulting entries are sorted by key") final boolean sorted ) { return entries(map, BOOLEAN_STRUCT_SCHEMA, sorted); }
@Test
public void shouldComputeBooleanEntriesSorted() {
    // Values alternate true/false by index; the shared helper verifies the
    // entries come back sorted by key.
    final Map<String, Boolean> map = createMap(i -> i % 2 == 0);
    shouldComputeEntriesSorted(map, () -> entriesUdf.entriesBoolean(map, true));
}
/**
 * Computes splits for the given table, producing one or more splits per Cassandra
 * token range. When the partitioner supports ring-fraction math, each range is
 * further subdivided so each sub-split targets roughly {@code splitSize} partitions
 * (estimated from size statistics); otherwise one split per range is emitted.
 * The result is shuffled to spread load, and is unmodifiable.
 */
public List<TokenSplit> getSplits(String keyspace, String table, Optional<Long> sessionSplitsPerNode)
{
    Set<TokenRange> tokenRanges = session.getTokenRanges();

    if (tokenRanges.isEmpty()) {
        throw new PrestoException(CASSANDRA_METADATA_ERROR, "The cluster metadata is not available. " +
                "Please make sure that the Cassandra cluster is up and running, " +
                "and that the contact points are specified correctly.");
    }

    // Unwrap any range that wraps around the end of the token ring so all
    // subsequent arithmetic deals with simple [start, end) intervals.
    if (tokenRanges.stream().anyMatch(TokenRange::isWrappedAround)) {
        tokenRanges = unwrap(tokenRanges);
    }

    // Present only for partitioners whose ring fraction we know how to compute.
    Optional<TokenRing> tokenRing = createForPartitioner(session.getPartitioner());
    long totalPartitionsCount = getTotalPartitionsCount(keyspace, table, sessionSplitsPerNode);

    List<TokenSplit> splits = new ArrayList<>();
    for (TokenRange tokenRange : tokenRanges) {
        if (tokenRange.isEmpty()) {
            continue;
        }

        checkState(!tokenRange.isWrappedAround(), "all token ranges must be unwrapped at this step");

        List<String> endpoints = getEndpoints(keyspace, tokenRange);
        checkState(!endpoints.isEmpty(), "endpoints is empty for token range: %s", tokenRange);

        // Without ring-fraction support: one split per token range, no subdivision.
        // (NOTE(review): this checkState duplicates the one above.)
        if (!tokenRing.isPresent()) {
            checkState(!tokenRange.isWrappedAround(), "all token ranges must be unwrapped at this step");
            splits.add(createSplit(tokenRange, endpoints));
            continue;
        }

        // Estimate how many partitions fall in this range from its ring fraction,
        // then split it so each sub-split holds about splitSize partitions (min 1).
        double tokenRangeRingFraction = tokenRing.get().getRingFraction(tokenRange.getStart().toString(), tokenRange.getEnd().toString());
        long partitionsCountEstimate = round(totalPartitionsCount * tokenRangeRingFraction);
        checkState(partitionsCountEstimate >= 0, "unexpected partitions count estimate: %d", partitionsCountEstimate);
        int subSplitCount = max(toIntExact(partitionsCountEstimate / splitSize), 1);
        List<TokenRange> subRanges = tokenRange.splitEvenly(subSplitCount);

        for (TokenRange subRange : subRanges) {
            if (subRange.isEmpty()) {
                continue;
            }
            checkState(!subRange.isWrappedAround(), "all token ranges must be unwrapped at this step");
            splits.add(createSplit(subRange, endpoints));
        }
    }

    // Randomize split order so consumers do not all start on the same replica.
    shuffle(splits, ThreadLocalRandom.current());
    return unmodifiableList(splits);
}
@Test public void testEmptyTable() throws Exception { String tableName = "empty_table"; session.execute(format("CREATE TABLE %s.%s (key text PRIMARY KEY)", KEYSPACE, tableName)); EmbeddedCassandra.refreshSizeEstimates(KEYSPACE, tableName); List<TokenSplit> splits = splitManager.getSplits(KEYSPACE, tableName, Optional.empty()); // even for the empty table at least one split must be produced, in case the statistics are inaccurate assertEquals(splits.size(), 1); session.execute(format("DROP TABLE %s.%s", KEYSPACE, tableName)); }
/**
 * Replaces an empty Flatten with a Create-based equivalent. Rejects any
 * application that actually has inputs, since this factory only handles the
 * zero-input case.
 */
@Override
public PTransformReplacement<PCollectionList<T>, PCollection<T>> getReplacementTransform(
        AppliedPTransform<PCollectionList<T>, PCollection<T>, PCollections<T>> transform) {
    checkArgument(
            transform.getInputs().isEmpty(),
            "Unexpected nonempty input %s for %s",
            transform.getInputs(),
            getClass().getSimpleName());
    return PTransformReplacement.of(
            PCollectionList.empty(transform.getPipeline()), new CreateEmptyFromList<T>());
}
@Test
public void getInputNonEmptyThrows() {
    // Build a genuinely non-empty input list: the factory must refuse it.
    PCollectionList<Long> nonEmpty =
            PCollectionList.of(pipeline.apply("unbounded", GenerateSequence.from(0)))
                    .and(pipeline.apply("bounded", GenerateSequence.from(0).to(100)));
    thrown.expect(IllegalArgumentException.class);
    // The error message should identify both the offending input and the factory.
    thrown.expectMessage(nonEmpty.expand().toString());
    thrown.expectMessage(EmptyFlattenAsCreateFactory.class.getSimpleName());
    factory.getReplacementTransform(
            AppliedPTransform.of(
                    "nonEmptyInput",
                    PValues.expandInput(nonEmpty),
                    Collections.emptyMap(),
                    Flatten.pCollections(),
                    ResourceHints.create(),
                    pipeline));
}