focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public ProcessingNodesState calculateProcessingState(TimeRange timeRange) { final DateTime updateThresholdTimestamp = clock.nowUTC().minus(updateThreshold.toMilliseconds()); try (DBCursor<ProcessingStatusDto> statusCursor = db.find(activeNodes(updateThresholdTimestamp))) { if (!statusCursor.hasNext()) { return ProcessingNodesState.NONE_ACTIVE; } int activeNodes = 0; int idleNodes = 0; while (statusCursor.hasNext()) { activeNodes++; ProcessingStatusDto nodeProcessingStatus = statusCursor.next(); DateTime lastIndexedMessage = nodeProcessingStatus.receiveTimes().postIndexing(); // If node is behind and is busy, it is overloaded. if (lastIndexedMessage.isBefore(timeRange.getTo()) && isBusy(nodeProcessingStatus)) { return ProcessingNodesState.SOME_OVERLOADED; } // If a node did not index a message that is at least at the start of the time range, // we consider it idle. if (lastIndexedMessage.isBefore(timeRange.getFrom())) { idleNodes++; } } // Only if all nodes are idle, we stop the processing. if (activeNodes == idleNodes) { return ProcessingNodesState.ALL_IDLE; } } // If none of the above checks return, we can assume that some nodes have already indexed the given timerange. return ProcessingNodesState.SOME_UP_TO_DATE; }
@Test @MongoDBFixtures("processing-status-not-updated-nodes.json") public void processingStateNoActiveNodesBecauseNoNodesAreActive() { when(clock.nowUTC()).thenReturn(DateTime.parse("2019-01-01T04:00:00.000Z")); when(updateThreshold.toMilliseconds()).thenReturn(Duration.hours(1).toMilliseconds()); TimeRange timeRange = AbsoluteRange.create("2019-01-01T00:00:00.000Z", "2019-01-01T00:00:30.000Z"); assertThat(dbService.calculateProcessingState(timeRange)).isEqualTo(ProcessingNodesState.NONE_ACTIVE); }
@Override protected Map<String, Object> executeScenario(KieContainer kieContainer, ScenarioRunnerData scenarioRunnerData, ExpressionEvaluatorFactory expressionEvaluatorFactory, ScesimModelDescriptor scesimModelDescriptor, Settings settings) { if (!ScenarioSimulationModel.Type.DMN.equals(settings.getType())) { throw new ScenarioException("Impossible to run a not-DMN simulation with DMN runner"); } DMNScenarioExecutableBuilder executableBuilder = createBuilderWrapper(kieContainer); executableBuilder.setActiveModel(settings.getDmnFilePath()); defineInputValues(scenarioRunnerData.getBackgrounds(), scenarioRunnerData.getGivens()).forEach(executableBuilder::setValue); return executableBuilder.run().getOutputs(); }
@Test public void validateUnSecureImportPrefix() { String injectedPrefix = "/.(a+)+$/"; FactIdentifier importedPersonFactIdentifier = FactIdentifier.create(injectedPrefix + ".Person", injectedPrefix + ".Person", injectedPrefix); ScenarioRunnerData scenarioRunnerData = new ScenarioRunnerData(); AbstractMap.SimpleEntry<String, Object> givenImportedPersonFactData = new AbstractMap.SimpleEntry<>("surname", "White"); AbstractMap.SimpleEntry<String, Object> givenImportedPersonFactData2 = new AbstractMap.SimpleEntry<>("age", 67); scenarioRunnerData.addGiven(new InstanceGiven(importedPersonFactIdentifier, Map.ofEntries(givenImportedPersonFactData, givenImportedPersonFactData2))); List<String> expectedInputDataToLoad = List.of(injectedPrefix); int inputObjects = expectedInputDataToLoad.size(); runnerHelper.executeScenario(kieContainerMock, scenarioRunnerData, expressionEvaluatorFactory, simulation.getScesimModelDescriptor(), settings); verify(dmnScenarioExecutableBuilderMock, times(inputObjects)).setValue(keyCaptor.capture(), valueCaptor.capture()); assertThat(keyCaptor.getAllValues()).containsAll(expectedInputDataToLoad); String key = keyCaptor.getAllValues().get(0); Map<String, Object> value = (Map<String, Object>) valueCaptor.getAllValues().get(0); assertThat(key).isEqualTo(injectedPrefix); Map<String, Object> subValuePerson = (Map<String, Object>) value.get("Person"); assertThat(subValuePerson).hasSize(2).contains(givenImportedPersonFactData, givenImportedPersonFactData2); }
public static UnicastMappingInstruction unicastPriority(int priority) { return new UnicastMappingInstruction.PriorityMappingInstruction( UnicastType.PRIORITY, priority); }
@Test public void testUnicastPriorityMethod() { final MappingInstruction instruction = MappingInstructions.unicastPriority(2); final UnicastMappingInstruction.PriorityMappingInstruction priorityMappingInstruction = checkAndConvert(instruction, UnicastMappingInstruction.Type.UNICAST, UnicastMappingInstruction.PriorityMappingInstruction.class); assertThat(priorityMappingInstruction.priority(), is(equalTo(2))); }
public HttpResult getBinary(String url) throws IOException, NotModifiedException { return getBinary(url, null, null); }
@Test void ignoreInvalidSsl() throws Exception { this.mockServerClient.when(HttpRequest.request().withMethod("GET")).respond(HttpResponse.response().withBody("ok")); HttpResult result = getter.getBinary("https://localhost:" + this.mockServerClient.getPort()); Assertions.assertEquals("ok", new String(result.getContent())); }
@Override public GetSubClustersResponse getFederationSubClusters(GetSubClustersRequest request) throws YarnException, IOException { // Parameter validation. if (request == null) { routerMetrics.incrGetFederationSubClustersFailedRetrieved(); RouterServerUtil.logAndThrowException( "Missing getFederationSubClusters Request.", null); } // Step1. Get all subClusters of the cluster. Map<SubClusterId, SubClusterInfo> subClusters = federationFacade.getSubClusters(false); // Step2. Get FederationSubCluster data. List<FederationSubCluster> federationSubClusters = new ArrayList<>(); long startTime = clock.getTime(); for (Map.Entry<SubClusterId, SubClusterInfo> subCluster : subClusters.entrySet()) { SubClusterId subClusterId = subCluster.getKey(); try { SubClusterInfo subClusterInfo = subCluster.getValue(); long lastHeartBeat = subClusterInfo.getLastHeartBeat(); Date lastHeartBeatDate = new Date(lastHeartBeat); FederationSubCluster federationSubCluster = FederationSubCluster.newInstance( subClusterId.getId(), subClusterInfo.getState().name(), lastHeartBeatDate.toString()); federationSubClusters.add(federationSubCluster); } catch (Exception e) { routerMetrics.incrGetFederationSubClustersFailedRetrieved(); LOG.error("getSubClusters SubClusterId = [%s] error.", subClusterId, e); } } long stopTime = clock.getTime(); routerMetrics.succeededGetFederationSubClustersRetrieved(stopTime - startTime); // Step3. Return results. return GetSubClustersResponse.newInstance(federationSubClusters); }
@Test public void testGetFederationSubClusters() throws Exception { LambdaTestUtils.intercept(YarnException.class, "Missing getFederationSubClusters Request.", () -> interceptor.getFederationSubClusters(null)); GetSubClustersRequest request = GetSubClustersRequest.newInstance(); GetSubClustersResponse federationSubClusters = interceptor.getFederationSubClusters(request); assertNotNull(federationSubClusters); List<FederationSubCluster> federationSubClustersList = federationSubClusters.getFederationSubClusters(); assertNotNull(federationSubClustersList); assertEquals(4, federationSubClustersList.size()); }
@Override public CEFParserResult evaluate(FunctionArgs args, EvaluationContext context) { final String cef = valueParam.required(args, context); final boolean useFullNames = useFullNamesParam.optional(args, context).orElse(false); final CEFParser parser = CEFParserFactory.create(); if (cef == null || cef.isEmpty()) { LOG.debug("NULL or empty parameter passed to CEF parser function. Not evaluating."); return null; } LOG.debug("Running CEF parser for [{}].", cef); final MappedMessage message; try (Timer.Context timer = parseTime.time()) { message = new MappedMessage(parser.parse(cef.trim()), useFullNames); } catch (Exception e) { LOG.error("Error while parsing CEF message: {}", cef, e); return null; } final Map<String, Object> fields = new HashMap<>(); /* * Add all CEF standard fields. We are prefixing with cef_ to avoid overwriting existing fields or to be * overwritten ourselves later in the processing. The user is encouraged to run another pipeline function * to clean up field names if desired. */ fields.put("cef_version", message.cefVersion()); fields.put("device_vendor", message.deviceVendor()); fields.put("device_product", message.deviceProduct()); fields.put("device_version", message.deviceVersion()); fields.put("device_event_class_id", message.deviceEventClassId()); fields.put("name", message.name()); fields.put("severity", message.severity()); // Add all custom CEF fields. fields.putAll(message.mappedExtensions()); return new CEFParserResult(fields); }
@Test public void evaluate_returns_null_for_invalid_CEF_string() throws Exception { final Map<String, Expression> arguments = ImmutableMap.of( CEFParserFunction.VALUE, new StringExpression(new CommonToken(0), "CEF:0|Foobar"), CEFParserFunction.USE_FULL_NAMES, new BooleanExpression(new CommonToken(0), false) ); final FunctionArgs functionArgs = new FunctionArgs(function, arguments); final Message message = messageFactory.createMessage("__dummy", "__dummy", DateTime.parse("2010-07-30T16:03:25Z")); final EvaluationContext evaluationContext = new EvaluationContext(message); final CEFParserResult result = function.evaluate(functionArgs, evaluationContext); assertNull(result); }
@Override public Path copy(final Path file, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException { try { final EueApiClient client = new EueApiClient(session); if(status.isExists()) { if(log.isWarnEnabled()) { log.warn(String.format("Trash file %s to be replaced with %s", target, file)); } new EueTrashFeature(session, fileid).delete(Collections.singletonMap(target, status), callback, new Delete.DisabledCallback()); } final String resourceId = fileid.getFileId(file); final String parentResourceId = fileid.getFileId(target.getParent()); String targetResourceId = null; final ResourceCopyResponseEntries resourceCopyResponseEntries; switch(parentResourceId) { case EueResourceIdProvider.ROOT: case EueResourceIdProvider.TRASH: resourceCopyResponseEntries = new CopyChildrenForAliasApiApi(client) .resourceAliasAliasChildrenCopyPost(parentResourceId, Collections.singletonList(String.format("%s/resource/%s", session.getBasePath(), resourceId)), null, null, null, "rename", null); break; default: resourceCopyResponseEntries = new CopyChildrenApi(client).resourceResourceIdChildrenCopyPost(parentResourceId, Collections.singletonList(String.format("%s/resource/%s", session.getBasePath(), resourceId)), null, null, null, "rename", null); } if(null == resourceCopyResponseEntries) { // Copy of single file will return 200 status code with empty response body } else { for(ResourceCopyResponseEntry resourceCopyResponseEntry : resourceCopyResponseEntries.values()) { switch(resourceCopyResponseEntry.getStatusCode()) { case HttpStatus.SC_CREATED: fileid.cache(target, EueResourceIdProvider.getResourceIdFromResourceUri(resourceCopyResponseEntry.getHeaders().getLocation())); break; default: log.warn(String.format("Failure %s copying file %s", resourceCopyResponseEntries, file)); throw new EueExceptionMappingService().map(new ApiException(resourceCopyResponseEntry.getReason(), null, 
resourceCopyResponseEntry.getStatusCode(), client.getResponseHeaders())); } } } listener.sent(status.getLength()); if(!StringUtils.equals(file.getName(), target.getName())) { final ResourceUpdateModel resourceUpdateModel = new ResourceUpdateModel(); final ResourceUpdateModelUpdate resourceUpdateModelUpdate = new ResourceUpdateModelUpdate(); final Uifs uifs = new Uifs(); uifs.setName(target.getName()); resourceUpdateModelUpdate.setUifs(uifs); resourceUpdateModel.setUpdate(resourceUpdateModelUpdate); final ResourceMoveResponseEntries resourceMoveResponseEntries = new UpdateResourceApi(client).resourceResourceIdPatch(fileid.getFileId(target), resourceUpdateModel, null, null, null); if(null == resourceMoveResponseEntries) { // Move of single file will return 200 status code with empty response body } else { for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) { switch(resourceMoveResponseEntry.getStatusCode()) { case HttpStatus.SC_CREATED: break; default: log.warn(String.format("Failure %s renaming file %s", resourceMoveResponseEntry, file)); throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(), null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders())); } } } } return target; } catch(ApiException e) { throw new EueExceptionMappingService().map("Cannot copy {0}", e, file); } }
@Test public void testCopyToExistingFile() throws Exception { final EueResourceIdProvider fileid = new EueResourceIdProvider(session); final Path folder = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)); new EueDirectoryFeature(session, fileid).mkdir(folder, new TransferStatus()); final Path test = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); final Local local = new Local(System.getProperty("java.io.tmpdir"), test.getName()); final byte[] random = RandomUtils.nextBytes(2547); IOUtils.write(random, local.getOutputStream(false)); final TransferStatus status = new TransferStatus().withLength(random.length); final EueWriteFeature.Chunk upload = new EueSingleUploadService(session, fileid, new EueWriteFeature(session, fileid)).upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(), status, new DisabledLoginCallback()); assertNotNull(upload.getResourceId()); local.delete(); assertTrue(new EueFindFeature(session, fileid).find(test)); final Path copy = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); new EueTouchFeature(session, fileid).touch(copy, new TransferStatus().withLength(0L)); new EueCopyFeature(session, fileid).copy(test, copy, new TransferStatus().exists(true), new DisabledConnectionCallback(), new DisabledStreamListener()); final Find find = new DefaultFindFeature(session); assertTrue(find.find(test)); assertTrue(find.find(copy)); new EueDeleteFeature(session, fileid).delete(Arrays.asList(test, copy), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public static Class<?> getLiteral(String className, String literal) { LiteralAnalyzer analyzer = ANALYZERS.get( className ); Class result = null; if ( analyzer != null ) { analyzer.validate( literal ); result = analyzer.getLiteral(); } return result; }
@Test public void testUnderscorePlacement2() { // Invalid: cannot put underscores // adjacent to a decimal point assertThat( getLiteral( float.class.getCanonicalName(), "3_.1415F" ) ).isNull(); // Invalid: cannot put underscores // adjacent to a decimal point assertThat( getLiteral( float.class.getCanonicalName(), "3._1415F" ) ).isNull(); // Invalid: cannot put underscores // prior to an L suffix assertThat( getLiteral( long.class.getCanonicalName(), "999_99_9999_L" ) ).isNull(); // OK (decimal literal) assertThat( getLiteral( int.class.getCanonicalName(), "5_2" ) ).isNotNull(); // Invalid: cannot put underscores // At the end of a literal assertThat( getLiteral( int.class.getCanonicalName(), "52_" ) ).isNull(); // OK (decimal literal) assertThat( getLiteral( int.class.getCanonicalName(), "5_______2" ) ).isNotNull(); // Invalid: cannot put underscores // in the 0x radix prefix assertThat( getLiteral( int.class.getCanonicalName(), "0_x52" ) ).isNull(); // Invalid: cannot put underscores // at the beginning of a number assertThat( getLiteral( int.class.getCanonicalName(), "0x_52" ) ).isNull(); // OK (hexadecimal literal) assertThat( getLiteral( int.class.getCanonicalName(), "0x5_2" ) ).isNotNull(); // Invalid: cannot put underscores // at the end of a number assertThat( getLiteral( int.class.getCanonicalName(), "0x52_" ) ).isNull(); }
Record deserialize(Object data) { return (Record) fieldDeserializer.value(data); }
@Test public void testListDeserialize() { Schema schema = new Schema( optional(1, "list_type", Types.ListType.ofOptional(2, Types.LongType.get())) ); StructObjectInspector inspector = ObjectInspectorFactory.getStandardStructObjectInspector( Arrays.asList("list_type"), Arrays.asList( ObjectInspectorFactory.getStandardListObjectInspector( PrimitiveObjectInspectorFactory.writableLongObjectInspector) )); Deserializer deserializer = new Deserializer.Builder() .schema(schema) .writerInspector((StructObjectInspector) IcebergObjectInspector.create(schema)) .sourceInspector(inspector) .build(); Record expected = GenericRecord.create(schema); expected.set(0, Collections.singletonList(1L)); Object[] data = new Object[] { new Object[] { new LongWritable(1L) } }; Record actual = deserializer.deserialize(data); Assert.assertEquals(expected, actual); }
@Override public void deleteFiles(Iterable<String> pathsToDelete) throws BulkDeletionFailureException { AtomicInteger failureCount = new AtomicInteger(0); Tasks.foreach(pathsToDelete) .executeWith(executorService()) .retry(DELETE_RETRY_ATTEMPTS) .stopRetryOn(FileNotFoundException.class) .suppressFailureWhenFinished() .onFailure( (f, e) -> { LOG.error("Failure during bulk delete on file: {} ", f, e); failureCount.incrementAndGet(); }) .run(this::deleteFile); if (failureCount.get() != 0) { throw new BulkDeletionFailureException(failureCount.get()); } }
@Test public void testDeleteFilesErrorHandling() { List<String> filesCreated = random.ints(2).mapToObj(x -> "fakefsnotreal://file-" + x).collect(Collectors.toList()); assertThatThrownBy(() -> hadoopFileIO.deleteFiles(filesCreated)) .isInstanceOf(BulkDeletionFailureException.class) .hasMessage("Failed to delete 2 files"); }
RegistryEndpointProvider<Void> committer(URL location) { return new Committer(location); }
@Test public void testCommitter_GetAccept() { Assert.assertEquals(0, testBlobPusher.committer(mockUrl).getAccept().size()); }
@Override public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets( String groupId, Set<TopicPartition> partitions, DeleteConsumerGroupOffsetsOptions options) { SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> future = DeleteConsumerGroupOffsetsHandler.newFuture(groupId); DeleteConsumerGroupOffsetsHandler handler = new DeleteConsumerGroupOffsetsHandler(groupId, partitions, logContext); invokeDriver(handler, future, options.timeoutMs); return new DeleteConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)), partitions); }
@Test public void testDeleteConsumerGroupOffsetsNumRetries() throws Exception { final Cluster cluster = mockCluster(3, 0); final Time time = new MockTime(); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) { final TopicPartition tp1 = new TopicPartition("foo", 0); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); env.kafkaClient().prepareResponse(prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR)); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); final DeleteConsumerGroupOffsetsResult result = env.adminClient() .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); TestUtils.assertFutureError(result.all(), TimeoutException.class); } }
static Namespace toNamespace(Database database) { return Namespace.of(database.name()); }
@Test public void testToNamespace() { Database database = Database.builder().name("db").build(); Namespace namespace = Namespace.of("db"); assertThat(GlueToIcebergConverter.toNamespace(database)).isEqualTo(namespace); }
public static String getMasterAddress(Configuration conf) { String masterAddress = conf.get(MRConfig.MASTER_ADDRESS, "localhost:8012"); return NetUtils.createSocketAddr(masterAddress, 8012, MRConfig.MASTER_ADDRESS).getHostName(); }
@Test public void testGetMasterAddress() { YarnConfiguration conf = new YarnConfiguration(); // Trying invalid master address for classic conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME); conf.set(MRConfig.MASTER_ADDRESS, "local:invalid"); // should throw an exception for invalid value try { Master.getMasterAddress(conf); fail("Should not reach here as there is a bad master address"); } catch (Exception e) { // Expected } // Change master address to a valid value conf.set(MRConfig.MASTER_ADDRESS, "bar.com:8042"); String masterHostname = Master.getMasterAddress(conf); assertThat(masterHostname).isEqualTo("bar.com"); }
public T send() throws IOException { return web3jService.send(this, responseType); }
@Test public void testEthCoinbase() throws Exception { web3j.ethCoinbase().send(); verifyResult("{\"jsonrpc\":\"2.0\",\"method\":\"eth_coinbase\",\"params\":[],\"id\":1}"); }
@Override public boolean match(Message msg, StreamRule rule) { Double msgVal = getDouble(msg.getField(rule.getField())); if (msgVal == null) { return false; } Double ruleVal = getDouble(rule.getValue()); if (ruleVal == null) { return false; } return rule.getInverted() ^ (msgVal < ruleVal); }
@Test public void testSuccessfulMatch() { StreamRule rule = getSampleRule(); rule.setValue("100"); Message msg = getSampleMessage(); msg.addField("something", "20"); StreamRuleMatcher matcher = getMatcher(rule); assertTrue(matcher.match(msg, rule)); }
private static byte[] getRegionSigningKey(String secret, String date, String region, String signMethod) { byte[] dateSignkey = getDateSigningKey(secret, date, signMethod); try { Mac mac = Mac.getInstance(signMethod); mac.init(new SecretKeySpec(dateSignkey, signMethod)); return mac.doFinal(region.getBytes(StandardCharsets.UTF_8)); } catch (NoSuchAlgorithmException e) { throw new RuntimeException("unsupport Algorithm:" + signMethod); } catch (InvalidKeyException e) { throw new RuntimeException("InvalidKey"); } }
@Test public void testGetRegionSigningKey() throws InvocationTargetException, IllegalAccessException, NoSuchMethodException { String secret = "mySecret"; String date = "20220101"; String region = "cn-beijing"; String signMethod = "HmacSHA256"; byte[] expectArray = new byte[]{-40, 5, 2, 41, -48, 82, 10, -102, 125, -24, -44, -83, 127, 6, -85, 93, -26, 88, -88, 65, 56, 79, -5, -66, 65, -106, 19, -64, -85, 103, -32, 110}; RamSignAdapter adapter = new RamSignAdapter(); // Use reflection to access the private method Method getRegionSigningKeyMethod = RamSignAdapter.class.getDeclaredMethod("getRegionSigningKey", String.class, String.class, String.class, String.class); getRegionSigningKeyMethod.setAccessible(true); byte[] signingKey = (byte[]) getRegionSigningKeyMethod.invoke(adapter, secret, date, region, signMethod); Assertions.assertEquals(32, signingKey.length); Assertions.assertArrayEquals(expectArray, signingKey); }
public ConfigDef define(ConfigKey key) { if (configKeys.containsKey(key.name)) { throw new ConfigException("Configuration " + key.name + " is defined twice."); } if (key.group != null && !groups.contains(key.group)) { groups.add(key.group); } configKeys.put(key.name, key); return this; }
@Test public void testInvalidDefaultRange() { assertThrows(ConfigException.class, () -> new ConfigDef().define("name", Type.INT, -1, Range.between(0, 10), Importance.HIGH, "docs")); }
public static Map<String, String> parseCsvMap(String str) { Map<String, String> map = new HashMap<>(); if (str == null || str.isEmpty()) return map; String[] keyVals = str.split("\\s*,\\s*"); for (String s : keyVals) { int lio = s.lastIndexOf(":"); map.put(s.substring(0, lio).trim(), s.substring(lio + 1).trim()); } return map; }
@Test public void testCsvMap() { Map<String, String> emptyMap = Csv.parseCsvMap(""); assertEquals(Collections.emptyMap(), emptyMap); String kvPairsIpV6 = "a:b:c:v,a:b:c:v"; Map<String, String> ipv6Map = Csv.parseCsvMap(kvPairsIpV6); for (Map.Entry<String, String> entry : ipv6Map.entrySet()) { assertEquals("a:b:c", entry.getKey()); assertEquals("v", entry.getValue()); } String singleEntry = "key:value"; Map<String, String> singleMap = Csv.parseCsvMap(singleEntry); String value = singleMap.get("key"); assertEquals("value", value); String kvPairsIpV4 = "192.168.2.1/30:allow, 192.168.2.1/30:allow"; Map<String, String> ipv4Map = Csv.parseCsvMap(kvPairsIpV4); for (Map.Entry<String, String> entry : ipv4Map.entrySet()) { assertEquals("192.168.2.1/30", entry.getKey()); assertEquals("allow", entry.getValue()); } String kvPairsSpaces = "key:value , key: value"; Map<String, String> spaceMap = Csv.parseCsvMap(kvPairsSpaces); for (Map.Entry<String, String> entry : spaceMap.entrySet()) { assertEquals("key", entry.getKey()); assertEquals("value", entry.getValue()); } }
public void computeTaskLags(final ProcessId uuid, final Map<TaskId, Long> allTaskEndOffsetSums) { if (!taskLagTotals.isEmpty()) { throw new IllegalStateException("Already computed task lags for this client."); } for (final Map.Entry<TaskId, Long> taskEntry : allTaskEndOffsetSums.entrySet()) { final TaskId task = taskEntry.getKey(); final Long endOffsetSum = taskEntry.getValue(); final Long offsetSum = taskOffsetSums.getOrDefault(task, 0L); if (offsetSum == Task.LATEST_OFFSET) { taskLagTotals.put(task, Task.LATEST_OFFSET); } else if (offsetSum == UNKNOWN_OFFSET_SUM) { taskLagTotals.put(task, UNKNOWN_OFFSET_SUM); } else if (endOffsetSum < offsetSum) { LOG.warn("Task " + task + " had endOffsetSum=" + endOffsetSum + " smaller than offsetSum=" + offsetSum + " on member " + uuid + ". This probably means the task is corrupted," + " which in turn indicates that it will need to restore from scratch if it gets assigned." + " The assignor will de-prioritize returning this task to this member in the hopes that" + " some other member may be able to re-use its state."); taskLagTotals.put(task, endOffsetSum); } else { taskLagTotals.put(task, endOffsetSum - offsetSum); } } }
@Test public void shouldThrowIllegalStateExceptionIfTaskLagsMapIsNotEmpty() { final Map<TaskId, Long> taskOffsetSums = Collections.singletonMap(TASK_0_1, 5L); final Map<TaskId, Long> allTaskEndOffsetSums = Collections.singletonMap(TASK_0_1, 1L); client.computeTaskLags(null, taskOffsetSums); assertThrows(IllegalStateException.class, () -> client.computeTaskLags(null, allTaskEndOffsetSums)); }
@Override public InterpreterResult interpret(String st, InterpreterContext context) throws InterpreterException { try { Properties finalProperties = new Properties(); finalProperties.putAll(getProperties()); Properties newProperties = new Properties(); newProperties.load(new StringReader(st)); for (String key : newProperties.stringPropertyNames()) { finalProperties.put(key.trim(), newProperties.getProperty(key).trim()); } LOGGER.debug("Properties for InterpreterGroup: {} is {}", interpreterGroupId, finalProperties); interpreterSetting.setInterpreterGroupProperties(interpreterGroupId, finalProperties); return new InterpreterResult(InterpreterResult.Code.SUCCESS); } catch (IOException e) { LOGGER.error("Fail to update interpreter setting", e); return new InterpreterResult(InterpreterResult.Code.ERROR, ExceptionUtils.getStackTrace(e)); } }
@Test void testEmptyValue() throws InterpreterException { ConfInterpreter confInterpreter = (ConfInterpreter) interpreterFactory.getInterpreter("test.conf", executionContext); InterpreterContext context = InterpreterContext.builder() .setNoteId("noteId") .setParagraphId("paragraphId") .build(); InterpreterResult result = confInterpreter.interpret(" property_1\t \n new_property\t \n", context); assertEquals(InterpreterResult.Code.SUCCESS, result.code, result.toString()); assertTrue(interpreterFactory.getInterpreter("test", executionContext) instanceof RemoteInterpreter); RemoteInterpreter remoteInterpreter = (RemoteInterpreter) interpreterFactory.getInterpreter("test", executionContext); remoteInterpreter.interpret("hello world", context); assertEquals(7, remoteInterpreter.getProperties().size()); assertEquals("", remoteInterpreter.getProperty("property_1")); assertEquals("", remoteInterpreter.getProperty("new_property")); assertEquals("value_3", remoteInterpreter.getProperty("property_3")); }
@Override public <T> ListenableFuture<PluginExecutionResult<T>> executeAsync( PluginExecutorConfig<T> executorConfig) { // Executes the core plugin logic within the thread pool. return FluentFuture.from( pluginExecutionThreadPool.submit( () -> { executionStopwatch.start(); return executorConfig.pluginExecutionLogic().call(); })) // Terminate plugin if it runs over 1 hour. .withTimeout(Duration.ofHours(1), pluginExecutionThreadPool) // If execution succeeded, build successful execution result. .transform(resultData -> buildSucceededResult(resultData, executorConfig), directExecutor()) // If execution failed, build failed execution result. .catching( Throwable.class, exception -> buildFailedResult(exception, executorConfig), directExecutor()); }
@Test public void executeAsync_whenFailedWithUnknownException_returnsFailedResult() throws ExecutionException, InterruptedException { PluginExecutorConfig<String> executorConfig = PluginExecutorConfig.<String>builder() .setMatchedPlugin(FAKE_MATCHING_RESULT) .setPluginExecutionLogic( () -> { throw new RuntimeException("test unknown exception"); }) .build(); PluginExecutionResult<String> executionResult = new PluginExecutorImpl(PLUGIN_EXECUTION_THREAD_POOL, executionStopWatch) .executeAsync(executorConfig) .get(); assertThat(executionResult.exception()).isPresent(); assertThat(executionResult.exception().get()) .hasCauseThat() .isInstanceOf(RuntimeException.class); assertThat(executionResult.exception().get()) .hasMessageThat() .contains( String.format("Plugin execution error on '%s'.", FAKE_MATCHING_RESULT.pluginId())); assertThat(executionResult.isSucceeded()).isFalse(); assertThat(executionResult.executionStopwatch().elapsed()).isEqualTo(TICK_DURATION); assertThat(executionResult.resultData()).isEmpty(); }
@Override public void register(InetSocketAddress address) throws Exception { NetUtil.validAddress(address); doRegister(address); RegistryHeartBeats.addHeartBeat(REGISTRY_TYPE, address, this::doRegister); }
@Test public void testRegister() throws Exception { RegistryService registryService = mock(ConsulRegistryServiceImpl.class); InetSocketAddress inetSocketAddress = new InetSocketAddress("127.0.0.1", 8091); registryService.register(inetSocketAddress); verify(registryService).register(inetSocketAddress); }
@Override public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency); try { MultipartUpload multipart = null; try { if(status.isAppend()) { final List<MultipartUpload> list = multipartService.find(file); if(!list.isEmpty()) { multipart = list.iterator().next(); } } } catch(AccessDeniedException | InteroperabilityException e) { log.warn(String.format("Ignore failure listing incomplete multipart uploads. %s", e)); } final Path bucket = containerService.getContainer(file); final List<MultipartPart> completed = new ArrayList<>(); // Not found or new upload if(null == multipart) { if(log.isInfoEnabled()) { log.info("No pending multipart upload found"); } final S3Object object = new S3WriteFeature(session, acl).getDetails(file, status); // ID for the initiated multipart upload. multipart = session.getClient().multipartStartUpload(bucket.isRoot() ? 
StringUtils.EMPTY : bucket.getName(), object); if(log.isDebugEnabled()) { log.debug(String.format("Multipart upload started for %s with ID %s", multipart.getObjectKey(), multipart.getUploadId())); } } else { if(status.isAppend()) { // Add already completed parts completed.addAll(multipartService.list(multipart)); } } // Full size of file final long size = status.getOffset() + status.getLength(); final List<Future<MultipartPart>> parts = new ArrayList<>(); long remaining = status.getLength(); long offset = 0; for(int partNumber = 1; remaining > 0; partNumber++) { boolean skip = false; if(status.isAppend()) { if(log.isInfoEnabled()) { log.info(String.format("Determine if part number %d can be skipped", partNumber)); } for(MultipartPart c : completed) { if(c.getPartNumber().equals(partNumber)) { if(log.isInfoEnabled()) { log.info(String.format("Skip completed part number %d", partNumber)); } skip = true; offset += c.getSize(); break; } } } if(!skip) { // Last part can be less than 5 MB. Adjust part size. final long length = Math.min(Math.max((size / (S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining); // Submit to queue parts.add(this.submit(pool, file, local, throttle, listener, status, multipart, partNumber, offset, length, callback)); remaining -= length; offset += length; } } completed.addAll(Interruptibles.awaitAll(parts)); // Combining all the given parts into the final object. Processing of a Complete Multipart Upload request // could take several minutes to complete. Because a request could fail after the initial 200 OK response // has been sent, it is important that you check the response body to determine whether the request succeeded. multipart.setBucketName(bucket.isRoot() ? 
StringUtils.EMPTY : bucket.getName()); final MultipartCompleted complete = session.getClient().multipartCompleteUpload(multipart, completed); if(log.isInfoEnabled()) { log.info(String.format("Completed multipart upload for %s with %d parts and checksum %s", complete.getObjectKey(), completed.size(), complete.getEtag())); } if(file.getType().contains(Path.Type.encrypted)) { log.warn(String.format("Skip checksum verification for %s with client side encryption enabled", file)); } else { if(S3Session.isAwsHostname(session.getHost().getHostname())) { completed.sort(new MultipartPart.PartNumberComparator()); final StringBuilder concat = new StringBuilder(); for(MultipartPart part : completed) { concat.append(part.getEtag()); } final String expected = String.format("%s-%d", ChecksumComputeFactory.get(HashAlgorithm.md5).compute(concat.toString()), completed.size()); final String reference = StringUtils.remove(complete.getEtag(), "\""); if(!StringUtils.equalsIgnoreCase(expected, reference)) { throw new ChecksumException(MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), file.getName()), MessageFormat.format("Mismatch between MD5 hash {0} of uploaded data and ETag {1} returned by the server", expected, reference)); } } } final StorageObject object = new StorageObject(containerService.getKey(file)); object.setETag(complete.getEtag()); object.setContentLength(size); object.setStorageClass(multipart.getStorageClass()); if(multipart.getMetadata() != null) { object.addAllMetadata(multipart.getMetadata()); } // Mark parent status as complete status.withResponse(new S3AttributesAdapter(session.getHost()).toAttributes(object)).setComplete(); return object; } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Upload {0} failed", e, file); } finally { // Cancel future tasks pool.shutdown(false); } }
// Uploads a file one byte larger than the 5 MiB part size so the transfer
// spans two parts, then verifies byte count, completion state and remote size.
@Test
public void testMultipleParts() throws Exception {
    // 5L * 1024L * 1024L
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    final S3MultipartUploadService m = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl, 5 * 1024L * 1024L, 5);
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    final int length = 5242881;
    final byte[] content = RandomUtils.nextBytes(length);
    IOUtils.write(content, local.getOutputStream(false));
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    final BytecountStreamListener count = new BytecountStreamListener();
    m.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), count, status, null);
    assertEquals(content.length, count.getSent());
    assertTrue(status.isComplete());
    assertNotSame(PathAttributes.EMPTY, status.getResponse());
    assertTrue(new S3FindFeature(session, acl).find(test));
    assertEquals(content.length, new S3AttributesFinderFeature(session, acl).find(test).getSize());
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    local.delete();
}
/**
 * Checks whether the given character sequence is a valid zip (postal) code.
 *
 * @param value the text to validate against the {@code ZIP_CODE} pattern
 * @return {@code true} if the value matches the zip-code pattern
 */
public static boolean isZipCode(CharSequence value) {
    final boolean matched = isMatchRegex(ZIP_CODE, value);
    return matched;
}
// Accepts representative postal codes from several Chinese regions.
@Test
public void isZipCodeTest() {
    // Hong Kong
    boolean zipCode = Validator.isZipCode("999077");
    assertTrue(zipCode);
    // Macao
    zipCode = Validator.isZipCode("999078");
    assertTrue(zipCode);
    // Taiwan (6-digit 3+3 postal codes in use since March 2020)
    zipCode = Validator.isZipCode("822001");
    assertTrue(zipCode);
    // Inner Mongolia
    zipCode = Validator.isZipCode("016063");
    assertTrue(zipCode);
    // Shanxi
    zipCode = Validator.isZipCode("045246");
    assertTrue(zipCode);
    // Hebei
    zipCode = Validator.isZipCode("066502");
    assertTrue(zipCode);
    // Beijing
    zipCode = Validator.isZipCode("102629");
    assertTrue(zipCode);
}
/**
 * Combines a collection of {@link CompletionStage}s into a single stage that
 * completes with all of their results once every input has completed.
 *
 * <p>If any input completes exceptionally, the returned stage completes
 * exceptionally as well (per {@link CompletableFuture#allOf}).
 *
 * @param inputFutures the stages to combine
 * @param <T> result type of each stage
 * @return a stage yielding the results of all inputs
 */
public static <T> CompletionStage<Collection<T>> flattenFutures(
        Collection<CompletionStage<T>> inputFutures) {
    // Convert through toCompletableFuture() instead of the previous unchecked
    // toArray(new CompletableFuture[0]): a CompletionStage implementation is not
    // guaranteed to be a CompletableFuture (e.g. minimalCompletionStage()), so the
    // raw array copy could fail or yield stages whose join() is unsupported.
    final List<CompletableFuture<T>> futures = new ArrayList<>(inputFutures.size());
    for (CompletionStage<T> stage : inputFutures) {
        futures.add(stage.toCompletableFuture());
    }
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
            .thenApply(ignored ->
                    futures.stream().map(CompletableFuture::join).collect(Collectors.toList()));
}
// flattenFutures must complete with the results of all input futures.
@Test
public void testFlattenFuturesForCollection() {
    CompletionStage<Collection<String>> resultFuture = FutureUtils.flattenFutures(
            ImmutableList.of(
                    CompletableFuture.completedFuture("hello"),
                    CompletableFuture.completedFuture("world")));
    CompletionStage<Void> validationFuture = resultFuture.thenAccept(
            actualResults -> {
                Assert.assertEquals(
                        "Expected flattened results to contain {hello, world}", RESULTS, actualResults);
            });
    validationFuture.toCompletableFuture().join();
}
/**
 * Sets the name of this topic.
 *
 * @param name the topic name; must contain at least one non-whitespace character
 * @return this configuration, for call chaining
 */
@Override
public TopicConfig setName(String name) {
    final String validated = checkHasText(name, "name must contain text");
    this.name = validated;
    return this;
}
// The fluent setter must store the given topic name.
@Test
public void testSetName() {
    TopicConfig topicConfig = new TopicConfig().setName("test");
    assertTrue("test".equals(topicConfig.getName()));
}
/**
 * Retrieves information about the clients connected to the given cluster node.
 *
 * @param node the node to query
 * @return the parsed CLIENT LIST entries for that node
 */
@Override
public List<RedisClientInfo> getClientList(RedisClusterNode node) {
    final RedisClient client = getEntry(node);
    final RFuture<List<String>> future =
            executorService.readAsync(client, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
    final List<String> lines = syncFuture(future);
    return CONVERTER.convert(lines.toArray(new String[0]));
}
// The master node is expected to report more than ten connected clients.
@Test
public void testGetClientList() {
    RedisClusterNode master = getFirstMaster();
    List<RedisClientInfo> list = connection.getClientList(master);
    assertThat(list.size()).isGreaterThan(10);
}
/**
 * Polls for message fragments and delivers them to the handler, bounded by
 * {@code limitPosition} and at most {@code fragmentLimit} fragments.
 *
 * @param handler       callback invoked once per complete, non-padding fragment
 * @param limitPosition position (exclusive) beyond which fragments are not consumed
 * @param fragmentLimit maximum number of fragments to deliver in this call
 * @return the number of fragments delivered to the handler
 */
public int boundedPoll(final FragmentHandler handler, final long limitPosition, final int fragmentLimit)
{
    // A closed image never delivers fragments.
    if (isClosed)
    {
        return 0;
    }

    final long initialPosition = subscriberPosition.get();
    if (initialPosition >= limitPosition)
    {
        // Already at or past the bound: nothing to consume.
        return 0;
    }

    int fragmentsRead = 0;
    final int initialOffset = (int)initialPosition & termLengthMask;
    int offset = initialOffset;
    final UnsafeBuffer termBuffer = activeTermBuffer(initialPosition);
    // Clamp the scan to whichever comes first: end of the term buffer or the bound.
    final int limitOffset = (int)Math.min(termBuffer.capacity(), (limitPosition - initialPosition) + offset);
    final Header header = this.header;
    header.buffer(termBuffer);

    try
    {
        while (fragmentsRead < fragmentLimit && offset < limitOffset)
        {
            final int length = frameLengthVolatile(termBuffer, offset);
            if (length <= 0)
            {
                // Frame not yet fully committed by the publisher; stop scanning here.
                break;
            }

            final int frameOffset = offset;
            final int alignedLength = BitUtil.align(length, FRAME_ALIGNMENT);
            offset += alignedLength;

            // Padding frames advance the position but carry no user data.
            if (isPaddingFrame(termBuffer, frameOffset))
            {
                continue;
            }

            ++fragmentsRead;
            header.offset(frameOffset);
            handler.onFragment(termBuffer, frameOffset + HEADER_LENGTH, length - HEADER_LENGTH, header);
        }
    }
    catch (final Exception ex)
    {
        errorHandler.onError(ex);
    }
    finally
    {
        // Publish the new subscriber position even if the handler threw, so the
        // bytes consumed so far (including padding) are not re-delivered.
        final long resultingPosition = initialPosition + (offset - initialOffset);
        if (resultingPosition > initialPosition)
        {
            subscriberPosition.setOrdered(resultingPosition);
        }
    }

    return fragmentsRead;
}
// Polls with a bound above Integer.MAX_VALUE: the data frame is delivered and
// the trailing padding frame advances the position without a handler callback.
@Test
void shouldPollFragmentsToBoundedFragmentHandlerWithMaxPositionAboveIntMaxValue() {
    final int initialOffset = TERM_BUFFER_LENGTH - (ALIGNED_FRAME_LENGTH * 2);
    final long initialPosition = computePosition(
        INITIAL_TERM_ID, initialOffset, POSITION_BITS_TO_SHIFT, INITIAL_TERM_ID);
    final long maxPosition = (long)Integer.MAX_VALUE + 1000;
    position.setOrdered(initialPosition);
    final Image image = createImage();

    insertDataFrame(INITIAL_TERM_ID, initialOffset);
    insertPaddingFrame(INITIAL_TERM_ID, initialOffset + ALIGNED_FRAME_LENGTH);

    final int fragmentsRead = image.boundedPoll(
        mockFragmentHandler, maxPosition, Integer.MAX_VALUE);
    assertThat(fragmentsRead, is(1));

    final InOrder inOrder = Mockito.inOrder(position, mockFragmentHandler);
    inOrder.verify(mockFragmentHandler).onFragment(
        any(UnsafeBuffer.class), eq(initialOffset + HEADER_LENGTH), eq(DATA.length), any(Header.class));
    inOrder.verify(position).setOrdered(TERM_BUFFER_LENGTH);
}
/**
 * Computes the input splits for the job by shuffling the copy listing into
 * dynamic chunks and mapping them onto splits.
 *
 * @param jobContext the job whose configuration and id drive the split layout
 * @return the list of input splits for the map tasks
 */
@Override
public List<InputSplit> getSplits(JobContext jobContext)
        throws IOException, InterruptedException {
    LOG.info("DynamicInputFormat: Getting splits for job:" + jobContext.getJobID());
    // The chunk context is derived lazily from the job configuration.
    chunkContext = getChunkContext(jobContext.getConfiguration());
    return createSplits(jobContext, splitCopyListingIntoChunksWithShuffle(jobContext));
}
// Builds a copy listing, then checks each split's records are expected paths,
// that reported progress is monotone within [0, 1], and the total file count.
@Test public void testGetSplits() throws Exception { final DistCpContext context = new DistCpContext(getOptions()); Configuration configuration = new Configuration(); configuration.set("mapred.map.tasks", String.valueOf(context.getMaxMaps())); CopyListing.getCopyListing(configuration, CREDENTIALS, context) .buildListing(new Path(cluster.getFileSystem().getUri().toString() +"/tmp/testDynInputFormat/fileList.seq"), context); JobContext jobContext = new JobContextImpl(configuration, new JobID()); DynamicInputFormat<Text, CopyListingFileStatus> inputFormat = new DynamicInputFormat<Text, CopyListingFileStatus>(); List<InputSplit> splits = inputFormat.getSplits(jobContext); int nFiles = 0; int taskId = 0; for (InputSplit split : splits) { StubContext stubContext = new StubContext(jobContext.getConfiguration(), null, taskId); final TaskAttemptContext taskAttemptContext = stubContext.getContext(); RecordReader<Text, CopyListingFileStatus> recordReader = inputFormat.createRecordReader(split, taskAttemptContext); stubContext.setReader(recordReader); recordReader.initialize(splits.get(0), taskAttemptContext); float previousProgressValue = 0f; while (recordReader.nextKeyValue()) { CopyListingFileStatus fileStatus = recordReader.getCurrentValue(); String source = fileStatus.getPath().toString(); System.out.println(source); Assert.assertTrue(expectedFilePaths.contains(source)); final float progress = recordReader.getProgress(); Assert.assertTrue(progress >= previousProgressValue); Assert.assertTrue(progress >= 0.0f); Assert.assertTrue(progress <= 1.0f); previousProgressValue = progress; ++nFiles; } Assert.assertTrue(recordReader.getProgress() == 1.0f); ++taskId; } Assert.assertEquals(expectedFilePaths.size(), nFiles); }
/**
 * Prepares fetch requests for all fetchable nodes and hands them off for sending.
 *
 * @return the number of fetch requests that were prepared
 */
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> requests = prepareFetchRequests();
    sendFetchesInternal(
            requests,
            (node, requestData, response) -> {
                // Completion callbacks re-acquire the Fetcher monitor before
                // delegating, mirroring the synchronized entry point.
                synchronized (Fetcher.this) {
                    handleFetchSuccess(node, requestData, response);
                }
            },
            (node, requestData, failure) -> {
                synchronized (Fetcher.this) {
                    handleFetchFailure(node, requestData, failure);
                }
            });
    return requests.size();
}
// A node under backoff must be skipped; once the backoff expires the fetch is sent.
@Test
public void testFetchSkipsBlackedOutNodes() {
    buildFetcher();
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    Node node = initialUpdateResponse.brokers().iterator().next();

    client.backoff(node, 500);
    assertEquals(0, sendFetches());

    time.sleep(500);
    assertEquals(1, sendFetches());
}
/**
 * Converts the given input value to its JSON string representation.
 *
 * @param input the value to serialize; a null input yields the JSON literal "null"
 * @param <T>   type of the input value
 * @return the JSON text produced by {@code toJson}
 */
@Udf
public <T> String toJsonString(@UdfParameter final T input) {
    return toJson(input);
}
// A null input must serialize to the JSON literal "null".
@Test
public void shouldSerializeNull() {
    // When:
    final String result = udf.toJsonString((Integer) null);
    // Then:
    assertEquals("null", result);
}
/**
 * Not supported by this implementation.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public boolean isScanAllowedUsingPermissionsFromDevopsPlatform() {
    throw new UnsupportedOperationException("Not Implemented");
}
// The stub implementation must reject this operation with the exact message.
@Test
void isScanAllowedUsingPermissionsFromDevopsPlatform_shouldThrowUnsupportedOperationException() {
    assertThatExceptionOfType(UnsupportedOperationException.class)
        .isThrownBy(() -> underTest.isScanAllowedUsingPermissionsFromDevopsPlatform())
        .withMessage("Not Implemented");
}
/**
 * Parses the given command-line arguments.
 *
 * @param args the raw arguments
 * @return a builder populated from the parsed options
 */
public RuntimeOptionsBuilder parse(String... args) {
    return parse(Arrays.asList(args));
}
// Parsing '--order random' must be accepted without error.
@Test
void ensure_order_type_random_is_used() {
    parser
        .parse("--order", "random")
        .build();
}
/**
 * Sets the TCP keep-idle time, in seconds, for sockets of this endpoint.
 *
 * @param socketKeepIdleSeconds the idle time before keep-alive probing starts
 * @return this configuration, for call chaining
 */
public EndpointConfig setSocketKeepIdleSeconds(int socketKeepIdleSeconds) {
    Preconditions.checkPositive("socketKeepIdleSeconds", socketKeepIdleSeconds);
    // NOTE(review): the strict '<' rejects MAX_SOCKET_KEEP_IDLE_SECONDS itself, while
    // the message advertises an inclusive upper bound of 32767 — confirm the constant's
    // value and whether '<=' was intended.
    Preconditions.checkTrue(socketKeepIdleSeconds < MAX_SOCKET_KEEP_IDLE_SECONDS,
            "socketKeepIdleSeconds value " + socketKeepIdleSeconds + " is outside valid range 1 - 32767");
    this.socketKeepIdleSeconds = socketKeepIdleSeconds;
    return this;
}
// Values outside the documented 1..32767 range must be rejected.
@Test
public void testKeepIdleSecondsValidation() {
    EndpointConfig endpointConfig = new EndpointConfig();
    Assert.assertThrows(IllegalArgumentException.class, () -> endpointConfig.setSocketKeepIdleSeconds(0));
    Assert.assertThrows(IllegalArgumentException.class, () -> endpointConfig.setSocketKeepIdleSeconds(32768));
    Assert.assertThrows(IllegalArgumentException.class, () -> endpointConfig.setSocketKeepIdleSeconds(-17));
}
public void stop() { isStopped = true; try { queue.put(Message.POISON_PILL); } catch (InterruptedException e) { // allow thread to exit LOGGER.error("Exception caught.", e); } }
// stop() must enqueue the poison pill, and subsequent sends must fail.
@Test
void testStop() throws Exception {
    final var publishPoint = mock(MqPublishPoint.class);
    final var producer = new Producer("producer", publishPoint);
    verifyNoMoreInteractions(publishPoint);

    producer.stop();
    verify(publishPoint).put(eq(Message.POISON_PILL));

    try {
        producer.send("Hello!");
        fail("Expected 'IllegalStateException' at this point, since the producer has stopped!");
    } catch (IllegalStateException e) {
        assertNotNull(e);
        assertNotNull(e.getMessage());
        assertEquals("Producer Hello! was stopped and fail to deliver requested message [producer].", e.getMessage());
    }
    verifyNoMoreInteractions(publishPoint);
}
/**
 * Applies this format rule: maps the schema "format" value to a registered
 * Java type when one exists, otherwise falls back to the given base type.
 */
@Override
public JType apply(String nodeName, JsonNode node, JsonNode parent, JType baseType, Schema schema) {
    final Class<?> mappedClass = getType(node.asText());
    if (mappedClass == null) {
        // Unknown format: leave the base type untouched.
        return baseType;
    }
    final JType mappedType = baseType.owner()._ref(mappedClass);
    return ruleFactory.getGenerationConfig().isUsePrimitives() ? mappedType.unboxify() : mappedType;
}
// An unrecognized "format" value must leave the base type unchanged.
@Test
public void applyDefaultsToBaseType() {
    TextNode formatNode = TextNode.valueOf("unknown-format");
    JType baseType = new JCodeModel().ref(Long.class);
    JType result = rule.apply("fooBar", formatNode, null, baseType, null);
    assertThat(result, equalTo(baseType));
}
/**
 * Uploads the local file to Box in multiple parts using an upload session,
 * then commits the session with the per-part SHA-1 checksums.
 *
 * @return the committed remote file entry
 * @throws NotfoundException if the commit response contains no file entry
 */
@Override
public File upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener,
                   final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    try {
        // The session commit requires a SHA-1 checksum; compute it up front if absent.
        if(status.getChecksum().algorithm != HashAlgorithm.sha1) {
            status.setChecksum(new SHA1ChecksumCompute().compute(local.getInputStream(), status));
        }
        final List<Future<Part>> parts = new ArrayList<>();
        long offset = 0;
        long remaining = status.getLength();
        final BoxUploadHelper helper = new BoxUploadHelper(session, fileid);
        final UploadSession uploadSession = helper.createUploadSession(status, file);
        // One task per part; the part size is dictated by the upload session.
        for(int partNumber = 1; remaining > 0; partNumber++) {
            final long length = Math.min(uploadSession.getPartSize(), remaining);
            parts.add(this.submit(pool, file, local, throttle, listener, status,
                    uploadSession.getId(), partNumber, offset, length, callback));
            remaining -= length;
            offset += length;
        }
        // Checksums for uploaded segments
        final List<Part> chunks = Interruptibles.awaitAll(parts);
        final Files files = helper.commitUploadSession(file, uploadSession.getId(), status,
                chunks.stream().map(f -> new UploadPart().sha1(f.part.getSha1())
                        .size(f.status.getLength()).offset(f.status.getOffset()).partId(f.part.getId())).collect(Collectors.toList()));
        final Optional<File> optional = files.getEntries().stream().findFirst();
        if(optional.isPresent()) {
            final File commited = optional.get();
            // Mark parent status as complete
            status.withResponse(new BoxAttributesFinderFeature(session, fileid).toAttributes(commited)).setComplete();
            return commited;
        }
        throw new NotfoundException(file.getAbsolute());
    }
    finally {
        // Cancel future tasks
        pool.shutdown(false);
    }
}
// Uploads 20 MiB in chunks, then verifies checksum presence, byte counts,
// remote size and the round-tripped content before cleaning up.
@Test public void testUploadLargeFileInChunks() throws Exception { final BoxFileidProvider fileid = new BoxFileidProvider(session); final BoxLargeUploadService s = new BoxLargeUploadService(session, fileid, new BoxChunkedWriteFeature(session, fileid)); final Path container = new BoxDirectoryFeature(session, fileid).mkdir(new Path( new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus()); final String name = new AlphanumericRandomStringService().random(); final Path file = new Path(container, name, EnumSet.of(Path.Type.file)); final Local local = new Local(System.getProperty("java.io.tmpdir"), name); final byte[] content = RandomUtils.nextBytes(20 * 1024 * 1024); IOUtils.write(content, local.getOutputStream(false)); final TransferStatus status = new TransferStatus(); status.setChecksum(new SHA1ChecksumCompute().compute(local.getInputStream(), new TransferStatus())); status.setLength(content.length); final BytecountStreamListener count = new BytecountStreamListener(); final File response = s.upload(file, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), count, status, new DisabledConnectionCallback()); assertTrue(status.isComplete()); assertNotNull(response.getSha1()); assertEquals(content.length, count.getSent()); assertTrue(status.isComplete()); assertEquals(content.length, status.getResponse().getSize()); assertTrue(new BoxFindFeature(session, fileid).find(file)); assertEquals(content.length, new BoxAttributesFinderFeature(session, fileid).find(file).getSize()); final byte[] compare = new byte[content.length]; IOUtils.readFully(new BoxReadFeature(session, fileid).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()), compare); assertArrayEquals(content, compare); new BoxDeleteFeature(session, fileid).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback()); local.delete(); }
/**
 * Returns the barcode parameters used by this data format.
 *
 * @return the current parameters
 */
public final BarcodeParameters getParams() {
    return params;
}
// The default constructor must populate the default barcode parameters.
@Test
final void testDefaultConstructor() throws IOException {
    try (BarcodeDataFormat barcodeDataFormat = new BarcodeDataFormat()) {
        this.checkParams(BarcodeParameters.IMAGE_TYPE, BarcodeParameters.WIDTH, BarcodeParameters.HEIGHT, BarcodeParameters.FORMAT, barcodeDataFormat.getParams());
    }
}
/**
 * Creates a {@code Sessions} windowing function with the given minimum gap duration.
 *
 * @param gapDuration the minimum gap of inactivity that separates sessions
 */
public static Sessions withGapDuration(Duration gapDuration) {
    return new Sessions(gapDuration);
}
// Timestamps within a one-second gap must merge into the same session window.
@Test
public void testTimeUnit() throws Exception {
    Map<IntervalWindow, Set<String>> expected = new HashMap<>();
    expected.put(new IntervalWindow(new Instant(1), new Instant(2000)), set(1, 2, 1000));
    expected.put(new IntervalWindow(new Instant(5000), new Instant(6001)), set(5000, 5001));
    expected.put(new IntervalWindow(new Instant(10000), new Instant(11000)), set(10000));
    assertEquals(
        expected,
        runWindowFn(
            Sessions.withGapDuration(Duration.standardSeconds(1)),
            Arrays.asList(1L, 2L, 1000L, 5000L, 5001L, 10000L)));
}
public static void tar(@NotNull File source, @NotNull File dest) throws IOException { if (!source.exists()) { throw new IllegalArgumentException("No source file or folder exists: " + source.getAbsolutePath()); } if (dest.exists()) { throw new IllegalArgumentException("Destination refers to existing file or folder: " + dest.getAbsolutePath()); } try (TarArchiveOutputStream tarOut = new TarArchiveOutputStream(new GZIPOutputStream( new BufferedOutputStream(Files.newOutputStream(dest.toPath())), 0x1000))) { doTar("", source, tarOut); } catch (IOException e) { IOUtil.deleteFile(dest); // operation filed, let's remove the destination archive throw e; } }
// Archives a two-file folder and verifies entry names and sizes in the tarball.
@Test
public void testFolderArchived() throws Exception {
    File src = new File(randName);
    src.mkdir();
    FileWriter fw = new FileWriter(new File(src, "1.txt"));
    fw.write("12345");
    fw.close();
    fw = new FileWriter(new File(src, "2.txt"));
    fw.write("12");
    fw.close();
    CompressBackupUtil.tar(src, dest);
    Assert.assertTrue("No destination archive created", dest.exists());
    TarArchiveInputStream tai = new TarArchiveInputStream(new GZIPInputStream(new BufferedInputStream(new FileInputStream(dest))));
    ArchiveEntry entry1 = tai.getNextEntry();
    ArchiveEntry entry2 = tai.getNextEntry();
    if (entry1.getName().compareTo(entry2.getName()) > 0) {
        // kinda sort them lol
        ArchiveEntry tmp = entry1;
        entry1 = entry2;
        entry2 = tmp;
    }
    Assert.assertNotNull("No entry found in destination archive", entry1);
    Assert.assertEquals("Entry has wrong size", 5, entry1.getSize());
    System.out.println(entry1.getName());
    Assert.assertEquals("Entry has wrong relative path", src.getName() + "/1.txt", entry1.getName());
    System.out.println(entry2.getName());
    Assert.assertEquals("Entry has wrong size", 2, entry2.getSize());
    Assert.assertEquals("Entry has wrong relative path", src.getName() + "/2.txt", entry2.getName());
}
/**
 * Builds the Spark application id for the given application and attempt.
 * Uses "&lt;name&gt;-&lt;attemptId&gt;" when it fits the length limit; otherwise derives
 * a hash-based id from a truncated prefix plus the identifying fields.
 */
public static String generateSparkAppId(final SparkApplication app) {
    final long attemptId = ModelUtils.getAttemptId(app);
    final String candidate = String.format("%s-%d", app.getMetadata().getName(), attemptId);
    if (candidate.length() <= DEFAULT_ID_LENGTH_LIMIT) {
        return candidate;
    }
    // Reserve room for the hash suffix and its separator.
    final int prefixLength = DEFAULT_ID_LENGTH_LIMIT - DEFAULT_HASH_BASED_IDENTIFIER_LENGTH_LIMIT - 1;
    return generateHashBasedId(
            candidate.substring(0, prefixLength),
            app.getMetadata().getNamespace(),
            app.getMetadata().getName(),
            String.valueOf(attemptId));
}
// The generated id must respect the length limit even for maximal-length names.
@Test
void generatedSparkAppIdShouldComplyLengthLimit() {
    String namespaceName = RandomStringUtils.randomAlphabetic(253);
    String appName = RandomStringUtils.randomAlphabetic(253);
    SparkApplication mockApp = mock(SparkApplication.class);
    ObjectMeta appMeta = new ObjectMetaBuilder().withName(appName).withNamespace(namespaceName).build();
    when(mockApp.getMetadata()).thenReturn(appMeta);
    String appId = SparkAppSubmissionWorker.generateSparkAppId(mockApp);
    assertTrue(appId.length() <= DEFAULT_ID_LENGTH_LIMIT);
}
public static long lowerHexToUnsignedLong(CharSequence lowerHex) { int length = lowerHex.length(); if (length < 1 || length > 32) throw isntLowerHexLong(lowerHex); // trim off any high bits int beginIndex = length > 16 ? length - 16 : 0; return lowerHexToUnsignedLong(lowerHex, beginIndex); }
// Accepts valid lower-hex longs; rejects bad characters and bad lengths.
@Test
void lowerHexToUnsignedLongTest() {
    assertThat(lowerHexToUnsignedLong("ffffffffffffffff")).isEqualTo(-1);
    assertThat(lowerHexToUnsignedLong(Long.toHexString(Long.MAX_VALUE))).isEqualTo(Long.MAX_VALUE);

    try {
        lowerHexToUnsignedLong("0"); // invalid
        failBecauseExceptionWasNotThrown(NumberFormatException.class);
    } catch (NumberFormatException e) {
    }

    try {
        lowerHexToUnsignedLong(Character.toString((char) ('9' + 1))); // invalid
        failBecauseExceptionWasNotThrown(NumberFormatException.class);
    } catch (NumberFormatException e) {
    }

    try {
        lowerHexToUnsignedLong(Character.toString((char) ('0' - 1))); // invalid
        failBecauseExceptionWasNotThrown(NumberFormatException.class);
    } catch (NumberFormatException e) {
    }

    try {
        lowerHexToUnsignedLong(Character.toString((char) ('f' + 1))); // invalid
        failBecauseExceptionWasNotThrown(NumberFormatException.class);
    } catch (NumberFormatException e) {
    }

    try {
        lowerHexToUnsignedLong(Character.toString((char) ('a' - 1))); // invalid
        failBecauseExceptionWasNotThrown(NumberFormatException.class);
    } catch (NumberFormatException e) {
    }

    try {
        lowerHexToUnsignedLong("fffffffffffffffffffffffffffffffff"); // too long
        failBecauseExceptionWasNotThrown(NumberFormatException.class);
    } catch (NumberFormatException e) {
    }

    try {
        lowerHexToUnsignedLong(""); // too short
        failBecauseExceptionWasNotThrown(NumberFormatException.class);
    } catch (NumberFormatException e) {
    }

    try {
        lowerHexToUnsignedLong("rs"); // bad charset
        failBecauseExceptionWasNotThrown(NumberFormatException.class);
    } catch (NumberFormatException e) {
    }
}
/**
 * Starts a multipart upload for the file and returns a stream that buffers
 * writes into segments; the upload is completed on close and cancelled when
 * an I/O error response is received.
 *
 * @throws InteroperabilityException if the server response lacks the upload URL or token
 */
@Override
public HttpResponseOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final CreateFileUploadResponse uploadResponse = upload.start(file, status);
    final String uploadUrl = uploadResponse.getUploadUrl();
    if(StringUtils.isBlank(uploadUrl)) {
        throw new InteroperabilityException("Missing upload URL in server response");
    }
    final String uploadToken = uploadResponse.getToken();
    if(StringUtils.isBlank(uploadToken)) {
        throw new InteroperabilityException("Missing upload token in server response");
    }
    final MultipartUploadTokenOutputStream proxy = new MultipartUploadTokenOutputStream(session, nodeid, file, status, uploadUrl);
    return new HttpResponseOutputStream<Node>(new MemorySegementingOutputStream(proxy,
            new HostPreferences(session.getHost()).getInteger("sds.upload.multipart.chunksize")),
            new SDSAttributesAdapter(session), status) {
        // Guards against completing the upload twice on a repeated close().
        private final AtomicBoolean close = new AtomicBoolean();
        private final AtomicReference<Node> node = new AtomicReference<>();

        @Override
        public Node getStatus() {
            return node.get();
        }

        @Override
        public void close() throws IOException {
            try {
                if(close.get()) {
                    log.warn(String.format("Skip double close of stream %s", this));
                    return;
                }
                super.close();
                // Finalize the multipart upload and remember the resulting node.
                node.set(upload.complete(file, uploadToken, status));
            }
            catch(BackgroundException e) {
                throw new IOException(e);
            }
            finally {
                close.set(true);
            }
        }

        @Override
        protected void handleIOException(final IOException e) throws IOException {
            // Cancel upload on error reply
            try {
                upload.cancel(file, uploadToken);
            }
            catch(BackgroundException f) {
                log.warn(String.format("Failure %s cancelling upload for file %s with upload token %s after failure %s", f, file, uploadToken, e));
            }
            throw e;
        }
    };
}
// Writing a file at the root node is not supported and must fail.
@Test(expected = InteroperabilityException.class)
public void testWriteRoot() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path test = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final TransferStatus status = new TransferStatus();
    final SDSMultipartWriteFeature writer = new SDSMultipartWriteFeature(session, nodeid);
    final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
}
/**
 * Routes a precise sharding value to its target table name.
 *
 * @return the matched target table, or {@code null} if none matches
 */
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
    final Comparable<?> value = shardingValue.getValue();
    ShardingSpherePreconditions.checkNotNull(value, NullShardingValueException::new);
    final String suffix = String.valueOf(doSharding(parseDate(value)));
    return ShardingAutoTableAlgorithmUtils.findMatchedTargetName(availableTargetNames, suffix, shardingValue.getDataNodeInfo()).orElse(null);
}
// A range covering seconds 4..10 must resolve to exactly shards 1, 2 and 3.
@Test
void assertRangeDoShardingWithPartRange() {
    List<String> availableTargetNames = Arrays.asList("t_order_0", "t_order_1", "t_order_2", "t_order_3", "t_order_4");
    Collection<String> actual = shardingAlgorithm.doSharding(availableTargetNames,
        new RangeShardingValue<>("t_order", "create_time", DATA_NODE_INFO, Range.closed("2020-01-01 00:00:04", "2020-01-01 00:00:10")));
    assertThat(actual.size(), is(3));
    assertTrue(actual.contains("t_order_1"));
    assertTrue(actual.contains("t_order_2"));
    assertTrue(actual.contains("t_order_3"));
}
/**
 * Sends the payload to the webhook URL and records the outcome of the delivery.
 * Any failure (invalid URL, connection error, ...) is captured in the returned
 * delivery rather than propagated to the caller.
 */
@Override
public WebhookDelivery call(Webhook webhook, WebhookPayload payload) {
    final long startedAt = system.now();
    final WebhookDelivery.Builder delivery = new WebhookDelivery.Builder()
            .setAt(startedAt)
            .setPayload(payload)
            .setWebhook(webhook);
    try {
        final HttpUrl url = HttpUrl.parse(webhook.getUrl());
        if (url == null) {
            throw new IllegalArgumentException("Webhook URL is not valid: " + webhook.getUrl());
        }
        delivery.setEffectiveUrl(HttpUrlHelper.obfuscateCredentials(webhook.getUrl(), url));
        final Request request = buildHttpRequest(url, webhook, payload);
        try (Response response = execute(request)) {
            delivery.setHttpStatus(response.code());
        }
    }
    catch (Exception e) {
        delivery.setError(e);
    }
    return delivery
            .setDurationInMs((int) (system.now() - startedAt))
            .build();
}
// A 307 redirect must be followed re-issuing the POST, recording the final 200.
@Test
public void redirects_should_be_followed_with_POST_method() throws Exception {
    Webhook webhook = new Webhook(WEBHOOK_UUID, PROJECT_UUID, CE_TASK_UUID, randomAlphanumeric(40), "my-webhook", server.url("/redirect").toString(), null);
    // /redirect redirects to /target
    server.enqueue(new MockResponse().setResponseCode(307).setHeader("Location", server.url("target")));
    server.enqueue(new MockResponse().setResponseCode(200));

    WebhookDelivery delivery = newSender(false).call(webhook, PAYLOAD);

    assertThat(delivery.getHttpStatus()).contains(200);
    assertThat(delivery.getDurationInMs().get()).isNotNegative();
    assertThat(delivery.getError()).isEmpty();
    assertThat(delivery.getAt()).isEqualTo(NOW);
    assertThat(delivery.getWebhook()).isSameAs(webhook);
    assertThat(delivery.getPayload()).isSameAs(PAYLOAD);
    takeAndVerifyPostRequest("/redirect");
    takeAndVerifyPostRequest("/target");
}
/**
 * Generates SQL tokens for the select statement and all of its subqueries,
 * de-duplicated while preserving insertion order.
 */
public Collection<SQLToken> generateSQLTokens(final SelectStatementContext selectStatementContext) {
    final Collection<SQLToken> tokens = new LinkedHashSet<>(generateSelectSQLTokens(selectStatementContext));
    for (SelectStatementContext each : selectStatementContext.getSubqueryContexts().values()) {
        tokens.addAll(generateSelectSQLTokens(each));
    }
    return tokens;
}
// A column owner matching a table alias must yield exactly one token even when
// the same table appears twice in the statement.
@Test void assertGenerateSQLTokensWhenOwnerMatchTableAliasForSameTable() { SimpleTableSegment doctorTable = new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("doctor"))); doctorTable.setAlias(new AliasSegment(0, 0, new IdentifierValue("a"))); ColumnSegment column = new ColumnSegment(0, 0, new IdentifierValue("mobile")); column.setColumnBoundInfo(new ColumnSegmentBoundInfo(new IdentifierValue(DefaultDatabase.LOGIC_NAME), new IdentifierValue(DefaultDatabase.LOGIC_NAME), new IdentifierValue("doctor"), new IdentifierValue("mobile"))); column.setOwner(new OwnerSegment(0, 0, new IdentifierValue("a"))); ProjectionsSegment projections = mock(ProjectionsSegment.class); when(projections.getProjections()).thenReturn(Collections.singleton(new ColumnProjectionSegment(column))); SelectStatementContext sqlStatementContext = mock(SelectStatementContext.class, RETURNS_DEEP_STUBS); when(sqlStatementContext.getSubqueryType()).thenReturn(null); when(sqlStatementContext.getDatabaseType()).thenReturn(databaseType); when(sqlStatementContext.getSqlStatement().getProjections()).thenReturn(projections); when(sqlStatementContext.getSubqueryContexts().values()).thenReturn(Collections.emptyList()); SimpleTableSegment sameDoctorTable = new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("doctor"))); when(sqlStatementContext.getTablesContext()).thenReturn(new TablesContext(Arrays.asList(doctorTable, sameDoctorTable), databaseType, DefaultDatabase.LOGIC_NAME)); when(sqlStatementContext.getProjectionsContext().getProjections()).thenReturn(Collections.singleton(new ColumnProjection("a", "mobile", null, databaseType))); Collection<SQLToken> actual = generator.generateSQLTokens(sqlStatementContext); assertThat(actual.size(), is(1)); }
/**
 * Returns the container endpoints configured for this deployment.
 *
 * @return the configured endpoints
 */
public Set<ContainerEndpoint> getEndpoints() {
    return endpoints;
}
// Endpoints passed to the builder must be exposed unchanged; none by default.
@Test
void testContainerEndpoints() {
    assertTrue(new DeployState.Builder().endpoints(Set.of()).build().getEndpoints().isEmpty());
    var endpoints = Set.of(new ContainerEndpoint("c1", ApplicationClusterEndpoint.Scope.global, List.of("c1.example.com", "c1-alias.example.com")));
    assertEquals(endpoints, new DeployState.Builder().endpoints(endpoints).build().getEndpoints());
}
/**
 * Deserializes the given XML fragment into an instance of the requested type.
 *
 * @param partial the XML snippet to parse (read as UTF-8)
 * @param o       the target type
 * @return the deserialized object
 */
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception {
    return fromXmlPartial(toInputStream(partial, UTF_8), o);
}
// A git material partial carrying a branch attribute must parse the branch.
@Test
void shouldLoadBranchFromGitPartial() throws Exception {
    String gitPartial = "<git url='file:///tmp/testGitRepo/project1' branch='foo'/>";
    GitMaterialConfig gitMaterial = xmlLoader.fromXmlPartial(gitPartial, GitMaterialConfig.class);
    assertThat(gitMaterial.getBranch()).isEqualTo("foo");
}
/**
 * Returns the metadata needed to route a key-based query for the given store,
 * using the default stream partitioner built from the supplied serializer.
 *
 * @param storeName     name of the state store being queried
 * @param key           the key to locate
 * @param keySerializer serializer used to partition the key; must not be null
 * @throws NullPointerException     if {@code keySerializer} is null
 * @throws IllegalArgumentException when named topologies are in use; callers must
 *                                  then use the overload taking a topology name
 */
public synchronized <K> KeyQueryMetadata getKeyQueryMetadataForKey(final String storeName,
                                                                   final K key,
                                                                   final Serializer<K> keySerializer) {
    Objects.requireNonNull(keySerializer, "keySerializer can't be null");
    if (topologyMetadata.hasNamedTopologies()) {
        // The trailing spaces matter: these literals are concatenated into one
        // sentence (the previous version produced "keySerializer)method" and
        // "thataccepts").
        throw new IllegalArgumentException("Cannot invoke the getKeyQueryMetadataForKey(storeName, key, keySerializer) "
                                               + "method when using named topologies, please use the overload that "
                                               + "accepts a topologyName parameter to identify the correct store");
    }
    return getKeyQueryMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer));
}
// A null key serializer must be rejected with a NullPointerException.
@Test
public void shouldThrowWhenSerializerIsNull() {
    assertThrows(NullPointerException.class, () -> metadataState.getKeyQueryMetadataForKey("table-three", "key", (Serializer<Object>) null));
}
/**
 * Writes the given protobuf message to the output stream.
 *
 * @return this instance, for call chaining
 * @throws IllegalStateException if the message cannot be written
 */
public Protobuf2 writeTo(Message msg, OutputStream output) {
    try {
        msg.writeTo(output);
        return this;
    }
    catch (IOException e) {
        throw new IllegalStateException("Can not write message " + msg, e);
    }
}
// A stream that fails on write must surface as IllegalStateException.
@Test
public void writeTo_throws_ISE_on_error() throws Exception {
    try (FailureOutputStream output = new FailureOutputStream()) {
        assertThatThrownBy(() -> underTest.writeTo(newMetadata(PROJECT_KEY_1), output))
            .isInstanceOf(IllegalStateException.class)
            .hasMessageContaining("Can not write message");
    }
}
/**
 * The log of the Weibull probability density function:
 * log f(x) = log(k/&lambda;) + (k-1)&middot;log(x/&lambda;) - (x/&lambda;)^k for x &gt; 0.
 *
 * @param x the point at which to evaluate the density
 * @return the log density, or negative infinity for non-positive x
 */
@Override
public double logp(double x) {
    if (x <= 0) {
        return Double.NEGATIVE_INFINITY;
    }
    // Hoist the common subexpression x / lambda.
    final double z = x / lambda;
    return Math.log(k / lambda) + (k - 1) * Math.log(z) - Math.pow(z, k);
}
// Compares log-density values against precomputed reference numbers.
@Test
public void testLogP() {
    System.out.println("logP");
    WeibullDistribution instance = new WeibullDistribution(1.5, 1.0);
    instance.rand();
    assertEquals(Math.log(0.4595763), instance.logp(0.1), 1E-5);
    assertEquals(Math.log(0.6134254), instance.logp(0.2), 1E-5);
    assertEquals(Math.log(0.7447834), instance.logp(0.5), 1E-5);
    assertEquals(Math.log(0.2926085), instance.logp(1.5), 1E-5);
    assertEquals(Math.log(0.0455367), instance.logp(2.5), 1E-5);
    assertEquals(Math.log(4.677527e-05), instance.logp(5.0), 1E-5);
}
@Override public long getCost(ProcessingDetails details) { assert weights != null : "Cost provider must be initialized before use"; long cost = 0; // weights was initialized to the same length as Timing.values() for (int i = 0; i < Timing.values().length; i++) { cost += details.get(Timing.values()[i]) * weights[i]; } return cost; }
// Using the provider before initialization must trip its assertion.
@Test(expected = AssertionError.class)
public void testGetCostBeforeInit() {
    costProvider.getCost(null);
}
/**
 * Determines whether the given bean is a local TCC service bean.
 *
 * @param bean     the candidate bean
 * @param beanName the bean's registered name (not used by this check)
 * @return true if the bean is a local TCC bean
 */
@Override
public boolean isService(Object bean, String beanName) {
    return isLocalTCC(bean);
}
// A local TCC bean must be recognized as a service.
@Test
public void testServiceParser(){
    TccActionImpl tccAction = new TccActionImpl();
    boolean result = localTCCRemotingParser.isService(tccAction, "a");
    Assertions.assertTrue(result);
}
/**
 * Health of the Compute Engine: GREEN while the CE process reports itself
 * operational, otherwise the pre-built RED health value.
 */
@Override
public Health check() {
    return processCommandWrapper.isCeOperational() ? Health.GREEN : RED_HEALTH;
}
// When CE is not operational, the health check must be RED and carry an explanatory cause.
@Test
public void check_returns_RED_status_with_cause_if_ce_is_not_operational() {
    when(processCommandWrapper.isCeOperational()).thenReturn(false);
    Health health = underTest.check();
    assertThat(health.getStatus()).isEqualTo(Health.Status.RED);
    assertThat(health.getCauses()).containsOnly("Compute Engine is not operational");
}
@Override public boolean imbalanceDetected(LoadImbalance imbalance) { Set<? extends MigratablePipeline> candidates = imbalance.getPipelinesOwnedBy(imbalance.srcOwner); //only attempts to migrate if at least 1 pipeline exists return !candidates.isEmpty(); }
// With no pipelines owned by the source, there is nothing to migrate, so no imbalance is reported.
@Test
public void imbalanceDetected_shouldReturnFalseWhenNoPipelineExist() {
    ownerPipelines.put(imbalance.srcOwner, Collections.emptySet());
    boolean imbalanceDetected = strategy.imbalanceDetected(imbalance);
    assertFalse(imbalanceDetected);
}
/**
 * Caches the seckill under a namespaced key ("seckill:&lt;id&gt;") with a short
 * TTL so stale entries expire on their own.
 */
public void putSeckill(Seckill seckill) {
    final String cacheKey = "seckill:" + seckill.getSeckillId();
    final int ttlSeconds = 60;
    redisTemplate.opsForValue().set(cacheKey, seckill, ttlSeconds, TimeUnit.SECONDS);
}
// putSeckill should delegate to the mocked value operations without throwing.
@Test
void putSeckill() {
    long seckillId = 1001L;
    ValueOperations valueOperations = mock(ValueOperations.class);
    when(redisTemplate.opsForValue()).thenReturn(valueOperations);
    Seckill seckill = new Seckill();
    seckill.setSeckillId(seckillId);
    assertDoesNotThrow(() -> redisService.putSeckill(seckill));
}
/**
 * Translates a Windows-style absolute path ("X:\...") to its cygwin form.
 * Tries the {@code cygpath} tool first; when it is unavailable, falls back
 * to a manual {@code /cygdrive/<drive>/...} translation. Paths that are not
 * Windows-style are returned unchanged.
 */
static String toCygwin(String path) {
    // Only paths of the form "X:\..." need translation.
    if (path.length() < 3 || !":\\".equals(path.substring(1, 3))) {
        return path;
    }
    try {
        // cygpath rejects a trailing backslash, so strip it before asking.
        final String trimmed = path.endsWith("\\") ? path.substring(0, path.length() - 1) : path;
        return ExecHelper.exec(false, "cygpath", trimmed).trim();
    } catch (IOException e) {
        // cygpath unavailable: build the /cygdrive path by hand.
        final String drive = path.substring(0, 1);
        final String remainder = path.substring(3);
        return "/cygdrive/" + drive.toLowerCase(Locale.ROOT) + "/" + remainder.replace('\\', '/');
    }
}
// Disabled: requires a local cygwin installation with cygpath on the PATH.
@Test
@Disabled
void testCygwin() throws Exception {
    assertEquals("/cygdrive/c/work/tmp/", EnvHelper.toCygwin("C:\\work\\tmp\\"));
}
/**
 * Looks up the SPI-registered mapper for the given datasource and table.
 * Throws a {@code NacosRuntimeException} when either argument is blank, the
 * datasource is unknown, or the table has no registered mapper. When
 * datasource SQL logging is enabled, the mapper is wrapped in a logging proxy.
 */
public <R extends Mapper> R findMapper(String dataSource, String tableName) {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("[MapperManager] findMapper dataSource: {}, tableName: {}", dataSource, tableName);
    }
    // Both coordinates are mandatory.
    if (StringUtils.isBlank(dataSource) || StringUtils.isBlank(tableName)) {
        throw new NacosRuntimeException(FIND_DATASOURCE_ERROR_CODE, "dataSource or tableName is null");
    }
    final Map<String, Mapper> mappersForDataSource = MAPPER_SPI_MAP.get(dataSource);
    if (Objects.isNull(mappersForDataSource)) {
        throw new NacosRuntimeException(FIND_DATASOURCE_ERROR_CODE,
                "[MapperManager] Failed to find the datasource,dataSource:" + dataSource);
    }
    final Mapper mapper = mappersForDataSource.get(tableName);
    if (Objects.isNull(mapper)) {
        throw new NacosRuntimeException(FIND_TABLE_ERROR_CODE,
                "[MapperManager] Failed to find the table ,tableName:" + tableName);
    }
    if (dataSourceLogEnable) {
        // Wrap with a proxy that logs the SQL statements being produced.
        return MapperProxy.createSingleProxy(mapper);
    }
    return (R) mapper;
}
// After registration (via testJoin), a mapper must be resolvable by datasource and table name.
@Test
void testFindMapper() {
    testJoin(); // registers the mappers consulted below
    MapperManager instance = MapperManager.instance(false);
    Mapper mapper = instance.findMapper(DataSourceConstant.MYSQL, "test");
    assertNotNull(mapper);
}
/**
 * Returns the current number of entries, read from the atomic counter.
 */
public long size() {
    return size.get();
}
// size() must track inserts, per-element iterator removal, re-insert, and clear().
@Test(timeout=60000)
public void testSize() throws Exception {
    createPageFileAndIndex(100);
    ListIndex<String, Long> listIndex = ((ListIndex<String, Long>) this.index);
    this.index.load(tx);
    tx.commit();
    // Insert a batch and verify the reported size.
    int count = 30;
    tx = pf.tx();
    doInsert(count);
    tx.commit();
    assertEquals("correct size", count, listIndex.size());
    // Remove entries one by one through the iterator; size must shrink in lockstep.
    tx = pf.tx();
    Iterator<Map.Entry<String, Long>> iterator = index.iterator(tx);
    while (iterator.hasNext()) {
        iterator.next();
        iterator.remove();
        assertEquals("correct size", --count, listIndex.size());
    }
    tx.commit();
    // Re-insert and then clear; size must drop back to zero.
    count = 30;
    tx = pf.tx();
    doInsert(count);
    tx.commit();
    assertEquals("correct size", count, listIndex.size());
    tx = pf.tx();
    listIndex.clear(tx);
    assertEquals("correct size", 0, listIndex.size());
    tx.commit();
}
/**
 * Loads a config record by its primary key.
 *
 * @param id primary key of the config record
 * @return the matching record, or {@code null} when none exists
 */
@Override
public ConfigDO getConfig(Long id) {
    return configMapper.selectById(id);
}
// getConfig must return the previously inserted record with all fields intact.
@Test
public void testGetConfig() {
    // mock data
    ConfigDO dbConfig = randomConfigDO();
    configMapper.insert(dbConfig); // @Sql: insert an existing record first
    // prepare parameters
    Long id = dbConfig.getId();
    // invoke
    ConfigDO config = configService.getConfig(id);
    // assert
    assertNotNull(config);
    assertPojoEquals(dbConfig, config);
}
/**
 * Resolves a JDBC table handle through the metadata cache.
 * Per the companion test, unknown schemas/tables resolve to {@code null}.
 */
@Override
public JdbcTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName) {
    return jdbcMetadataCache.getTableHandle(session, tableName);
}
// Known tables resolve to stable (cached) handles; unknown schema or table yields null.
@Test
public void testGetTableHandle() {
    JdbcTableHandle tableHandle = metadata.getTableHandle(SESSION, new SchemaTableName("example", "numbers"));
    assertEquals(metadata.getTableHandle(SESSION, new SchemaTableName("example", "numbers")), tableHandle);
    assertNull(metadata.getTableHandle(SESSION, new SchemaTableName("example", "unknown")));
    assertNull(metadata.getTableHandle(SESSION, new SchemaTableName("unknown", "numbers")));
    assertNull(metadata.getTableHandle(SESSION, new SchemaTableName("unknown", "unknown")));
}
/**
 * Forwards the event to the parent appender and, while capture is switched
 * on via {@code available}, also records a parsed copy in {@code logList}.
 */
@Override
public void append(LoggingEvent event) {
    super.append(event);
    if (available) {
        logList.add(parseLog(event));
    }
}
// Events are only collected after DubboAppender.doStart() switches capture on.
@Test
void testAppend() {
    DubboAppender appender = new DubboAppender();
    appender.append(event);
    assumeTrue(0 == DubboAppender.logList.size());
    DubboAppender.doStart();
    appender.append(event);
    assertThat(DubboAppender.logList, hasSize(1));
    assertThat(DubboAppender.logList.get(0).getLogThread(), equalTo("thread-name"));
}
/**
 * Serializes this message into the supplied buffer using the shared writer.
 *
 * @param byteBuf destination buffer
 * @throws LispWriterException when the message cannot be encoded
 */
@Override
public void writeTo(ByteBuf byteBuf) throws LispWriterException {
    WRITER.writeTo(byteBuf, this);
}
// Round trip: a request serialized and read back must be equal to the original.
@Test
public void testSerialization() throws LispReaderException, LispWriterException, LispParseError {
    ByteBuf byteBuf = Unpooled.buffer();
    RequestWriter writer = new RequestWriter();
    writer.writeTo(byteBuf, request1);
    RequestReader reader = new RequestReader();
    LispMapRequest deserialized = reader.readFrom(byteBuf);
    new EqualsTester().addEqualityGroup(request1, deserialized).testEquals();
}
/**
 * Detaches the given input channel from this client. When the last channel
 * is gone and the connection cannot be pooled for reuse, the connection is
 * torn down; otherwise only the channel's outstanding request is cancelled.
 */
@Override
public void close(RemoteInputChannel inputChannel) throws IOException {
    clientHandler.removeInputChannel(inputChannel);
    // Decrement the usage counter without letting it drop below zero.
    final int remaining = closeReferenceCounter.updateAndGet(count -> Math.max(count - 1, 0));
    if (remaining == 0 && !canBeReused()) {
        closeConnection();
    } else {
        clientHandler.cancelRequestFor(inputChannel.getInputChannelId());
    }
}
// Retriggering a partition request (e.g. after failures) must re-send a
// PartitionRequest with the same receiver id and credit each time, and leave
// nothing extra in the outbound queue.
@TestTemplate
void testRetriggerPartitionRequest() throws Exception {
    final long deadline = System.currentTimeMillis() + 30_000L; // 30 secs
    final CreditBasedPartitionRequestClientHandler handler =
            new CreditBasedPartitionRequestClientHandler();
    final EmbeddedChannel channel = new EmbeddedChannel(handler);
    final PartitionRequestClient client =
            createPartitionRequestClient(channel, handler, connectionReuseEnabled);
    final int numExclusiveBuffers = 2;
    final NetworkBufferPool networkBufferPool = new NetworkBufferPool(10, 32);
    final SingleInputGate inputGate = createSingleInputGate(1, networkBufferPool);
    final RemoteInputChannel inputChannel =
            InputChannelBuilder.newBuilder()
                    .setConnectionManager(
                            mockConnectionManagerWithPartitionRequestClient(client))
                    .setPartitionRequestListenerTimeout(1)
                    .setMaxBackoff(2)
                    .buildRemoteChannel(inputGate);
    try {
        inputGate.setInputChannels(inputChannel);
        final BufferPool bufferPool = networkBufferPool.createBufferPool(6, 6);
        inputGate.setBufferPool(bufferPool);
        inputGate.setupChannels();
        // first subpartition request
        inputChannel.requestSubpartitions();
        assertThat(channel.isWritable()).isTrue();
        Object readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(PartitionRequest.class);
        assertThat(((PartitionRequest) readFromOutbound).receiverId)
                .isEqualTo(inputChannel.getInputChannelId());
        assertThat(((PartitionRequest) readFromOutbound).credit).isEqualTo(numExclusiveBuffers);
        // retrigger subpartition request, e.g. due to failures
        inputGate.retriggerPartitionRequest(
                inputChannel.getPartitionId().getPartitionId(), inputChannel.getChannelInfo());
        runAllScheduledPendingTasks(channel, deadline);
        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(PartitionRequest.class);
        assertThat(((PartitionRequest) readFromOutbound).receiverId)
                .isEqualTo(inputChannel.getInputChannelId());
        assertThat(((PartitionRequest) readFromOutbound).credit).isEqualTo(numExclusiveBuffers);
        // retrigger subpartition request once again, e.g. due to failures
        inputGate.retriggerPartitionRequest(
                inputChannel.getPartitionId().getPartitionId(), inputChannel.getChannelInfo());
        runAllScheduledPendingTasks(channel, deadline);
        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(PartitionRequest.class);
        assertThat(((PartitionRequest) readFromOutbound).receiverId)
                .isEqualTo(inputChannel.getInputChannelId());
        assertThat(((PartitionRequest) readFromOutbound).credit).isEqualTo(numExclusiveBuffers);
        assertThat((Object) channel.readOutbound()).isNull();
    } finally {
        // Release all the buffer resources
        inputGate.close();
        networkBufferPool.destroyAllBufferPools();
        networkBufferPool.destroy();
    }
}
/**
 * Appends environment markers to the user-agent info list, in a fixed order:
 * operating system first, then container/orchestrator markers, then the
 * cloud provider (GCE, or otherwise the EC2 details).
 *
 * @param info mutable list the markers are appended to
 */
@VisibleForTesting
public static void addUserAgentEnvironments(List<String> info) {
    info.add(String.format(OS_FORMAT, OSUtils.OS_NAME));
    // Docker and Kubernetes markers are independent of each other.
    if (EnvironmentUtils.isDocker()) {
        info.add(DOCKER_KEY);
    }
    if (EnvironmentUtils.isKubernetes()) {
        info.add(KUBERNETES_KEY);
    }
    // Cloud provider: GCE takes precedence; otherwise EC2 details are probed.
    if (EnvironmentUtils.isGoogleComputeEngine()) {
        info.add(GCE_KEY);
    } else {
        addEC2Info(info);
    }
}
// On EC2 with a product code and no user data: expect OS, product code, then the EC2 marker.
@Test
public void userAgentEnvironmentStringEC2AMI() {
    String randomProductCode = "random123code";
    Mockito.when(EnvironmentUtils.isEC2()).thenReturn(true);
    Mockito.when(EnvironmentUtils.getEC2ProductCode()).thenReturn(randomProductCode);
    // When no user data in this ec2, null is returned
    Mockito.when(EC2MetadataUtils.getUserData()).thenReturn(null);
    List<String> info = new ArrayList<>();
    UpdateCheckUtils.addUserAgentEnvironments(info);
    Assert.assertEquals(3, info.size());
    Assert.assertEquals(String.format(UpdateCheckUtils.OS_FORMAT, OSUtils.OS_NAME), info.get(0));
    Assert.assertEquals(String.format(UpdateCheckUtils.PRODUCT_CODE_FORMAT, randomProductCode), info.get(1));
    Assert.assertEquals(UpdateCheckUtils.EC2_KEY, info.get(2));
}
/**
 * Configures a pooled datasource from database metadata: pool properties
 * (with variable substitution), credentials, JDBC url, and driver class.
 * Also records the database attributes under the datasource name for later lookup.
 *
 * @throws KettleDatabaseException when the metadata cannot be resolved
 */
@VisibleForTesting
static void configureDataSource( BasicDataSource ds, DatabaseMeta databaseMeta, String partitionId,
    int initialSize, int maximumSize ) throws KettleDatabaseException {
  // substitute variables and populate pool properties; add credentials
  Properties connectionPoolProperties = new Properties( databaseMeta.getConnectionPoolingProperties() );
  connectionPoolProperties = environmentSubstitute( connectionPoolProperties, databaseMeta );
  setPoolProperties( ds, connectionPoolProperties, initialSize, maximumSize );
  setCredentials( ds, databaseMeta, partitionId );
  // add url/driver class
  String url = databaseMeta.environmentSubstitute( databaseMeta.getURL( partitionId ) );
  ds.setUrl( url );
  String clazz = databaseMeta.getDriverClass();
  if ( databaseMeta.getDatabaseInterface() != null ) {
    // load the driver with the classloader that owns the database interface
    ds.setDriverClassLoader( databaseMeta.getDatabaseInterface().getClass().getClassLoader() );
  }
  ds.setDriverClassName( clazz );
  // remember the per-datasource attributes for later lookups by name
  dataSourcesAttributesMap.put( getDataSourceName( databaseMeta, partitionId ), databaseMeta.getAttributes() );
}
// The datasource must receive driver class, url, credentials and pool sizing.
@Test
public void testConfigureDataSource() throws KettleDatabaseException {
    when( dbMeta.getURL( "partId" ) ).thenReturn( "jdbc:foo://server:111" );
    when( dbMeta.getUsername() ).thenReturn( "suzy" );
    when( dbMeta.getPassword() ).thenReturn( "password" );
    ConnectionPoolUtil.configureDataSource( dataSource, dbMeta, "partId", INITIAL_SIZE, MAX_SIZE );
    verify( dataSource ).setDriverClassName( "org.pentaho.di.core.database.ConnectionPoolUtilTest" );
    verify( dataSource ).setUrl( "jdbc:foo://server:111" );
    verify( dataSource ).addConnectionProperty( "user", "suzy" );
    verify( dataSource ).addConnectionProperty( "password", "password" );
    verify( dataSource ).setInitialSize( INITIAL_SIZE );
    verify( dataSource ).setMaxTotal( MAX_SIZE );
}
/**
 * Determines if a message stanza is eligible for Message Carbons (XEP-0280) delivery,
 * applying the MUC-related exclusions first and then the generic eligibility rules.
 *
 * @param stanza the message to evaluate (cannot be null)
 * @return true when the message should be carbon-copied to other resources
 */
public static boolean isEligibleForCarbonsDelivery(final Message stanza) {
    // To properly handle messages exchanged with a MUC (or similar service), the server must be able to identify
    // MUC-related messages. This can be accomplished by tracking the clients' presence in MUCs, or by checking for
    // the <x xmlns="http://jabber.org/protocol/muc#user"> element in messages. The following rules apply to
    // MUC-related messages:
    if (stanza.getChildElement("x", "http://jabber.org/protocol/muc#user") != null) {
        // A <message/> containing a Direct MUC Invitations (XEP-0249) SHOULD be carbon-copied.
        if (containsChildElement(stanza, Set.of("x"), "jabber:x:conference")) {
            return true;
        }
        // A <message/> containing a Mediated Invitation SHOULD be carbon-copied.
        // (The enclosing branch already established that the muc#user element is present; the original code
        // redundantly re-checked it for null before a second identical lookup.)
        if (stanza.getChildElement("x", "http://jabber.org/protocol/muc#user").element("invite") != null) {
            return true;
        }
        // A private <message/> from a local user to a MUC participant (sent to a full JID) SHOULD be carbon-copied.
        // The server SHOULD limit carbon-copying to the clients sharing a Multi-Session Nick in that MUC, and MAY
        // inject the <x/> element into such carbon copies. Clients can not respond to carbon-copies of MUC-PMs
        // related to a MUC they are not joined to. Therefore, they SHOULD either ignore such carbon copies, or
        // provide a way for the user to join the MUC before answering.
        if (stanza.getTo() != null && stanza.getTo().getResource() != null
            && stanza.getFrom() != null && stanza.getFrom().getNode() != null
            && XMPPServer.getInstance().isLocal(stanza.getFrom())) {
            return true; // TODO The server SHOULD limit carbon-copying to the clients sharing a Multi-Session Nick in that MUC (OF-2780).
        }
        // A private <message/> from a MUC participant (received from a full JID) to a local user SHOULD NOT be
        // carbon-copied (these messages are already replicated by the MUC service to all joined client instances).
        if (stanza.getFrom() != null && stanza.getFrom().getResource() != null
            && stanza.getTo() != null && stanza.getTo().getNode() != null
            && XMPPServer.getInstance().isLocal(stanza.getTo())) {
            return false;
        }
    }
    // A <message/> of type "groupchat" SHOULD NOT be carbon-copied.
    if (stanza.getType() == Message.Type.groupchat) {
        return false;
    }
    // A <message/> is eligible for carbons delivery if it does not contain a <private/> child element...
    if (containsChildElement(stanza, Set.of("private", "received"), "urn:xmpp:carbons")) {
        return false;
    }
    // and if at least one of the following is true:
    // ... it is of type "chat".
    if (stanza.getType() == Message.Type.chat) {
        return true;
    }
    // ... it is of type "normal" and contains a <body> element.
    if ((stanza.getType() == null || stanza.getType() == Message.Type.normal) && stanza.getBody() != null) {
        return true;
    }
    // ... it contains payload elements typically used in IM
    if (containsChildElement(stanza, Set.of("request", "received"), "urn:xmpp:receipts") // Message Delivery Receipts (XEP-0184)
        || containsChildElement(stanza, Set.of("active", "inactive", "gone", "composing", "paused"), "http://jabber.org/protocol/chatstates") // Chat State Notifications (XEP-0085)
        || (containsChildElement(stanza, Set.of("markable", "received", "displayed", "acknowledged"), "urn:xmpp:chat-markers")) // Chat Markers (XEP-0333)).
    ) {
        return true;
    }
    // ... it is of type "error" and it was sent in response to a <message/> that was eligible for carbons delivery.
    // TODO implement me (OF-2779)
    return false;
}
// A message carrying a carbons <private/> hint must not be eligible for carbon copying.
@Test
public void testPrivate() throws Exception {
    // Setup test fixture.
    final Message input = new Message();
    input.getElement().addElement("private", "urn:xmpp:carbons:2");
    // Execute system under test.
    final boolean result = Forwarded.isEligibleForCarbonsDelivery(input);
    // Verify results.
    assertFalse(result);
}
/**
 * Lists a directory with the FTP MLSD command.
 * The working directory is changed to the target first, the transfer type is
 * forced to ASCII for the listing, and the raw MLSD facts are parsed by the
 * configured reader. FTP/IO failures are mapped to BackgroundExceptions.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    try {
        // The MLSD listing is taken relative to the current working directory.
        if(!session.getClient().changeWorkingDirectory(directory.getAbsolute())) {
            throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
        }
        if(!session.getClient().setFileType(FTPClient.ASCII_FILE_TYPE)) {
            // Set transfer type for traditional data socket file listings. The data transfer is over the
            // data connection in type ASCII or type EBCDIC.
            throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
        }
        // Run the MLSD command over a managed data connection.
        final List<String> list = new DataConnectionActionExecutor(session).data(new DataConnectionAction<List<String>>() {
            @Override
            public List<String> execute() throws BackgroundException {
                try {
                    return session.getClient().list(FTPCmd.MLSD);
                }
                catch(IOException e) {
                    throw new FTPExceptionMappingService().map(e);
                }
            }
        });
        // Parse the raw MLSD lines into attributed paths.
        return reader.read(directory, list);
    }
    catch(IOException e) {
        throw new FTPExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
}
// A freshly created file must show up in the MLSD directory listing; clean up afterwards.
@Test
public void testList() throws Exception {
    final ListService service = new FTPMlsdListService(session);
    final Path directory = new FTPWorkdirService(session).find();
    final Path file = new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new FTPTouchFeature(session).touch(file, new TransferStatus());
    final AttributedList<Path> list = service.list(directory, new DisabledListProgressListener());
    assertTrue(list.contains(file));
    new FTPDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Maximum history duration in milliseconds; a missing setting is reported
 * as {@code 0}.
 */
public long getMaxHistoryMs() {
    if (maxHistory == null) {
        return 0;
    }
    return maxHistory.getMilliseconds();
}
// With no max-history configured, the appender must report 0 ms.
@Test
public void getMaxHistoryMsZeroByDefault() {
    assertThat(appender.getMaxHistoryMs(), is(0L));
}
/**
 * Initializes session state for an incoming SAML authentication request,
 * keyed on the servlet container's HTTP session id. A federation session is
 * only created/looked up when the request carries a federation name; the
 * SAML session itself is always initialized.
 *
 * @throws SamlSessionException when the SAML session cannot be created
 * @throws SharedServiceClientException when a backing shared service call fails
 */
public void initializeSession(AuthenticationRequest authenticationRequest, SAMLBindingContext bindingContext) throws SamlSessionException, SharedServiceClientException {
    final String httpSessionId = authenticationRequest.getRequest().getSession().getId();
    if (authenticationRequest.getFederationName() != null) {
        findOrInitializeFederationSession(authenticationRequest, httpSessionId);
    }
    findOrInitializeSamlSession(authenticationRequest, httpSessionId, bindingContext);
}
// Without a <Scoping> element, no requester id should end up in the SAML session.
@Test
public void requesterIdIsNotPresentTest() throws SamlSessionException, SharedServiceClientException {
    authnRequest.setScoping(null);
    samlSessionService.initializeSession(authenticationRequest, bindingContext);
    assertNull(authenticationRequest.getSamlSession().getRequesterId());
}
/**
 * Collects every {@code NameExpr} inside the given block whose identifier
 * equals {@code exprName}.
 *
 * @param toRead block statement to scan
 * @param exprName identifier to match
 * @return matching name expressions, possibly empty
 */
public static List<NameExpr> getNameExprsFromBlock(final BlockStmt toRead, final String exprName) {
    return toRead.stream()
            .filter(NameExpr.class::isInstance)
            .map(NameExpr.class::cast)
            .filter(nameExpr -> nameExpr.getName().asString().equals(exprName))
            .collect(Collectors.toList());
}
// An empty block yields an empty list; the fixture block contains exactly two matching names.
@Test
void getNameExprsFromBlock() {
    BlockStmt toRead = new BlockStmt();
    List<NameExpr> retrieved = CommonCodegenUtils.getNameExprsFromBlock(toRead, "value");
    assertThat(retrieved).isNotNull();
    assertThat(retrieved).isEmpty();
    toRead = getBlockStmt();
    retrieved = CommonCodegenUtils.getNameExprsFromBlock(toRead, "value");
    assertThat(retrieved).isNotNull();
    assertThat(retrieved).hasSize(2);
}
/**
 * Converts a (possibly null) period into a relative range of whole seconds.
 * Calendar units are approximated with fixed lengths: 1 year = 365 days and
 * 1 month = 30 days; returns null for null input.
 */
@Override
public RelativeRange apply(final Period period) {
    if (period == null) {
        return null;
    }
    // Replace the calendar-dependent year/month components with fixed-length
    // day equivalents so the period can be collapsed to standard seconds.
    final Period normalized = period.withYears(0).withMonths(0)
            .plusDays(period.getYears() * 365)
            .plusDays(period.getMonths() * 30);
    return RelativeRange.Builder.builder()
            .from(normalized.toStandardSeconds().getSeconds())
            .build();
}
// Two months are approximated as 2 * 30 days when converted to seconds.
@Test
void testMonthsPeriodConversion() {
    final RelativeRange result = converter.apply(Period.months(2));
    verifyResult(result, 2 * 30 * 24 * 60 * 60);
}
/**
 * Translates an internal {@code OperationStatus} to its Thrift counterpart.
 *
 * @param operationStatus status to translate
 * @return the corresponding Thrift operation state
 * @throws IllegalArgumentException when the status has no Thrift mapping
 */
public static TOperationState toTOperationState(OperationStatus operationStatus) {
    switch (operationStatus) {
        case INITIALIZED:
            return TOperationState.INITIALIZED_STATE;
        case PENDING:
            return TOperationState.PENDING_STATE;
        case RUNNING:
            return TOperationState.RUNNING_STATE;
        case FINISHED:
            return TOperationState.FINISHED_STATE;
        case ERROR:
            return TOperationState.ERROR_STATE;
        case TIMEOUT:
            return TOperationState.TIMEDOUT_STATE;
        case CANCELED:
            return TOperationState.CANCELED_STATE;
        case CLOSED:
            return TOperationState.CLOSED_STATE;
        default:
            throw new IllegalArgumentException(
                    String.format("Unknown operation status: %s.", operationStatus));
    }
}
// Every OperationStatus must map to its expected Thrift TOperationState.
@Test
public void testConvertOperationStatus() {
    Map<OperationStatus, TOperationState> expectedMappings = new HashMap<>();
    expectedMappings.put(INITIALIZED, TOperationState.INITIALIZED_STATE);
    expectedMappings.put(PENDING, TOperationState.PENDING_STATE);
    expectedMappings.put(RUNNING, TOperationState.RUNNING_STATE);
    expectedMappings.put(FINISHED, TOperationState.FINISHED_STATE);
    expectedMappings.put(CANCELED, TOperationState.CANCELED_STATE);
    expectedMappings.put(CLOSED, TOperationState.CLOSED_STATE);
    expectedMappings.put(ERROR, TOperationState.ERROR_STATE);
    expectedMappings.put(TIMEOUT, TOperationState.TIMEDOUT_STATE);
    for (OperationStatus status : expectedMappings.keySet()) {
        assertThat(expectedMappings.get(status)).isEqualTo(toTOperationState(status));
    }
}
/**
 * Decodes an integer of the requested format from the underlying byte array
 * at the given offset.
 *
 * @param formatType one of the FORMAT_* constants (size, signedness, endianness)
 * @param offset index of the first byte to read
 * @return the decoded value, or {@code null} when the data is too short or
 *         the format is unknown
 */
@Nullable
public Integer getIntValue(@IntFormat final int formatType, @IntRange(from = 0) final int offset) {
    // Reject reads that would run past the end of the data.
    if ((offset + getTypeLen(formatType)) > size())
        return null;

    return switch (formatType) {
        case FORMAT_UINT8 -> unsignedByteToInt(mValue[offset]);
        case FORMAT_UINT16_LE -> unsignedBytesToInt(mValue[offset], mValue[offset + 1]);
        case FORMAT_UINT16_BE -> unsignedBytesToInt(mValue[offset + 1], mValue[offset]);
        case FORMAT_UINT24_LE -> unsignedBytesToInt(
                mValue[offset], mValue[offset + 1], mValue[offset + 2], (byte) 0
        );
        case FORMAT_UINT24_BE -> unsignedBytesToInt(
                mValue[offset + 2], mValue[offset + 1], mValue[offset], (byte) 0
        );
        case FORMAT_UINT32_LE -> unsignedBytesToInt(
                mValue[offset], mValue[offset + 1], mValue[offset + 2], mValue[offset + 3]
        );
        case FORMAT_UINT32_BE -> unsignedBytesToInt(
                mValue[offset + 3], mValue[offset + 2], mValue[offset + 1], mValue[offset]
        );
        case FORMAT_SINT8 -> unsignedToSigned(unsignedByteToInt(mValue[offset]), 8);
        case FORMAT_SINT16_LE -> unsignedToSigned(unsignedBytesToInt(mValue[offset], mValue[offset + 1]), 16);
        case FORMAT_SINT16_BE -> unsignedToSigned(unsignedBytesToInt(mValue[offset + 1], mValue[offset]), 16);
        case FORMAT_SINT24_LE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset], mValue[offset + 1], mValue[offset + 2], (byte) 0
        ), 24);
        // BUG FIX: the previous code passed (byte) 0 as the FIRST (least significant) argument,
        // shifting the payload up by 8 bits and corrupting the 24-bit sign conversion. The byte
        // order must mirror FORMAT_UINT24_BE: LSB = mValue[offset + 2], zero as the unused high byte.
        case FORMAT_SINT24_BE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset + 2], mValue[offset + 1], mValue[offset], (byte) 0
        ), 24);
        case FORMAT_SINT32_LE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset], mValue[offset + 1], mValue[offset + 2], mValue[offset + 3]
        ), 32);
        case FORMAT_SINT32_BE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset + 3], mValue[offset + 2], mValue[offset + 1], mValue[offset]
        ), 32);
        default -> null;
    };
}
// 0xE7D0 interpreted as a big-endian signed 16-bit value is -6192.
@Test
public void getValue_SINT16_BE() {
    final Data data = new Data(new byte[] { (byte) 0xE7, (byte) 0xD0 });
    final int value = data.getIntValue(Data.FORMAT_SINT16_BE, 0);
    assertEquals(-6192, value);
}
/**
 * Returns the set of values that occur more than once across all given
 * collections (whether repeated within one collection or across several).
 *
 * @param collections the collections to inspect
 * @return a set of duplicated values, possibly empty
 */
@Nonnull
public static <V> Set<V> findDuplicates(@Nonnull final Collection<V>... collections) {
    // An element is a duplicate when adding it to 'seen' fails, i.e. it was
    // already contributed by an earlier element.
    final Set<V> seen = new HashSet<>();
    final Set<V> duplicates = new HashSet<>();
    for (final Collection<V> collection : collections) {
        for (final V element : collection) {
            if (!seen.add(element)) {
                duplicates.add(element);
            }
        }
    }
    return duplicates;
}
// An element shared across different collections must be reported exactly once.
@Test
public void testMultipleCollectionsWithDuplicates() throws Exception {
    // Setup test fixture.
    final List<String> input1 = Arrays.asList("a", "b");
    final List<String> input2 = Arrays.asList("DUPLICATE", "d");
    final List<String> input3 = Arrays.asList("e", "DUPLICATE", "g");
    // Execute system under test.
    @SuppressWarnings("unchecked")
    final Set<String> result = CollectionUtils.findDuplicates(input1, input2, input3);
    // Verify results.
    assertEquals(1, result.size());
    assertTrue(result.contains("DUPLICATE"));
}
/**
 * Resolves the parameter schema for the given Java type using the default
 * Java-to-argument-type mapping.
 */
public static ParamType getSchemaFromType(final Type type) {
    return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
}
// String.class must map to the STRING parameter type.
@Test
public void shouldGetStringSchemaFromStringClass() {
    assertThat(
        UdfUtil.getSchemaFromType(String.class),
        equalTo(ParamTypes.STRING)
    );
}
/**
 * Assembles a fully configured {@link Containerizer} from the common CLI
 * options: creates the base containerizer for the target image, wires the
 * console logger's event handlers, then applies CLI and cache configuration.
 *
 * @throws InvalidImageReferenceException when the target image reference is invalid
 * @throws FileNotFoundException when a referenced file is missing
 */
public static Containerizer from(
    CommonCliOptions commonCliOptions,
    ConsoleLogger logger,
    CacheDirectories cacheDirectories)
    throws InvalidImageReferenceException, FileNotFoundException {
  Containerizer containerizer = create(commonCliOptions, logger);
  applyHandlers(containerizer, logger);
  applyConfiguration(containerizer, commonCliOptions, cacheDirectories);
  return containerizer;
}
// A registry:// target yields a push containerizer with credential retrievers and no docker/tar target.
@Test
public void testFrom_registryImage() throws InvalidImageReferenceException, IOException {
    CommonCliOptions commonCliOptions =
        CommandLine.populateCommand(new CommonCliOptions(), "-t", "registry://gcr.io/test/test-image-ref");
    ContainerizerTestProxy containerizer =
        new ContainerizerTestProxy(Containerizers.from(commonCliOptions, consoleLogger, cacheDirectories));
    // description from Containerizer.java
    assertThat(containerizer.getDescription()).isEqualTo("Building and pushing image");
    ImageConfiguration config = containerizer.getImageConfiguration();
    assertThat(config.getCredentialRetrievers()).isNotEmpty();
    assertThat(config.getDockerClient()).isEmpty();
    assertThat(config.getImage().toString()).isEqualTo("gcr.io/test/test-image-ref");
    assertThat(config.getTarPath()).isEmpty();
}
/**
 * Describes this sensor: named "Xoo Cpd Tokenizer Sensor" and restricted to
 * files of the Xoo language.
 */
@Override
public void describe(SensorDescriptor descriptor) {
    descriptor
        .name("Xoo Cpd Tokenizer Sensor")
        .onlyOnLanguages(Xoo.KEY);
}
// Smoke test: describing the sensor must complete without throwing.
@Test
public void testDescriptor() {
    sensor.describe(new DefaultSensorDescriptor());
}
/**
 * Returns the configured address of this RPC service.
 */
@Override
public String getAddress() {
    return address;
}
// The RPC service address must match the actor system's host.
@Test
void testGetAddress() {
    assertThat(pekkoRpcService.getAddress())
        .isEqualTo(PekkoUtils.getAddress(actorSystem).host().get());
}
public boolean matchesBeacon(Beacon beacon) { // All identifiers must match, or the corresponding region identifier must be null. for (int i = mIdentifiers.size(); --i >= 0; ) { final Identifier identifier = mIdentifiers.get(i); Identifier beaconIdentifier = null; if (i < beacon.mIdentifiers.size()) { beaconIdentifier = beacon.getIdentifier(i); } if ((beaconIdentifier == null && identifier != null) || (beaconIdentifier != null && identifier != null && !identifier.equals(beaconIdentifier))) { return false; } } if (mBluetoothAddress != null && !mBluetoothAddress.equalsIgnoreCase(beacon.mBluetoothAddress)) { return false; } return true; }
// A region keyed only on a bluetooth MAC must match a beacon carrying that MAC.
@Test
public void testBeaconMatchesRegionWithSameBluetoothMac() {
    Beacon beacon = new AltBeacon.Builder().setId1("1").setId2("2").setId3("3").setRssi(4)
        .setBeaconTypeCode(5).setTxPower(6).setBluetoothAddress("01:02:03:04:05:06").build();
    Region region = new Region("myRegion", "01:02:03:04:05:06");
    assertTrue("Beacon should match region with mac the same", region.matchesBeacon(beacon));
}
/**
 * Counts the non-whitespace characters in a note's title and content,
 * after the text has been sanitized for counting purposes.
 */
@Override
public int countChars(Note note) {
    final String combined = note.getTitle() + "\n" + note.getContent();
    final String sanitized = sanitizeTextForWordsAndCharsCount(note, combined);
    // Split into single-character tokens and drop whitespace before counting.
    final long nonWhitespace = Arrays.stream(sanitized.split(""))
            .filter(token -> !token.matches("\\s"))
            .count();
    return Math.toIntExact(nonWhitespace);
}
// Checklist marker symbols and whitespace must be excluded from the character count.
@Test
public void countChecklistChars() {
    String content = CHECKED_SYM + "這是中文測試\n" + UNCHECKED_SYM + "これは日本語のテストです";
    Note note = getNote(1L, "这是中文测试", content);
    note.setChecklist(true);
    assertEquals(24, new IdeogramsWordCounter().countChars(note));
}
@Override protected void handleProducer(final CommandProducer cmdProducer) { checkArgument(state == State.Connected); final long producerId = cmdProducer.getProducerId(); final long requestId = cmdProducer.getRequestId(); // Use producer name provided by client if present final String producerName = cmdProducer.hasProducerName() ? cmdProducer.getProducerName() : service.generateUniqueProducerName(); final long epoch = cmdProducer.getEpoch(); final boolean userProvidedProducerName = cmdProducer.isUserProvidedProducerName(); final boolean isEncrypted = cmdProducer.isEncrypted(); final Map<String, String> metadata = CommandUtils.metadataFromCommand(cmdProducer); final SchemaData schema = cmdProducer.hasSchema() ? getSchema(cmdProducer.getSchema()) : null; final ProducerAccessMode producerAccessMode = cmdProducer.getProducerAccessMode(); final Optional<Long> topicEpoch = cmdProducer.hasTopicEpoch() ? Optional.of(cmdProducer.getTopicEpoch()) : Optional.empty(); final boolean isTxnEnabled = cmdProducer.isTxnEnabled(); final String initialSubscriptionName = cmdProducer.hasInitialSubscriptionName() ? 
cmdProducer.getInitialSubscriptionName() : null; final boolean supportsPartialProducer = supportsPartialProducer(); final TopicName topicName = validateTopicName(cmdProducer.getTopic(), requestId, cmdProducer); if (topicName == null) { return; } CompletableFuture<Boolean> isAuthorizedFuture = isTopicOperationAllowed( topicName, TopicOperation.PRODUCE, authenticationData, originalAuthData ); if (!Strings.isNullOrEmpty(initialSubscriptionName)) { isAuthorizedFuture = isAuthorizedFuture.thenCombine( isTopicOperationAllowed(topicName, initialSubscriptionName, TopicOperation.SUBSCRIBE), (canProduce, canSubscribe) -> canProduce && canSubscribe); } isAuthorizedFuture.thenApply(isAuthorized -> { if (!isAuthorized) { String msg = "Client is not authorized to Produce"; log.warn("[{}] {} with role {}", remoteAddress, msg, getPrincipal()); writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg)); return null; } if (log.isDebugEnabled()) { log.debug("[{}] Client is authorized to Produce with role {}", remoteAddress, getPrincipal()); } CompletableFuture<Producer> producerFuture = new CompletableFuture<>(); CompletableFuture<Producer> existingProducerFuture = producers.putIfAbsent(producerId, producerFuture); if (existingProducerFuture != null) { if (!existingProducerFuture.isDone()) { // There was an early request to create a producer with same producerId. // This can happen when client timeout is lower than the broker timeouts. // We need to wait until the previous producer creation request // either complete or fails. 
log.warn("[{}][{}] Producer with id is already present on the connection, producerId={}", remoteAddress, topicName, producerId); commandSender.sendErrorResponse(requestId, ServerError.ServiceNotReady, "Producer is already present on the connection"); } else if (existingProducerFuture.isCompletedExceptionally()) { // remove producer with producerId as it's already completed with exception log.warn("[{}][{}] Producer with id is failed to register present on the connection, producerId={}", remoteAddress, topicName, producerId); ServerError error = getErrorCode(existingProducerFuture); producers.remove(producerId, existingProducerFuture); commandSender.sendErrorResponse(requestId, error, "Producer is already failed to register present on the connection"); } else { Producer producer = existingProducerFuture.getNow(null); log.info("[{}] [{}] Producer with the same id is already created:" + " producerId={}, producer={}", remoteAddress, topicName, producerId, producer); commandSender.sendProducerSuccessResponse(requestId, producer.getProducerName(), producer.getSchemaVersion()); } return null; } if (log.isDebugEnabled()) { log.debug("[{}][{}] Creating producer. producerId={}, producerName={}, schema is {}", remoteAddress, topicName, producerId, producerName, schema == null ? "absent" : "present"); } service.getOrCreateTopic(topicName.toString()).thenCompose((Topic topic) -> { // Check max producer limitation to avoid unnecessary ops wasting resources. 
For example: the new // producer reached max producer limitation, but pulsar did schema check first, it would waste CPU if (((AbstractTopic) topic).isProducersExceeded(producerName)) { log.warn("[{}] Attempting to add producer to topic which reached max producers limit", topic); String errorMsg = "Topic '" + topicName.toString() + "' reached max producers limit"; Throwable t = new BrokerServiceException.ProducerBusyException(errorMsg); return CompletableFuture.failedFuture(t); } // Before creating producer, check if backlog quota exceeded // on topic for size based limit and time based limit CompletableFuture<Void> backlogQuotaCheckFuture = CompletableFuture.allOf( topic.checkBacklogQuotaExceeded(producerName, BacklogQuotaType.destination_storage), topic.checkBacklogQuotaExceeded(producerName, BacklogQuotaType.message_age)); backlogQuotaCheckFuture.thenRun(() -> { // Check whether the producer will publish encrypted messages or not if ((topic.isEncryptionRequired() || encryptionRequireOnProducer) && !isEncrypted && !SystemTopicNames.isSystemTopic(topicName)) { String msg = String.format("Encryption is required in %s", topicName); log.warn("[{}] {}", remoteAddress, msg); if (producerFuture.completeExceptionally(new ServerMetadataException(msg))) { commandSender.sendErrorResponse(requestId, ServerError.MetadataError, msg); } producers.remove(producerId, producerFuture); return; } disableTcpNoDelayIfNeeded(topicName.toString(), producerName); CompletableFuture<SchemaVersion> schemaVersionFuture = tryAddSchema(topic, schema); schemaVersionFuture.exceptionally(exception -> { if (producerFuture.completeExceptionally(exception)) { String message = exception.getMessage(); if (exception.getCause() != null) { message += (" caused by " + exception.getCause()); } commandSender.sendErrorResponse(requestId, BrokerServiceException.getClientErrorCode(exception), message); } log.error("Try add schema failed, remote address {}, topic {}, producerId {}", remoteAddress, topicName, 
producerId, exception); producers.remove(producerId, producerFuture); return null; }); schemaVersionFuture.thenAccept(schemaVersion -> { topic.checkIfTransactionBufferRecoverCompletely(isTxnEnabled).thenAccept(future -> { CompletionStage<Subscription> createInitSubFuture; if (!Strings.isNullOrEmpty(initialSubscriptionName) && topic.isPersistent() && !topic.getSubscriptions().containsKey(initialSubscriptionName)) { createInitSubFuture = service.isAllowAutoSubscriptionCreationAsync(topicName) .thenCompose(isAllowAutoSubscriptionCreation -> { if (!isAllowAutoSubscriptionCreation) { return CompletableFuture.failedFuture( new BrokerServiceException.NotAllowedException( "Could not create the initial subscription due to" + " the auto subscription creation is not allowed.")); } return topic.createSubscription(initialSubscriptionName, InitialPosition.Earliest, false, null); }); } else { createInitSubFuture = CompletableFuture.completedFuture(null); } createInitSubFuture.whenComplete((sub, ex) -> { if (ex != null) { final Throwable rc = FutureUtil.unwrapCompletionException(ex); if (rc instanceof BrokerServiceException.NotAllowedException) { log.warn("[{}] {} initialSubscriptionName: {}, topic: {}", remoteAddress, rc.getMessage(), initialSubscriptionName, topicName); if (producerFuture.completeExceptionally(rc)) { commandSender.sendErrorResponse(requestId, ServerError.NotAllowedError, rc.getMessage()); } producers.remove(producerId, producerFuture); return; } String msg = "Failed to create the initial subscription: " + ex.getCause().getMessage(); log.warn("[{}] {} initialSubscriptionName: {}, topic: {}", remoteAddress, msg, initialSubscriptionName, topicName); if (producerFuture.completeExceptionally(ex)) { commandSender.sendErrorResponse(requestId, BrokerServiceException.getClientErrorCode(ex), msg); } producers.remove(producerId, producerFuture); return; } buildProducerAndAddTopic(topic, producerId, producerName, requestId, isEncrypted, metadata, schemaVersion, epoch, 
userProvidedProducerName, topicName, producerAccessMode, topicEpoch, supportsPartialProducer, producerFuture); }); }).exceptionally(exception -> { Throwable cause = exception.getCause(); log.error("producerId {}, requestId {} : TransactionBuffer recover failed", producerId, requestId, exception); if (producerFuture.completeExceptionally(exception)) { commandSender.sendErrorResponse(requestId, ServiceUnitNotReadyException.getClientErrorCode(cause), cause.getMessage()); } producers.remove(producerId, producerFuture); return null; }); }); }); return backlogQuotaCheckFuture; }).exceptionally(exception -> { Throwable cause = exception.getCause(); if (cause instanceof BrokerServiceException.TopicBacklogQuotaExceededException) { BrokerServiceException.TopicBacklogQuotaExceededException tbqe = (BrokerServiceException.TopicBacklogQuotaExceededException) cause; IllegalStateException illegalStateException = new IllegalStateException(tbqe); BacklogQuota.RetentionPolicy retentionPolicy = tbqe.getRetentionPolicy(); if (producerFuture.completeExceptionally(illegalStateException)) { if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_request_hold) { commandSender.sendErrorResponse(requestId, ServerError.ProducerBlockedQuotaExceededError, illegalStateException.getMessage()); } else if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_exception) { commandSender.sendErrorResponse(requestId, ServerError.ProducerBlockedQuotaExceededException, illegalStateException.getMessage()); } } producers.remove(producerId, producerFuture); return null; } else if (cause instanceof BrokerServiceException.TopicMigratedException) { Optional<ClusterUrl> clusterURL = getMigratedClusterUrl(service.getPulsar(), topicName.toString()); if (clusterURL.isPresent()) { log.info("[{}] redirect migrated producer to topic {}: " + "producerId={}, producerName = {}, {}", remoteAddress, topicName, producerId, producerName, cause.getMessage()); boolean msgSent = 
commandSender.sendTopicMigrated(ResourceType.Producer, producerId, clusterURL.get().getBrokerServiceUrl(), clusterURL.get().getBrokerServiceUrlTls()); if (!msgSent) { log.info("client doesn't support topic migration handling {}-{}-{}", topicName, remoteAddress, producerId); } producers.remove(producerId, producerFuture); closeProducer(producerId, -1L, Optional.empty()); return null; } } // Do not print stack traces for expected exceptions if (cause instanceof NoSuchElementException) { cause = new TopicNotFoundException(String.format("Topic not found %s", topicName.toString())); log.warn("[{}] Failed to load topic {}, producerId={}: Topic not found", remoteAddress, topicName, producerId); } else if (!Exceptions.areExceptionsPresentInChain(cause, ServiceUnitNotReadyException.class, ManagedLedgerException.class)) { log.error("[{}] Failed to create topic {}, producerId={}", remoteAddress, topicName, producerId, exception); } // If client timed out, the future would have been completed // by subsequent close. Send error back to // client, only if not completed already. if (producerFuture.completeExceptionally(exception)) { commandSender.sendErrorResponse(requestId, BrokerServiceException.getClientErrorCode(cause), cause.getMessage()); } producers.remove(producerId, producerFuture); return null; }); return null; }).exceptionally(ex -> { logAuthException(remoteAddress, "producer", getPrincipal(), Optional.of(topicName), ex); commandSender.sendErrorResponse(requestId, ServerError.AuthorizationError, ex.getMessage()); return null; }); }
/**
 * Exercises handleProducer's duplicate-producer handling: the same producerId is
 * registered three times on an open connection while an existing registration is
 * (1) still in progress, (2) already failed, and (3) already successful.
 */
@Test
public void testHandleProducer() throws Exception {
    final String tName = "persistent://public/default/test-topic";
    final long producerId = 1;
    final MutableInt requestId = new MutableInt(1);
    final MutableInt epoch = new MutableInt(1);
    final Map<String, String> metadata = Collections.emptyMap();
    final String pName = "p1";
    resetChannel();
    assertTrue(channel.isActive());
    assertEquals(serverCnx.getState(), State.Start);
    // connect.
    ByteBuf cConnect = Commands.newConnect("none", "", null);
    channel.writeInbound(cConnect);
    assertEquals(serverCnx.getState(), State.Connected);
    assertTrue(getResponse() instanceof CommandConnected);
    // There is an in-progress producer registration: an incomplete future is pre-seeded
    // under the same producerId, so the broker must answer ServiceNotReady.
    ByteBuf cProducer1 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(),
            pName, false, metadata, null, epoch.incrementAndGet(), false,
            ProducerAccessMode.Shared, Optional.empty(), false);
    CompletableFuture existingFuture1 = new CompletableFuture();
    serverCnx.getProducers().put(producerId, existingFuture1);
    channel.writeInbound(cProducer1);
    Object response1 = getResponse();
    assertTrue(response1 instanceof CommandError);
    CommandError error1 = (CommandError) response1;
    assertEquals(error1.getError().toString(), ServerError.ServiceNotReady.toString());
    assertTrue(error1.getMessage().contains("already present on the connection"));
    // There is a failed registration: an exceptionally-completed future should be evicted
    // and the original failure's error code (ProducerBusy here) relayed to the client.
    ByteBuf cProducer2 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(),
            pName, false, metadata, null, epoch.incrementAndGet(), false,
            ProducerAccessMode.Shared, Optional.empty(), false);
    CompletableFuture existingFuture2 = new CompletableFuture();
    existingFuture2.completeExceptionally(new BrokerServiceException.ProducerBusyException("123"));
    serverCnx.getProducers().put(producerId, existingFuture2);
    channel.writeInbound(cProducer2);
    Object response2 = getResponse();
    assertTrue(response2 instanceof CommandError);
    CommandError error2 = (CommandError) response2;
    assertEquals(error2.getError().toString(), ServerError.ProducerBusy.toString());
    assertTrue(error2.getMessage().contains("already failed to register present on the connection"));
    // There is a successful registration: the broker should re-acknowledge the existing
    // producer (same name and schema version) instead of creating a new one.
    ByteBuf cProducer3 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(),
            pName, false, metadata, null, epoch.incrementAndGet(), false,
            ProducerAccessMode.Shared, Optional.empty(), false);
    CompletableFuture existingFuture3 = new CompletableFuture();
    org.apache.pulsar.broker.service.Producer serviceProducer =
            mock(org.apache.pulsar.broker.service.Producer.class);
    when(serviceProducer.getProducerName()).thenReturn(pName);
    when(serviceProducer.getSchemaVersion()).thenReturn(new EmptyVersion());
    existingFuture3.complete(serviceProducer);
    serverCnx.getProducers().put(producerId, existingFuture3);
    channel.writeInbound(cProducer3);
    Object response3 = getResponse();
    assertTrue(response3 instanceof CommandProducerSuccess);
    CommandProducerSuccess cProducerSuccess = (CommandProducerSuccess) response3;
    assertEquals(cProducerSuccess.getProducerName(), pName);
    // cleanup.
    channel.finish();
}
@VisibleForTesting ExportResult<PhotosContainerResource> exportOneDrivePhotos( TokensAndUrlAuthData authData, Optional<IdOnlyContainerResource> albumData, Optional<PaginationData> paginationData, UUID jobId) throws IOException { Optional<String> albumId = Optional.empty(); if (albumData.isPresent()) { albumId = Optional.of(albumData.get().getId()); } Optional<String> paginationUrl = getDrivePaginationToken(paginationData); MicrosoftDriveItemsResponse driveItemsResponse; if (paginationData.isPresent() || albumData.isPresent()) { driveItemsResponse = getOrCreatePhotosInterface(authData).getDriveItems(albumId, paginationUrl); } else { driveItemsResponse = getOrCreatePhotosInterface(authData) .getDriveItemsFromSpecialFolder(MicrosoftSpecialFolder.FolderType.photos); } PaginationData nextPageData = setNextPageToken(driveItemsResponse); ContinuationData continuationData = new ContinuationData(nextPageData); PhotosContainerResource containerResource; MicrosoftDriveItem[] driveItems = driveItemsResponse.getDriveItems(); List<PhotoAlbum> albums = new ArrayList<>(); List<PhotoModel> photos = new ArrayList<>(); if (driveItems != null && driveItems.length > 0) { for (MicrosoftDriveItem driveItem : driveItems) { PhotoAlbum album = tryConvertDriveItemToPhotoAlbum(driveItem, jobId); if (album != null) { albums.add(album); continuationData.addContainerResource(new IdOnlyContainerResource(driveItem.id)); } PhotoModel photo = tryConvertDriveItemToPhotoModel(albumId, driveItem, jobId); if (photo != null) { photos.add(photo); } } } ExportResult.ResultType result = nextPageData == null ? ExportResult.ResultType.END : ExportResult.ResultType.CONTINUE; containerResource = new PhotosContainerResource(albums, photos); return new ExportResult<>(result, containerResource, continuationData); }
@Test public void exportPhotoWithNextPage() throws IOException { // Setup when(driveItemsResponse.getNextPageLink()).thenReturn(null); MicrosoftDriveItem photoItem = setUpSinglePhoto(IMAGE_URI, PHOTO_ID); when(driveItemsResponse.getDriveItems()).thenReturn(new MicrosoftDriveItem[] {photoItem}); when(driveItemsResponse.getNextPageLink()).thenReturn(DRIVE_PAGE_URL); IdOnlyContainerResource idOnlyContainerResource = new IdOnlyContainerResource(FOLDER_ID); // Run ExportResult<PhotosContainerResource> result = microsoftPhotosExporter.exportOneDrivePhotos( null, Optional.of(idOnlyContainerResource), Optional.empty(), uuid); // Verify method calls verify(photosInterface).getDriveItems(Optional.of(FOLDER_ID), Optional.empty()); verify(driveItemsResponse).getDriveItems(); // Verify pagination token is set ContinuationData continuationData = result.getContinuationData(); StringPaginationToken paginationToken = (StringPaginationToken) continuationData.getPaginationData(); assertThat(paginationToken.getToken()).isEqualTo(DRIVE_TOKEN_PREFIX + DRIVE_PAGE_URL); // Verify no albums are exported Collection<PhotoAlbum> actualAlbums = result.getExportedData().getAlbums(); assertThat(actualAlbums).isEmpty(); // Verify one photo (in an album) should be exported Collection<PhotoModel> actualPhotos = result.getExportedData().getPhotos(); assertThat(actualPhotos.stream().map(PhotoModel::getFetchableUrl).collect(Collectors.toList())) .containsExactly(IMAGE_URI); assertThat(actualPhotos.stream().map(PhotoModel::getAlbumId).collect(Collectors.toList())) .containsExactly(FOLDER_ID); assertThat(actualPhotos.stream().map(PhotoModel::getTitle).collect(Collectors.toList())) .containsExactly(FILENAME); // Verify there are no containers ready for sub-processing List<ContainerResource> actualResources = continuationData.getContainerResources(); assertThat(actualResources).isEmpty(); }
@GetMapping("/readiness") public Result<String> readiness(HttpServletRequest request) { ReadinessResult result = ModuleHealthCheckerHolder.getInstance().checkReadiness(); if (result.isSuccess()) { return Result.success("ok"); } return Result.failure(result.getResultMessage()); }
@Test void testReadinessNamingFailure() { // Naming is not in readiness Mockito.when(configInfoPersistService.configInfoCount(any(String.class))).thenReturn(0); Mockito.when(serverStatusManager.getServerStatus()).thenThrow(new RuntimeException("HealthControllerV2Test.testReadiness")); Result<String> result = healthControllerV2.readiness(null); assertEquals(30000, result.getCode().intValue()); assertEquals("naming not in readiness", result.getMessage()); }
@Override public CompletableFuture<Void> deleteTopicInBroker(String address, DeleteTopicRequestHeader requestHeader, long timeoutMillis) { CompletableFuture<Void> future = new CompletableFuture<>(); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.DELETE_TOPIC_IN_BROKER, requestHeader); remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> { if (response.getCode() == ResponseCode.SUCCESS) { future.complete(null); } else { log.warn("deleteTopicInBroker getResponseCommand failed, {} {}, header={}", response.getCode(), response.getRemark(), requestHeader); future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark())); } }); return future; }
@Test public void assertDeleteTopicInBrokerWithError() { setResponseError(); DeleteTopicRequestHeader requestHeader = mock(DeleteTopicRequestHeader.class); CompletableFuture<Void> actual = mqClientAdminImpl.deleteTopicInBroker(defaultBrokerAddr, requestHeader, defaultTimeout); Throwable thrown = assertThrows(ExecutionException.class, actual::get); assertTrue(thrown.getCause() instanceof MQClientException); MQClientException mqException = (MQClientException) thrown.getCause(); assertEquals(ResponseCode.SYSTEM_ERROR, mqException.getResponseCode()); assertTrue(mqException.getMessage().contains("CODE: 1 DESC: null")); }
@Override public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream, final ValueJoiner<? super V, ? super VO, ? extends VR> joiner, final JoinWindows windows) { return join(otherStream, toValueJoinerWithKey(joiner), windows); }
@Test public void shouldNotAllowNullMapperOnJoinWithGlobalTableWithNamed() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.join( testGlobalTable, null, MockValueJoiner.TOSTRING_JOINER, Named.as("name"))); assertThat(exception.getMessage(), equalTo("keySelector can't be null")); }
public static void clearObjectArray(Object[] objects, int start, int size) { if (size < COPY_THRESHOLD) { Arrays.fill(objects, start, start + size, null); } else { if (size < NIL_ARRAY_SIZE) { System.arraycopy(NIL_ARRAY, 0, objects, start, size); } else { while (size > NIL_ARRAY_SIZE) { System.arraycopy(NIL_ARRAY, 0, objects, start, NIL_ARRAY_SIZE); size -= NIL_ARRAY_SIZE; start += NIL_ARRAY_SIZE; } System.arraycopy(NIL_ARRAY, 0, objects, start, size); } } }
@Test public void testClearObjectArray() { int[] numObjs = new int[] {100, 500, 1000, 5000, 10000, 100000, 1000000}; for (int numObj : numObjs) { Object[] array = new Object[numObj]; Object o = new Object(); for (int i = 0; i < numObj; i++) { array[i] = o; } ObjectArray.clearObjectArray(array, 0, array.length); for (int i = 0; i < array.length; i++) { Object value = array[i]; if (value != null) { throw new IllegalStateException(String.format("numObj: %d, index: %d", numObj, i)); } } for (int i = 0; i < numObj; i++) { array[i] = o; } ObjectArray.clearObjectArray(array, 1, array.length - 1); for (int i = 1; i < array.length; i++) { Object value = array[i]; if (value != null) { throw new IllegalStateException(String.format("numObj: %d, index: %d", numObj, i)); } } } }
public static UriTemplate create(String template, Charset charset) { return new UriTemplate(template, true, charset); }
@Test void pathStyleExpansionSupportedWithMap() { String template = "/server/matrixParams{;parameters}"; Map<String, Object> parameters = new LinkedHashMap<>(); parameters.put("account", "a"); parameters.put("name", "n"); Map<String, Object> values = new LinkedHashMap<>(); values.put("parameters", parameters); UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8); String expanded = uriTemplate.expand(values); assertThat(expanded).isEqualToIgnoringCase("/server/matrixParams;account=a;name=n"); }
@Override public ClientResponse apply(ClientRequest jerseyRequest) { try { final HttpUriRequest apacheRequest = buildApacheRequest(jerseyRequest); final CloseableHttpResponse apacheResponse = client.execute(apacheRequest); final String reasonPhrase = apacheResponse.getReasonPhrase(); final Response.StatusType status = Statuses.from(apacheResponse.getCode(), reasonPhrase == null ? "" : reasonPhrase); final ClientResponse jerseyResponse = new ClientResponse(status, jerseyRequest); for (Header header : apacheResponse.getHeaders()) { jerseyResponse.getHeaders().computeIfAbsent(header.getName(), k -> new ArrayList<>()) .add(header.getValue()); } final HttpEntity httpEntity = apacheResponse.getEntity(); jerseyResponse.setEntityStream(httpEntity != null ? httpEntity.getContent() : new ByteArrayInputStream(new byte[0])); return jerseyResponse; } catch (Exception e) { throw new ProcessingException(e); } }
@Test void multiple_headers_with_the_same_name_are_processed_successfully() throws Exception { final CloseableHttpClient client = mock(CloseableHttpClient.class); final DropwizardApacheConnector dropwizardApacheConnector = new DropwizardApacheConnector(client, null, false); final Header[] apacheHeaders = { new BasicHeader("Set-Cookie", "test1"), new BasicHeader("Set-Cookie", "test2") }; final CloseableHttpResponse apacheResponse = mock(CloseableHttpResponse.class); when(apacheResponse.getCode()).thenReturn(200); when(apacheResponse.getReasonPhrase()).thenReturn("OK"); when(apacheResponse.getHeaders()).thenReturn(apacheHeaders); when(client.execute(Mockito.any())).thenReturn(apacheResponse); final ClientRequest jerseyRequest = mock(ClientRequest.class); when(jerseyRequest.getUri()).thenReturn(URI.create("http://localhost")); when(jerseyRequest.getMethod()).thenReturn("GET"); when(jerseyRequest.getHeaders()).thenReturn(new MultivaluedHashMap<>()); final ClientResponse jerseyResponse = dropwizardApacheConnector.apply(jerseyRequest); assertThat(jerseyResponse.getStatus()).isEqualTo(apacheResponse.getCode()); }
@Override public void importData(JsonReader reader) throws IOException { logger.info("Reading configuration for 1.3"); // this *HAS* to start as an object reader.beginObject(); while (reader.hasNext()) { JsonToken tok = reader.peek(); switch (tok) { case NAME: String name = reader.nextName(); // find out which member it is if (name.equals(CLIENTS)) { readClients(reader); } else if (name.equals(GRANTS)) { readGrants(reader); } else if (name.equals(WHITELISTEDSITES)) { readWhitelistedSites(reader); } else if (name.equals(BLACKLISTEDSITES)) { readBlacklistedSites(reader); } else if (name.equals(AUTHENTICATIONHOLDERS)) { readAuthenticationHolders(reader); } else if (name.equals(ACCESSTOKENS)) { readAccessTokens(reader); } else if (name.equals(REFRESHTOKENS)) { readRefreshTokens(reader); } else if (name.equals(SYSTEMSCOPES)) { readSystemScopes(reader); } else { boolean processed = false; for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { processed = extension.importExtensionData(name, reader); if (processed) { // if the extension processed data, break out of this inner loop // (only the first extension to claim an extension point gets it) break; } } } if (!processed) { // unknown token, skip it reader.skipValue(); } } break; case END_OBJECT: // the object ended, we're done here reader.endObject(); continue; default: logger.debug("Found unexpected entry"); reader.skipValue(); continue; } } fixObjectReferences(); for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { extension.fixExtensionObjectReferences(maps); break; } } maps.clearAll(); }
@Test public void testImportAccessTokens() throws IOException, ParseException { String expiration1 = "2014-09-10T22:49:44.090+00:00"; Date expirationDate1 = formatter.parse(expiration1, Locale.ENGLISH); ClientDetailsEntity mockedClient1 = mock(ClientDetailsEntity.class); when(mockedClient1.getClientId()).thenReturn("mocked_client_1"); AuthenticationHolderEntity mockedAuthHolder1 = mock(AuthenticationHolderEntity.class); when(mockedAuthHolder1.getId()).thenReturn(1L); OAuth2AccessTokenEntity token1 = new OAuth2AccessTokenEntity(); token1.setId(1L); token1.setClient(mockedClient1); token1.setExpiration(expirationDate1); token1.setJwt(JWTParser.parse("eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0MTI3ODk5NjgsInN1YiI6IjkwMzQyLkFTREZKV0ZBIiwiYXRfaGFzaCI6InptTmt1QmNRSmNYQktNaVpFODZqY0EiLCJhdWQiOlsiY2xpZW50Il0sImlzcyI6Imh0dHA6XC9cL2xvY2FsaG9zdDo4MDgwXC9vcGVuaWQtY29ubmVjdC1zZXJ2ZXItd2ViYXBwXC8iLCJpYXQiOjE0MTI3ODkzNjh9.xkEJ9IMXpH7qybWXomfq9WOOlpGYnrvGPgey9UQ4GLzbQx7JC0XgJK83PmrmBZosvFPCmota7FzI_BtwoZLgAZfFiH6w3WIlxuogoH-TxmYbxEpTHoTsszZppkq9mNgOlArV4jrR9y3TPo4MovsH71dDhS_ck-CvAlJunHlqhs0")); token1.setAuthenticationHolder(mockedAuthHolder1); token1.setScope(ImmutableSet.of("id-token")); token1.setTokenType("Bearer"); String expiration2 = "2015-01-07T18:31:50.079+00:00"; Date expirationDate2 = formatter.parse(expiration2, Locale.ENGLISH); ClientDetailsEntity mockedClient2 = mock(ClientDetailsEntity.class); when(mockedClient2.getClientId()).thenReturn("mocked_client_2"); AuthenticationHolderEntity mockedAuthHolder2 = mock(AuthenticationHolderEntity.class); when(mockedAuthHolder2.getId()).thenReturn(2L); OAuth2RefreshTokenEntity mockRefreshToken2 = mock(OAuth2RefreshTokenEntity.class); when(mockRefreshToken2.getId()).thenReturn(1L); OAuth2AccessTokenEntity token2 = new OAuth2AccessTokenEntity(); token2.setId(2L); token2.setClient(mockedClient2); token2.setExpiration(expirationDate2); 
token2.setJwt(JWTParser.parse("eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0MTI3OTI5NjgsImF1ZCI6WyJjbGllbnQiXSwiaXNzIjoiaHR0cDpcL1wvbG9jYWxob3N0OjgwODBcL29wZW5pZC1jb25uZWN0LXNlcnZlci13ZWJhcHBcLyIsImp0aSI6IjBmZGE5ZmRiLTYyYzItNGIzZS05OTdiLWU0M2VhMDUwMzNiOSIsImlhdCI6MTQxMjc4OTM2OH0.xgaVpRLYE5MzbgXfE0tZt823tjAm6Oh3_kdR1P2I9jRLR6gnTlBQFlYi3Y_0pWNnZSerbAE8Tn6SJHZ9k-curVG0-ByKichV7CNvgsE5X_2wpEaUzejvKf8eZ-BammRY-ie6yxSkAarcUGMvGGOLbkFcz5CtrBpZhfd75J49BIQ")); token2.setAuthenticationHolder(mockedAuthHolder2); token2.setRefreshToken(mockRefreshToken2); token2.setScope(ImmutableSet.of("openid", "offline_access", "email", "profile")); token2.setTokenType("Bearer"); String configJson = "{" + "\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " + "\"" + MITREidDataService.REFRESHTOKENS + "\": [], " + "\"" + MITREidDataService.CLIENTS + "\": [], " + "\"" + MITREidDataService.GRANTS + "\": [], " + "\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " + "\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " + "\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " + "\"" + MITREidDataService.ACCESSTOKENS + "\": [" + "{\"id\":1,\"clientId\":\"mocked_client_1\",\"expiration\":\"2014-09-10T22:49:44.090+00:00\"," + "\"refreshTokenId\":null,\"idTokenId\":null,\"scope\":[\"id-token\"],\"type\":\"Bearer\"," + "\"authenticationHolderId\":1,\"value\":\"eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0MTI3ODk5NjgsInN1YiI6IjkwMzQyLkFTREZKV0ZBIiwiYXRfaGFzaCI6InptTmt1QmNRSmNYQktNaVpFODZqY0EiLCJhdWQiOlsiY2xpZW50Il0sImlzcyI6Imh0dHA6XC9cL2xvY2FsaG9zdDo4MDgwXC9vcGVuaWQtY29ubmVjdC1zZXJ2ZXItd2ViYXBwXC8iLCJpYXQiOjE0MTI3ODkzNjh9.xkEJ9IMXpH7qybWXomfq9WOOlpGYnrvGPgey9UQ4GLzbQx7JC0XgJK83PmrmBZosvFPCmota7FzI_BtwoZLgAZfFiH6w3WIlxuogoH-TxmYbxEpTHoTsszZppkq9mNgOlArV4jrR9y3TPo4MovsH71dDhS_ck-CvAlJunHlqhs0\"}," + "{\"id\":2,\"clientId\":\"mocked_client_2\",\"expiration\":\"2015-01-07T18:31:50.079+00:00\"," + "\"refreshTokenId\":1,\"idTokenId\":1,\"scope\":[\"openid\",\"offline_access\",\"email\",\"profile\"],\"type\":\"Bearer\"," 
+ "\"authenticationHolderId\":2,\"value\":\"eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0MTI3OTI5NjgsImF1ZCI6WyJjbGllbnQiXSwiaXNzIjoiaHR0cDpcL1wvbG9jYWxob3N0OjgwODBcL29wZW5pZC1jb25uZWN0LXNlcnZlci13ZWJhcHBcLyIsImp0aSI6IjBmZGE5ZmRiLTYyYzItNGIzZS05OTdiLWU0M2VhMDUwMzNiOSIsImlhdCI6MTQxMjc4OTM2OH0.xgaVpRLYE5MzbgXfE0tZt823tjAm6Oh3_kdR1P2I9jRLR6gnTlBQFlYi3Y_0pWNnZSerbAE8Tn6SJHZ9k-curVG0-ByKichV7CNvgsE5X_2wpEaUzejvKf8eZ-BammRY-ie6yxSkAarcUGMvGGOLbkFcz5CtrBpZhfd75J49BIQ\"}" + " ]" + "}"; logger.debug(configJson); JsonReader reader = new JsonReader(new StringReader(configJson)); final Map<Long, OAuth2AccessTokenEntity> fakeDb = new HashMap<>(); when(tokenRepository.saveAccessToken(isA(OAuth2AccessTokenEntity.class))).thenAnswer(new Answer<OAuth2AccessTokenEntity>() { Long id = 324L; @Override public OAuth2AccessTokenEntity answer(InvocationOnMock invocation) throws Throwable { OAuth2AccessTokenEntity _token = (OAuth2AccessTokenEntity) invocation.getArguments()[0]; if(_token.getId() == null) { _token.setId(id++); } fakeDb.put(_token.getId(), _token); return _token; } }); when(tokenRepository.getAccessTokenById(anyLong())).thenAnswer(new Answer<OAuth2AccessTokenEntity>() { @Override public OAuth2AccessTokenEntity answer(InvocationOnMock invocation) throws Throwable { Long _id = (Long) invocation.getArguments()[0]; return fakeDb.get(_id); } }); when(clientRepository.getClientByClientId(anyString())).thenAnswer(new Answer<ClientDetailsEntity>() { @Override public ClientDetailsEntity answer(InvocationOnMock invocation) throws Throwable { String _clientId = (String) invocation.getArguments()[0]; ClientDetailsEntity _client = mock(ClientDetailsEntity.class); when(_client.getClientId()).thenReturn(_clientId); return _client; } }); when(authHolderRepository.getById(isNull(Long.class))).thenAnswer(new Answer<AuthenticationHolderEntity>() { Long id = 133L; @Override public AuthenticationHolderEntity answer(InvocationOnMock invocation) throws Throwable { AuthenticationHolderEntity _auth = 
mock(AuthenticationHolderEntity.class); when(_auth.getId()).thenReturn(id); id++; return _auth; } }); dataService.importData(reader); //2 times for token, 2 times to update client, 2 times to update authHolder, 1 times to update refresh token verify(tokenRepository, times(7)).saveAccessToken(capturedAccessTokens.capture()); List<OAuth2AccessTokenEntity> savedAccessTokens = new ArrayList(fakeDb.values()); //capturedAccessTokens.getAllValues(); Collections.sort(savedAccessTokens, new accessTokenIdComparator()); assertThat(savedAccessTokens.size(), is(2)); assertThat(savedAccessTokens.get(0).getClient().getClientId(), equalTo(token1.getClient().getClientId())); assertThat(savedAccessTokens.get(0).getExpiration(), equalTo(token1.getExpiration())); assertThat(savedAccessTokens.get(0).getValue(), equalTo(token1.getValue())); assertThat(savedAccessTokens.get(1).getClient().getClientId(), equalTo(token2.getClient().getClientId())); assertThat(savedAccessTokens.get(1).getExpiration(), equalTo(token2.getExpiration())); assertThat(savedAccessTokens.get(1).getValue(), equalTo(token2.getValue())); }
@Override public CompletableFuture<HeartbeatResponseData> heartbeat( RequestContext context, HeartbeatRequestData request ) { if (!isActive.get()) { return CompletableFuture.completedFuture(new HeartbeatResponseData() .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) ); } if (!isGroupIdNotEmpty(request.groupId())) { return CompletableFuture.completedFuture(new HeartbeatResponseData() .setErrorCode(Errors.INVALID_GROUP_ID.code()) ); } return runtime.scheduleWriteOperation( "classic-group-heartbeat", topicPartitionFor(request.groupId()), Duration.ofMillis(config.offsetCommitTimeoutMs()), coordinator -> coordinator.classicGroupHeartbeat(context, request) ).exceptionally(exception -> handleOperationException( "classic-group-heartbeat", request, exception, (error, __) -> { if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS) { // The group is still loading, so blindly respond return new HeartbeatResponseData() .setErrorCode(Errors.NONE.code()); } else { return new HeartbeatResponseData() .setErrorCode(error.code()); } } )); }
@Test public void testHeartbeatWhenNotStarted() throws ExecutionException, InterruptedException { CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime(); GroupCoordinatorService service = new GroupCoordinatorService( new LogContext(), createConfig(), runtime, new GroupCoordinatorMetrics(), createConfigManager() ); HeartbeatRequestData request = new HeartbeatRequestData() .setGroupId("foo"); CompletableFuture<HeartbeatResponseData> future = service.heartbeat( requestContext(ApiKeys.CONSUMER_GROUP_HEARTBEAT), request ); assertEquals( new HeartbeatResponseData() .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()), future.get() ); }
@Override public void execute(SensorContext context) { analyse(context, Xoo.KEY, XooRulesDefinition.XOO_REPOSITORY); analyse(context, Xoo2.KEY, XooRulesDefinition.XOO2_REPOSITORY); }
@Test public void testRule() throws IOException { DefaultInputFile inputFile = new TestInputFileBuilder("foo", "src/Foo.xoo") .setLanguage(Xoo.KEY) .initMetadata("a\nb\nc\nd\ne\nf\ng\nh\ni\n") .build(); SensorContextTester context = SensorContextTester.create(temp.newFolder()); context.fileSystem().add(inputFile); sensor.execute(context); assertThat(context.allIssues()).hasSize(10); // One issue per line for (Issue issue : context.allIssues()) { assertThat(issue.gap()).isNull(); } }
@Override public Optional<Flow> findById(String tenantId, String namespace, String id, Optional<Integer> revision, Boolean allowDeleted) { return jdbcRepository .getDslContextWrapper() .transactionResult(configuration -> { DSLContext context = DSL.using(configuration); Select<Record1<String>> from; if (revision.isPresent()) { from = context .select(field("value", String.class)) .from(jdbcRepository.getTable()) .where(this.revisionDefaultFilter(tenantId)) .and(field("namespace").eq(namespace)) .and(field("id", String.class).eq(id)) .and(field("revision", Integer.class).eq(revision.get())); } else { from = context .select(field("value", String.class)) .from(fromLastRevision(true)) .where(allowDeleted ? this.revisionDefaultFilter(tenantId) : this.defaultFilter(tenantId)) .and(field("namespace", String.class).eq(namespace)) .and(field("id", String.class).eq(id)); } return this.jdbcRepository.fetchOne(from); }); }
@Test public void invalidFlow() { dslContextWrapper.transaction(configuration -> { DSLContext context = DSL.using(configuration); context.insertInto(flowRepository.jdbcRepository.getTable()) .set(field("key"), "io.kestra.unittest_invalid") .set(field("source_code"), "") .set(field("value"), JacksonMapper.ofJson().writeValueAsString(Map.of( "id", "invalid", "namespace", "io.kestra.unittest", "revision", 1, "tasks", List.of(Map.of( "id", "invalid", "type", "io.kestra.plugin.core.log.Log", "level", "invalid" )), "deleted", false ))) .execute(); }); Optional<Flow> flow = flowRepository.findById(null, "io.kestra.unittest", "invalid"); assertThat(flow.isPresent(), is(true)); assertThat(flow.get(), instanceOf(FlowWithException.class)); assertThat(((FlowWithException) flow.get()).getException(), containsString("Cannot deserialize value of type `org.slf4j.event.Level`")); }
@Udf public <T> List<T> distinct( @UdfParameter(description = "Array of values to distinct") final List<T> input) { if (input == null) { return null; } final Set<T> distinctVals = Sets.newLinkedHashSetWithExpectedSize(input.size()); distinctVals.addAll(input); return new ArrayList<>(distinctVals); }
@Test public void shouldReturnEmptyForEmptyInput() { final List<Double> result = udf.distinct(new ArrayList<Double>()); assertThat(result, is(Collections.EMPTY_LIST)); }
/**
 * Returns whether the given resource manager instance is the currently installed
 * leader. Must be called while holding {@code lock} (see {@code @GuardedBy}).
 */
@GuardedBy("lock")
private boolean isLeader(ResourceManager<?> resourceManager) {
    // Reference identity on purpose: only the exact instance recorded as
    // leaderResourceManager counts, and only while this service is running.
    return running && this.leaderResourceManager == resourceManager;
}
@Test
void grantLeadership_withExistingLeader_waitTerminationOfExistingLeader() throws Exception {
    // Verifies that when leadership is granted a second time while a previous
    // ResourceManager instance is still terminating, the new RM instance is not
    // started until that termination completes.
    final UUID leaderSessionId1 = UUID.randomUUID();
    final UUID leaderSessionId2 = UUID.randomUUID();
    // Completed with the session id when the first / second RM instance initializes.
    final CompletableFuture<UUID> startRmFuture1 = new CompletableFuture<>();
    final CompletableFuture<UUID> startRmFuture2 = new CompletableFuture<>();
    // Held open to keep the first RM's termination pending until we release it.
    final CompletableFuture<Void> finishRmTerminationFuture = new CompletableFuture<>();

    rmFactoryBuilder
        .setInitializeConsumer(
            uuid -> {
                // First initialization completes future 1, the next one future 2.
                if (!startRmFuture1.isDone()) {
                    startRmFuture1.complete(uuid);
                } else {
                    startRmFuture2.complete(uuid);
                }
            })
        // Block RM termination until the test explicitly finishes it.
        .setTerminateConsumer((ignore) -> blockOnFuture(finishRmTerminationFuture));

    createAndStartResourceManager();

    // first time grant leadership
    leaderElection.isLeader(leaderSessionId1).join();

    // second time grant leadership
    final CompletableFuture<LeaderInformation> confirmedLeaderInformation =
        leaderElection.isLeader(leaderSessionId2);

    // first RM termination not finished, should not start new RM
    assertNotComplete(startRmFuture2);

    // finish first RM termination
    finishRmTerminationFuture.complete(null);

    // should start new RM and confirm leader session
    assertThatFuture(startRmFuture2).eventuallySucceeds().isSameAs(leaderSessionId2);
    assertThat(confirmedLeaderInformation.get().getLeaderSessionID())
        .isSameAs(leaderSessionId2);
}
/**
 * Returns the currently configured selectors, after checking that the config
 * has been refreshed recently enough.
 *
 * @throws PrestoException with {@code CONFIGURATION_INVALID} if no selectors exist
 */
@VisibleForTesting
public List<ResourceGroupSelector> getSelectors() {
    checkMaxRefreshInterval();
    // Snapshot the reference once: the original read selectors.get() twice, so a
    // concurrent refresh swapping the list between the emptiness check and the
    // return could pass the check yet return a different (possibly empty) list.
    List<ResourceGroupSelector> currentSelectors = selectors.get();
    if (currentSelectors.isEmpty()) {
        throw new PrestoException(CONFIGURATION_INVALID, "No selectors are configured");
    }
    return currentSelectors;
}
// Seeds a resource-group table with a group but no selector rows, then expects
// getSelectors() to fail fast with "No selectors are configured" (TestNG checks
// both the exception type and its message via the annotation).
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "No selectors are configured")
public void testInvalidConfiguration() {
    H2DaoProvider daoProvider = setup("selectors");
    H2ResourceGroupsDao dao = daoProvider.get();
    dao.createResourceGroupsTable();
    dao.createSelectorsTable();
    // One resource group only — deliberately no matching selector is inserted.
    dao.insertResourceGroup(1, "global", "100%", 100, 100, 100, null, null, null, null, null, null, null, null, 0, null, ENVIRONMENT);
    DbManagerSpecProvider dbManagerSpecProvider = new DbManagerSpecProvider(daoProvider.get(), ENVIRONMENT, new ReloadingResourceGroupConfig());
    // Tiny refresh interval (1ms) so the manager treats its config as current.
    ReloadingResourceGroupConfigurationManager manager = new ReloadingResourceGroupConfigurationManager(
        (poolId, listener) -> {},
        new ReloadingResourceGroupConfig().setMaxRefreshInterval(Duration.valueOf("1ms")),
        dbManagerSpecProvider);
    manager.getSelectors();
}
/**
 * Wraps an arbitrary array object into an {@code Object[]}, boxing primitive
 * arrays element-wise via the dedicated {@code wrap} overloads.
 *
 * @param obj an array (primitive or reference), or {@code null}
 * @return the boxed array, or {@code null} when {@code obj} is {@code null}
 * @throws UtilException if {@code obj} is not an array
 */
public static Object[] wrap(Object obj) {
    if (null == obj) {
        return null;
    }
    if (isArray(obj)) {
        // Inspect the component type directly instead of attempting the cast and
        // catching the ClassCastException — exceptions are not control flow.
        final Class<?> componentType = obj.getClass().getComponentType();
        if (false == componentType.isPrimitive()) {
            // Any reference-typed array is already an Object[].
            return (Object[]) obj;
        }
        switch (componentType.getName()) {
            case "long":
                return wrap((long[]) obj);
            case "int":
                return wrap((int[]) obj);
            case "short":
                return wrap((short[]) obj);
            case "char":
                return wrap((char[]) obj);
            case "byte":
                return wrap((byte[]) obj);
            case "boolean":
                return wrap((boolean[]) obj);
            case "float":
                return wrap((float[]) obj);
            case "double":
                return wrap((double[]) obj);
            default:
                // Unreachable: all eight primitive component types are handled above.
                throw new UtilException(StrUtil.format("Unsupported primitive array component [{}]", componentType.getName()));
        }
    }
    throw new UtilException(StrUtil.format("[{}] is not Array!", obj.getClass()));
}
@Test
public void wrapTest() {
    // A primitive int[] handed over as a plain Object should come back boxed.
    final Object primitiveArray = new int[]{1, 2, 3, 4};
    final Object[] wrapped = ArrayUtil.wrap(primitiveArray);
    for (final Object element : wrapped) {
        // Every boxed element must be an Integer, not an int[] or Object residue.
        assertInstanceOf(Integer.class, element);
    }
}
// Resolves a parsed guard expression (e.g. the argument of @GuardedBy) into a
// GuardedByExpression using the binder visitor. Rejects expressions the binder
// cannot resolve, and bare type literals (a raw class name is not a valid lock).
private static GuardedByExpression bind(JCTree.JCExpression exp, BinderContext context) {
    GuardedByExpression expr = BINDER.visit(exp, context);
    checkGuardedBy(expr != null, String.valueOf(exp));
    checkGuardedBy(expr.kind() != Kind.TYPE_LITERAL, "Raw type literal: %s", exp);
    return expr;
}
@Test
public void staticFieldGuard() {
    // Binding the guard name "lock" in class Test should resolve to a SELECT of
    // the static field off the type literal, rendered in the binder's s-expression
    // debug format.
    assertThat(
        bind(
            "Test",
            "lock",
            forSourceLines(
                "threadsafety/Test.java",
                "package threadsafety;",
                "class Test {",
                "  static final Object lock = new Object();",
                "}")))
        .isEqualTo("(SELECT (TYPE_LITERAL threadsafety.Test) lock)");
}
/**
 * Entry point for the FS-to-CS configuration converter. Delegates argument
 * parsing and conversion to the handler, then reports the exit code through
 * the injectable {@code exitFunction} (kept indirect for testability).
 */
public static void main(String[] args) {
    try {
        final FSConfigToCSConfigArgumentHandler argumentHandler =
            new FSConfigToCSConfigArgumentHandler();
        final int conversionResult = argumentHandler.parseAndConvert(args);
        if (conversionResult != 0) {
            // Details were already logged by the handler; this line just flags
            // the overall failure at FATAL level.
            LOG.error(FATAL, "Error while starting FS configuration conversion, " +
                "see previous error messages for details!");
        }
        exitFunction.accept(conversionResult);
    } catch (Throwable t) {
        // Catch everything so an unexpected error still produces a defined
        // exit code instead of an unhandled stack trace.
        LOG.error(FATAL, "Error while starting FS configuration conversion!", t);
        exitFunction.accept(-1);
    }
}
@Test
public void testLongHelpSwitch() {
    // The long-form "--help" flag should print usage text and exit with 0
    // (exit code is captured via the injected exit function, not System.exit).
    FSConfigToCSConfigConverterMain.main(new String[] {"--help"});

    verifyHelpText();
    assertEquals("Exit code", 0, exitFunc.exitCode);
}