focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Applies the given log level configuration to the Logback root context and returns it.
 * Rejects configurations whose root logger name does not match {@code ROOT_LOGGER_NAME}.
 */
public LoggerContext apply(LogLevelConfig logLevelConfig, Props props) {
  if (!ROOT_LOGGER_NAME.equals(logLevelConfig.getRootLoggerName())) {
    throw new IllegalArgumentException("Value of LogLevelConfig#rootLoggerName must be \"" + ROOT_LOGGER_NAME + "\"");
  }

  LoggerContext context = getRootContext();

  // Property-driven levels first, then the hardcoded ones.
  logLevelConfig.getConfiguredByProperties()
      .forEach((loggerName, levelProps) -> applyLevelByProperty(props, context.getLogger(loggerName), levelProps));
  logLevelConfig.getConfiguredByHardcodedLevel()
      .forEach((loggerName, level) -> applyHardcodedLevel(context, loggerName, level));

  // Loggers in the "off unless trace" set are silenced unless the global level is TRACE.
  boolean globalTraceEnabled = Level.TRACE == getPropertyValueAsLevel(props, LOG_LEVEL.getKey());
  logLevelConfig.getOffUnlessTrace().forEach(loggerName -> applyHardUnlessTrace(context, loggerName, globalTraceEnabled));

  return context;
}
// Verifies the default: with no log-level property set, the root logger ends up at INFO.
@Test public void apply_sets_logger_to_INFO_if_no_property_is_set() { LogLevelConfig config = newLogLevelConfig().rootLevelFor(WEB_SERVER).build(); LoggerContext context = underTest.apply(config, props); assertThat(context.getLogger(ROOT_LOGGER_NAME).getLevel()).isEqualTo(Level.INFO); }
/**
 * Acknowledges the messages in the request, dispatching to batch or one-by-one
 * processing according to the proxy configuration. Any failure (including
 * validation errors) is surfaced through the returned future, never thrown.
 */
public CompletableFuture<AckMessageResponse> ackMessage(ProxyContext ctx, AckMessageRequest request) {
  CompletableFuture<AckMessageResponse> result = new CompletableFuture<>();
  try {
    validateTopicAndConsumerGroup(request.getTopic(), request.getGroup());
    String groupName = request.getGroup().getName();
    String topicName = request.getTopic().getName();
    result = ConfigurationManager.getProxyConfig().isEnableBatchAck()
        ? ackMessageInBatch(ctx, groupName, topicName, request)
        : ackMessageOneByOne(ctx, groupName, topicName, request);
  } catch (Throwable t) {
    result.completeExceptionally(t);
  }
  return result;
}
// Exercises one-by-one ack (batch ack disabled): msg1 -> expired-handle exception,
// msg2 -> OK, msg3 -> NO_EXIST; then a mixed 3-entry request must report MULTIPLE_RESULTS
// with per-entry codes in request order.
@Test public void testAckMessage() throws Throwable { ConfigurationManager.getProxyConfig().setEnableBatchAck(false); String msg1 = "msg1"; String msg2 = "msg2"; String msg3 = "msg3"; when(this.messagingProcessor.ackMessage(any(), any(), eq(msg1), anyString(), anyString())) .thenThrow(new ProxyException(ProxyExceptionCode.INVALID_RECEIPT_HANDLE, "receipt handle is expired")); AckResult msg2AckResult = new AckResult(); msg2AckResult.setStatus(AckStatus.OK); when(this.messagingProcessor.ackMessage(any(), any(), eq(msg2), anyString(), anyString())) .thenReturn(CompletableFuture.completedFuture(msg2AckResult)); AckResult msg3AckResult = new AckResult(); msg3AckResult.setStatus(AckStatus.NO_EXIST); when(this.messagingProcessor.ackMessage(any(), any(), eq(msg3), anyString(), anyString())) .thenReturn(CompletableFuture.completedFuture(msg3AckResult)); { AckMessageResponse response = this.ackMessageActivity.ackMessage( createContext(), AckMessageRequest.newBuilder() .setTopic(Resource.newBuilder().setName(TOPIC).build()) .setGroup(Resource.newBuilder().setName(GROUP).build()) .addEntries(AckMessageEntry.newBuilder() .setMessageId(msg1) .setReceiptHandle(buildReceiptHandle(TOPIC, System.currentTimeMillis() - 10000, 1000)) .build()) .build() ).get(); assertEquals(Code.INVALID_RECEIPT_HANDLE, response.getStatus().getCode()); } { AckMessageResponse response = this.ackMessageActivity.ackMessage( createContext(), AckMessageRequest.newBuilder() .setTopic(Resource.newBuilder().setName(TOPIC).build()) .setGroup(Resource.newBuilder().setName(GROUP).build()) .addEntries(AckMessageEntry.newBuilder() .setMessageId(msg2) .setReceiptHandle(buildReceiptHandle(TOPIC, System.currentTimeMillis() - 10000, 1000)) .build()) .build() ).get(); assertEquals(Code.OK, response.getStatus().getCode()); } { AckMessageResponse response = this.ackMessageActivity.ackMessage( createContext(), AckMessageRequest.newBuilder() .setTopic(Resource.newBuilder().setName(TOPIC).build()) 
.setGroup(Resource.newBuilder().setName(GROUP).build()) .addEntries(AckMessageEntry.newBuilder() .setMessageId(msg3) .setReceiptHandle(buildReceiptHandle(TOPIC, System.currentTimeMillis() - 10000, 1000)) .build()) .build() ).get(); assertEquals(Code.INTERNAL_SERVER_ERROR, response.getStatus().getCode()); } { AckMessageResponse response = this.ackMessageActivity.ackMessage( createContext(), AckMessageRequest.newBuilder() .setTopic(Resource.newBuilder().setName(TOPIC).build()) .setGroup(Resource.newBuilder().setName(GROUP).build()) .addEntries(AckMessageEntry.newBuilder() .setMessageId(msg1) .setReceiptHandle(buildReceiptHandle(TOPIC, System.currentTimeMillis() - 10000, 1000)) .build()) .addEntries(AckMessageEntry.newBuilder() .setMessageId(msg2) .setReceiptHandle(buildReceiptHandle(TOPIC, System.currentTimeMillis(), 3000)) .build()) .addEntries(AckMessageEntry.newBuilder() .setMessageId(msg3) .setReceiptHandle(buildReceiptHandle(TOPIC, System.currentTimeMillis(), 3000)) .build()) .build() ).get(); assertEquals(Code.MULTIPLE_RESULTS, response.getStatus().getCode()); assertEquals(3, response.getEntriesCount()); assertEquals(Code.INVALID_RECEIPT_HANDLE, response.getEntries(0).getStatus().getCode()); assertEquals(Code.OK, response.getEntries(1).getStatus().getCode()); assertEquals(Code.INTERNAL_SERVER_ERROR, response.getEntries(2).getStatus().getCode()); } }
// Registers JVM memory gauges: totals (heap + non-heap), per-region init/used/max/committed
// and usage ratios, then one gauge set per memory pool (usage/max/used/committed/init,
// plus used-after-gc where the pool reports collection usage). Where max is undefined (-1),
// usage ratios fall back to the committed size as the denominator. Returns an unmodifiable map.
@Override public Map<String, Metric> getMetrics() { final Map<String, Metric> gauges = new HashMap<>(); gauges.put("total.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit() + mxBean.getNonHeapMemoryUsage().getInit()); gauges.put("total.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed() + mxBean.getNonHeapMemoryUsage().getUsed()); gauges.put("total.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax() == -1 ? -1 : mxBean.getHeapMemoryUsage().getMax() + mxBean.getNonHeapMemoryUsage().getMax()); gauges.put("total.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted() + mxBean.getNonHeapMemoryUsage().getCommitted()); gauges.put("heap.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit()); gauges.put("heap.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed()); gauges.put("heap.max", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getMax()); gauges.put("heap.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted()); gauges.put("heap.usage", new RatioGauge() { @Override protected Ratio getRatio() { final MemoryUsage usage = mxBean.getHeapMemoryUsage(); return Ratio.of(usage.getUsed(), usage.getMax()); } }); gauges.put("non-heap.init", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getInit()); gauges.put("non-heap.used", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getUsed()); gauges.put("non-heap.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax()); gauges.put("non-heap.committed", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getCommitted()); gauges.put("non-heap.usage", new RatioGauge() { @Override protected Ratio getRatio() { final MemoryUsage usage = mxBean.getNonHeapMemoryUsage(); return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? 
usage.getCommitted() : usage.getMax()); } }); for (final MemoryPoolMXBean pool : memoryPools) { final String poolName = name("pools", WHITESPACE.matcher(pool.getName()).replaceAll("-")); gauges.put(name(poolName, "usage"), new RatioGauge() { @Override protected Ratio getRatio() { MemoryUsage usage = pool.getUsage(); return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax()); } }); gauges.put(name(poolName, "max"), (Gauge<Long>) () -> pool.getUsage().getMax()); gauges.put(name(poolName, "used"), (Gauge<Long>) () -> pool.getUsage().getUsed()); gauges.put(name(poolName, "committed"), (Gauge<Long>) () -> pool.getUsage().getCommitted()); // Only register GC usage metrics if the memory pool supports usage statistics. if (pool.getCollectionUsage() != null) { gauges.put(name(poolName, "used-after-gc"), (Gauge<Long>) () -> pool.getCollectionUsage().getUsed()); } gauges.put(name(poolName, "init"), (Gauge<Long>) () -> pool.getUsage().getInit()); } return Collections.unmodifiableMap(gauges); }
// Verifies the "heap.max" gauge reflects the (mocked) MemoryMXBean heap max of 40 bytes.
@Test public void hasAGaugeForHeapMax() { final Gauge gauge = (Gauge) gauges.getMetrics().get("heap.max"); assertThat(gauge.getValue()) .isEqualTo(40L); }
// Runs the task's pre-call hook synchronously (aborting via onError on failure), then
// submits the task to the executor; completion or failure is reported to the task's
// onPostCall/onError callbacks from the background thread via FutureTask.done().
public <T> void execute(final AsyncTask<T> task) { try { // some small tasks such as validation can be performed here. task.onPreCall(); } catch (Exception e) { task.onError(e); return; } service.submit(new FutureTask<>(task) { @Override protected void done() { super.done(); try { /* * called in context of background thread. There is other variant possible where result is * posted back and sits in the queue of caller thread which then picks it up for * processing. An example of such a system is Android OS, where the UI elements can only * be updated using UI thread. So result must be posted back in UI thread. */ task.onPostCall(get()); } catch (InterruptedException e) { // should not occur } catch (ExecutionException e) { task.onError(e.getCause()); } } }); }
// When call() throws, onError must receive the original cause, and the lifecycle must
// be exactly onPreCall -> call -> onError with no other interactions.
@Test void testCallException() throws Exception { final var exception = new IOException(); when(task.call()).thenThrow(exception); service.execute(task); verify(task, timeout(2000)).onError(eq(exception)); final var inOrder = inOrder(task); inOrder.verify(task, times(1)).onPreCall(); inOrder.verify(task, times(1)).call(); inOrder.verify(task, times(1)).onError(exception); verifyNoMoreInteractions(task); }
// True iff the throwable is an OutOfMemoryError whose message mentions "Metaspace".
// Null-safe: delegates to isOutOfMemoryErrorWithMessageContaining, which tolerates null.
public static boolean isMetaspaceOutOfMemoryError(@Nullable Throwable t) { return isOutOfMemoryErrorWithMessageContaining(t, "Metaspace"); }
// Null input must be classified as "not a metaspace OOM" rather than throwing.
@Test void testIsMetaspaceOutOfMemoryErrorCanHandleNullValue() { assertThat(ExceptionUtils.isMetaspaceOutOfMemoryError(null)).isFalse(); }
// Builds a Jackson JavaType from a reflective java.lang.reflect.Type via the shared mapper.
public static JavaType constructJavaType(Type type) { return mapper.constructType(type); }
// The constructed type for String must expose the right raw class and be final.
@Test void testConstructJavaType() { assertEquals("java.lang.String", JacksonUtils.constructJavaType(String.class).getRawClass().getName()); assertTrue(JacksonUtils.constructJavaType(String.class).isFinal()); }
public void runPickle(Pickle pickle) { try { StepTypeRegistry stepTypeRegistry = createTypeRegistryForPickle(pickle); snippetGenerators = createSnippetGeneratorsForPickle(stepTypeRegistry); // Java8 step definitions will be added to the glue here buildBackendWorlds(); glue.prepareGlue(stepTypeRegistry); TestCase testCase = createTestCaseForPickle(pickle); testCase.run(bus); } finally { glue.removeScenarioScopedGlue(); disposeBackendWorlds(); } }
// After a passing step, after-hooks must run in reverse registration order
// (hook2 before hook1), strictly after the step definition itself.
@Test void aftersteps_executed_for_passed_step() { StubStepDefinition stepDefinition = spy(new StubStepDefinition("some step")); Pickle pickle = createPickleMatchingStepDefinitions(stepDefinition); HookDefinition afteStepHook1 = createHook(); HookDefinition afteStepHook2 = createHook(); TestRunnerSupplier runnerSupplier = new TestRunnerSupplier(bus, runtimeOptions) { @Override public void loadGlue(Glue glue, List<URI> gluePaths) { glue.addAfterHook(afteStepHook1); glue.addAfterHook(afteStepHook2); glue.addStepDefinition(stepDefinition); } }; runnerSupplier.get().runPickle(pickle); InOrder inOrder = inOrder(afteStepHook1, afteStepHook2, stepDefinition); inOrder.verify(stepDefinition).execute(any(Object[].class)); inOrder.verify(afteStepHook2).execute(any(TestCaseState.class)); inOrder.verify(afteStepHook1).execute(any(TestCaseState.class)); }
/**
 * Splits a logger name into its dot/dollar-delimited parts as defined by
 * getSeparatorIndexOf. Always returns at least one element (the whole name
 * when no separator is present).
 */
public static List<String> computeNameParts(String loggerName) {
  List<String> parts = new ArrayList<String>();
  int start = 0;
  for (;;) {
    int sep = getSeparatorIndexOf(loggerName, start);
    if (sep == -1) {
      // No further separator: the remainder is the last part.
      parts.add(loggerName.substring(start));
      return parts;
    }
    parts.add(loggerName.substring(start, sep));
    start = sep + 1;
  }
}
// LOGBACK-384: '$' (nested class) and '.' after it must both act as separators,
// so "com.foo.Bar$Nested.dot" yields five parts.
@Test public void supportNestedClassesWithNestedDot() { // LOGBACK-384 List<String> witnessList = new ArrayList<String>(); witnessList.add("com"); witnessList.add("foo"); witnessList.add("Bar"); witnessList.add("Nested"); witnessList.add("dot"); List<String> partList = LoggerNameUtil.computeNameParts("com.foo.Bar$Nested.dot"); assertEquals(witnessList, partList); }
/**
 * Transforms the block's timestamps (long, single-value) into int values by
 * delegating extraction to the wrapped timestamp function and conversion to
 * {@code convert}. Reuses the shared {@code _intValuesSV} buffer.
 */
@Override
public int[] transformToIntValuesSV(ValueBlock valueBlock) {
  int length = valueBlock.getNumDocs();
  initIntValuesSV(length);
  convert(_timestampsFunction.transformToLongValuesSV(valueBlock), length, _intValuesSV);
  return _intValuesSV;
}
// For each time zone, builds the zoned transform expression, checks the factory returns
// the expected transform class, and compares every per-doc int value with the reference.
@Test(dataProvider = "testCasesZoned") public void testZoned(String function, ZonedTimeFunction expected, Class<? extends TransformFunction> expectedClass) { for (String zone : new String[]{"Europe/Berlin", "America/New_York", "Asia/Katmandu"}) { ExpressionContext expression = RequestContextUtils.getExpression(String.format("%s(%s, '%s')", function, TIMESTAMP_COLUMN, zone)); TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap); Assert.assertTrue(expectedClass.isInstance(transformFunction)); int[] values = transformFunction.transformToIntValuesSV(_projectionBlock); for (int i = 0; i < _projectionBlock.getNumDocs(); i++) { assertEquals(values[i], expected.apply(_timeValues[i], zone)); } } }
// JDBC metadata: 0 means "no limit / unknown" for the maximum char literal length.
@Override public int getMaxCharLiteralLength() { return 0; }
// Pins the metadata default of 0 (no limit) for max char literal length.
@Test void assertGetMaxCharLiteralLength() { assertThat(metaData.getMaxCharLiteralLength(), is(0)); }
/**
 * GZIP-compresses the given string using the supplied character encoding.
 * <p>
 * Best-effort: on failure the error is logged and whatever bytes were written so far
 * (possibly an empty array) are returned rather than throwing. Null or empty input
 * yields an empty array.
 *
 * @param str      text to compress; may be null or empty
 * @param encoding charset name used to encode the string before compression
 * @return GZIP bytes, or an empty/partial array on empty input or failure
 */
public static byte[] tryCompress(String str, String encoding) {
    // Idiomatic empty check (was str.length() == 0).
    if (str == null || str.isEmpty()) {
        return new byte[0];
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    try (GZIPOutputStream gzip = new GZIPOutputStream(out)) {
        gzip.write(str.getBytes(encoding));
    } catch (Exception e) {
        // Pass the throwable itself so the stack trace is logged, not just the message.
        log.error("gzip write is fail: {}", e.getMessage(), e);
    }
    return out.toByteArray();
}
// Round-trip: compress a UTF-8 string, then GZIP-decompress and assert the original text.
@Test public void testTryCompress() throws IOException { String inputString = "This is a test string."; String encoding = "UTF-8"; byte[] compressedBytes = IoUtil.tryCompress(inputString, encoding); Assert.assertNotNull(compressedBytes); Assert.assertTrue(compressedBytes.length > 0); try ( GZIPInputStream gzipInputStream = new GZIPInputStream(new ByteArrayInputStream(compressedBytes))) { byte[] decompressedBytes = new byte[1024]; ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); int readBytes; while ((readBytes = gzipInputStream.read(decompressedBytes)) > 0) { outputStream.write(decompressedBytes, 0, readBytes); } String decompressedString = outputStream.toString(encoding); Assert.assertEquals(inputString, decompressedString); } }
// REST endpoint: creates (no id) or updates (existing id) an asset profile for the
// caller's tenant; tenant id is forced from the session, and entity-level permission
// checks run before delegating to the service layer.
@ApiOperation(value = "Create Or Update Asset Profile (saveAssetProfile)", notes = "Create or update the Asset Profile. When creating asset profile, platform generates asset profile id as " + UUID_WIKI_LINK + "The newly created asset profile id will be present in the response. " + "Specify existing asset profile id to update the asset profile. " + "Referencing non-existing asset profile Id will cause 'Not Found' error. " + NEW_LINE + "Asset profile name is unique in the scope of tenant. Only one 'default' asset profile may exist in scope of tenant. " + "Remove 'id', 'tenantId' from the request body example (below) to create new Asset Profile entity. " + TENANT_AUTHORITY_PARAGRAPH) @PreAuthorize("hasAuthority('TENANT_ADMIN')") @RequestMapping(value = "/assetProfile", method = RequestMethod.POST) @ResponseBody public AssetProfile saveAssetProfile( @Parameter(description = "A JSON value representing the asset profile.") @RequestBody AssetProfile assetProfile) throws Exception { assetProfile.setTenantId(getTenantId()); checkEntity(assetProfile.getId(), assetProfile, Resource.ASSET_PROFILE); return tbAssetProfileService.save(assetProfile, getCurrentUser()); }
// Create then rename an asset profile via the REST API, asserting the persisted fields
// and that ADDED/UPDATED state-change notifications are each broadcast exactly once.
@Test public void testSaveAssetProfile() throws Exception { AssetProfile assetProfile = this.createAssetProfile("Asset Profile"); Mockito.reset(tbClusterService, auditLogService); AssetProfile savedAssetProfile = doPost("/api/assetProfile", assetProfile, AssetProfile.class); Assert.assertNotNull(savedAssetProfile); Assert.assertNotNull(savedAssetProfile.getId()); Assert.assertTrue(savedAssetProfile.getCreatedTime() > 0); Assert.assertEquals(assetProfile.getName(), savedAssetProfile.getName()); Assert.assertEquals(assetProfile.getDescription(), savedAssetProfile.getDescription()); Assert.assertEquals(assetProfile.isDefault(), savedAssetProfile.isDefault()); Assert.assertEquals(assetProfile.getDefaultRuleChainId(), savedAssetProfile.getDefaultRuleChainId()); testNotifyEntityBroadcastEntityStateChangeEventOneTime(savedAssetProfile, savedAssetProfile.getId(), savedAssetProfile.getId(), savedTenant.getId(), tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(), ActionType.ADDED); savedAssetProfile.setName("New asset profile"); doPost("/api/assetProfile", savedAssetProfile, AssetProfile.class); AssetProfile foundAssetProfile = doGet("/api/assetProfile/" + savedAssetProfile.getId().getId().toString(), AssetProfile.class); Assert.assertEquals(savedAssetProfile.getName(), foundAssetProfile.getName()); testNotifyEntityBroadcastEntityStateChangeEventOneTime(foundAssetProfile, foundAssetProfile.getId(), foundAssetProfile.getId(), savedTenant.getId(), tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(), ActionType.UPDATED); }
@Override public void init(FilterConfig config) { // Filters are already available in the container unless a database migration is required. See // org.sonar.server.startup.RegisterServletFilters. List<ServletFilter> servletFilterList = PlatformImpl.getInstance().getContainer().getComponentsByType(ServletFilter.class); List<HttpFilter> httpFilterList = PlatformImpl.getInstance().getContainer().getComponentsByType(HttpFilter.class); init(config, servletFilterList, httpFilterList); }
// Initializing with a pattern filter must log exactly one INFO line describing
// the filter's inclusion/exclusion URL patterns.
@Test public void display_servlet_filter_patterns_in_INFO_log() { HttpFilter filter = new PatternFilter(org.sonar.api.web.UrlPattern.builder().includes("/api/issues").excludes("/batch/projects").build()); FilterConfig config = mock(FilterConfig.class); MasterServletFilter master = new MasterServletFilter(); master.init(config, emptyList(), singletonList(filter)); assertThat(logTester.logs(Level.INFO)).containsOnly("Initializing servlet filter PatternFilter [pattern=UrlPattern{inclusions=[/api/issues], exclusions=[/batch/projects]}]"); }
public static void main(String[] args) { // initialize game objects and print their status LOGGER.info("Init objects and print their status"); var objects = List.of( new FlamingAsteroid(0, 0, 5, 5), new SpaceStationMir(1, 1, 2, 2), new Meteoroid(10, 10, 15, 15), new SpaceStationIss(12, 12, 14, 14) ); objects.forEach(o -> LOGGER.info(o.toString())); // collision check LOGGER.info("Collision check"); objects.forEach(o1 -> objects.forEach(o2 -> { if (o1 != o2 && o1.intersectsWith(o2)) { o1.collision(o2); } })); // output eventual object statuses LOGGER.info("Print object status after collision checks"); objects.forEach(o -> LOGGER.info(o.toString())); }
// Smoke test: the demo app must run to completion without throwing.
@Test void shouldExecuteApplicationWithoutException() { assertDoesNotThrow(() -> App.main(new String[]{})); }
/**
 * Applies every supported GSUB feature, in FEATURES_IN_ORDER, to the glyph id list,
 * then repositions glyphs. Unsupported features are logged and skipped.
 *
 * @param originalGlyphIds input glyph ids (not modified)
 * @return unmodifiable transformed glyph id list
 */
@Override
public List<Integer> applyTransforms(List<Integer> originalGlyphIds) {
    List<Integer> glyphIds = originalGlyphIds;
    for (String feature : FEATURES_IN_ORDER) {
        if (gsubData.isFeatureSupported(feature)) {
            LOG.debug("applying the feature {}", feature);
            glyphIds = applyGsubFeature(gsubData.getFeature(feature), glyphIds);
        } else {
            LOG.debug("the feature {} was not found", feature);
        }
    }
    return Collections.unmodifiableList(repositionGlyphs(glyphIds));
}
// Bengali GSUB reference: the input text must map to the expected post-substitution glyph ids.
@Test void testApplyTransforms_ref() { // given List<Integer> glyphsAfterGsub = Arrays.asList(85, 104, 440, 82); // when List<Integer> result = gsubWorkerForBengali.applyTransforms(getGlyphIds("ধুর্ত")); // then assertEquals(glyphsAfterGsub, result); }
// Convenience accessor: the event source, narrowed to SelectorDO.
public SelectorDO getSelector() { return (SelectorDO) getSource(); }
// The event must hand back the exact SelectorDO it was built from.
@Test void getSelector() { SelectorDO selector = selectorUpdatedEvent.getSelector(); assertEquals(selectorDO, selector); }
public String getClientLatency() { if (!enabled) { return null; } Instant trackerStart = Instant.now(); String latencyDetails = queue.poll(); // non-blocking pop if (LOG.isDebugEnabled()) { Instant stop = Instant.now(); long elapsed = Duration.between(trackerStart, stop).toMillis(); LOG.debug("Dequeued latency info [{} ms]: {}", elapsed, latencyDetails); } return latencyDetails; }
// Runs 100 tracked operations concurrently, then drains the tracker: each record must
// match the expected aggregate-latency format, and the queue must end up empty.
@Test public void verifyTrackingForAggregateLatencyRecords() throws Exception { // verify that tracking of aggregate latency records works as expected final int numTasks = 100; AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, true); String latencyDetails = abfsPerfTracker.getClientLatency(); assertThat(latencyDetails).describedAs("AbfsPerfTracker should be empty").isNull(); List<Callable<Integer>> tasks = new ArrayList<>(); AbfsJdkHttpOperation httpOperation = new AbfsJdkHttpOperation(url, "GET", new ArrayList<>(), Duration.ofMillis(DEFAULT_HTTP_CONNECTION_TIMEOUT), Duration.ofMillis(DEFAULT_HTTP_READ_TIMEOUT)); for (int i = 0; i < numTasks; i++) { tasks.add(() -> { try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { tracker.registerResult(httpOperation).registerSuccess(true) .registerAggregates(Instant.now(), TEST_AGGREGATE_COUNT); return 0; } }); } for (Future<Integer> fr: executorService.invokeAll(tasks)) { fr.get(); } for (int i = 0; i < numTasks; i++) { latencyDetails = abfsPerfTracker.getClientLatency(); assertThat(latencyDetails).describedAs("AbfsPerfTracker should return non-null record").isNotNull(); assertThat(latencyDetails).describedAs("Latency record should be in the correct format") .containsPattern("h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller" + " ce=oneOperationCallee r=Succeeded l=[0-9]+ ls=[0-9]+ lc=" + TEST_AGGREGATE_COUNT + " s=0 e= ci=[^ ]* ri=[^ ]* ct=[^ ]* st=[^ ]* rt=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile"); } latencyDetails = abfsPerfTracker.getClientLatency(); assertThat(latencyDetails).describedAs("AbfsPerfTracker should return no record").isNull(); }
/**
 * Extracts the watermark for a split as the minimum lower-bound statistic of the
 * event-time column across all files in the split, converted to milliseconds.
 *
 * @throws IllegalArgumentException if any file lacks lower-bound statistics for the column
 * @throws java.util.NoSuchElementException if the split contains no files
 */
@Override
public long extractWatermark(IcebergSourceSplit split) {
  return split.task().files().stream()
      .mapToLong(
          scanTask -> {
            // Fixed format string: it previously had two %s placeholders for three
            // arguments, so the file description was dropped from the message.
            Preconditions.checkArgument(
                scanTask.file().lowerBounds() != null
                    && scanTask.file().lowerBounds().get(eventTimeFieldId) != null,
                "Missing statistics for column name = %s, id = %s in file = %s",
                eventTimeFieldName,
                eventTimeFieldId,
                scanTask.file());
            return timeUnit.toMillis(
                Conversions.fromByteBuffer(
                    Types.LongType.get(), scanTask.file().lowerBounds().get(eventTimeFieldId)));
          })
      // Primitive min avoids boxing and the unchecked Optional.get(); getAsLong still
      // throws NoSuchElementException for an empty split, same as before.
      .min()
      .getAsLong();
}
// A combined split's watermark must equal the minimum of the per-file watermarks.
@TestTemplate public void testMultipleFiles() throws IOException { assumeThat(columnName).isEqualTo("timestamp_column"); IcebergSourceSplit combinedSplit = IcebergSourceSplit.fromCombinedScanTask( ReaderUtil.createCombinedScanTask( TEST_RECORDS, temporaryFolder, FileFormat.PARQUET, APPENDER_FACTORY)); ColumnStatsWatermarkExtractor extractor = new ColumnStatsWatermarkExtractor(SCHEMA, columnName, null); assertThat(extractor.extractWatermark(split(0))) .isEqualTo(MIN_VALUES.get(0).get(columnName).longValue()); assertThat(extractor.extractWatermark(split(1))) .isEqualTo(MIN_VALUES.get(1).get(columnName).longValue()); assertThat(extractor.extractWatermark(combinedSplit)) .isEqualTo(Math.min(MIN_VALUES.get(0).get(columnName), MIN_VALUES.get(1).get(columnName))); }
/**
 * Requests an upgrade of the named component instances by PUT-ing a JSON array of
 * UPGRADING containers to the service instances endpoint. Any failure is logged
 * and reported via the EXIT_EXCEPTION_THROWN code instead of being rethrown.
 */
@Override
public int actionUpgradeInstances(String appName, List<String> compInstances)
    throws IOException, YarnException {
  try {
    Container[] toUpgrade = new Container[compInstances.size()];
    int idx = 0;
    for (String instanceName : compInstances) {
      Container container = new Container();
      container.setComponentInstanceName(instanceName);
      container.setState(ContainerState.UPGRADING);
      toUpgrade[idx++] = container;
    }
    String payload = ServiceApiUtil.CONTAINER_JSON_SERDE.toJson(toUpgrade);
    ClientResponse response =
        getApiClient(getInstancesPath(appName)).put(ClientResponse.class, payload);
    return processResponse(response);
  } catch (Exception e) {
    LOG.error("Failed to upgrade component instance: ", e);
    return EXIT_EXCEPTION_THROWN;
  }
}
// Upgrading two component instances through the client must report EXIT_SUCCESS.
@Test void testInstancesUpgrade() { String appName = "example-app"; try { int result = asc.actionUpgradeInstances(appName, Lists.newArrayList( "comp-1", "comp-2")); assertEquals(EXIT_SUCCESS, result); } catch (IOException | YarnException e) { fail(); } }
/**
 * Computes the withdrawal fee from a percentage rate.
 * Returns 0 when the percentage is not configured (null) or non-positive.
 */
Integer calculateFeePrice(Integer withdrawPrice, Integer percent) {
    // Guard clause: no fee without a positive configured percentage.
    if (percent == null || percent <= 0) {
        return 0;
    }
    return MoneyUtils.calculateRatePrice(withdrawPrice, Double.valueOf(percent));
}
// Fee is 0 for an unset (null) or zero percentage, and 1% of 100 yields a fee of 1.
@Test public void testCalculateFeePrice() { Integer withdrawPrice = 100; // fee percentage not configured Integer percent = null; assertEquals(brokerageWithdrawService.calculateFeePrice(withdrawPrice, percent), 0); // fee percentage is 0 percent = 0; assertEquals(brokerageWithdrawService.calculateFeePrice(withdrawPrice, percent), 0); // positive fee percentage percent = 1; assertEquals(brokerageWithdrawService.calculateFeePrice(withdrawPrice, percent), 1); }
// Builds a matcher that accepts either the context path itself or any URI beneath it
// (context joined with a ".*" pattern).
public static RequestMatcher context(final String context) { return or(by(uri(context)), match(uri(URLs.join(context, ".*")))); }
// The context matcher must accept both the exact context URI and sub-paths under it.
@Test public void should_create_context_correctly() { RequestMatcher matcher = InternalApis.context("targets"); assertThat(matcher.match(requestByUri("targets/hello")), is(true)); assertThat(matcher.match(requestByUri("targets")), is(true)); }
// Convenience overload: look up the property with an empty-string default.
public String toString(String name) { return toString(name, ""); }
// Every existing key (including an empty value) must be returned verbatim; a missing
// key must fall back to the supplied default.
@Test public void testToString_String_String() { System.out.println("toString"); String expResult; String result; Properties props = new Properties(); props.put("value1", "sTr1"); props.put("value2", "str_2"); props.put("empty", ""); props.put("str", "abc"); props.put("boolean", "true"); props.put("float", "24.98"); props.put("int", "12"); props.put("char", "a"); PropertyParser instance = new PropertyParser(props); expResult = "sTr1"; result = instance.toString("value1", "defStr"); assertEquals(expResult, result); expResult = "str_2"; result = instance.toString("value2", "defStr"); assertEquals(expResult, result); expResult = ""; result = instance.toString("empty", "defStr"); assertEquals(expResult, result); expResult = "abc"; result = instance.toString("str", "defStr"); assertEquals(expResult, result); expResult = "true"; result = instance.toString("boolean", "defStr"); assertEquals(expResult, result); expResult = "24.98"; result = instance.toString("float", "defStr"); assertEquals(expResult, result); expResult = "12"; result = instance.toString("int", "defStr"); assertEquals(expResult, result); expResult = "a"; result = instance.toString("char", "defStr"); assertEquals(expResult, result); expResult = "defStr"; result = instance.toString("nonexistent", "defStr"); assertEquals(expResult, result); }
// Produces the anonymized text for an already-parsed statement tree.
public String anonymize(final ParseTree tree) { return build(tree); }
// Approval test: anonymized CREATE TABLE AS output is compared against the stored snapshot.
@Test public void shouldAnonymizeCreateTableAsQueryCorrectly() { final String output = anon.anonymize( "CREATE TABLE my_table AS SELECT user_id, browser_cookie, ip_address\n" + "FROM another_table\n" + "WHERE user_id = 4214\n" + "AND browser_cookie = 'aefde34ec'\n" + "AND ip_address = '10.10.0.2';"); Approvals.verify(output); }
// Entry point of the fluent topic-definition API: starts a builder for the named topic.
public static NewTopicBuilder defineTopic(String topicName) { return new NewTopicBuilder(topicName); }
// With broker defaults requested, replication factor must be min(3, brokers) and the
// partition count must follow the broker default for each cluster size 1..9.
@Test public void shouldCreateTopicWithDefaultPartitionsAndReplicationFactorWhenItDoesNotExist() { NewTopic newTopic = TopicAdmin.defineTopic("my-topic") .defaultPartitions() .defaultReplicationFactor() .compacted() .build(); for (int numBrokers = 1; numBrokers < 10; ++numBrokers) { int expectedReplicas = Math.min(3, numBrokers); assertTopicCreation(numBrokers, newTopic, null, null, expectedReplicas, 1); assertTopicCreation(numBrokers, newTopic, 30, null, expectedReplicas, 30); } }
/**
 * Resolves the meter type from the endpoint's remaining URI: the text before the
 * first ':' names the type; without a ':' the default meter type is used.
 */
Meter.Type getMetricsType(String remaining) {
    String typeName = StringHelper.before(remaining, ":");
    if (typeName == null) {
        return DEFAULT_METER_TYPE;
    }
    return MicrometerUtils.getByName(typeName);
}
// Each supported meter type name before the ':' must resolve to that meter type.
@Test public void testGetMetricsType() { Meter.Type[] supportedTypes = { Meter.Type.COUNTER, Meter.Type.DISTRIBUTION_SUMMARY, Meter.Type.TIMER }; for (Meter.Type type : supportedTypes) { assertThat(component.getMetricsType(MicrometerUtils.getName(type) + ":metrics-name"), is(type)); } }
// Template parser: scans the input symbol-by-symbol, buffering characters between a
// prepare marker and its end marker into `expression`, mapping each buffered expression
// through propertyMapping, and copying everything else through verbatim. The expression
// buffer grows by 1.5x when full. The trailing branch flushes a final expression that
// ends exactly at end-of-input. State (next()/symbol/setParsed/newArr/len) lives on the
// enclosing object; left byte-identical because the logic is order-sensitive.
public String parse(Function<String, String> propertyMapping) { init(); boolean inPrepare = false; char[] expression = new char[128]; int expressionPos = 0; while (next()) { if (isPrepare()) { inPrepare = true; } else if (inPrepare && isPrepareEnd()) { inPrepare = false; setParsed(propertyMapping.apply(new String(expression, 0, expressionPos)).toCharArray()); expressionPos = 0; } else if (inPrepare) { if (expression.length <= expressionPos) { expression = Arrays.copyOf(expression, (int)(expression.length * 1.5)); } expression[expressionPos++] = symbol; } else if (!isPreparing()) { setParsed(symbol); } } if (isPrepareEnd() && expressionPos > 0) { setParsed(propertyMapping.apply(new String(expression, 0, expressionPos)).toCharArray()); } else { setParsed(symbol); } return new String(newArr, 0, len); }
// A very large substitution value must be expanded in full (exercises buffer growth).
@Test public void testLarge() { String str = ""; for (int i = 0; i < 1000; i++) { str += "test-" + i; } String result = TemplateParser.parse("test-${name}", Collections.singletonMap("name", str)); Assert.assertEquals(result, "test-" + str); }
// Returns this tag's identifier string.
public String getId() { return id; }
// Non-static TupleTags must get unique ids derived from the creating method plus a
// line number; checks the name prefix and that the line number is plausibly large.
@Test public void testNonstaticTupleTag() { assertNotEquals(new TupleTag<>().getId(), new TupleTag<>().getId()); assertNotEquals(createNonstaticTupleTag(), createNonstaticTupleTag()); TupleTag<Object> tag = createNonstaticTupleTag(); // Check that the name is derived from the method it is created in. assertThat( Iterables.get(Splitter.on('#').split(tag.getId()), 0), startsWith("org.apache.beam.sdk.values.TupleTagTest.createNonstaticTupleTag")); // Check that after the name there is a ':' followed by a line number, and just make // sure the line number is big enough to be reasonable, so superficial changes don't break // the test. assertThat( Integer.parseInt( Iterables.get( Splitter.on(':').split(Iterables.get(Splitter.on('#').split(tag.getId()), 0)), 1)), greaterThan(15)); }
@VisibleForTesting protected void copyResourcesFromJar(JarFile inputJar) throws IOException { Enumeration<JarEntry> inputJarEntries = inputJar.entries(); // The zip spec allows multiple files with the same name; the Java zip libraries do not. // Keep track of the files we've already written to filter out duplicates. // Also, ignore the old manifest; we want to write our own. Set<String> previousEntryNames = new HashSet<>(ImmutableList.of(JarFile.MANIFEST_NAME)); while (inputJarEntries.hasMoreElements()) { JarEntry inputJarEntry = inputJarEntries.nextElement(); InputStream inputStream = inputJar.getInputStream(inputJarEntry); String entryName = inputJarEntry.getName(); if (previousEntryNames.contains(entryName)) { LOG.debug("Skipping duplicated file {}", entryName); } else { JarEntry outputJarEntry = new JarEntry(inputJarEntry); outputStream.putNextEntry(outputJarEntry); LOG.trace("Copying jar entry {}", inputJarEntry); IOUtils.copy(inputStream, outputStream); previousEntryNames.add(entryName); } } }
// A jar containing only a manifest must produce no output entries at all.
@Test public void testCopyResourcesFromJar_ignoresManifest() throws IOException { List<JarEntry> manifestEntry = ImmutableList.of(new JarEntry(JarFile.MANIFEST_NAME)); when(inputJar.entries()).thenReturn(Collections.enumeration(manifestEntry)); jarCreator.copyResourcesFromJar(inputJar); verify(outputStream, never()).putNextEntry(any()); }
// Pure delegation: forwards the lookup query to the underlying MonitorService.
@Override public List<URL> lookup(URL query) { return monitorService.lookup(query); }
// DubboMonitor.lookup must pass the query URL straight through to the MonitorService.
@Test void testLookUp() { Invoker invoker = mock(Invoker.class); MonitorService monitorService = mock(MonitorService.class); URL queryUrl = URL.valueOf("dubbo://127.0.0.1:7070?interval=20"); given(invoker.getUrl()).willReturn(queryUrl); DubboMonitor dubboMonitor = new DubboMonitor(invoker, monitorService); dubboMonitor.lookup(queryUrl); verify(monitorService).lookup(queryUrl); }
/**
 * Parses a string into a boolean via booleanStringMatches: Optional.of(true) on a
 * true-match, Optional.of(false) on a false-match, otherwise Optional.empty().
 */
public static Optional<Boolean> parseBooleanExact(final String value) {
  if (booleanStringMatches(value, true)) {
    return Optional.of(true);
  }
  return booleanStringMatches(value, false)
      ? Optional.of(false)
      : Optional.empty();
}
// The test name promises the exact word "Yes"; the original asserted on the
// truncated value "Ye", which does not exercise the advertised case.
@Test
public void shouldParseExactYesAsTrue() {
  assertThat(SqlBooleans.parseBooleanExact("Yes"), is(Optional.of(true)));
}
/**
 * Applies this update's pending changes on top of the current table metadata
 * and attempts to commit the resulting metadata via the table operations.
 */
@Override
public void commit() {
  final TableMetadata current = ops.current();
  final TableMetadata updated = internalApply(current);
  ops.commit(current, updated);
}
// An updateStatistics() commit that changes nothing, inside a transaction, must
// be a no-op: no new metadata version and the very same TableMetadata instance.
@TestTemplate
public void testEmptyTransactionalUpdateStatistics() {
  assertThat(version()).isEqualTo(0);
  TableMetadata base = readMetadata();
  Transaction transaction = table.newTransaction();
  transaction.updateStatistics().commit();
  transaction.commitTransaction();
  // Identity check (isSameAs): the empty commit produced no new metadata object.
  assertThat(table.ops().current()).isSameAs(base);
  assertThat(version()).isEqualTo(0);
}
@VisibleForTesting static String formatTimestamp(Long timestampMicro) { // timestampMicro is in "microseconds since epoch" format, // e.g., 1452062291123456L means "2016-01-06 06:38:11.123456 UTC". // Separate into seconds and microseconds. long timestampSec = timestampMicro / 1_000_000; long micros = timestampMicro % 1_000_000; if (micros < 0) { micros += 1_000_000; timestampSec -= 1; } String dayAndTime = DATE_AND_SECONDS_FORMATTER.print(timestampSec * 1000); if (micros == 0) { return String.format("%s UTC", dayAndTime); } return String.format("%s.%06d UTC", dayAndTime, micros); }
// Pre-epoch (negative) microsecond values must borrow from the seconds part so
// the fractional portion always prints as a positive, zero-padded 6-digit value.
@Test
public void testFormatTimestampNegative() {
  assertThat(BigQueryAvroUtils.formatTimestamp(-1L), equalTo("1969-12-31 23:59:59.999999 UTC"));
  assertThat(
      BigQueryAvroUtils.formatTimestamp(-100_000L), equalTo("1969-12-31 23:59:59.900000 UTC"));
  // Whole-second values omit the fractional part entirely.
  assertThat(BigQueryAvroUtils.formatTimestamp(-1_000_000L), equalTo("1969-12-31 23:59:59 UTC"));
  // No leap seconds before 1972. 477 leap years from 1 through 1969.
  assertThat(
      BigQueryAvroUtils.formatTimestamp(-(1969L * 365 + 477) * 86400 * 1_000_000),
      equalTo("0001-01-01 00:00:00 UTC"));
}
/**
 * Assigns every scan range to a compute node. When {@code forceScheduleLocal}
 * is set, ranges are first offered to nodes co-located with their replicas;
 * whatever cannot be placed locally (and everything, otherwise) is scheduled
 * via consistent hashing with a re-balance step that caps per-node load near
 * the average.
 *
 * @throws UserException declared by the interface; this implementation throws
 *         an unchecked RuntimeException if no backend can be found for a range
 */
@Override
public void computeScanRangeAssignment() throws UserException {
    if (locations.size() == 0) {
        return;
    }
    // Average target bytes per node; the +1 guards against a zero target when
    // totalSize is smaller than the worker count.
    long totalSize = computeTotalSize();
    long avgNodeScanRangeBytes = totalSize / Math.max(workerProvider.getAllWorkers().size(), 1) + 1;

    // Initialise per-node accounting and the host -> node index used for locality.
    for (ComputeNode computeNode : workerProvider.getAllWorkers()) {
        assignedScansPerComputeNode.put(computeNode, 0L);
        reBalanceBytesPerComputeNode.put(computeNode, 0L);
        hostToBackends.put(computeNode.getHost(), computeNode);
    }

    // schedule scan ranges to co-located backends.
    // and put rest scan ranges into remote scan ranges.
    List<TScanRangeLocations> remoteScanRangeLocations = Lists.newArrayList();
    if (forceScheduleLocal) {
        for (int i = 0; i < locations.size(); ++i) {
            TScanRangeLocations scanRangeLocations = locations.get(i);
            List<ComputeNode> backends = new ArrayList<>();
            // select all backends that are co-located with this scan range.
            for (final TScanRangeLocation location : scanRangeLocations.getLocations()) {
                Collection<ComputeNode> servers = hostToBackends.get(location.getServer().getHostname());
                if (servers == null || servers.isEmpty()) {
                    continue;
                }
                backends.addAll(servers);
            }
            ComputeNode node = reBalanceScanRangeForComputeNode(backends, avgNodeScanRangeBytes, scanRangeLocations);
            if (node == null) {
                // No acceptable local node — defer this range to remote scheduling.
                remoteScanRangeLocations.add(scanRangeLocations);
            } else {
                recordScanRangeAssignment(node, backends, scanRangeLocations);
            }
        }
    } else {
        remoteScanRangeLocations = locations;
    }
    if (remoteScanRangeLocations.isEmpty()) {
        return;
    }

    // use consistent hashing to schedule remote scan ranges
    HashRing hashRing = makeHashRing();
    if (shuffleScanRange) {
        Collections.shuffle(remoteScanRangeLocations);
    }
    // assign scan ranges.
    for (int i = 0; i < remoteScanRangeLocations.size(); ++i) {
        TScanRangeLocations scanRangeLocations = remoteScanRangeLocations.get(i);
        // kCandidateNumber candidates from the hash ring; the re-balancer picks one.
        List<ComputeNode> backends = hashRing.get(scanRangeLocations, kCandidateNumber);
        ComputeNode node = reBalanceScanRangeForComputeNode(backends, avgNodeScanRangeBytes, scanRangeLocations);
        if (node == null) {
            throw new RuntimeException("Failed to find backend to execute");
        }
        recordScanRangeAssignment(node, backends, scanRangeLocations);
    }
    recordScanRangeStatistic();
}
// Schedules 10k equal-sized scan ranges across 3 hosts and checks that the
// consistent-hash + re-balance scheduler keeps each node within 20% of the
// average load, with total re-balanced bytes under 0.4% of the data set.
@Test
public void testHdfsScanNodeScanRangeReBalance() throws Exception {
    SessionVariable sessionVariable = new SessionVariable();
    new Expectations() {
        {
            hdfsScanNode.getId();
            result = scanNodeId;
            hdfsScanNode.getTableName();
            result = "hive_tbl";
            hiveTable.getTableLocation();
            result = "hdfs://dfs00/dataset/";
            ConnectContext.get();
            result = context;
            context.getSessionVariable();
            result = sessionVariable;
        }
    };

    long scanRangeNumber = 10000;
    long scanRangeSize = 10000;
    int hostNumber = 3;
    List<TScanRangeLocations> locations = createScanRanges(scanRangeNumber, scanRangeSize);
    FragmentScanRangeAssignment assignment = new FragmentScanRangeAssignment();
    ImmutableMap<Long, ComputeNode> computeNodes = createComputeNodes(hostNumber);
    DefaultWorkerProvider workerProvider = new DefaultWorkerProvider(
            ImmutableMap.of(), computeNodes, ImmutableMap.of(), computeNodes, true
    );
    HDFSBackendSelector selector =
            new HDFSBackendSelector(hdfsScanNode, locations, assignment, workerProvider, false, false);
    selector.computeScanRangeAssignment();

    // Per-node bytes must stay within 20% of the expected average.
    long avg = (scanRangeNumber * scanRangeSize) / hostNumber + 1;
    double variance = 0.2 * avg;
    Map<Long, Long> stats = computeWorkerIdToReadBytes(assignment, scanNodeId);
    for (Map.Entry<Long, Long> entry : stats.entrySet()) {
        System.out.printf("%s -> %d bytes\n", entry.getKey(), entry.getValue());
        Assert.assertTrue((entry.getValue() - avg) < variance);
    }

    // Bytes moved by re-balancing must be a small fraction (0.4%) of the total.
    variance = 0.4 / 100 * scanRangeNumber * scanRangeSize;
    double actual = 0;
    for (Map.Entry<ComputeNode, Long> entry : selector.reBalanceBytesPerComputeNode.entrySet()) {
        System.out.printf("%s -> %d bytes re-balance\n", entry.getKey(), entry.getValue());
        actual = actual + entry.getValue();
    }
    Assert.assertTrue(actual < variance);
}
/**
 * Copies the requested {@link FileAttribute}s from the source listing status
 * onto the target path, issuing filesystem RPCs only when the target's current
 * value actually differs (to avoid no-op calls).
 *
 * @param targetFS          filesystem containing {@code path}
 * @param path              target file/directory to update
 * @param srcFileStatus     attributes captured from the source
 * @param attributes        which attributes to preserve (BLOCKSIZE/CHECKSUMTYPE
 *                          are stripped here as they are handled elsewhere)
 * @param preserveRawXattrs whether raw.* xattrs are copied even when XATTR
 *                          preservation itself was not requested
 * @throws IOException on any filesystem failure
 */
public static void preserve(FileSystem targetFS, Path path,
    CopyListingFileStatus srcFileStatus,
    EnumSet<FileAttribute> attributes,
    boolean preserveRawXattrs) throws IOException {

  // strip out those attributes we don't need any more
  attributes.remove(FileAttribute.BLOCKSIZE);
  attributes.remove(FileAttribute.CHECKSUMTYPE);
  // If not preserving anything from FileStatus, don't bother fetching it.
  FileStatus targetFileStatus = attributes.isEmpty() ? null : targetFS.getFileStatus(path);
  String group = targetFileStatus == null ? null : targetFileStatus.getGroup();
  String user = targetFileStatus == null ? null : targetFileStatus.getOwner();
  boolean chown = false;

  if (attributes.contains(FileAttribute.ACL)) {
    List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
    List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
    if (!srcAcl.equals(targetAcl)) {
      targetFS.removeAcl(path);
      targetFS.setAcl(path, srcAcl);
    }
    // setAcl doesn't preserve sticky bit, so also call setPermission if needed.
    if (srcFileStatus.getPermission().getStickyBit() !=
        targetFileStatus.getPermission().getStickyBit()) {
      targetFS.setPermission(path, srcFileStatus.getPermission());
    }
  } else if (attributes.contains(FileAttribute.PERMISSION) &&
      !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
    targetFS.setPermission(path, srcFileStatus.getPermission());
  }

  final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR);
  if (preserveXAttrs || preserveRawXattrs) {
    final String rawNS = StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
    Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs();
    Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path);
    if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) {
      for (Entry<String, byte[]> entry : srcXAttrs.entrySet()) {
        String xattrName = entry.getKey();
        // raw.* xattrs are copied whenever preserveRawXattrs is set, even if
        // XATTR preservation was not requested for ordinary attributes.
        if (xattrName.startsWith(rawNS) || preserveXAttrs) {
          targetFS.setXAttr(path, xattrName, entry.getValue());
        }
      }
    }
  }

  // The replication factor can only be preserved for replicated files.
  // It is ignored when either the source or target file are erasure coded.
  if (attributes.contains(FileAttribute.REPLICATION) && !targetFileStatus.isDirectory() &&
      !targetFileStatus.isErasureCoded() && !srcFileStatus.isErasureCoded() &&
      srcFileStatus.getReplication() != targetFileStatus.getReplication()) {
    targetFS.setReplication(path, srcFileStatus.getReplication());
  }

  if (attributes.contains(FileAttribute.GROUP) && !group.equals(srcFileStatus.getGroup())) {
    group = srcFileStatus.getGroup();
    chown = true;
  }

  if (attributes.contains(FileAttribute.USER) && !user.equals(srcFileStatus.getOwner())) {
    user = srcFileStatus.getOwner();
    chown = true;
  }

  // Owner and group changes are combined into a single setOwner RPC.
  if (chown) {
    targetFS.setOwner(path, user, group);
  }

  if (attributes.contains(FileAttribute.TIMES)) {
    targetFS.setTimes(path, srcFileStatus.getModificationTime(), srcFileStatus.getAccessTime());
  }
}
// With only TIMES requested, DistCpUtils.preserve must copy mod/access times and
// leave permission, owner, group and replication on the destination untouched.
@Test
public void testPreserveTimestampOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.TIMES);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  // Deliberately make every attribute differ between src and dst.
  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
/**
 * Base implementation: wraps this expression in an explicit {@code CastExpr}
 * to the target type without any validation. Literal subclasses (seen in the
 * accompanying tests) override this to produce converted literal instances.
 *
 * @param targetType the type to cast to
 * @return a new {@code CastExpr} around this expression
 * @throws AnalysisException declared for overriding implementations; not
 *         thrown by this base version
 */
public Expr uncheckedCastTo(Type targetType) throws AnalysisException {
    return new CastExpr(targetType, this);
}
// uncheckedCastTo on literal subclasses must return a NEW object when the type
// actually changes (leaving the receiver untouched), and return the SAME object
// when the target type already matches.
@Test
public void testUncheckedCastTo() throws AnalysisException {
    // uncheckedCastTo should return new object
    // date: datetime -> date truncates the time part on the copy only.
    DateLiteral dateLiteral = new DateLiteral(2020, 4, 5, 12, 0, 5, 0);
    Assert.assertTrue(dateLiteral.getType().isDatetime());
    DateLiteral castLiteral = (DateLiteral) dateLiteral.uncheckedCastTo(Type.DATE);
    Assert.assertFalse(dateLiteral == castLiteral);
    Assert.assertTrue(dateLiteral.getType().isDatetime());
    Assert.assertTrue(castLiteral.getType().isDate());

    Assert.assertEquals(0, castLiteral.getHour());
    Assert.assertEquals(0, castLiteral.getMinute());
    Assert.assertEquals(0, castLiteral.getSecond());

    Assert.assertEquals(12, dateLiteral.getHour());
    Assert.assertEquals(0, dateLiteral.getMinute());
    Assert.assertEquals(5, dateLiteral.getSecond());

    // date -> datetime widens with a zeroed time part.
    DateLiteral dateLiteral2 = new DateLiteral(2020, 4, 5);
    Assert.assertTrue(dateLiteral2.getType().isDate());
    castLiteral = (DateLiteral) dateLiteral2.uncheckedCastTo(Type.DATETIME);
    Assert.assertFalse(dateLiteral2 == castLiteral);
    Assert.assertTrue(dateLiteral2.getType().isDate());
    Assert.assertTrue(castLiteral.getType().isDatetime());

    Assert.assertEquals(0, castLiteral.getHour());
    Assert.assertEquals(0, castLiteral.getMinute());
    Assert.assertEquals(0, castLiteral.getSecond());

    // float: widening copies, same-type cast returns the receiver.
    FloatLiteral floatLiteral = new FloatLiteral(0.1, Type.FLOAT);
    Assert.assertTrue(floatLiteral.getType().isFloat());
    FloatLiteral castFloatLiteral = (FloatLiteral) floatLiteral.uncheckedCastTo(Type.DOUBLE);
    Assert.assertTrue(floatLiteral.getType().isFloat());
    Assert.assertTrue(castFloatLiteral.getType().isDouble());
    Assert.assertFalse(floatLiteral == castFloatLiteral);
    FloatLiteral castFloatLiteral2 = (FloatLiteral) floatLiteral.uncheckedCastTo(Type.FLOAT);
    Assert.assertTrue(floatLiteral == castFloatLiteral2);

    // int
    IntLiteral intLiteral = new IntLiteral(200);
    Assert.assertTrue(intLiteral.getType().isSmallint());
    IntLiteral castIntLiteral = (IntLiteral) intLiteral.uncheckedCastTo(Type.INT);
    Assert.assertTrue(intLiteral.getType().isSmallint());
    Assert.assertTrue(castIntLiteral.getType().isInt());
    Assert.assertFalse(intLiteral == castIntLiteral);
    IntLiteral castIntLiteral2 = (IntLiteral) intLiteral.uncheckedCastTo(Type.SMALLINT);
    Assert.assertTrue(intLiteral == castIntLiteral2);

    // null
    NullLiteral nullLiternal = NullLiteral.create(Type.DATE);
    Assert.assertTrue(nullLiternal.getType().isDate());
    NullLiteral castNullLiteral = (NullLiteral) nullLiternal.uncheckedCastTo(Type.DATETIME);
    Assert.assertTrue(nullLiternal.getType().isDate());
    Assert.assertTrue(castNullLiteral.getType().isDatetime());
    Assert.assertFalse(nullLiternal == castNullLiteral);
    NullLiteral castNullLiteral2 = (NullLiteral) nullLiternal.uncheckedCastTo(Type.DATE);
    Assert.assertTrue(nullLiternal == castNullLiteral2);

    // string
    StringLiteral stringLiteral = new StringLiteral("abc");
    Assert.assertTrue(stringLiteral.getType().isVarchar());
    StringLiteral castStringLiteral = (StringLiteral) stringLiteral.uncheckedCastTo(Type.CHAR);
    Assert.assertTrue(stringLiteral.getType().isVarchar());
    Assert.assertTrue(castStringLiteral.getType().isChar());
    Assert.assertFalse(stringLiteral == castStringLiteral);
    StringLiteral castStringLiteral2 = (StringLiteral) stringLiteral.uncheckedCastTo(Type.VARCHAR);
    Assert.assertTrue(stringLiteral == castStringLiteral2);
}
/**
 * Resolves a {@link CompletableSource} scope from the given lifecycle provider.
 *
 * <p>Convenience overload that delegates to the two-argument variant with the
 * boundary check enabled (the {@code true} argument — see that overload for its
 * exact semantics).
 *
 * @param provider the lifecycle provider to resolve a scope from
 * @return the resolved scope
 * @throws OutsideScopeException if the provider is outside its scope
 */
public static <@NonNull E> CompletableSource resolveScopeFromLifecycle(
    final LifecycleScopeProvider<E> provider) throws OutsideScopeException {
  final boolean checkEnabled = true;
  return resolveScopeFromLifecycle(provider, checkEnabled);
}
// When the lifecycle has already ended and the plugin's outside-scope handler
// rethrows, the subscriber must observe that exact exception.
@Test
public void lifecycleCheckEnd_shouldFailIfEndedWithThrowingHandler() {
  TestLifecycleScopeProvider lifecycle = TestLifecycleScopeProvider.createInitial(STOPPED);
  final RuntimeException expected = new RuntimeException("Expected");
  AutoDisposePlugins.setOutsideScopeHandler(
      e -> {
        // Throw it back
        throw expected;
      });
  // Identity match: the propagated error is the same instance the handler threw.
  testSource(resolveScopeFromLifecycle(lifecycle, true)).assertError(expected);
}
/**
 * Translates an in-memory {@link Measure} into its database row representation
 * for the current analysis.
 *
 * @param measure   the measure to persist
 * @param metric    metric the measure belongs to (uuid is copied to the row)
 * @param component component the measure was computed for (uuid is copied)
 * @return a fully populated {@code MeasureDto}
 */
public MeasureDto toMeasureDto(Measure measure, Metric metric, Component component) {
  final MeasureDto dto = new MeasureDto();
  dto.setMetricUuid(metric.getUuid());
  dto.setComponentUuid(component.getUuid());
  dto.setAnalysisUuid(analysisMetadataHolder.getUuid());
  // Quality-gate status (alert) fields are only populated when present.
  if (measure.hasQualityGateStatus()) {
    setAlert(dto, measure.getQualityGateStatus());
  }
  dto.setValue(valueAsDouble(measure));
  dto.setData(data(measure));
  return dto;
}
// For a LONG metric, both the numeric value and the data string must survive
// the Measure -> MeasureDto translation.
@Test
public void toMeasureDto_maps_value_and_data_from_data_field_for_LONG_metric() {
  MeasureDto trueMeasureDto =
      underTest.toMeasureDto(Measure.newMeasureBuilder().create((long) 456, SOME_DATA), SOME_LONG_METRIC, SOME_COMPONENT);

  assertThat(trueMeasureDto.getValue()).isEqualTo(456);
  assertThat(trueMeasureDto.getData()).isEqualTo(SOME_DATA);
}
/**
 * Decides whether invocations of this method must be wrapped in the generic
 * envelope (true) or can be exchanged as raw protobuf messages (false).
 *
 * <p>Generic ($invoke/$invokeAsync) and $echo calls are always wrapped. For
 * everything else the decision is driven by how many protobuf, plain-Java and
 * StreamObserver parameters the method declares, and whether the return type
 * is protobuf / reactive / a known async stub type.
 *
 * @param methodDescriptor descriptor of the candidate method
 * @param parameterClasses declared parameter types
 * @param returnClass      declared return type
 * @return true if calls must be wrapped
 * @throws IllegalStateException if the signature mixes protobuf and plain
 *         params, declares multiple stream/protobuf params, or is otherwise
 *         inconsistent with its RPC type
 */
static boolean needWrap(MethodDescriptor methodDescriptor, Class<?>[] parameterClasses, Class<?> returnClass) {
    String methodName = methodDescriptor.getMethodName();
    // generic call must be wrapped
    if (CommonConstants.$INVOKE.equals(methodName) || CommonConstants.$INVOKE_ASYNC.equals(methodName)) {
        return true;
    }
    // echo must be wrapped
    if ($ECHO.equals(methodName)) {
        return true;
    }
    boolean returnClassProtobuf = isProtobufClass(returnClass);
    // Response foo() — no params: wrap unless the return type is protobuf.
    if (parameterClasses.length == 0) {
        return !returnClassProtobuf;
    }
    int protobufParameterCount = 0;
    int javaParameterCount = 0;
    int streamParameterCount = 0;
    boolean secondParameterStream = false;
    // count normal and protobuf param
    for (int i = 0; i < parameterClasses.length; i++) {
        Class<?> parameterClass = parameterClasses[i];
        if (isProtobufClass(parameterClass)) {
            protobufParameterCount++;
        } else {
            if (isStreamType(parameterClass)) {
                if (i == 1) {
                    // remember position: server-stream requires the observer second.
                    secondParameterStream = true;
                }
                streamParameterCount++;
            } else {
                javaParameterCount++;
            }
        }
    }
    // more than one stream param
    if (streamParameterCount > 1) {
        throw new IllegalStateException("method params error: more than one Stream params. method=" + methodName);
    }
    // protobuf only support one param
    if (protobufParameterCount >= 2) {
        throw new IllegalStateException("method params error: more than one protobuf params. method=" + methodName);
    }
    // server stream support one normal param and one stream param
    if (streamParameterCount == 1) {
        if (javaParameterCount + protobufParameterCount > 1) {
            throw new IllegalStateException(
                    "method params error: server stream does not support more than one normal param."
                            + " method=" + methodName);
        }
        // server stream: void foo(Request, StreamObserver<Response>)
        if (!secondParameterStream) {
            throw new IllegalStateException(
                    "method params error: server stream's second param must be StreamObserver."
                            + " method=" + methodName);
        }
    }
    if (methodDescriptor.getRpcType() != MethodDescriptor.RpcType.UNARY) {
        if (MethodDescriptor.RpcType.SERVER_STREAM == methodDescriptor.getRpcType()) {
            if (!secondParameterStream) {
                throw new IllegalStateException(
                        "method params error:server stream's second param must be StreamObserver."
                                + " method=" + methodName);
            }
        }
        // param type must be consistent: streaming methods may not mix protobuf
        // and plain-Java styles between params and return type.
        if (returnClassProtobuf) {
            if (javaParameterCount > 0) {
                throw new IllegalStateException(
                        "method params error: both normal and protobuf param found. method=" + methodName);
            }
        } else {
            if (protobufParameterCount > 0) {
                throw new IllegalStateException("method params error method=" + methodName);
            }
        }
    } else {
        if (streamParameterCount > 0) {
            throw new IllegalStateException(
                    "method params error: unary method should not contain any StreamObserver."
                            + " method=" + methodName);
        }
        // Fully protobuf unary signature: no wrapping needed.
        if (protobufParameterCount > 0 && returnClassProtobuf) {
            return false;
        }
        // handler reactor or rxjava only consider gen by proto
        if (isMono(returnClass) || isRx(returnClass)) {
            return false;
        }
        // Fully plain-Java unary signature: must be wrapped.
        if (protobufParameterCount <= 0 && !returnClassProtobuf) {
            return true;
        }
        // handle grpc stub only consider gen by proto
        if (GRPC_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName()) && protobufParameterCount == 1) {
            return false;
        }
        // handle dubbo generated method: inspect the CompletableFuture's type arg.
        if (TRI_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName())) {
            Class<?> actualReturnClass =
                    (Class<?>) ((ParameterizedType) methodDescriptor.getMethod().getGenericReturnType())
                            .getActualTypeArguments()[0];
            boolean actualReturnClassProtobuf = isProtobufClass(actualReturnClass);
            if (actualReturnClassProtobuf && protobufParameterCount == 1) {
                return false;
            }
            if (!actualReturnClassProtobuf && protobufParameterCount == 0) {
                return true;
            }
        }
        // todo remove this in future
        boolean ignore = checkNeedIgnore(returnClass);
        if (ignore) {
            return protobufParameterCount != 1;
        }
        throw new IllegalStateException("method params error method=" + methodName);
    }
    // java param should be wrapped
    return javaParameterCount > 0;
}
// Declaring two protobuf parameters violates the single-protobuf-param rule and
// must be rejected with IllegalStateException during descriptor construction.
@Test
void testMultiProtoParameter() throws Exception {
    Method method =
            DescriptorService.class.getMethod("testMultiProtobufParameters", HelloReply.class, HelloReply.class);
    assertThrows(IllegalStateException.class, () -> {
        MethodDescriptor descriptor = new ReflectionMethodDescriptor(method);
        needWrap(descriptor);
    });
}
/**
 * Returns the first already-configured material whose class is assignable from
 * the supplied default's class, or the default itself when no such material
 * exists in this collection.
 *
 * @param defaultMaterial fallback material, also used for type matching
 * @return a matching existing material, or {@code defaultMaterial}
 */
<T extends MaterialConfig> T getExistingOrDefaultMaterial(T defaultMaterial) {
    final Class<?> wantedType = defaultMaterial.getClass();
    for (MaterialConfig candidate : this) {
        if (candidate.getClass().isAssignableFrom(wantedType)) {
            // Cast is safe for exact matches; kept unchecked as in the original contract.
            return (T) candidate;
        }
    }
    return defaultMaterial;
}
// An existing material of the same type (p2) wins over the supplied default
// (p1); when none exists, the default itself is returned.
@Test
public void shouldGetExistingOrDefaultMaterialCorrectly() {
    SvnMaterialConfig svn = svn("http://test.com", false);
    PackageMaterialConfig p1 = new PackageMaterialConfig("p1");
    PackageMaterialConfig p2 = new PackageMaterialConfig("p2");
    assertThat(new MaterialConfigs(svn, p2).getExistingOrDefaultMaterial(p1).getPackageId()).isEqualTo("p2");
    assertThat(new MaterialConfigs(svn).getExistingOrDefaultMaterial(p1).getPackageId()).isEqualTo("p1");
}
public String getPrefix(final Exchange exchange) { //if regex is set, prefix will not take effect if (ObjectHelper.isNotEmpty(getRegex(exchange))) { return null; } return getOption(BlobExchangeHeaders::getPrefixFromHeaders, configuration::getPrefix, exchange); }
// The prefix resolves from endpoint configuration first, and a PREFIX message
// header must override that configured value.
@Test
void testIfCorrectOptionsReturnedCorrectlyWithPrefixSet() {
    final BlobConfiguration configuration = new BlobConfiguration();

    // first case: when exchange is set
    final Exchange exchange = new DefaultExchange(context);
    final BlobConfigurationOptionsProxy configurationOptionsProxy = new BlobConfigurationOptionsProxy(configuration);
    configuration.setPrefix("test");

    assertEquals("test", configurationOptionsProxy.getPrefix(exchange));

    //test header override
    exchange.getIn().setHeader(BlobConstants.PREFIX, "test2");

    assertEquals("test2", configurationOptionsProxy.getPrefix(exchange));
}
/**
 * Converts this table into a flat list of {@code String} cell values.
 * Shorthand for {@code asList(String.class)}.
 *
 * @return the table's cells as strings
 */
public List<String> asList() {
    return asList(String.class);
}
// asList(type) must route through the registered converter, and the Class and
// Type overloads must behave identically.
@Test
void asList_delegates_to_converter() {
    DataTable table = createSingleColumnNumberTable();

    assertEquals(asList(1L, 2L), table.asList(Long.class));
    assertEquals(asList(1L, 2L), table.asList((Type) Long.class));
}
/**
 * Returns the null-indicator value configured for this type, or {@code null}
 * when the schema did not specify one (see the default-null-value test).
 *
 * @return the configured null value, possibly {@code null}
 */
public PrimitiveValue nullValue() {
    return nullValue;
}
// A <type> declaration without an explicit nullValue attribute must yield a
// null nullValue() on the parsed EncodedDataType.
@Test
void shouldReturnDefaultNullValueWhenSpecified() throws Exception {
    final String testXmlString = "<types>" +
        "    <type name=\"testTypeDefaultCharNullValue\" primitiveType=\"char\"/>" +
        "</types>";

    final Map<String, Type> map = parseTestXmlWithMap("/types/type", testXmlString);
    assertNull(((EncodedDataType)map.get("testTypeDefaultCharNullValue")).nullValue());
}
/**
 * Produces a deep copy of this transformation definition.
 *
 * <p>When {@code doClear} is true the clone is reset via {@code clear()} before
 * the collections are repopulated; otherwise the collections being replaced are
 * re-initialised explicitly. Steps, hops, notes, dependencies, slave servers,
 * cluster/partition schemas and named parameters are all cloned individually,
 * and step info-stream references are re-pointed at the cloned steps (PDI-15799).
 *
 * <p>NOTE(review): any failure is swallowed — the stack trace is printed and
 * {@code null} is returned. Callers must handle a {@code null} result;
 * consider proper logging/rethrow if the surrounding codebase allows it.
 *
 * @param doClear whether to fully clear the clone before repopulating it
 * @return the cloned TransMeta, or {@code null} if cloning failed
 */
public Object realClone( boolean doClear ) {
  try {
    TransMeta transMeta = (TransMeta) super.clone();
    if ( doClear ) {
      transMeta.clear();
    } else {
      // Clear out the things we're replacing below
      transMeta.databases = new ArrayList<>();
      transMeta.steps = new ArrayList<>();
      transMeta.hops = new ArrayList<>();
      transMeta.notes = new ArrayList<>();
      transMeta.dependencies = new ArrayList<>();
      transMeta.partitionSchemas = new ArrayList<>();
      transMeta.slaveServers = new ArrayList<>();
      transMeta.clusterSchemas = new ArrayList<>();
      transMeta.namedParams = new NamedParamsDefault();
      transMeta.stepChangeListeners = new ArrayList<>();
    }
    for ( DatabaseMeta db : databases ) {
      transMeta.addDatabase( (DatabaseMeta) db.clone() );
    }
    for ( StepMeta step : steps ) {
      transMeta.addStep( (StepMeta) step.clone() );
    }
    // PDI-15799: Step references are original yet. Set them to the clones.
    for ( StepMeta step : transMeta.getSteps() ) {
      final StepMetaInterface stepMetaInterface = step.getStepMetaInterface();
      if ( stepMetaInterface != null ) {
        final StepIOMetaInterface stepIOMeta = stepMetaInterface.getStepIOMeta();
        if ( stepIOMeta != null ) {
          for ( StreamInterface stream : stepIOMeta.getInfoStreams() ) {
            String streamStepName = stream.getStepname();
            if ( streamStepName != null ) {
              // Re-point the info stream at the cloned step with the same name.
              StepMeta streamStepMeta = transMeta.findStep( streamStepName );
              stream.setStepMeta( streamStepMeta );
            }
          }
        }
      }
    }
    for ( TransHopMeta hop : hops ) {
      transMeta.addTransHop( (TransHopMeta) hop.clone() );
    }
    for ( NotePadMeta note : notes ) {
      transMeta.addNote( (NotePadMeta) note.clone() );
    }
    for ( TransDependency dep : dependencies ) {
      transMeta.addDependency( (TransDependency) dep.clone() );
    }
    for ( SlaveServer slave : slaveServers ) {
      transMeta.getSlaveServers().add( (SlaveServer) slave.clone() );
    }
    for ( ClusterSchema schema : clusterSchemas ) {
      transMeta.getClusterSchemas().add( schema.clone() );
    }
    for ( PartitionSchema schema : partitionSchemas ) {
      transMeta.getPartitionSchemas().add( (PartitionSchema) schema.clone() );
    }
    for ( String key : listParameters() ) {
      transMeta.addParameterDefinition( key, getParameterDefault( key ), getParameterDescription( key ) );
    }
    return transMeta;
  } catch ( Exception e ) {
    e.printStackTrace();
    return null;
  }
}
// realClone(true) must survive a transformation that defines named parameters
// (i.e. the parameter re-registration loop must not fail after clear()).
@Test
public void testCloneWithParam() throws Exception {
  TransMeta transMeta = new TransMeta( "transFile", "myTrans" );
  transMeta.addParameterDefinition( "key", "defValue", "description" );
  Object clone = transMeta.realClone( true );
  assertNotNull( clone );
}
/**
 * Applies the given {@link OpenAPISpecFilter} to the spec and returns a filtered
 * clone, pruning path items, operations, tags and (optionally) unreferenced
 * component schemas.
 *
 * <p>Fixes two defects in the original:
 * <ul>
 *   <li>the webhooks loop looked each webhook item up in {@code getPaths()}
 *       instead of {@code getWebhooks()}, dropping any webhook whose name was
 *       not also a path;</li>
 *   <li>the tags copy null-guarded {@code filteredOpenAPI.getTags()} but then
 *       copied from the unfiltered {@code openAPI.getTags()}, which is
 *       inconsistent (and can NPE when only the filtered spec has tags).</li>
 * </ul>
 *
 * @param openAPI the spec to filter
 * @param filter  the filter to apply
 * @param params  request query parameters forwarded to the filter
 * @param cookies request cookies forwarded to the filter
 * @param headers request headers forwarded to the filter
 * @return the filtered clone, or {@code null} if the filter rejects the spec
 */
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params,
                      Map<String, String> cookies, Map<String, List<String>> headers) {
    OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers);
    if (filteredOpenAPI == null) {
        return filteredOpenAPI;
    }

    OpenAPI clone = new OpenAPI();
    clone.info(filteredOpenAPI.getInfo());
    clone.openapi(filteredOpenAPI.getOpenapi());
    clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect());
    clone.setSpecVersion(filteredOpenAPI.getSpecVersion());
    clone.setExtensions(filteredOpenAPI.getExtensions());
    clone.setExternalDocs(filteredOpenAPI.getExternalDocs());
    clone.setSecurity(filteredOpenAPI.getSecurity());
    clone.setServers(filteredOpenAPI.getServers());
    // FIX: copy tags from the filtered spec, matching the null guard above.
    clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(filteredOpenAPI.getTags()));

    final Set<String> allowedTags = new HashSet<>();
    final Set<String> filteredTags = new HashSet<>();
    Paths clonedPaths = new Paths();
    if (filteredOpenAPI.getPaths() != null) {
        for (String resourcePath : filteredOpenAPI.getPaths().keySet()) {
            PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath);

            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);

            if (clonedPathItem != null) {
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clonedPaths.addPathItem(resourcePath, clonedPathItem);
                }
            }
        }
        clone.paths(clonedPaths);
    }

    // Drop tags that only ever appeared on filtered-out operations.
    filteredTags.removeAll(allowedTags);
    final List<Tag> tags = clone.getTags();
    if (tags != null && !filteredTags.isEmpty()) {
        tags.removeIf(tag -> filteredTags.contains(tag.getName()));
        if (clone.getTags().isEmpty()) {
            clone.setTags(null);
        }
    }

    if (filteredOpenAPI.getWebhooks() != null) {
        for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) {
            // FIX: read the item from getWebhooks(); the original mistakenly read
            // from getPaths(), yielding null for webhook-only names.
            PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath);

            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);

            if (clonedPathItem != null) {
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clone.addWebhooks(resourcePath, clonedPathItem);
                }
            }
        }
    }

    if (filteredOpenAPI.getComponents() != null) {
        clone.components(new Components());
        clone.getComponents().setSchemas(filterComponentsSchema(filter, filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers));
        clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes());
        clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks());
        clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples());
        clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions());
        clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders());
        clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks());
        clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters());
        clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies());
        clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses());
        clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems());
    }

    if (filter.isRemovingUnreferencedDefinitions()) {
        clone = removeBrokenReferenceDefinitions(clone);
    }

    return clone;
}
// Self- and mutually-referencing schemas must survive unreferenced-definition
// removal without infinite recursion, and must not be pruned themselves.
@Test(description = "recursive models, e.g. A-> A or A-> B and B -> A should not result in stack overflow")
public void removeUnreferencedDefinitionsOfRecuriveModels() throws IOException {
    final OpenAPI openAPI = getOpenAPI(RESOURCE_RECURSIVE_MODELS);
    final RemoveUnreferencedDefinitionsFilter remover = new RemoveUnreferencedDefinitionsFilter();
    final OpenAPI filtered = new SpecFilter().filter(openAPI, remover, null, null, null);

    assertNotNull(filtered.getComponents().getSchemas().get("SelfReferencingModel"));
    assertNotNull(filtered.getComponents().getSchemas().get("IndirectRecursiveModelA"));
    assertNotNull(filtered.getComponents().getSchemas().get("IndirectRecursiveModelB"));
}
/**
 * Handles a slot request from the resource manager: validates the RM leader,
 * persists an allocation snapshot (for recovery), allocates the slot locally
 * and, on success, offers the task executor's slots to the job master.
 *
 * @param slotId            slot to allocate (may be a dynamic slot id)
 * @param jobId             job the slot is requested for
 * @param allocationId      unique id of this allocation
 * @param resourceProfile   resources the slot must provide
 * @param targetAddress     address of the job master to offer slots to
 * @param resourceManagerId fencing id of the requesting resource manager
 * @param timeout           RPC timeout (unused in this implementation)
 * @return acknowledged future, or an exceptionally completed future when the
 *         RM is unknown or the allocation fails
 */
@Override
public CompletableFuture<Acknowledge> requestSlot(
        final SlotID slotId,
        final JobID jobId,
        final AllocationID allocationId,
        final ResourceProfile resourceProfile,
        final String targetAddress,
        final ResourceManagerId resourceManagerId,
        final Time timeout) {
    // TODO: Filter invalid requests from the resource manager by using the
    // instance/registration Id

    // Scope all log output in this handler to the requesting job.
    try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
        log.info(
                "Receive slot request {} for job {} from resource manager with leader id {}.",
                allocationId,
                jobId,
                resourceManagerId);

        if (!isConnectedToResourceManager(resourceManagerId)) {
            // Fencing check: only the currently connected RM leader may request slots.
            final String message =
                    String.format(
                            "TaskManager is not connected to the resource manager %s.",
                            resourceManagerId);
            log.debug(message);
            return FutureUtils.completedExceptionally(new TaskManagerException(message));
        }

        // Snapshot is persisted before allocation so the slot can be recovered
        // after a task executor restart.
        tryPersistAllocationSnapshot(
                new SlotAllocationSnapshot(
                        slotId, jobId, targetAddress, allocationId, resourceProfile));

        try {
            final boolean isConnected =
                    allocateSlotForJob(
                            jobId, slotId, allocationId, resourceProfile, targetAddress);

            if (isConnected) {
                offerSlotsToJobManager(jobId);
            }

            return CompletableFuture.completedFuture(Acknowledge.get());
        } catch (SlotAllocationException e) {
            log.debug("Could not allocate slot for allocation id {}.", allocationId, e);
            return FutureUtils.completedExceptionally(e);
        }
    }
}
// Requesting a slot with a dynamic slot id and an enlarged resource profile
// must materialise a new slot (index 2) carrying that profile in the report,
// alongside the two statically configured default slots.
@Test
void testDynamicSlotAllocation() throws Exception {
    final AllocationID allocationId = new AllocationID();
    try (TaskExecutorTestingContext submissionContext = createTaskExecutorTestingContext(2)) {
        submissionContext.start();
        final CompletableFuture<Tuple3<ResourceID, InstanceID, SlotReport>>
                initialSlotReportFuture = new CompletableFuture<>();
        ResourceManagerId resourceManagerId =
                createAndRegisterResourceManager(initialSlotReportFuture);
        // Wait until registration with the RM completed.
        initialSlotReportFuture.get();
        final ResourceProfile resourceProfile =
                DEFAULT_RESOURCE_PROFILE.merge(
                        ResourceProfile.newBuilder().setCpuCores(0.1).build());

        TaskExecutorGateway selfGateway =
                submissionContext.taskExecutor.getSelfGateway(TaskExecutorGateway.class);

        requestSlot(
                selfGateway,
                jobId,
                allocationId,
                SlotID.getDynamicSlotID(ResourceID.generate()),
                resourceProfile,
                submissionContext.jobMasterGateway.getAddress(),
                resourceManagerId);

        ResourceID resourceId = ResourceID.generate();
        SlotReport slotReport = submissionContext.taskSlotTable.createSlotReport(resourceId);
        assertThat(slotReport)
                .containsExactlyInAnyOrder(
                        new SlotStatus(new SlotID(resourceId, 0), DEFAULT_RESOURCE_PROFILE),
                        new SlotStatus(new SlotID(resourceId, 1), DEFAULT_RESOURCE_PROFILE),
                        new SlotStatus(
                                new SlotID(resourceId, 2),
                                resourceProfile,
                                jobId,
                                allocationId));
    }
}
// Adds an XEP-0359 <stanza-id/> element (namespace urn:xmpp:sid:0) with a fresh
// UUID to the packet, unless stanza-id generation is disabled globally or for
// this stanza type. Any pre-existing stanza-id element whose 'by' matches 'self'
// is removed first, as the specification requires.
public static Packet ensureUniqueAndStableStanzaID( final Packet packet, final JID self )
{
    // Feature toggles: one global switch plus one per stanza type.
    // IQ and presence default to disabled, messages default to enabled.
    if ( !JiveGlobals.getBooleanProperty( "xmpp.sid.enabled", true ) ) {
        return packet;
    }
    if ( packet instanceof IQ && !JiveGlobals.getBooleanProperty( "xmpp.sid.iq.enabled", false ) ) {
        return packet;
    }
    if ( packet instanceof Message && !JiveGlobals.getBooleanProperty( "xmpp.sid.message.enabled", true ) ) {
        return packet;
    }
    if ( packet instanceof Presence && !JiveGlobals.getBooleanProperty( "xmpp.sid.presence.enabled", false ) ) {
        return packet;
    }

    // For IQs the stanza-id lives inside the child element; for message/presence
    // it is attached to the stanza element itself.
    final Element parentElement;
    if ( packet instanceof IQ ) {
        parentElement = ((IQ) packet).getChildElement();
    } else {
        parentElement = packet.getElement();
    }

    // The packet likely is an IQ result or error, which can, but are not required to have a child element.
    // To have a consistent behavior for these, we'll not add a stanza-ID here.
    if ( parentElement == null ) {
        Log.debug( "Unable to find appropriate element. Not adding stanza-id to packet: {}", packet );
        return packet;
    }

    // Stanza ID generating entities, which encounter a <stanza-id/> element where the 'by' attribute matches the 'by'
    // attribute they would otherwise set, MUST delete that element even if they are not adding their own stanza ID.
    final Iterator<Element> existingElementIterator = parentElement.elementIterator( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
    while (existingElementIterator.hasNext()) {
        final Element element = existingElementIterator.next();
        if (self.toString().equals( element.attributeValue( "by" ) ) ) {
            Log.warn( "Removing a 'stanza-id' element from an inbound stanza, as its 'by' attribute value matches the value that we would set. Offending stanza: {}", packet );
            existingElementIterator.remove();
        }
    }

    final String id = UUID.randomUUID().toString();
    Log.debug( "Using newly generated value '{}' for stanza that has id '{}'.", id, packet.getID() );
    final Element stanzaIdElement = parentElement.addElement( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
    stanzaIdElement.addAttribute( "id", id );
    stanzaIdElement.addAttribute( "by", self.toString() );
    return packet;
}
// Verifies that an existing stanza-id whose 'by' matches our own JID is replaced
// by a freshly generated UUID instead of being kept.
@Test
public void testOverwriteStanzaIDElement() throws Exception {
    // Setup fixture.
    final Packet input = new Message();
    final JID self = new JID( "foobar" );
    final String notExpected = "de305d54-75b4-431b-adb2-eb6b9e546013";
    final Element toOverwrite = input.getElement().addElement( "stanza-id", "urn:xmpp:sid:0" );
    toOverwrite.addAttribute( "by", self.toString() );
    toOverwrite.addAttribute( "id", notExpected );
    // Execute system under test.
    final Packet result = StanzaIDUtil.ensureUniqueAndStableStanzaID( input, self );
    // Verify results.
    assertNotNull( result );
    final Element stanzaIDElement = result.getElement().element( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
    assertNotNull( stanzaIDElement );
    assertDoesNotThrow(() -> UUID.fromString( stanzaIDElement.attributeValue( "id" ) ));
    assertNotEquals( notExpected, stanzaIDElement.attributeValue( "id" ) );
    assertEquals( self.toString(), stanzaIDElement.attributeValue( "by" ) );
}
/**
 * Parses the given schema text into a {@code Type}.
 *
 * @param schema the schema string to parse
 * @return the resolved type
 * @throws KsqlStatementException when the text cannot be parsed; carries the
 *         offending schema text for error reporting
 */
public Type parse(final String schema) {
    try {
        return getType(parseTypeContext(schema));
    } catch (final ParsingException e) {
        throw new KsqlStatementException(
            "Failed to parse schema",
            "Failed to parse: " + schema,
            schema,
            KsqlStatementException.Problem.STATEMENT,
            e
        );
    }
}
// Verifies that "ARRAY<INT>" parses to an array-of-INTEGER type.
@Test
public void shouldGetTypeFromIntArray() {
    // Given:
    final String schemaString = "ARRAY<INT>";
    // When:
    final Type type = parser.parse(schemaString);
    // Then:
    assertThat(type, is(new Type(SqlTypes.array(SqlTypes.INTEGER))));
}
// Delegates to the underlying map's set operation.
// NOTE(review): set() presumably skips returning the previous value (cheaper
// than put) — confirm against the underlying map's contract.
@Override
public void set(K key, V value) {
    map.set(key, value);
}
// Verifies that a value stored through the adapter is visible in the backing map.
@Test public void testSet() { adapter.set(23, "test"); assertEquals("test", map.get(23)); }
/**
 * Parses the tether tool's command line and launches a tethered map-reduce job.
 *
 * Fix: the reducer output schema was loaded via the deprecated
 * {@code Schema.parse(File)}; it now uses {@code new Schema.Parser().parse(File)},
 * matching how the optional map-output schema is already loaded below.
 *
 * @param ins  unused stdin stream
 * @param outs unused stdout stream
 * @param err  stream for error output (currently unused; see NOTE below)
 * @param args raw command-line arguments
 * @return 0 on success, -1 on argument-parsing failure
 */
@Override
public int run(InputStream ins, PrintStream outs, PrintStream err, List<String> args) throws Exception {
  String[] argarry = args.toArray(new String[0]);
  Options opts = new Options();
  Option helpopt = OptionBuilder.hasArg(false).withDescription("print this message").create("help");
  Option inopt = OptionBuilder.hasArg().isRequired().withDescription("comma-separated input paths").create("in");
  Option outopt = OptionBuilder.hasArg().isRequired().withDescription("The output path.").create("out");
  Option pargs = OptionBuilder.hasArg().withDescription(
      "A string containing the command line arguments to pass to the tethered process. String should be enclosed in quotes")
      .create("exec_args");
  Option popt = OptionBuilder.hasArg().isRequired().withDescription("executable program, usually in HDFS")
      .create("program");
  Option outscopt = OptionBuilder.withType(File.class).hasArg().isRequired()
      .withDescription("schema file for output of reducer").create("outschema");
  Option outscmapopt = OptionBuilder.withType(File.class).hasArg()
      .withDescription("(optional) map output schema file, if different from outschema").create("outschemamap");
  Option redopt = OptionBuilder.withType(Integer.class).hasArg().withDescription("(optional) number of reducers")
      .create("reduces");
  Option cacheopt = OptionBuilder.withType(Boolean.class).hasArg()
      .withDescription(
          "(optional) boolean indicating whether or not the executable should be distributed via distributed cache")
      .create("exec_cached");
  Option protoopt = OptionBuilder.hasArg()
      .withDescription("(optional) specifies the transport protocol 'http' or 'sasl'").create("protocol");
  opts.addOption(redopt);
  opts.addOption(outscopt);
  opts.addOption(popt);
  opts.addOption(pargs);
  opts.addOption(inopt);
  opts.addOption(outopt);
  opts.addOption(helpopt);
  opts.addOption(outscmapopt);
  opts.addOption(cacheopt);
  opts.addOption(protoopt);
  CommandLineParser parser = new GnuParser();
  CommandLine line = null;
  HelpFormatter formatter = new HelpFormatter();
  JobConf job = new JobConf();
  try {
    line = parser.parse(opts, argarry);
    if (line.hasOption("help")) {
      formatter.printHelp("tether", opts);
      return 0;
    }
    FileInputFormat.addInputPaths(job, line.getOptionValue("in"));
    FileOutputFormat.setOutputPath(job, new Path(line.getOptionValue("out")));
    List<String> exargs = null;
    Boolean cached = false;
    if (line.hasOption("exec_args")) {
      // exec_args is a single quoted string; split it into individual arguments.
      String[] splitargs = line.getOptionValue("exec_args").split(" ");
      exargs = new ArrayList<>(Arrays.asList(splitargs));
    }
    if (line.hasOption("exec_cached")) {
      cached = Boolean.parseBoolean(line.getOptionValue("exec_cached"));
    }
    TetherJob.setExecutable(job, new File(line.getOptionValue("program")), exargs, cached);
    File outschema = (File) line.getParsedOptionValue("outschema");
    // Use Schema.Parser instead of the deprecated static Schema.parse(File),
    // consistent with the outschemamap branch below.
    job.set(AvroJob.OUTPUT_SCHEMA, new Schema.Parser().parse(outschema).toString());
    if (line.hasOption("outschemamap")) {
      job.set(AvroJob.MAP_OUTPUT_SCHEMA,
          new Schema.Parser().parse((File) line.getParsedOptionValue("outschemamap")).toString());
    }
    if (line.hasOption("reduces")) {
      job.setNumReduceTasks((Integer) line.getParsedOptionValue("reduces"));
    }
    if (line.hasOption("protocol")) {
      TetherJob.setProtocol(job, line.getOptionValue("protocol"));
    }
  } catch (Exception exp) {
    // NOTE(review): error output goes to System.out rather than the supplied
    // 'err' stream — confirm whether callers rely on this before changing.
    System.out.println("Unexpected exception: " + exp.getMessage());
    formatter.printHelp("tether", opts);
    return -1;
  }
  TetherJob.runJob(job);
  return 0;
}
// End-to-end tether word-count: writes the schema and input files, runs
// TetherTool with a forked java subprocess, and validates the emitted counts.
@Test
void test() throws Exception {
    // Create the schema files.
    Schema outscheme = new Pair<Utf8, Long>(new Utf8(""), 0L).getSchema();
    // we need to write the schemas to a file
    File midscfile = new File(INPUT_DIR.getPath(), "midschema.avpr");
    try (FileWriter hf = new FileWriter(midscfile)) {
        hf.write(outscheme.toString());
    }
    JobConf job = new JobConf();
    String inputPathStr = INPUT_DIR.getPath();
    String outputPathStr = OUTPUT_DIR.getPath();
    Path outputPath = new Path(outputPathStr);
    outputPath.getFileSystem(job).delete(outputPath, true);
    // create the input file
    WordCountUtil.writeLinesFile(inputPathStr + "/lines.avro");
    // create a string of the arguments
    String execargs = "-classpath " + System.getProperty("java.class.path");
    execargs += " org.apache.avro.mapred.tether.WordCountTask";
    // Create a list of the arguments to pass to the full run method
    java.util.List<String> runargs = new java.util.ArrayList<>();
    runargs.addAll(java.util.Arrays.asList("--program", "java"));
    runargs.addAll(asList("--exec_args", '"' + execargs + '"'));
    runargs.addAll(asList("--exec_cached", "false"));
    runargs.addAll(asList("--in", inputPathStr));
    runargs.addAll(asList("--out", outputPath.toString()));
    runargs.addAll(asList("--outschema", midscfile.toString()));
    TetherTool tool = new TetherTool();
    tool.run(null, null, System.err, runargs);
    // TODO:: We should probably do some validation
    // validate the output
    int numWords = 0;
    DatumReader<Pair<Utf8, Long>> reader = new SpecificDatumReader<>();
    try (InputStream cin = new BufferedInputStream(new FileInputStream(outputPathStr + "/part-00000.avro"));
        DataFileStream<Pair<Utf8, Long>> counts = new DataFileStream<>(cin, reader)) {
        for (Pair<Utf8, Long> wc : counts) {
            Assertions.assertEquals(WordCountUtil.COUNTS.get(wc.key().toString()), wc.value(), wc.key().toString());
            numWords++;
        }
    }
    Assertions.assertEquals(WordCountUtil.COUNTS.size(), numWords);
}
@GetMapping("/user") public String getUserPath(SignupModel form, Model model) { model.addAttribute("name", form.getName()); model.addAttribute("email", form.getEmail()); return view.display(form); }
// Verifies GET /user echoes the submitted name/email parameters into the model.
@Test void testGetUserPath () throws Exception { this.mockMvc.perform(get("/user") .param("name", "Lily") .param("email", "Lily@email.com")) .andExpect(status().isOk()) .andExpect(model().attribute("name", "Lily")) .andExpect(model().attribute("email", "Lily@email.com")) .andReturn(); }
/**
 * Converts a {@link Date} to a {@link Timestamp}.
 *
 * Null stays null; an existing Timestamp is passed through unchanged; any other
 * Date is wrapped using its millisecond epoch value.
 */
public Timestamp convertDateToTimestamp( Date date ) throws KettleValueException {
  if ( date == null ) {
    return null;
  }
  return ( date instanceof Timestamp ) ? (Timestamp) date : new Timestamp( date.getTime() );
}
// Verifies that a null input date converts to a null timestamp.
@Test public void testConvertDateToTimestamp_Null() throws KettleValueException { ValueMetaTimestamp valueMetaTimestamp = new ValueMetaTimestamp(); assertNull( valueMetaTimestamp.convertDateToTimestamp( null ) ); }
// Converts core service/instance publish data into the open-API Instance view.
// Well-known extend-datum keys (custom instance id, enabled flag, weight) are
// promoted to dedicated fields; all other entries become instance metadata.
public static Instance parseToApiInstance(Service service, InstancePublishInfo instanceInfo) {
    Instance result = new Instance();
    result.setIp(instanceInfo.getIp());
    result.setPort(instanceInfo.getPort());
    result.setServiceName(NamingUtils.getGroupedName(service.getName(), service.getGroup()));
    result.setClusterName(instanceInfo.getCluster());
    Map<String, String> instanceMetadata = new HashMap<>(instanceInfo.getExtendDatum().size());
    for (Map.Entry<String, Object> entry : instanceInfo.getExtendDatum().entrySet()) {
        switch (entry.getKey()) {
            case Constants.CUSTOM_INSTANCE_ID:
                result.setInstanceId(entry.getValue().toString());
                break;
            case Constants.PUBLISH_INSTANCE_ENABLE:
                result.setEnabled((boolean) entry.getValue());
                break;
            case Constants.PUBLISH_INSTANCE_WEIGHT:
                result.setWeight((Double) entry.getValue());
                break;
            default:
                // Remaining entries are user metadata; null values are kept as null.
                instanceMetadata.put(entry.getKey(), null != entry.getValue() ? entry.getValue().toString() : null);
        }
    }
    result.setMetadata(instanceMetadata);
    result.setEphemeral(service.isEphemeral());
    result.setHealthy(instanceInfo.isHealthy());
    return result;
}
// Smoke test: conversion of publish info to an API instance yields a non-null result.
@Test void testParseToApiInstance() { Instance instance = InstanceUtil.parseToApiInstance(service, instancePublishInfo); assertNotNull(instance); }
// Builds a Call transform for the given caller and response coder.
// The caller is round-tripped through serialization up front so that
// non-serializable callers fail fast at construction time.
static <RequestT, ResponseT> Call<RequestT, ResponseT> of(
    Caller<RequestT, ResponseT> caller, Coder<ResponseT> responseTCoder) {
  final Caller<RequestT, ResponseT> serializableCaller =
      SerializableUtils.ensureSerializable(caller);
  final Configuration<RequestT, ResponseT> configuration =
      Configuration.<RequestT, ResponseT>builder()
          .setCaller(serializableCaller)
          .setResponseCoder(responseTCoder)
          .build();
  return new Call<>(configuration);
}
// Verifies a valid caller produces the expected response and zero failures.
@Test public void givenValidCaller_emitValidResponse() { Result<Response> result = pipeline .apply(Create.of(new Request("a"))) .apply(Call.of(new ValidCaller(), NON_DETERMINISTIC_RESPONSE_CODER)); PAssert.thatSingleton(result.getFailures().apply(Count.globally())).isEqualTo(0L); PAssert.that(result.getResponses()).containsInAnyOrder(new Response("a")); pipeline.run(); }
// Orders paths by root first, then by the name sequence, using the configured
// (possibly canonical-form) orderings.
@Override
public int compare(JimfsPath a, JimfsPath b) {
    return ComparisonChain.start()
        .compare(a.root(), b.root(), rootOrdering)
        .compare(a.names(), b.names(), namesOrdering)
        .result();
}
// Verifies that with canonical-form comparison enabled, ordering follows the
// canonical names (z > y > x) rather than the display names (a < b < c).
@Test public void testCompareTo_usingCanonicalForm() { PathService pathService = fakePathService(PathType.unix(), true); JimfsPath path1 = new JimfsPath(pathService, null, ImmutableList.of(Name.create("a", "z"))); JimfsPath path2 = new JimfsPath(pathService, null, ImmutableList.of(Name.create("b", "y"))); JimfsPath path3 = new JimfsPath(pathService, null, ImmutableList.of(Name.create("c", "x"))); assertThat(pathService.compare(path1, path2)).isEqualTo(1); assertThat(pathService.compare(path2, path3)).isEqualTo(1); }
@ConstantFunction(name = "add", argTypes = {DOUBLE, DOUBLE}, returnType = DOUBLE) public static ConstantOperator addDouble(ConstantOperator first, ConstantOperator second) { return ConstantOperator.createDouble(first.getDouble() + second.getDouble()); }
// Verifies 100.0 + 100.0 constant-folds to 200.0.
@Test public void addDouble() { assertEquals(200.0, ScalarOperatorFunctions.addDouble(O_DOUBLE_100, O_DOUBLE_100).getDouble(), 1); }
// Stores (or overwrites) a field value; a null value is stored as a null entry.
// Returns this notification to allow call chaining.
public Notification setFieldValue(String field, @Nullable String value) {
    fields.put(field, value);
    return this;
}
// Verifies toString renders both the notification type and its field map.
@Test void toString_shouldReturnTypeAndFields() { Notification notification1 = new Notification("type"); notification1.setFieldValue("key", "value1"); assertThat(notification1).hasToString("Notification{type='type', fields={key=value1}}"); }
// Runs the configured discovery script and returns the discovered GPUs as an
// unmodifiable set, one GPUInfo per non-blank comma-separated index in the output.
@Override
public Set<GPUInfo> retrieveResourceInfo(long gpuAmount) throws Exception {
    Preconditions.checkArgument(
            gpuAmount > 0,
            "The gpuAmount should be positive when retrieving the GPU resource information.");
    final Set<GPUInfo> gpuResources = new HashSet<>();
    String output = executeDiscoveryScript(discoveryScriptFile, gpuAmount, args);
    if (!output.isEmpty()) {
        String[] indexes = output.split(",");
        for (String index : indexes) {
            // Skip blank segments (e.g. produced by trailing commas).
            if (!StringUtils.isNullOrWhitespaceOnly(index)) {
                gpuResources.add(new GPUInfo(index.trim()));
            }
        }
    }
    LOG.info("Discover GPU resources: {}.", gpuResources);
    return Collections.unmodifiableSet(gpuResources);
}
// Verifies a discovery script exiting non-zero surfaces as a FlinkException.
@Test void testGPUDriverWithTestScriptExitWithNonZero() throws Exception { final Configuration config = new Configuration(); config.set(GPUDriverOptions.DISCOVERY_SCRIPT_PATH, TESTING_DISCOVERY_SCRIPT_PATH); config.set(GPUDriverOptions.DISCOVERY_SCRIPT_ARG, "--exit-non-zero"); final GPUDriver gpuDriver = new GPUDriver(config); assertThatThrownBy(() -> gpuDriver.retrieveResourceInfo(1)) .isInstanceOf(FlinkException.class); }
// Returns all entries by delegating to the filtered variant.
// NOTE(review): the null argument presumably means "no filter" — confirm with
// the overload's contract.
@Override
public Set<java.util.Map.Entry<K, V>> entrySet() {
    return entrySet(null);
}
// Verifies entrySet size and contents match an equivalent in-memory HashMap.
@Test public void testEntrySet() { Map<Integer, String> map = redisson.getMap("simple12"); map.put(1, "12"); map.put(2, "33"); map.put(3, "43"); assertThat(map.entrySet().size()).isEqualTo(3); Map<Integer, String> testMap = new HashMap<Integer, String>(map); assertThat(map.entrySet()).containsExactlyElementsOf(testMap.entrySet()); }
/**
 * Materializes a store using the provided {@link DslStoreSuppliers}.
 *
 * @throws NullPointerException (message "store type can't be null") when the
 *         supplied store suppliers reference is null
 */
public static <K, V, S extends StateStore> Materialized<K, V, S> as(final DslStoreSuppliers storeSuppliers) {
    if (storeSuppliers == null) {
        throw new NullPointerException("store type can't be null");
    }
    return new Materialized<>(storeSuppliers);
}
// Verifies the WindowBytesStoreSupplier overload rejects null with the expected message.
@Test public void shouldThrowNullPointerIfWindowBytesStoreSupplierIsNull() { final NullPointerException e = assertThrows(NullPointerException.class, () -> Materialized.as((WindowBytesStoreSupplier) null)); assertEquals(e.getMessage(), "supplier can't be null"); }
// Renders this alert as an Office 365 connector "MessageCard" JSON payload,
// with one {name, value} fact object per entry in _facts.
@Override
public String toString() {
    final StringBuilder facts = new StringBuilder();
    for (java.util.Map.Entry<String, String> fact : _facts.entrySet()) {
        if (facts.length() > 0) {
            facts.append(",");
        }
        facts.append("{\"name\": \"").append(fact.getKey())
             .append("\", \"value\": \"").append(fact.getValue()).append("\"}");
    }
    return "{\"@type\": \"MessageCard\","
        + "\"@context\": \"http://schema.org/extensions\","
        + "\"themeColor\": \"0076D7\","
        + "\"summary\": \"Cruise-Control Alert\","
        + "\"sections\": [{"
        + "\"facts\": [" + facts + "]}]}";
}
// Verifies the exact MessageCard JSON produced for a single fact entry.
@Test public void testMSTeamsMessageJsonFormat() { String expectedJson = "{\"@type\": \"MessageCard\",\"@context\": \"http://schema.org/extensions\"," + "\"themeColor\": \"0076D7\",\"summary\": \"Cruise-Control Alert\",\"sections\": " + "[{\"facts\": [{\"name\": \"title1\", \"value\": \"description1\"}]}]}"; assertEquals(expectedJson, new MSTeamsMessage(Map.of("title1", "description1")).toString()); }
// Builds a human-readable failure message for a controller event, describing the
// internal (and optionally the externally visible) exception, whether the failure
// caused a failover at the given epoch, and how long the event ran if it started.
public String failureMessage(
    int epoch,
    OptionalLong deltaUs,
    boolean isActiveController,
    long lastCommittedOffset
) {
    StringBuilder bld = new StringBuilder();
    // An absent delta means the event never started processing.
    if (deltaUs.isPresent()) {
        bld.append("event failed with ");
    } else {
        bld.append("event unable to start processing because of ");
    }
    bld.append(internalException.getClass().getSimpleName());
    if (externalException.isPresent()) {
        bld.append(" (treated as ").
            append(externalException.get().getClass().getSimpleName()).append(")");
    }
    if (causesFailover()) {
        bld.append(" at epoch ").append(epoch);
    }
    if (deltaUs.isPresent()) {
        bld.append(" in ").append(deltaUs.getAsLong()).append(" microseconds");
    }
    // Failover wording depends on whether this node was the active controller.
    if (causesFailover()) {
        if (isActiveController) {
            bld.append(". Renouncing leadership and reverting to the last committed offset ");
            bld.append(lastCommittedOffset);
        } else {
            bld.append(". The controller is already in standby mode");
        }
    }
    bld.append(".");
    // Non-fault exceptions carry user-meaningful messages worth surfacing.
    if (!isFault && internalException.getMessage() != null) {
        bld.append(" Exception message: ");
        bld.append(internalException.getMessage());
    }
    return bld.toString();
}
// Verifies the full failure message for an NPE on the active controller.
@Test public void testNullPointerExceptionFailureMessageWhenActive() { assertEquals("event failed with NullPointerException (treated as UnknownServerException) " + "at epoch 123 in 40 microseconds. Renouncing leadership and reverting to the last " + "committed offset 456.", NULL_POINTER.failureMessage(123, OptionalLong.of(40L), true, 456L)); }
// FEEL years-and-months duration: the Period between two temporals, truncated to
// whole years and months (days zeroed). Returns an error result for null inputs
// or temporal types that cannot be reduced to a LocalDate.
public FEELFnResult<TemporalAmount> invoke(@ParameterName("from") Temporal from, @ParameterName("to") Temporal to) {
    if ( from == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
    }
    if ( to == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "cannot be null"));
    }
    final LocalDate fromDate = getLocalDateFromTemporal(from);
    if (fromDate == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "is of type not suitable for years and months function"));
    }
    final LocalDate toDate = getLocalDateFromTemporal(to);
    if (toDate == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "is of type not suitable for years and months function"));
    }
    // withDays(0): only the year/month components are meaningful for this function.
    return FEELFnResult.ofResult(new ComparablePeriod(Period.between(fromDate, toDate).withDays(0)));
}
// Verifies year/month durations for YearMonth-to-Year inputs, forward and backward.
@Test void invokeYearMonth() { FunctionTestUtil.assertResult( yamFunction.invoke(YearMonth.of(2017, 6), Year.of(2020)), ComparablePeriod.of(2, 7, 0)); FunctionTestUtil.assertResult( yamFunction.invoke(YearMonth.of(2017, 6), Year.of(2014)), ComparablePeriod.of(-3, -5, 0)); }
// Generates the bytecode of a call-stub class that batch-invokes the given
// aggregate update method. The generator steps must run in this order:
// declare the class, emit the batch-update body, then finalize.
public static byte[] generateCallStubV(Class<?> clazz, Method method) {
    final AggBatchCallGenerator generator = new AggBatchCallGenerator(clazz, method);
    generator.declareCallStubClazz();
    generator.genBatchUpdateSingle();
    generator.finish();
    return generator.getByteCode();
}
// Generates a batch-call stub for a two-argument aggregate update and verifies
// that invoking it over 1000 rows accumulates the expected concatenated state.
@Test public void testAggCallSingleStubWithMultiParameters() throws NoSuchMethodException, ClassNotFoundException, InvocationTargetException, IllegalAccessException { Class<?> clazz = SumStringConcat.class; final String genClassName = CallStubGenerator.CLAZZ_NAME.replace("/", "."); Method m = clazz.getMethod("update", SumStringConcat.State.class, String.class, Integer.class); final byte[] updates = CallStubGenerator.generateCallStubV(clazz, m); ClassLoader classLoader = new TestClassLoader(genClassName, updates); final Class<?> stubClazz = classLoader.loadClass(genClassName); Method batchCall = getFirstMethod(stubClazz, "batchCallV"); SumStringConcat concat = new SumStringConcat(); SumStringConcat.State state = new SumStringConcat.State(); int testSize = 1000; String expect = ""; String[] inputs1 = new String[testSize]; Integer[] inputs2 = new Integer[testSize]; for (int i = 0; i < testSize; i++) { inputs1[i] = i + ""; inputs2[i] = i; expect += inputs1[i] + inputs2[i]; } batchCall.invoke(null, testSize, concat, state, inputs1, inputs2); Assertions.assertEquals(expect, state.val); }
// Fetches the column value from the merged result set without a concrete
// conversion target (Object.class means "as-is").
@Override
public Object getObject(final int columnIndex) throws SQLException {
    return mergeResultSet.getValue(columnIndex, Object.class);
}
// Verifies getObject delegates correctly for both primitive and boxed Float targets.
@Test void assertGetObjectWithFloat() throws SQLException { float result = 0.0F; when(mergeResultSet.getValue(1, float.class)).thenReturn(result); assertThat(shardingSphereResultSet.getObject(1, float.class), is(result)); when(mergeResultSet.getValue(1, Float.class)).thenReturn(result); assertThat(shardingSphereResultSet.getObject(1, Float.class), is(result)); }
// Looks up an account by its phone-number identifier (PNI) through the PNI
// constraint table, recording the lookup latency under GET_BY_PNI_TIMER.
@Nonnull
public CompletableFuture<Optional<Account>> getByPhoneNumberIdentifierAsync(final UUID phoneNumberIdentifier) {
    return getByIndirectLookupAsync(GET_BY_PNI_TIMER,
        phoneNumberIdentifierConstraintTableName,
        ATTR_PNI_UUID,
        AttributeValues.fromUUID(phoneNumberIdentifier));
}
// Verifies an unknown PNI yields empty and a stored account's PNI resolves it.
@Test void getByPhoneNumberIdentifierAsync() { assertThat(accounts.getByPhoneNumberIdentifierAsync(UUID.randomUUID()).join()).isEmpty(); final Account account = generateAccount("+14151112222", UUID.randomUUID(), UUID.randomUUID(), List.of(generateDevice(DEVICE_ID_1))); createAccount(account); assertThat(accounts.getByPhoneNumberIdentifierAsync(account.getPhoneNumberIdentifier()).join()).isPresent(); }
// Concatenates two arrays. NULL is returned only when both sides are NULL;
// otherwise a NULL side is treated as an empty array.
@Udf
public <T> List<T> concat(
    @UdfParameter(description = "First array of values") final List<T> left,
    @UdfParameter(description = "Second array of values") final List<T> right) {
  if (left == null && right == null) {
    return null;
  }
  final List<T> combined = new ArrayList<>(
      (left == null ? 0 : left.size()) + (right == null ? 0 : right.size()));
  if (left != null) {
    combined.addAll(left);
  }
  if (right != null) {
    combined.addAll(right);
  }
  return combined;
}
// Verifies concat with a null right side returns the left side's elements.
@Test public void shouldReturnNonNullForNullRightInput() { final List<String> input1 = Arrays.asList("foo"); final List<String> result = udf.concat(input1, null); assertThat(result, is(Arrays.asList("foo"))); }
// Starts (idempotently) listening for property updates of the given service;
// the callback is invoked once the listener is established.
@Override
public void listenToService(final String serviceName, final LoadBalancerStateListenerCallback callback) {
    trace(_log, "listenToService: ", serviceName);
    _serviceSubscriber.ensureListening(serviceName, callback);
}
@Test(groups = { "small", "back-end" }) public void testListenToService() throws InterruptedException { reset(); assertFalse(_state.isListeningToService("service-1")); assertNull(_state.getServiceProperties("service-1")); final CountDownLatch latch = new CountDownLatch(1); LoadBalancerStateListenerCallback callback = new LoadBalancerStateListenerCallback() { @Override public void done(int type, String name) { latch.countDown(); } }; _state.listenToService("service-1", callback); if (!latch.await(5, TimeUnit.SECONDS)) { fail("didn't get callback when listenToService was called"); } assertTrue(_state.isListeningToService("service-1")); assertNotNull(_state.getServiceProperties("service-1")); assertNull(_state.getServiceProperties("service-1").getProperty()); ServiceProperties property = new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random")); _serviceRegistry.put("service-1", property); assertTrue(_state.isListeningToService("service-1")); assertNotNull(_state.getServiceProperties("service-1")); assertEquals(_state.getServiceProperties("service-1").getProperty(), property); }
// Applies a domain-intent operation context: updates resource tracking for the
// outgoing and incoming intent data, then submits the corresponding domain
// operations, reporting success/failure back to the install coordinator.
@Override
public void apply(IntentOperationContext<DomainIntent> context) {
    Optional<IntentData> toUninstall = context.toUninstall();
    Optional<IntentData> toInstall = context.toInstall();
    List<DomainIntent> uninstallIntents = context.intentsToUninstall();
    List<DomainIntent> installIntents = context.intentsToInstall();
    // Nothing to install or uninstall: report immediate success.
    if (!toInstall.isPresent() && !toUninstall.isPresent()) {
        intentInstallCoordinator.intentInstallSuccess(context);
        return;
    }
    // Stop tracking resources of the intent being removed and its installables.
    if (toUninstall.isPresent()) {
        IntentData intentData = toUninstall.get();
        trackerService.removeTrackedResources(intentData.key(), intentData.intent().resources());
        uninstallIntents.forEach(installable ->
            trackerService.removeTrackedResources(intentData.intent().key(),
                installable.resources()));
    }
    // Track resources of the intent being added and its installables.
    if (toInstall.isPresent()) {
        IntentData intentData = toInstall.get();
        trackerService.addTrackedResources(intentData.key(), intentData.intent().resources());
        installIntents.forEach(installable ->
            trackerService.addTrackedResources(intentData.key(),
                installable.resources()));
    }
    // Generate domain Intent operations
    DomainIntentOperations.Builder builder = DomainIntentOperations.builder();
    DomainIntentOperationsContext domainOperationsContext;
    uninstallIntents.forEach(builder::remove);
    installIntents.forEach(builder::add);
    // Callback context translating the operation outcome into coordinator calls.
    domainOperationsContext = new DomainIntentOperationsContext() {
        @Override
        public void onSuccess(DomainIntentOperations idops) {
            intentInstallCoordinator.intentInstallSuccess(context);
        }
        @Override
        public void onError(DomainIntentOperations idos) {
            intentInstallCoordinator.intentInstallFailed(context);
        }
    };
    log.debug("submitting domain intent {} -> {}",
        toUninstall.map(x -> x.key().toString()).orElse("<empty>"),
        toInstall.map(x -> x.key().toString()).orElse("<empty>"));
    // Submit domain Intent operations with the domain context.
    // NOTE(review): 'sumbit' appears to be the (misspelled) method name on the
    // DomainIntentService interface — do not "fix" without changing the interface.
    domainIntentService.sumbit(builder.build(domainOperationsContext));
}
// Verifies an empty operation context is reported as an immediate success.
@Test public void testNoAnyIntentToApply() { IntentData toInstall = null; IntentData toUninstall = null; IntentOperationContext<DomainIntent> operationContext; IntentInstallationContext context = new IntentInstallationContext(toUninstall, toInstall); operationContext = new IntentOperationContext<>(ImmutableList.of(), ImmutableList.of(), context); installer.apply(operationContext); IntentOperationContext successContext = intentInstallCoordinator.successContext; assertEquals(successContext, operationContext); }
// Validates this tab: both 'name' and 'path' are required. Any problems are
// recorded against this tab's location within the parent configuration.
@Override
public void getErrors(ErrorCollection errors, String parentLocation) {
    String location = this.getLocation(parentLocation);
    errors.checkMissing(location, "name", name);
    errors.checkMissing(location, "path", path);
}
// Verifies a CRTab deserializes from API-style JSON and passes validation.
@Test
public void shouldDeserializeFromAPILikeObject() {
    String json = """
        {
          "name": "cobertura",
          "path": "target/site/cobertura/index.html"
        }""";
    CRTab deserializedValue = gson.fromJson(json,CRTab.class);
    assertThat(deserializedValue.getName(),is("cobertura"));
    assertThat(deserializedValue.getPath(),is("target/site/cobertura/index.html"));
    ErrorCollection errors = deserializedValue.getErrors();
    assertTrue(errors.isEmpty());
}
// Equality over hostname (never null) and the nullable services list,
// restricted to exactly this class (getClass comparison, not instanceof).
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    final HostInfo other = (HostInfo) o;
    return hostname.equals(other.hostname)
            && java.util.Objects.equals(services, other.services);
}
// Verifies equality holds for identical host/services and fails when either
// the service list contents or the hostname differ.
@Test public void testEquals() { HostInfo a = new HostInfo("foo.yahoo.com", List.of(new ServiceInfo("foo", "bar", null, null, "config-id", "host-name"))); HostInfo b = new HostInfo("foo.yahoo.com", List.of(new ServiceInfo("foo", "bar", null, null, "config-id", "host-name"))); HostInfo c = new HostInfo("foo.yahoo.com", List.of(new ServiceInfo("foo", "baz", null, null, "config-id", "host-name"))); HostInfo d = new HostInfo("foo.yahoo.com", List.of(new ServiceInfo("bar", "baz", null, null, "config-id", "host-name"))); HostInfo e = new HostInfo("bar.yahoo.com", null); assertEquals(a, b); assertNotEquals(a, c); assertNotEquals(a, d); assertNotEquals(a, d); assertNotEquals(c, d); assertNotEquals(c, e); assertNotEquals(d, e); }
// Extracts rate-limit info from HTTP response headers. Returns empty when the
// headers are null or any of the three expected rate-limit headers is missing.
static Optional<RateLimitInfo> createRateLimitFromHeaders(Headers headers) {
    if (headers == null) {
        return Optional.empty();
    }
    Set<String> names = headers.names();
    // All three headers must be present for the info to be meaningful.
    if (!names.containsAll(
        Arrays.asList(NS_RATELIMIT_LIMIT, NS_RATELIMIT_REMAINING, NS_RATELIMIT_RESET))) {
        return Optional.empty();
    }
    return RateLimitInfo.createFromHeaders(
        headers.get(NS_RATELIMIT_LIMIT),
        headers.get(NS_RATELIMIT_REMAINING),
        headers.get(NS_RATELIMIT_RESET));
}
// Verifies the three rate-limit headers parse into the expected limit, remaining, and reset values.
@Test public void testParseHeaders() { Optional<RateLimitInfo> info = NodesmithHttpService.createRateLimitFromHeaders( Headers.of( NodesmithHttpService.NS_RATELIMIT_LIMIT, "500", NodesmithHttpService.NS_RATELIMIT_REMAINING, "442", NodesmithHttpService.NS_RATELIMIT_RESET, "1553385403")); assertTrue(info.isPresent()); assertEquals(500, info.get().getTotalAllowedInWindow()); assertEquals(442, info.get().getRemainingInWindow()); assertEquals(1553385403L, info.get().getWindowResetTime().getEpochSecond()); }
// Wraps the default path processing in the find-expression lifecycle:
// configure options, prepare, walk all paths (super), then finish.
@Override
protected void processArguments(LinkedList<PathData> args) throws IOException {
    Expression expr = getRootExpression();
    expr.setOptions(getOptions());
    expr.prepare();
    super.processArguments(args);
    expr.finish();
}
// Verifies the expression lifecycle ordering (setOptions, prepare, apply per item
// in depth-first order, finish) and that every item's status is checked exactly once.
@Test public void processArguments() throws IOException { LinkedList<PathData> items = createDirectories(); Find find = new Find(); find.setConf(conf); PrintStream out = mock(PrintStream.class); find.getOptions().setOut(out); PrintStream err = mock(PrintStream.class); find.getOptions().setErr(err); Expression expr = mock(Expression.class); when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS); FileStatusChecker fsCheck = mock(FileStatusChecker.class); Expression test = new TestExpression(expr, fsCheck); find.setRootExpression(test); find.processArguments(items); InOrder inOrder = inOrder(expr); inOrder.verify(expr).setOptions(find.getOptions()); inOrder.verify(expr).prepare(); inOrder.verify(expr).apply(item1, 0); inOrder.verify(expr).apply(item1a, 1); inOrder.verify(expr).apply(item1aa, 2); inOrder.verify(expr).apply(item1b, 1); inOrder.verify(expr).apply(item2, 0); inOrder.verify(expr).apply(item3, 0); inOrder.verify(expr).apply(item4, 0); inOrder.verify(expr).apply(item5, 0); inOrder.verify(expr).apply(item5a, 1); inOrder.verify(expr).apply(item5b, 1); inOrder.verify(expr).apply(item5c, 1); inOrder.verify(expr).apply(item5ca, 2); inOrder.verify(expr).apply(item5d, 1); inOrder.verify(expr).apply(item5e, 1); inOrder.verify(expr).finish(); verifyNoMoreInteractions(expr); InOrder inOrderFsCheck = inOrder(fsCheck); inOrderFsCheck.verify(fsCheck).check(item1.stat); inOrderFsCheck.verify(fsCheck).check(item1a.stat); inOrderFsCheck.verify(fsCheck).check(item1aa.stat); inOrderFsCheck.verify(fsCheck).check(item1b.stat); inOrderFsCheck.verify(fsCheck).check(item2.stat); inOrderFsCheck.verify(fsCheck).check(item3.stat); inOrderFsCheck.verify(fsCheck).check(item4.stat); inOrderFsCheck.verify(fsCheck).check(item5.stat); inOrderFsCheck.verify(fsCheck).check(item5a.stat); inOrderFsCheck.verify(fsCheck).check(item5b.stat); inOrderFsCheck.verify(fsCheck).check(item5c.stat); inOrderFsCheck.verify(fsCheck).check(item5ca.stat); inOrderFsCheck.verify(fsCheck).check(item5d.stat); inOrderFsCheck.verify(fsCheck).check(item5e.stat); verifyNoMoreInteractions(fsCheck); verifyNoMoreInteractions(out); verifyNoMoreInteractions(err); }
// Returns the current total of bytes sent, read from the counter's long value.
public long getBytesOutCounter() {
    return bytesOutCounter.longValue();
}
// Verifies updateStats propagates the bytes-out counter to the consumer.
@Test public void testGetBytesOutCounter() { stats.bytesOutCounter = 1L; consumer.updateStats(stats); assertEquals(consumer.getBytesOutCounter(), 1L); }
// Infers the key generator type from configured record-key and partition fields:
// absent record keys delegate to the auto-key-gen inference; otherwise SIMPLE for
// exactly one key field and one partition field, COMPLEX when either side has
// multiple fields, and NON_PARTITION when no partition fields are configured.
public static KeyGeneratorType inferKeyGeneratorType(
    Option<String> recordsKeyFields, String partitionFields) {
  boolean autoGenerateRecordKeys = !recordsKeyFields.isPresent();
  if (autoGenerateRecordKeys) {
    return inferKeyGeneratorTypeForAutoKeyGen(partitionFields);
  } else {
    if (!StringUtils.isNullOrEmpty(partitionFields)) {
      // Field lists are comma-separated.
      int numPartFields = partitionFields.split(",").length;
      int numRecordKeyFields = recordsKeyFields.get().split(",").length;
      if (numPartFields == 1 && numRecordKeyFields == 1) {
        return KeyGeneratorType.SIMPLE;
      }
      return KeyGeneratorType.COMPLEX;
    }
    return KeyGeneratorType.NON_PARTITION;
  }
}
// Verifies SIMPLE/COMPLEX/NON_PARTITION inference across key/partition field combinations.
@Test public void testInferKeyGeneratorType() { assertEquals( KeyGeneratorType.SIMPLE, KeyGenUtils.inferKeyGeneratorType(Option.of("col1"), "partition1")); assertEquals( KeyGeneratorType.COMPLEX, KeyGenUtils.inferKeyGeneratorType(Option.of("col1"), "partition1,partition2")); assertEquals( KeyGeneratorType.COMPLEX, KeyGenUtils.inferKeyGeneratorType(Option.of("col1,col2"), "partition1")); assertEquals( KeyGeneratorType.COMPLEX, KeyGenUtils.inferKeyGeneratorType(Option.of("col1,col2"), "partition1,partition2")); assertEquals( KeyGeneratorType.NON_PARTITION, KeyGenUtils.inferKeyGeneratorType(Option.of("col1,col2"), "")); assertEquals( KeyGeneratorType.NON_PARTITION, KeyGenUtils.inferKeyGeneratorType(Option.of("col1,col2"), null)); }
public void addAll(List<String> buttonNames) { buttonNames.forEach(name -> super.add(new KeyboardButton(name))); }
@Test void shouldAddAllButtons() { final KeyboardRow keyboardRow = new KeyboardRow(); keyboardRow.addAll(BUTTON_NAMES); assertEquals(2, keyboardRow.size()); assertEquals("Carlotta Valdes", keyboardRow.get(0).getText()); assertEquals("Jimmy Stewart", keyboardRow.get(1).getText()); }
public static String stripNameSpacePrefixes(String path) { if (path.indexOf(':') == -1) { return path; } StringBuilder sb = new StringBuilder(); for (String s : StringUtils.split(path, '/', false)) { sb.append('/'); int pos = s.lastIndexOf(':'); if (pos == -1) { sb.append(s); } else { sb.append(s.substring(pos + 1)); } } return sb.toString(); }
@Test void testStripNameSpacePrefixes() { assertEquals("/", XmlUtils.stripNameSpacePrefixes("/")); assertEquals("/foo", XmlUtils.stripNameSpacePrefixes("/foo")); assertEquals("/bar", XmlUtils.stripNameSpacePrefixes("/foo:bar")); assertEquals("/bar/baz", XmlUtils.stripNameSpacePrefixes("/foo:bar/foo:baz")); assertEquals("/bar/baz/@ban", XmlUtils.stripNameSpacePrefixes("/foo:bar/foo:baz/@ban")); }
private static void loadBalancer(XmlGenerator gen, LoadBalancer loadBalancer) { if (loadBalancer == null) { return; } String type; if (loadBalancer instanceof RandomLB) { type = "random"; } else if (loadBalancer instanceof RoundRobinLB) { type = "round-robin"; } else { type = "custom"; } if ("custom".equals(type)) { gen.node("load-balancer", loadBalancer.getClass().getName(), "type", type); } else { gen.node("load-balancer", null, "type", type); } }
@Test public void loadBalancer() { clientConfig.setLoadBalancer(new RandomLB()); ClientConfig newClientConfig = newConfigViaGenerator(); LoadBalancer actual = newClientConfig.getLoadBalancer(); assertTrue(actual instanceof RandomLB); String actualClassName = newClientConfig.getLoadBalancerClassName(); assertNull(actualClassName); }
public boolean completeJoinFuture( ClassicGroupMember member, JoinGroupResponseData response ) { if (member.isAwaitingJoin()) { member.awaitingJoinFuture().complete(response); member.setAwaitingJoinFuture(null); numMembersAwaitingJoinResponse--; return true; } return false; }
@Test public void testCompleteJoinFuture() throws Exception { JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestProtocolCollection(); protocols.add(new JoinGroupRequestProtocol() .setName("roundrobin") .setMetadata(new byte[0])); ClassicGroupMember member = new ClassicGroupMember( memberId, Optional.empty(), clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, protocolType, protocols ); CompletableFuture<JoinGroupResponseData> joinGroupFuture = new CompletableFuture<>(); group.add(member, joinGroupFuture); assertTrue(group.hasAllMembersJoined()); assertTrue( group.completeJoinFuture(member, new JoinGroupResponseData() .setMemberId(member.memberId()) .setErrorCode(Errors.NONE.code())) ); assertEquals(Errors.NONE.code(), joinGroupFuture.get().errorCode()); assertEquals(memberId, joinGroupFuture.get().memberId()); assertFalse(member.isAwaitingJoin()); assertEquals(0, group.numAwaitingJoinResponse()); }
public static void disableConsumption(KafkaConsumerWrapper kafkaConsumerWrapper, Set<String> prohibitionTopics) { Set<String> originalTopics = kafkaConsumerWrapper.getOriginalTopics(); // Not subscribed to any Topic, so no action is required if (originalTopics.size() == 0) { return; } Collection<TopicPartition> originalPartitions = kafkaConsumerWrapper.getOriginalPartitions(); KafkaConsumer<?, ?> kafkaConsumer = kafkaConsumerWrapper.getKafkaConsumer(); Collection<String> subtractTopics = CollectionUtils.subtract(originalTopics, prohibitionTopics); if (kafkaConsumerWrapper.isAssign()) { kafkaConsumer.assign(originalPartitions.stream().filter(obj -> subtractTopics.contains(obj.topic())) .collect(Collectors.toSet())); return; } kafkaConsumer.subscribe(subtractTopics); }
@Test public void testDisableConsumptionByAssign() { KafkaConsumer<?, ?> mockConsumer = Mockito.mock(KafkaConsumer.class); KafkaConsumerWrapper kafkaConsumerWrapper = new KafkaConsumerWrapper(mockConsumer); HashSet<String> originalTopics = new HashSet<>(); originalTopics.add("testTopic-1"); originalTopics.add("testTopic-2"); HashSet<TopicPartition> originalPartitions = new HashSet<>(); TopicPartition topicPartition1 = new TopicPartition("testTopic-1", 0); TopicPartition topicPartition2 = new TopicPartition("testTopic-1", 1); TopicPartition topicPartition3 = new TopicPartition("testTopic-2", 0); originalPartitions.add(topicPartition1); originalPartitions.add(topicPartition2); originalPartitions.add(topicPartition3); kafkaConsumerWrapper.setOriginalTopics(originalTopics); kafkaConsumerWrapper.setOriginalPartitions(originalPartitions); kafkaConsumerWrapper.setAssign(true); Set<String> prohibitionTopics = new HashSet<>(); prohibitionTopics.add("testTopic-2"); prohibitionTopics.add("testTopic-3"); KafkaConsumerController.disableConsumption(kafkaConsumerWrapper, prohibitionTopics); originalPartitions.remove(topicPartition3); Mockito.verify(mockConsumer, Mockito.times(1)).assign(originalPartitions); }
public Command create( final ConfiguredStatement<? extends Statement> statement, final KsqlExecutionContext context) { return create(statement, context.getServiceContext(), context); }
@Test public void shouldValidatePauseQuery() { // Given: givenPause(); // When: commandFactory.create(configuredStatement, executionContext); // Then: verify(executionContext).getPersistentQuery(QUERY_ID); verify(query1).pause(); }
private ExitStatus run() { try { init(); return new Processor().processNamespace().getExitStatus(); } catch (IllegalArgumentException e) { System.out.println(e + ". Exiting ..."); return ExitStatus.ILLEGAL_ARGUMENTS; } catch (IOException e) { System.out.println(e + ". Exiting ..."); LOG.error(e + ". Exiting ..."); return ExitStatus.IO_EXCEPTION; } finally { dispatcher.shutdownNow(); } }
@Test(timeout = 300000) public void testTwoReplicaSameStorageTypeShouldNotSelect() throws Exception { // HDFS-8147 final Configuration conf = new HdfsConfiguration(); initConf(conf); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(3) .storageTypes( new StorageType[][] { { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.ARCHIVE } }).build(); try { cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); final String file = "/testForTwoReplicaSameStorageTypeShouldNotSelect"; // write to DISK final FSDataOutputStream out = dfs.create(new Path(file), (short) 2); out.writeChars("testForTwoReplicaSameStorageTypeShouldNotSelect"); out.close(); // verify before movement LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0); StorageType[] storageTypes = lb.getStorageTypes(); for (StorageType storageType : storageTypes) { Assert.assertTrue(StorageType.DISK == storageType); } // move to ARCHIVE dfs.setStoragePolicy(new Path(file), "COLD"); int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", file.toString() }); Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc); // Wait till namenode notified about the block location details waitForLocatedBlockWithArchiveStorageType(dfs, file, 2); } finally { cluster.shutdown(); } }
protected boolean doSubmit(final String selectorId, final List<? extends CommonUpstream> upstreamList) { List<CommonUpstream> commonUpstreamList = CommonUpstreamUtils.convertCommonUpstreamList(upstreamList); if (CollectionUtils.isEmpty(upstreamList)) { return true; } return commonUpstreamList.stream().map(upstream -> upstreamCheckService.checkAndSubmit(selectorId, upstream)) .collect(Collectors.toList()).stream().findAny().orElse(false); }
@Test public void testDoSubmit() { assertTrue(abstractShenyuClientRegisterService.doSubmit("Selector_Id", new ArrayList<>())); CommonUpstream commonUpstream = new CommonUpstream(); when(upstreamCheckService.checkAndSubmit(any(), any())).thenReturn(true); assertTrue(abstractShenyuClientRegisterService.doSubmit("Selector_Id", Collections.singletonList(commonUpstream))); }
public boolean equalTo(MemoryBuffer buf2, int offset1, int offset2, int len) { final long pos1 = address + offset1; final long pos2 = buf2.address + offset2; checkArgument(pos1 < addressLimit); checkArgument(pos2 < buf2.addressLimit); return Platform.arrayEquals(heapMemory, pos1, buf2.heapMemory, pos2, len); }
@Test public void testEqualTo() { MemoryBuffer buf1 = MemoryUtils.buffer(16); MemoryBuffer buf2 = MemoryUtils.buffer(16); buf1.putInt64(0, 10); buf2.putInt64(0, 10); buf1.putByte(9, (byte) 1); buf2.putByte(9, (byte) 1); Assert.assertTrue(buf1.equalTo(buf2, 0, 0, buf1.size())); buf1.putByte(9, (byte) 2); Assert.assertFalse(buf1.equalTo(buf2, 0, 0, buf1.size())); }
@Override public void initialize(ServiceConfiguration config) throws IOException { String data = config.getProperties().getProperty(CONF_PULSAR_PROPERTY_KEY); if (StringUtils.isEmpty(data)) { data = System.getProperty(CONF_SYSTEM_PROPERTY_KEY); } if (StringUtils.isEmpty(data)) { throw new IOException("No basic authentication config provided"); } @Cleanup BufferedReader reader = null; try { byte[] bytes = readData(data); reader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(bytes))); } catch (Exception e) { throw new IllegalArgumentException(e); } users = new HashMap<>(); for (String line : reader.lines().toArray(s -> new String[s])) { List<String> splitLine = Arrays.asList(line.split(":")); if (splitLine.size() != 2) { throw new IOException("The format of the password auth conf file is invalid"); } users.put(splitLine.get(0), splitLine.get(1)); } }
@Test public void testLoadBase64FromSystemProperties() throws Exception { @Cleanup AuthenticationProviderBasic provider = new AuthenticationProviderBasic(); ServiceConfiguration serviceConfiguration = new ServiceConfiguration(); System.setProperty("pulsar.auth.basic.conf", basicAuthConfBase64); provider.initialize(serviceConfiguration); testAuthenticate(provider); }
@Override public MergeAppend appendFile(DataFile file) { add(file); return this; }
// Verifies that after the partition spec changes, a merge append keeps the
// existing entries (written under the old spec) in their own manifest while
// the new file goes into a manifest for the new spec; sequence numbers must
// advance in v2 and remain 0 in v1.
@TestTemplate
public void testChangedPartitionSpecMergeExisting() {
  Snapshot snap1 = commit(table, table.newAppend().appendFile(FILE_A), branch);
  long id1 = snap1.snapshotId();
  validateSnapshot(null, snap1, 1, FILE_A);

  // create a second compatible manifest
  Snapshot snap2 = commit(table, table.newFastAppend().appendFile(FILE_B), branch);
  long id2 = snap2.snapshotId();
  validateSnapshot(snap1, snap2, 2, FILE_B);

  TableMetadata base = readMetadata();
  assertThat(snap2.allManifests(table.io())).hasSize(2);
  ManifestFile manifest = snap2.allManifests(table.io()).get(0);

  // build the new spec using the table's schema, which uses fresh IDs
  PartitionSpec newSpec =
      PartitionSpec.builderFor(base.schema()).bucket("data", 16).bucket("id", 4).build();

  // commit the new partition spec to the table manually
  table.ops().commit(base, base.updatePartitionSpec(newSpec));
  Snapshot snap3 = latestSnapshot(table, branch);
  // Changing the spec alone must not bump the sequence number beyond 2.
  V2Assert.assertEquals("Snapshot sequence number should be 2", 2, snap3.sequenceNumber());
  V2Assert.assertEquals(
      "Last sequence number should be 2", 2, readMetadata().lastSequenceNumber());
  V1Assert.assertEquals(
      "Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());

  // A file partitioned under the new two-field spec.
  DataFile newFileY =
      DataFiles.builder(table.spec())
          .withPath("/path/to/data-y.parquet")
          .withFileSizeInBytes(10)
          .withPartitionPath("data_bucket=2/id_bucket=3")
          .withRecordCount(1)
          .build();

  Snapshot lastSnapshot = commit(table, table.newAppend().appendFile(newFileY), branch);
  V2Assert.assertEquals("Snapshot sequence number should be 3", 3, lastSnapshot.sequenceNumber());
  V2Assert.assertEquals(
      "Last sequence number should be 3", 3, readMetadata().lastSequenceNumber());
  V1Assert.assertEquals(
      "Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());

  // Two manifests: a fresh one for the new-spec file, a merged one with the
  // pre-existing entries; the old first manifest must have been replaced.
  assertThat(lastSnapshot.allManifests(table.io())).hasSize(2);
  assertThat(lastSnapshot.allManifests(table.io())).doesNotContain(manifest);

  validateManifest(
      lastSnapshot.allManifests(table.io()).get(0),
      dataSeqs(3L),
      fileSeqs(3L),
      ids(lastSnapshot.snapshotId()),
      files(newFileY),
      statuses(Status.ADDED));

  validateManifest(
      lastSnapshot.allManifests(table.io()).get(1),
      dataSeqs(2L, 1L),
      fileSeqs(2L, 1L),
      ids(id2, id1),
      files(FILE_B, FILE_A),
      statuses(Status.EXISTING, Status.EXISTING));
}
@Override public List<Spellbook> findAllSpellbooks() { return spellbookDao.findAll(); }
@Test void testFindAllSpellbooks() { final var wizardDao = mock(WizardDao.class); final var spellbookDao = mock(SpellbookDao.class); final var spellDao = mock(SpellDao.class); final var service = new MagicServiceImpl(wizardDao, spellbookDao, spellDao); verifyNoInteractions(wizardDao, spellbookDao, spellDao); service.findAllSpellbooks(); verify(spellbookDao).findAll(); verifyNoMoreInteractions(wizardDao, spellbookDao, spellDao); }
/**
 * Fetches the committed offsets of the requested topic-partitions for a
 * group, evaluated against the timeline state at {@code lastCommittedOffset}.
 *
 * <p>{@code lastCommittedOffset == Long.MAX_VALUE} means "read the latest
 * state" and additionally requests stable offsets: partitions with pending
 * transactional commits are then answered with UNSTABLE_OFFSET_COMMIT.
 * An unknown group fails all partitions softly by returning INVALID_OFFSET
 * for each of them rather than an error at the group level.
 *
 * @throws ApiException if the request fails validation
 */
public OffsetFetchResponseData.OffsetFetchResponseGroup fetchOffsets(
    OffsetFetchRequestData.OffsetFetchRequestGroup request,
    long lastCommittedOffset
) throws ApiException {
    // MAX_VALUE is the sentinel for "latest state + require stable offsets".
    final boolean requireStable = lastCommittedOffset == Long.MAX_VALUE;

    boolean failAllPartitions = false;
    try {
        validateOffsetFetch(request, lastCommittedOffset);
    } catch (GroupIdNotFoundException ex) {
        // Unknown group: respond with INVALID_OFFSET for every partition
        // instead of propagating the error.
        failAllPartitions = true;
    }

    final List<OffsetFetchResponseData.OffsetFetchResponseTopics> topicResponses = new ArrayList<>(request.topics().size());
    // Snapshot of the group's offsets at the requested timeline position;
    // null when the group is unknown or has no offsets.
    final TimelineHashMap<String, TimelineHashMap<Integer, OffsetAndMetadata>> groupOffsets =
        failAllPartitions ? null : offsets.offsetsByGroup.get(request.groupId(), lastCommittedOffset);

    request.topics().forEach(topic -> {
        final OffsetFetchResponseData.OffsetFetchResponseTopics topicResponse =
            new OffsetFetchResponseData.OffsetFetchResponseTopics().setName(topic.name());
        topicResponses.add(topicResponse);

        final TimelineHashMap<Integer, OffsetAndMetadata> topicOffsets =
            groupOffsets == null ? null : groupOffsets.get(topic.name(), lastCommittedOffset);

        topic.partitionIndexes().forEach(partitionIndex -> {
            final OffsetAndMetadata offsetAndMetadata =
                topicOffsets == null ? null : topicOffsets.get(partitionIndex, lastCommittedOffset);

            if (requireStable && hasPendingTransactionalOffsets(request.groupId(), topic.name(), partitionIndex)) {
                // Stable offsets were requested but a transaction is in
                // flight for this partition: report it as unstable.
                topicResponse.partitions().add(new OffsetFetchResponseData.OffsetFetchResponsePartitions()
                    .setPartitionIndex(partitionIndex)
                    .setErrorCode(Errors.UNSTABLE_OFFSET_COMMIT.code())
                    .setCommittedOffset(INVALID_OFFSET)
                    .setCommittedLeaderEpoch(-1)
                    .setMetadata(""));
            } else if (offsetAndMetadata == null) {
                // No committed offset known for this partition.
                topicResponse.partitions().add(new OffsetFetchResponseData.OffsetFetchResponsePartitions()
                    .setPartitionIndex(partitionIndex)
                    .setCommittedOffset(INVALID_OFFSET)
                    .setCommittedLeaderEpoch(-1)
                    .setMetadata(""));
            } else {
                topicResponse.partitions().add(new OffsetFetchResponseData.OffsetFetchResponsePartitions()
                    .setPartitionIndex(partitionIndex)
                    .setCommittedOffset(offsetAndMetadata.committedOffset)
                    .setCommittedLeaderEpoch(offsetAndMetadata.leaderEpoch.orElse(-1))
                    .setMetadata(offsetAndMetadata.metadata));
            }
        });
    });

    return new OffsetFetchResponseData.OffsetFetchResponseGroup()
        .setGroupId(request.groupId())
        .setTopics(topicResponses);
}
@Test public void testFetchOffsetsWithUnknownGroup() { OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build(); List<OffsetFetchRequestData.OffsetFetchRequestTopics> request = Arrays.asList( new OffsetFetchRequestData.OffsetFetchRequestTopics() .setName("foo") .setPartitionIndexes(Arrays.asList(0, 1)), new OffsetFetchRequestData.OffsetFetchRequestTopics() .setName("bar") .setPartitionIndexes(Collections.singletonList(0)) ); List<OffsetFetchResponseData.OffsetFetchResponseTopics> expectedResponse = Arrays.asList( new OffsetFetchResponseData.OffsetFetchResponseTopics() .setName("foo") .setPartitions(Arrays.asList( mkInvalidOffsetPartitionResponse(0), mkInvalidOffsetPartitionResponse(1) )), new OffsetFetchResponseData.OffsetFetchResponseTopics() .setName("bar") .setPartitions(Collections.singletonList( mkInvalidOffsetPartitionResponse(0) )) ); assertEquals(expectedResponse, context.fetchOffsets("group", request, Long.MAX_VALUE)); }
public ZeebeService getZeebeService() { return zeebeService; }
@Test void getZeebeService() { // Verify Defaults ZeebeComponent component = new ZeebeComponent(); assertEquals(ZeebeConstants.DEFAULT_GATEWAY_HOST, component.getGatewayHost()); assertEquals(ZeebeConstants.DEFAULT_GATEWAY_PORT, component.getGatewayPort()); }
public Future<KafkaVersionChange> reconcile() { return getPods() .compose(this::detectToAndFromVersions) .compose(i -> prepareVersionChange()); }
@Test public void testNewClusterWithoutVersion(VertxTestContext context) { VersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(null, null, null), mockRos(List.of()) ); Checkpoint async = context.checkpoint(); vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { assertThat(c.from(), is(VERSIONS.defaultVersion())); assertThat(c.to(), is(VERSIONS.defaultVersion())); assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); async.flag(); }))); }
public synchronized Set<String> getTopicNames() { return topicNames; }
@Test public void testGetTopicNameShouldReturnCorrectValue() { for (String topicName : testManager.getTopicNames()) { assertThat(topicName).matches(TEST_ID + "-\\d" + "-\\d{8}-\\d{6}-\\d{6}"); } }
/**
 * Compresses a URL into the Eddystone-URL beacon byte format: one encoded
 * protocol byte, the lower-cased hostname, an optionally table-encoded TLD
 * (possibly absorbing the following slash), and the case-preserving path.
 *
 * @param urlString the URL to compress; must match EDDYSTONE_URL_REGEX
 * @return the compressed bytes (never longer than the input string)
 * @throws MalformedURLException if the input is null or does not match
 */
public static byte[] compress(String urlString) throws MalformedURLException {
    byte[] compressedBytes = null;
    if (urlString != null) {
        // Figure the compressed bytes can't be longer than the original string.
        byte[] byteBuffer = new byte[urlString.length()];
        int byteBufferIndex = 0;
        Arrays.fill(byteBuffer, (byte) 0x00);

        Pattern urlPattern = Pattern.compile(EDDYSTONE_URL_REGEX);
        Matcher urlMatcher = urlPattern.matcher(urlString);
        if (urlMatcher.matches()) {
            // www. — presence only affects the protocol byte chosen below.
            String wwwdot = urlMatcher.group(EDDYSTONE_URL_WWW_GROUP);
            boolean haswww = (wwwdot != null);

            // Protocol: one of four single-byte codes (http/https × www/no-www).
            String rawProtocol = urlMatcher.group(EDDYSTONE_URL_PROTOCOL_GROUP);
            String protocol = rawProtocol.toLowerCase();
            if (protocol.equalsIgnoreCase(URL_PROTOCOL_HTTP)) {
                byteBuffer[byteBufferIndex] = (haswww ? EDDYSTONE_URL_PROTOCOL_HTTP_WWW : EDDYSTONE_URL_PROTOCOL_HTTP);
            } else {
                byteBuffer[byteBufferIndex] = (haswww ? EDDYSTONE_URL_PROTOCOL_HTTPS_WWW : EDDYSTONE_URL_PROTOCOL_HTTPS);
            }
            byteBufferIndex++;

            // Fully-qualified domain name (FQDN). This includes the hostname and
            // any other components after the dots but BEFORE the first single
            // slash in the URL. NOTE(review): getBytes() uses the platform
            // default charset — confirm ASCII-only input is guaranteed.
            byte[] hostnameBytes = urlMatcher.group(EDDYSTONE_URL_FQDN_GROUP).getBytes();
            String rawHostname = new String(hostnameBytes);
            String hostname = rawHostname.toLowerCase();
            String[] domains = hostname.split(Pattern.quote("."));
            boolean consumedSlash = false;
            if (domains != null) {
                // Write the hostname/subdomains prior to the last one. If there's
                // only one (e.g. http://localhost) then that's the only thing to
                // write out.
                byte[] periodBytes = {'.'};
                int writableDomainsCount = (domains.length == 1 ? 1 : domains.length - 1);
                for (int domainIndex = 0; domainIndex < writableDomainsCount; domainIndex++) {
                    // Write out leading period, if necessary.
                    if (domainIndex > 0) {
                        System.arraycopy(periodBytes, 0, byteBuffer, byteBufferIndex, periodBytes.length);
                        byteBufferIndex += periodBytes.length;
                    }
                    byte[] domainBytes = domains[domainIndex].getBytes();
                    int domainLength = domainBytes.length;
                    System.arraycopy(domainBytes, 0, byteBuffer, byteBufferIndex, domainLength);
                    byteBufferIndex += domainLength;
                }
                // Is the TLD one that we can encode? The table may also cover
                // ".tld/" forms, in which case the slash is consumed here.
                if (domains.length > 1) {
                    String tld = "." + domains[domains.length - 1];
                    String slash = urlMatcher.group(EDDYSTONE_URL_SLASH_GROUP);
                    String encodableTLDCandidate = (slash == null ? tld : tld + slash);
                    byte encodedTLDByte = encodedByteForTopLevelDomain(encodableTLDCandidate);
                    if (encodedTLDByte != TLD_NOT_ENCODABLE) {
                        byteBuffer[byteBufferIndex++] = encodedTLDByte;
                        consumedSlash = (slash != null);
                    } else {
                        byte[] tldBytes = tld.getBytes();
                        int tldLength = tldBytes.length;
                        System.arraycopy(tldBytes, 0, byteBuffer, byteBufferIndex, tldLength);
                        byteBufferIndex += tldLength;
                    }
                }
            }

            // Optional slash (only if the TLD encoding above didn't absorb it).
            if (! consumedSlash) {
                String slash = urlMatcher.group(EDDYSTONE_URL_SLASH_GROUP);
                if (slash != null) {
                    int slashLength = slash.length();
                    System.arraycopy(slash.getBytes(), 0, byteBuffer, byteBufferIndex, slashLength);
                    byteBufferIndex += slashLength;
                }
            }

            // Path: copied verbatim, case preserved.
            String path = urlMatcher.group(EDDYSTONE_URL_PATH_GROUP);
            if (path != null) {
                int pathLength = path.length();
                System.arraycopy(path.getBytes(), 0, byteBuffer, byteBufferIndex, pathLength);
                byteBufferIndex += pathLength;
            }

            // Copy the result into a right-sized array.
            compressedBytes = new byte[byteBufferIndex];
            System.arraycopy(byteBuffer, 0, compressedBytes, 0, compressedBytes.length);
        } else {
            throw new MalformedURLException();
        }
    } else {
        throw new MalformedURLException();
    }
    return compressedBytes;
}
@Test public void testCompressWithDomainInCaps() throws MalformedURLException { String testURL = "http://GOO.GL/C2HC48"; byte[] expectedBytes = {0x02, 'g', 'o', 'o', '.', 'g', 'l', '/', 'C', '2', 'H', 'C', '4', '8'}; String hexBytes = bytesToHex(UrlBeaconUrlCompressor.compress(testURL)); assertTrue(Arrays.equals(expectedBytes, UrlBeaconUrlCompressor.compress(testURL))); }
@VisibleForTesting void recover() { try (DbSession dbSession = dbClient.openSession(false)) { Profiler profiler = Profiler.create(LOGGER).start(); long beforeDate = system2.now() - minAgeInMs; IndexingResult result = new IndexingResult(); Collection<EsQueueDto> items = dbClient.esQueueDao().selectForRecovery(dbSession, beforeDate, loopLimit); while (!items.isEmpty()) { IndexingResult loopResult = new IndexingResult(); groupItemsByDocType(items).asMap().forEach((type, typeItems) -> loopResult.add(doIndex(dbSession, type, typeItems))); result.add(loopResult); if (loopResult.getSuccessRatio() <= CIRCUIT_BREAKER_IN_PERCENT) { LOGGER.error(LOG_PREFIX + "too many failures [{}/{} documents], waiting for next run", loopResult.getFailures(), loopResult.getTotal()); break; } if (loopResult.getTotal() == 0L) { break; } items = dbClient.esQueueDao().selectForRecovery(dbSession, beforeDate, loopLimit); } if (result.getTotal() > 0L) { profiler.stopInfo(LOG_PREFIX + format("%d documents processed [%d failures]", result.getTotal(), result.getFailures())); } } catch (Throwable t) { LOGGER.error(LOG_PREFIX + "fail to recover documents", t); } }
@Test public void stop_run_if_too_many_failures() { IntStream.range(0, 10).forEach(i -> insertItem(FOO_TYPE, "" + i)); advanceInTime(); // 10 docs to process, by groups of 3. // The first group successfully recovers only 1 docs --> above 30% of failures --> stop run PartiallyFailingIndexer indexer = new PartiallyFailingIndexer(FOO_TYPE, 1); MapSettings settings = new MapSettings() .setProperty("sonar.search.recovery.loopLimit", "3"); underTest = newRecoveryIndexer(settings.asConfig(), indexer); underTest.recover(); assertThatLogsContain(ERROR, "Elasticsearch recovery - too many failures [2/3 documents], waiting for next run"); assertThatQueueHasSize(9); // The indexer must have been called once and only once. assertThat(indexer.called).hasSize(3); }
public static void validateSchema(@Nonnull Schema schema, @Nonnull SchemaConformingTransformerV2Config transformerConfig) { validateSchemaFieldNames(schema.getPhysicalColumnNames(), transformerConfig); String indexableExtrasFieldName = transformerConfig.getIndexableExtrasField(); if (null != indexableExtrasFieldName) { SchemaConformingTransformer.getAndValidateExtrasFieldType(schema, indexableExtrasFieldName); } String unindexableExtrasFieldName = transformerConfig.getUnindexableExtrasField(); if (null != unindexableExtrasFieldName) { SchemaConformingTransformer.getAndValidateExtrasFieldType(schema, indexableExtrasFieldName); } validateSchemaAndCreateTree(schema, transformerConfig); }
@Test public void testOverlappingSchemaFields() { try { Schema schema = createDefaultSchemaBuilder().addSingleValueDimension("a.b", DataType.STRING) .addSingleValueDimension("a.b.c", DataType.INT).build(); SchemaConformingTransformerV2.validateSchema(schema, new SchemaConformingTransformerV2Config(null, INDEXABLE_EXTRAS_FIELD_NAME, null, null, null, null, null, null, null, null, null, null, null)); } catch (Exception ex) { fail("Should not have thrown any exception when overlapping schema occurs"); } try { // This is a repeat of the previous test but with fields reversed just in case they are processed in order Schema schema = createDefaultSchemaBuilder().addSingleValueDimension("a.b.c", DataType.INT) .addSingleValueDimension("a.b", DataType.STRING).build(); SchemaConformingTransformerV2.validateSchema(schema, new SchemaConformingTransformerV2Config(null, INDEXABLE_EXTRAS_FIELD_NAME, null, null, null, null, null, null, null, null, null, null, null)); } catch (Exception ex) { fail("Should not have thrown any exception when overlapping schema occurs"); } }
public void reset() { this.status = null; this.latch = new CountDownLatch(this.count); }
@Test public void testReset() throws Exception { testAwaitRun(); this.done.await(); assertTrue(true); this.done.reset(); assertNull(this.done.getStatus()); testAwaitRun(); assertTrue(this.done.getStatus().isOk()); }
@Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { try { decodeMsg(msg); } catch (Throwable t) { notifyAllChannelsOfErrorAndClose(t); } }
// Verifies the credit announcement protocol of the client handler:
// partition requests carry the initial credit, buffer responses accumulate
// unannounced credit which is flushed as AddCredit messages while the
// channel is writable, and credit announcement is deferred (not lost) while
// the channel is un-writable.
@Test
void testNotifyCreditAvailable() throws Exception {
    final CreditBasedPartitionRequestClientHandler handler =
            new CreditBasedPartitionRequestClientHandler();
    final NetworkBufferAllocator allocator = new NetworkBufferAllocator(handler);
    final EmbeddedChannel channel = new EmbeddedChannel(handler);
    final PartitionRequestClient client =
            new NettyPartitionRequestClient(
                    channel,
                    handler,
                    mock(ConnectionID.class),
                    mock(PartitionRequestClientFactory.class));

    final NetworkBufferPool networkBufferPool = new NetworkBufferPool(10, 32);
    final SingleInputGate inputGate = createSingleInputGate(2, networkBufferPool);
    final RemoteInputChannel[] inputChannels = new RemoteInputChannel[2];
    inputChannels[0] = createRemoteInputChannel(inputGate, client);
    inputChannels[1] = createRemoteInputChannel(inputGate, client);
    try {
        inputGate.setInputChannels(inputChannels);
        final BufferPool bufferPool = networkBufferPool.createBufferPool(6, 6);
        inputGate.setBufferPool(bufferPool);
        inputGate.setupChannels();
        inputChannels[0].requestSubpartitions();
        inputChannels[1].requestSubpartitions();

        // The two input channels should send partition requests
        assertThat(channel.isWritable()).isTrue();
        Object readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(PartitionRequest.class);
        assertThat(inputChannels[0].getInputChannelId())
                .isEqualTo(((PartitionRequest) readFromOutbound).receiverId);
        assertThat(((PartitionRequest) readFromOutbound).credit).isEqualTo(2);

        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(PartitionRequest.class);
        assertThat(inputChannels[1].getInputChannelId())
                .isEqualTo(((PartitionRequest) readFromOutbound).receiverId);
        assertThat(((PartitionRequest) readFromOutbound).credit).isEqualTo(2);

        // The buffer response will take one available buffer from input channel, and it will
        // trigger
        // requesting (backlog + numExclusiveBuffers - numAvailableBuffers) floating buffers
        final BufferResponse bufferResponse1 =
                createBufferResponse(
                        TestBufferFactory.createBuffer(32),
                        0,
                        inputChannels[0].getInputChannelId(),
                        1,
                        allocator);
        final BufferResponse bufferResponse2 =
                createBufferResponse(
                        TestBufferFactory.createBuffer(32),
                        0,
                        inputChannels[1].getInputChannelId(),
                        1,
                        allocator);
        handler.channelRead(mock(ChannelHandlerContext.class), bufferResponse1);
        handler.channelRead(mock(ChannelHandlerContext.class), bufferResponse2);

        assertThat(inputChannels[0].getUnannouncedCredit()).isEqualTo(2);
        assertThat(inputChannels[1].getUnannouncedCredit()).isEqualTo(2);

        channel.runPendingTasks();

        // The two input channels should notify credits availability via the writable channel
        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(AddCredit.class);
        assertThat(inputChannels[0].getInputChannelId())
                .isEqualTo(((AddCredit) readFromOutbound).receiverId);
        assertThat(((AddCredit) readFromOutbound).credit).isEqualTo(2);

        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(AddCredit.class);
        assertThat(inputChannels[1].getInputChannelId())
                .isEqualTo(((AddCredit) readFromOutbound).receiverId);
        assertThat(((AddCredit) readFromOutbound).credit).isEqualTo(2);
        assertThat((Object) channel.readOutbound()).isNull();

        ByteBuf channelBlockingBuffer = blockChannel(channel);

        // Trigger notify credits availability via buffer response on the condition of an
        // un-writable channel
        final BufferResponse bufferResponse3 =
                createBufferResponse(
                        TestBufferFactory.createBuffer(32),
                        1,
                        inputChannels[0].getInputChannelId(),
                        1,
                        allocator);
        handler.channelRead(mock(ChannelHandlerContext.class), bufferResponse3);

        assertThat(inputChannels[0].getUnannouncedCredit()).isOne();
        assertThat(inputChannels[1].getUnannouncedCredit()).isZero();

        channel.runPendingTasks();

        // The input channel will not notify credits via un-writable channel
        assertThat(channel.isWritable()).isFalse();
        assertThat((Object) channel.readOutbound()).isNull();

        // Flush the buffer to make the channel writable again
        channel.flush();
        assertThat(channelBlockingBuffer).isSameAs(channel.readOutbound());

        // The input channel should notify credits via channel's writability changed event
        assertThat(channel.isWritable()).isTrue();
        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(AddCredit.class);
        assertThat(((AddCredit) readFromOutbound).credit).isOne();
        assertThat(inputChannels[0].getUnannouncedCredit()).isZero();
        assertThat(inputChannels[1].getUnannouncedCredit()).isZero();

        // no more messages
        assertThat((Object) channel.readOutbound()).isNull();
    } finally {
        releaseResource(inputGate, networkBufferPool);
        channel.close();
    }
}
@Nonnull @Override public Result addChunk(ByteBuf buf, @Nullable SocketAddress remoteAddress) { if (!buf.isReadable(2)) { return new Result(null, false); } try { final IpfixParser.MessageDescription messageDescription = shallowParser.shallowParseMessage(buf); final long observationDomainId = messageDescription.getHeader().observationDomainId(); addTemplateKeyInCache(remoteAddress, messageDescription, observationDomainId); // TODO handle options templates // collects all data records that are now ready to be sent final Set<ShallowDataSet> packetsToSendCollection = new HashSet<>(); // the set of template records to include in the newly created message that is our "aggregate result" final Set<Integer> bufferedTemplateIdList = new HashSet<>(); if (!messageDescription.declaredTemplateIds().isEmpty()) { // if we have new templates, look for buffered data records that we have all the templates for now final Set<Integer> knownTemplateIdsList = new HashSet<>(); collectAllTemplateIds(remoteAddress, observationDomainId, knownTemplateIdsList); final Queue<ShallowDataSet> bufferedPackets = packetCache.getIfPresent(TemplateKey.idForExporter(remoteAddress, observationDomainId)); handleBufferedPackets(packetsToSendCollection, bufferedTemplateIdList, knownTemplateIdsList, bufferedPackets); } boolean packetBuffered = false; // the list of template keys to return in the result ( TODO this copies all of the templates all the time :( ) final Set<TemplateKey> templatesList = new HashSet<>(templateCache.asMap().keySet()); bufferedTemplateIdList.addAll(messageDescription.referencedTemplateIds()); LOG.debug("Finding the needed templates for the buffered and current packets"); for (int templateId : bufferedTemplateIdList) { final TemplateKey templateKey = new TemplateKey(remoteAddress, observationDomainId, templateId); final Object template = templateCache.getIfPresent(templateKey); if (template == null) { LOG.debug("Template is null, packet needs to be buffered until templates have been 
received."); try { final TemplateKey newTemplateKey = TemplateKey.idForExporter(remoteAddress, observationDomainId); final Queue<ShallowDataSet> bufferedPackets = packetCache.get(newTemplateKey, ConcurrentLinkedQueue::new); final byte[] bytes = ByteBufUtil.getBytes(buf); bufferedPackets.addAll(messageDescription.dataSets()); packetBuffered = true; } catch (ExecutionException ignored) { // the loader cannot fail, it only creates a new queue } } else { LOG.debug("Template [{}] has been added to template list.", templateKey); templatesList.add(templateKey); packetsToSendCollection.addAll(messageDescription.dataSets()); } } // if we have buffered this packet, don't try to process it now. we still need all the templates for it if (packetBuffered) { LOG.debug("Packet has been buffered and will not be processed now, returning result."); return new Result(null, true); } // if we didn't buffer anything but also didn't have anything queued that can be processed, don't proceed. if (packetsToSendCollection.isEmpty()) { LOG.debug("Packet has not been buffered and no packet is queued."); return new Result(null, true); } final IpfixJournal.RawIpfix.Builder journalBuilder = IpfixJournal.RawIpfix.newBuilder(); buildJournalObject(packetsToSendCollection, templatesList, journalBuilder); final IpfixJournal.RawIpfix rawIpfix = journalBuilder.build(); return getCompleteResult(rawIpfix); } catch (Exception e) { LOG.error("Unable to aggregate IPFIX message due to the following error ", e); return new Result(null, false); } }
@Ignore("Not ready. Has InvalidIPFixMessageVersion.")
@Test
public void multipleMessagesTemplateLater() throws IOException {
    final ByteBuf dataOnly = Utils.readPacket("dataset-only.ipfix");
    final ByteBuf templatesAndData = Utils.readPacket("templates-data.ipfix");
    final IpfixAggregator aggregator = new IpfixAggregator();

    // The data-only packet arrives first: valid, but buffered (no complete message yet).
    final CodecAggregator.Result buffered = aggregator.addChunk(dataOnly, someAddress);
    assertThat(buffered.isValid()).isTrue();
    assertThat(buffered.getMessage()).isNull();

    // Once the templates arrive, the buffered data can be assembled into a message.
    final CodecAggregator.Result complete = aggregator.addChunk(templatesAndData, someAddress);
    assertThat(complete.isValid()).isTrue();
    assertThat(complete.getMessage()).isNotNull();

    final IpfixMessage message = new IpfixParser(standardDefinition).parseMessage(complete.getMessage());
    assertThat(message.flows()).hasSize(4);
}
@Override protected boolean redirectMatches(String requestedRedirect, String redirectUri) { if (isStrictMatch()) { // we're doing a strict string match for all clients return Strings.nullToEmpty(requestedRedirect).equals(redirectUri); } else { // otherwise do the prefix-match from the library return super.redirectMatches(requestedRedirect, redirectUri); } }
@Test public void testHeartMode() { when(config.isHeartMode()).thenReturn(true); // this is not an exact match boolean res1 = resolver.redirectMatches(pathUri, goodUri); assertThat(res1, is(false)); // this is an exact match boolean res2 = resolver.redirectMatches(goodUri, goodUri); assertThat(res2, is(true)); }
public static Optional<Integer> getLatestSchemaId(
    final SchemaRegistryClient srClient,
    final String topic,
    final boolean isKey
) {
  // Resolve the key/value subject for the topic and look up its latest schema id, if any.
  return getLatestSchema(srClient, KsqlConstants.getSRSubject(topic, isKey))
      .map(SchemaMetadata::getId);
}
@Test public void shouldReturnSchemaIdFromSubjectValue() throws Exception { // Given: when(schemaMetadata.getId()).thenReturn(123); when(schemaRegistryClient.getLatestSchemaMetadata("bar-value")) .thenReturn(schemaMetadata); // When: final Optional<Integer> schemaId = SchemaRegistryUtil.getLatestSchemaId(schemaRegistryClient, "bar", false); // Then: assertThat(schemaId.get(), equalTo(123)); }
public static byte[] encryptWithMySQL41(final byte[] password, final byte[] seed) throws NoSuchAlgorithmException {
    // MySQL 4.1 scramble: XOR of SHA1(password) with a digest derived from the
    // server seed and the double-hashed password (SHA1(SHA1(password))).
    final MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
    final byte[] stage1 = sha1.digest(password);
    final byte[] stage2 = sha1.digest(stage1);
    final byte[] scramble = concatSeed(sha1, seed, stage2);
    return xor(stage1, scramble, scramble.length);
}
@Test
void assertEncryptWithMySQL41() throws NoSuchAlgorithmException {
    // Encrypting "password" with the fixture seed must yield the known expected scramble.
    final byte[] password = "password".getBytes();
    final byte[] actual = PasswordEncryption.encryptWithMySQL41(password, getRandomSeed());
    assertThat(actual, is(getMySQL41ExpectedPassword()));
}
// Renders the format's configured header columns as a single delimited line.
static String headerLine(CSVFormat csvFormat) {
    final String delimiter = String.valueOf(csvFormat.getDelimiter());
    return String.join(delimiter, csvFormat.getHeader());
}
@Test
public void givenInvalidCsvRecord_throws() {
    CSVFormat csvFormat = csvFormat().withQuote('"');
    CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat);

    // "a,\"1,1.1" opens a quote that is never closed, so that line cannot be parsed.
    PCollection<String> lines =
        pipeline.apply(Create.of(headerLine(csvFormat), "a,\"1,1.1", "b,2,2.2", "c,3,3.3"));
    CsvIOParseResult<List<String>> result = lines.apply(underTest);

    // Exactly one of the input lines must land in the error collection.
    PAssert.thatSingleton(result.getErrors().apply(Count.globally())).isEqualTo(1L);
    pipeline.run();
}