focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public boolean processRow( StepMetaInterface smi, StepDataInterface sdi ) throws KettleException { meta = (PrioritizeStreamsMeta) smi; data = (PrioritizeStreamsData) sdi; if ( first ) { if ( meta.getStepName() != null || meta.getStepName().length > 0 ) { data.stepnrs = meta.getStepName().length; data.rowSets = new RowSet[data.stepnrs]; for ( int i = 0; i < data.stepnrs; i++ ) { data.rowSets[i] = findInputRowSet( meta.getStepName()[i] ); if ( i > 0 ) { // Compare layout of first stream with the current stream checkInputLayoutValid( data.rowSets[0].getRowMeta(), data.rowSets[i].getRowMeta() ); } } } else { // error throw new KettleException( BaseMessages.getString( PKG, "PrioritizeStreams.Error.NotInputSteps" ) ); } data.currentRowSet = data.rowSets[0]; } // end if first, part 1 Object[] input = getOneRow(); while ( input == null && data.stepnr < data.stepnrs - 1 && !isStopped() ) { input = getOneRow(); } if ( input == null ) { // no more input to be expected... setOutputDone(); return false; } if ( first ) { // Take the row Meta from the first rowset read data.outputRowMeta = data.currentRowSet.getRowMeta(); first = false; } putRow( data.outputRowMeta, input ); return true; }
/**
 * Verifies that processRow() resolves the configured input streams without an NPE and
 * selects the first ("high") stream as the initial row set to drain.
 */
@Test
public void testProcessRow() throws KettleException {
  PrioritizeStreamsMeta meta = new PrioritizeStreamsMeta();
  meta.setStepName( new String[] { "high", "medium", "low" } );
  PrioritizeStreamsData data = new PrioritizeStreamsData();
  PrioritizeStreamsInner step = new PrioritizeStreamsInner( stepMockHelper );
  try {
    step.processRow( meta, data );
  } catch ( NullPointerException e ) {
    fail( "NullPointerException detecded, seems that RowMetaInterface was not set for RowSet you are attempting"
        + "to read from." );
  }
  // NOTE(review): Assert.assertEquals takes (expected, actual); the last two arguments
  // here are swapped, which only affects the failure message, not the verdict.
  Assert.assertEquals( "First waiting for row set is 'high'", data.currentRowSet.getClass(), SingleRowRowSet.class );
}
/**
 * Converts column {@code i} (zero-based; JDBC getters are 1-based, hence {@code i + 1})
 * of the current result-set row into the Java object matching the Kettle value type
 * described by {@code val}. A SQL NULL is always returned as Java {@code null}.
 *
 * @param rs  result set positioned on the row to read
 * @param val metadata describing the expected Kettle type of the column
 * @param i   zero-based column index
 * @return the converted column value, or null for SQL NULL
 * @throws KettleDatabaseException if the JDBC driver reports an error while reading
 */
@Override
public Object getValueFromResultSet( ResultSet rs, ValueMetaInterface val, int i ) throws KettleDatabaseException {
  Object data = null;
  try {
    switch ( val.getType() ) {
      case ValueMetaInterface.TYPE_BOOLEAN:
        data = Boolean.valueOf( rs.getBoolean( i + 1 ) );
        break;
      case ValueMetaInterface.TYPE_NUMBER:
        data = new Double( rs.getDouble( i + 1 ) );
        break;
      case ValueMetaInterface.TYPE_BIGNUMBER:
        data = rs.getBigDecimal( i + 1 );
        break;
      case ValueMetaInterface.TYPE_INTEGER:
        data = Long.valueOf( rs.getLong( i + 1 ) );
        break;
      case ValueMetaInterface.TYPE_STRING:
        // Binary-storage strings are fetched as raw bytes rather than a decoded String.
        if ( val.isStorageBinaryString() ) {
          data = rs.getBytes( i + 1 );
        } else {
          data = rs.getString( i + 1 );
        }
        break;
      case ValueMetaInterface.TYPE_BINARY:
        // Prefer BLOB access when the dialect supports it; fall back to getBytes().
        if ( supportsGetBlob() ) {
          Blob blob = rs.getBlob( i + 1 );
          if ( blob != null ) {
            data = blob.getBytes( 1L, (int) blob.length() );
          } else {
            data = null;
          }
        } else {
          data = rs.getBytes( i + 1 );
        }
        break;
      case ValueMetaInterface.TYPE_TIMESTAMP:
      case ValueMetaInterface.TYPE_DATE:
        // Intentional fall-through: timestamps and dates share the resolution below,
        // driven by the column's original SQL type and the value's precision.
        if ( val.getOriginalColumnType() == java.sql.Types.TIME ) {
          // Neoview can not handle getDate / getTimestamp for a Time column
          data = rs.getTime( i + 1 );
          break; // Time is a subclass of java.util.Date, the default date
                 // will be 1970-01-01
        } else if ( val.getPrecision() != 1 && supportsTimeStampToDateConversion() ) {
          data = rs.getTimestamp( i + 1 );
          break; // Timestamp extends java.util.Date
        } else {
          data = rs.getDate( i + 1 );
          break;
        }
      default:
        break;
    }
    // SQL NULL must surface as Java null regardless of the getter's default value.
    if ( rs.wasNull() ) {
      data = null;
    }
  } catch ( SQLException e ) {
    throw new KettleDatabaseException( "Unable to get value '"
        + val.toStringMeta() + "' from database resultset, index " + i, e );
  }
  return data;
}
/**
 * Exercises getValueFromResultSet() across the supported Kettle value types using a
 * mocked JDBC ResultSet: date/time resolution, SQL NULL handling, the scalar types,
 * binary-string storage, BLOB reads, and SQLException wrapping.
 */
@Test
public void testGetValueFromResultSet() throws Exception {
  Object rtn = null;
  ResultSet resultSet = Mockito.mock( ResultSet.class );
  ResultSetMetaData metaData = Mockito.mock( ResultSetMetaData.class );
  Mockito.when( resultSet.getMetaData() ).thenReturn( metaData );
  // Columns 1-3 back the three date/time scenarios below (JDBC indexes are 1-based).
  Mockito.when( resultSet.getTimestamp( 1 ) ).thenReturn( new java.sql.Timestamp( 65535 ) );
  Mockito.when( resultSet.getTime( 2 ) ).thenReturn( new java.sql.Time( 1000 ) );
  Mockito.when( resultSet.getTimestamp( 3 ) ).thenReturn( new java.sql.Timestamp( 65535 ) ); // ValueMetaDate -> Timestamp
  ValueMetaTimestamp ts = new ValueMetaTimestamp( "FOO" );
  ts.setOriginalColumnType( java.sql.Types.TIMESTAMP );
  ValueMetaDate tm = new ValueMetaDate( "BAR" );
  tm.setOriginalColumnType( java.sql.Types.TIME );
  ValueMetaDate dt = new ValueMetaDate( "WIBBLE" );
  dt.setOriginalColumnType( java.sql.Types.DATE );
  rtn = nativeMeta.getValueFromResultSet( resultSet, ts, 0 );
  assertNotNull( rtn );
  assertEquals( "java.sql.Timestamp", rtn.getClass().getName() );
  rtn = nativeMeta.getValueFromResultSet( resultSet, tm, 1 );
  assertNotNull( rtn );
  assertEquals( "java.sql.Time", rtn.getClass().getName() );
  rtn = nativeMeta.getValueFromResultSet( resultSet, dt, 2 );
  assertNotNull( rtn );
  assertEquals( "java.sql.Timestamp", rtn.getClass().getName() );
  // SQL NULL must surface as Java null.
  Mockito.when( resultSet.wasNull() ).thenReturn( true );
  rtn = nativeMeta.getValueFromResultSet( resultSet, new ValueMetaString( "WOBBLE" ), 3 );
  assertNull( rtn );
  // Verify that getDate is not called, getTime is called once, and getTimestamp was called 2 times (once for TimeStamp, once for Date)
  Mockito.verify( resultSet, Mockito.times( 0 ) ).getDate( Mockito.anyInt() );
  Mockito.verify( resultSet, Mockito.times( 1 ) ).getTime( Mockito.anyInt() );
  Mockito.verify( resultSet, Mockito.times( 2 ) ).getTimestamp( Mockito.anyInt() );
  // Now that the date stuff is done, validate the behaviors of other aspects of getValueFromResultSet
  Mockito.when( resultSet.wasNull() ).thenReturn( false );
  Mockito.when( resultSet.getBoolean( 1 ) ).thenReturn( new Boolean( true ) );
  Mockito.when( resultSet.getDouble( 1 ) ).thenReturn( new Double( 15 ) );
  Mockito.when( resultSet.getBigDecimal( 1 ) ).thenReturn( new BigDecimal( "15" ) );
  Mockito.when( resultSet.getLong( 1 ) ).thenReturn( new Long( "15" ) );
  Mockito.when( resultSet.getString( 1 ) ).thenReturn( "ASTRING" );
  Mockito.when( resultSet.getBytes( 1 ) ).thenReturn( "ASTRING".getBytes() );
  Blob mockBlob = Mockito.mock( Blob.class );
  byte[] bytes = "FOO".getBytes();
  ByteArrayInputStream bais = new ByteArrayInputStream( bytes );
  Mockito.when( mockBlob.getBinaryStream() ).thenReturn( bais );
  Mockito.when( mockBlob.length() ).thenReturn( new Long( bytes.length ) );
  Mockito.when( mockBlob.getBytes( Mockito.anyLong(), Mockito.anyInt() ) ).thenReturn( bytes );
  Mockito.when( resultSet.getBlob( 1 ) ).thenReturn( mockBlob );
  // Scalar type conversions.
  rtn = nativeMeta.getValueFromResultSet( resultSet, new ValueMetaBoolean( "FOO" ), 0 );
  assertNotNull( rtn );
  assertTrue( rtn instanceof Boolean );
  rtn = nativeMeta.getValueFromResultSet( resultSet, new ValueMetaNumber( "FOO", 15, 5 ), 0 );
  assertNotNull( rtn );
  assertTrue( rtn instanceof Double );
  rtn = nativeMeta.getValueFromResultSet( resultSet, new ValueMetaBigNumber( "FOO", 15, 5 ), 0 );
  assertNotNull( rtn );
  assertTrue( rtn instanceof BigDecimal );
  rtn = nativeMeta.getValueFromResultSet( resultSet, new ValueMetaInteger( "FOO", 5, 0 ), 0 );
  assertNotNull( rtn );
  assertTrue( rtn instanceof Long );
  rtn = nativeMeta.getValueFromResultSet( resultSet, new ValueMetaString( "FOO", 25, 0 ), 0 );
  assertNotNull( rtn );
  assertTrue( rtn instanceof String );
  // Binary-storage strings come back as raw bytes.
  ValueMetaString binStr = new ValueMetaString( "FOO" );
  binStr.setStorageType( ValueMetaString.STORAGE_TYPE_BINARY_STRING );
  rtn = nativeMeta.getValueFromResultSet( resultSet, binStr, 0 );
  assertNotNull( rtn );
  assertTrue( rtn instanceof byte[] );
  rtn = nativeMeta.getValueFromResultSet( resultSet, new ValueMetaBinary( "FOO", 150, 0 ), 0 );
  assertNotNull( rtn );
  assertTrue( rtn instanceof byte[] );
  // SQLExceptions from the driver must be wrapped in KettleDatabaseException.
  try {
    Mockito.when( resultSet.getBoolean( 15 ) ).thenThrow( new SQLException( "Expected Exception Here" ) );
    rtn = nativeMeta.getValueFromResultSet( resultSet, new ValueMetaBoolean( "FOO" ), 14 );
    fail( "Should not get here" );
  } catch ( Exception someException ) {
    assertTrue( someException instanceof KettleDatabaseException );
  }
}
/**
 * Returns the configured capacity at {@code tick}, folding the requested time into the
 * repeating period before delegating to the non-periodic lookup in the superclass.
 */
public Resource getCapacityAtTime(long tick) {
  final long timeWithinPeriod = tick % timePeriod;
  return super.getCapacityAtTime(timeWithinPeriod);
}
/**
 * Merges a periodic allocation (period 10) with a non-periodic one over [2, 25) using
 * the 'add' operator, then checks capacities at times covered by only one source and at
 * t=12/13 where both overlap.
 */
@Test
public void testMixPeriodicAndNonPeriodic() throws PlanningException {
  // Periodic base allocation built from (alloc, timeSteps); repeats with period 10.
  int[] alloc = { 2, 5, 0 };
  long[] timeSteps = { 1L, 2L, 3L };
  RLESparseResourceAllocation tempPeriodic = ReservationSystemTestUtil
      .generateRLESparseResourceAllocation(alloc, timeSteps);
  PeriodicRLESparseResourceAllocation periodic =
      new PeriodicRLESparseResourceAllocation(tempPeriodic, 10L);
  // Non-periodic allocation active only around t=12..14.
  int[] alloc2 = { 10, 10, 0 };
  long[] timeSteps2 = { 12L, 13L, 14L };
  RLESparseResourceAllocation nonPeriodic = ReservationSystemTestUtil
      .generateRLESparseResourceAllocation(alloc2, timeSteps2);
  RLESparseResourceAllocation merged =
      RLESparseResourceAllocation.merge(nonPeriodic.getResourceCalculator(),
          Resource.newInstance(100 * 1024, 100), periodic, nonPeriodic,
          RLESparseResourceAllocation.RLEOperator.add, 2, 25);
  Assert.assertEquals(Resource.newInstance(5, 5), merged.getCapacityAtTime(2L));
  Assert.assertEquals(Resource.newInstance(0, 0), merged.getCapacityAtTime(3L));
  Assert.assertEquals(Resource.newInstance(2, 2), merged.getCapacityAtTime(11L));
  // Overlap: periodic contribution plus the non-periodic 10.
  Assert.assertEquals(Resource.newInstance(15, 15), merged.getCapacityAtTime(12L));
  Assert.assertEquals(Resource.newInstance(10, 10), merged.getCapacityAtTime(13L));
  Assert.assertEquals(Resource.newInstance(0, 0), merged.getCapacityAtTime(14L));
  // Second repetition of the periodic pattern only.
  Assert.assertEquals(Resource.newInstance(2, 2), merged.getCapacityAtTime(21L));
  Assert.assertEquals(Resource.newInstance(5, 5), merged.getCapacityAtTime(22L));
  Assert.assertEquals(Resource.newInstance(0, 0), merged.getCapacityAtTime(23L));
}
/**
 * Hash code combining the superclass hash with the file. Numerically identical to
 * {@code Objects.hash(super.hashCode(), file)}: the Objects.hash/Arrays.hashCode
 * 31-based accumulation written out explicitly.
 */
@Override
public int hashCode() {
  return 31 * (31 + super.hashCode()) + Objects.hashCode(file);
}
/**
 * Verifies the hashCode contract of InProjectDuplicate: equal (file, textBlock) pairs
 * hash the same, while a different file — with the same or a different TextBlock —
 * produces a different hash.
 */
@Test
public void hashcode_depends_on_file_and_TextBlock() {
  TextBlock textBlock = new TextBlock(1, 2);
  assertThat(new InProjectDuplicate(FILE_1, textBlock)).hasSameHashCodeAs(new InProjectDuplicate(FILE_1, textBlock));
  assertThat(new InProjectDuplicate(FILE_1, textBlock).hashCode()).isNotEqualTo(new InProjectDuplicate(FILE_2, textBlock).hashCode());
  assertThat(new InProjectDuplicate(FILE_1, textBlock).hashCode()).isNotEqualTo(new InProjectDuplicate(FILE_2, new TextBlock(1, 1)).hashCode());
}
/**
 * Validates that the given property is present and holds a strictly positive integer.
 *
 * @param props     algorithm properties to inspect
 * @param propKey   key of the property to validate
 * @param algorithm algorithm reported in any validation failure
 * @throws AlgorithmInitializationException if the property is missing, not an integer,
 *         or not positive
 */
public static void checkPositiveInteger(final Properties props, final String propKey, final MaskAlgorithm<?, ?> algorithm) {
    checkRequired(props, propKey, algorithm);
    final int parsedValue;
    try {
        parsedValue = Integer.parseInt(props.getProperty(propKey));
    } catch (final NumberFormatException ex) {
        throw new AlgorithmInitializationException(algorithm, "%s must be a valid integer number", propKey);
    }
    ShardingSpherePreconditions.checkState(parsedValue > 0, () -> new AlgorithmInitializationException(algorithm, "%s must be a positive integer.", propKey));
}
/**
 * A non-integer numeric string ("123.0") must be rejected with an
 * AlgorithmInitializationException.
 */
@Test
void assertCheckPositiveIntegerFailedWithNotInteger() {
    Properties props = PropertiesBuilder.build(new Property("key", "123.0"));
    assertThrows(AlgorithmInitializationException.class,
        () -> MaskAlgorithmPropertiesChecker.checkPositiveInteger(props, "key", mock(MaskAlgorithm.class)));
}
/**
 * Issues a PUT to /posts/{id} with the given post serialized as a JSON body. Completes
 * with the bodiless entity on a 204 response; any other status becomes an error signal.
 */
Mono<ResponseEntity<Void>> update(UUID id, Post post) {
    return client.put()
            .uri(uriBuilder -> uriBuilder.path("/posts/{id}").build(id))
            .contentType(MediaType.APPLICATION_JSON)
            .bodyValue(post)
            .exchangeToMono(response -> response.statusCode().equals(HttpStatus.NO_CONTENT)
                    ? response.toBodilessEntity()
                    : response.createError());
}
/**
 * Stubs a 204 response for PUT /posts/{id} and verifies that update() completes with
 * that status and that the outgoing request carried the JSON-serialized post body.
 */
@SneakyThrows
@Test
public void testUpdatePost() {
    var id = UUID.randomUUID();
    var data = new Post(null, "title1", "content1", Status.DRAFT, null);
    stubFor(put("/posts/" + id)
        .willReturn(
            aResponse()
                .withStatus(204)
        )
    );
    postClient.update(id, data)
        .as(StepVerifier::create)
        .consumeNextWith(
            entity -> assertThat(entity.getStatusCode().value()).isEqualTo(204)
        )
        .verifyComplete();
    // The request must be JSON with the serialized post as its body.
    verify(putRequestedFor(urlEqualTo("/posts/" + id))
        .withHeader("Content-Type", equalTo("application/json"))
        .withRequestBody(equalToJson(Json.write(data)))
    );
}
/**
 * Creates a new schema {@link Builder} starting from an empty column list.
 */
public static Builder builder() {
  return new Builder(ImmutableList.of());
}
/**
 * Adding the same value column twice to a schema builder must fail with a KsqlException
 * naming the duplicate column.
 */
@Test
public void shouldThrowOnDuplicateValueColumnName() {
  // Given:
  final Builder builder = LogicalSchema.builder()
      .valueColumn(VALUE, BIGINT);

  // When:
  final Exception e = assertThrows(
      KsqlException.class,
      () -> builder.valueColumn(VALUE, BIGINT)
  );

  // Then:
  assertThat(e.getMessage(), containsString("Duplicate value columns found in schema: `value` BIGINT"));
}
/**
 * Computes the index of {@code child} among same-class siblings inside {@code parent},
 * rather than its raw position: siblings of a different class are skipped entirely,
 * while same-class siblings with a different view id still advance the counter.
 *
 * @param parent the child's parent; must be a ViewGroup
 * @param child  the view to locate
 * @return the filtered index, or -1 when parent is not a ViewGroup, the child is not
 *         found, or any exception occurs (logged, never propagated)
 */
public static int getChildIndex(ViewParent parent, View child) {
  try {
    if (!(parent instanceof ViewGroup)) {
      return -1;
    }
    ViewGroup viewParent = (ViewGroup) parent;
    final String childIdName = SAViewUtils.getViewId(child);
    String childClassName = SnapCache.getInstance().getCanonicalName(child.getClass());
    int index = 0;
    for (int i = 0; i < viewParent.getChildCount(); i++) {
      View brother = viewParent.getChildAt(i);
      // Siblings of a different class never affect the index.
      if (!Pathfinder.hasClassName(brother, childClassName)) {
        continue;
      }
      String brotherIdName = SAViewUtils.getViewId(brother);
      // Same class but different id: counts toward the index, cannot be the child.
      if (null != childIdName && !childIdName.equals(brotherIdName)) {
        index++;
        continue;
      }
      if (brother == child) {
        return index;
      }
      index++;
    }
    return -1;
  } catch (Exception e) {
    SALog.printStackTrace(e);
    return -1;
  }
}
/**
 * Two same-class children of a LinearLayout should report indexes 0 and 1 respectively.
 */
@Test
public void getChildIndex() {
  LinearLayout linearLayout = new LinearLayout(mApplication);
  TextView textView1 = new TextView(mApplication);
  textView1.setText("child1");
  TextView textView2 = new TextView(mApplication);
  textView2.setText("child2");
  linearLayout.addView(textView1);
  linearLayout.addView(textView2);
  Assert.assertEquals(0, SAViewUtils.getChildIndex(linearLayout, textView1));
  Assert.assertEquals(1, SAViewUtils.getChildIndex(linearLayout, textView2));
}
/**
 * Recursively deletes {@code path}. For a directory, children are removed first; the
 * directory itself is deleted only when every child was removed successfully.
 *
 * <p>NOTE(review): deletion short-circuits after the first failed child (remaining
 * siblings are left untouched) — preserved here as the existing contract.
 *
 * @param path file or directory to remove
 * @return true when everything was deleted; false when the path did not exist or any
 *         deletion failed
 */
public static boolean delete(File path) {
  if (!path.exists()) {
    return false;
  }
  boolean deletedAll = true;
  if (path.isDirectory()) {
    File[] children = path.listFiles();
    if (children != null) {
      for (File child : children) {
        // '&&' short-circuits: after the first failure no further children are visited.
        deletedAll = deletedAll && delete(child);
      }
    }
  }
  return deletedAll && path.delete();
}
/**
 * Deleting a directory must also remove the files it contains.
 */
@Test
public void testDeleteDir() throws IOException {
  File dir = new File(TemporaryFolderFinder.resolve("testdir"));
  dir.mkdirs();
  File path = File.createTempFile("test", "suffix", dir);
  Utils.delete(dir);
  assertThat(dir.exists(), is(false));
  assertThat(path.exists(), is(false));
}
/**
 * Returns whether the given application is in the orchestrator's set of currently
 * suspended applications.
 */
public boolean isSuspended(ApplicationId application) {
  return orchestrator.getAllSuspendedApplications().contains(application);
}
/**
 * An application is reported suspended only after the orchestrator suspends it.
 */
@Test
public void testSuspension() {
  deployApp(testApp);
  assertFalse(applicationRepository.isSuspended(applicationId()));
  orchestrator.suspend(applicationId());
  assertTrue(applicationRepository.isSuspended(applicationId()));
}
/**
 * Submits an intent for installation: after permission and null checks, the intent is
 * wrapped as pending IntentData and handed to the store for asynchronous processing.
 */
@Override
public void submit(Intent intent) {
  checkPermission(INTENT_WRITE);
  checkNotNull(intent, INTENT_NULL);
  IntentData data = IntentData.submit(intent);
  store.addPending(data);
}
/**
 * Submitting an intent with no registered installer should raise an INSTALL_REQ event
 * followed by a CORRUPT event, leaving the service in a consistent state.
 */
@Test
public void intentWithoutInstaller() {
  MockIntent intent = new MockIntent(MockIntent.nextId());
  listener.setLatch(1, Type.INSTALL_REQ);
  listener.setLatch(1, Type.CORRUPT);
  service.submit(intent);
  listener.await(Type.INSTALL_REQ);
  listener.await(Type.CORRUPT);
  verifyState();
}
/**
 * Shuts down the naming service: closes the local service-info holder and the client
 * proxy, then removes the instance-change subscriber from the notify center.
 */
@Override
public void shutDown() throws NacosException {
  serviceInfoHolder.shutdown();
  clientProxy.shutdown();
  NotifyCenter.deregisterSubscriber(changeNotifier);
}
/**
 * Constructing the service from a bare server list must fall back to the default
 * namespace; reflection is used because the field has no public accessor.
 */
@Test
void testConstructorWithServerList() throws NacosException, NoSuchFieldException, IllegalAccessException {
  NacosNamingService namingService = new NacosNamingService("localhost");
  try {
    Field namespaceField = NacosNamingService.class.getDeclaredField("namespace");
    namespaceField.setAccessible(true);
    String namespace = (String) namespaceField.get(namingService);
    assertEquals(UtilAndComs.DEFAULT_NAMESPACE_ID, namespace);
  } finally {
    // Always release resources created by the constructor.
    namingService.shutDown();
  }
}
@Override public InterpreterResult interpret(String st, InterpreterContext context) throws InterpreterException { curIntpContext = context; // redirect java stdout/stdout to interpreter output. Because pyspark may call java code. PrintStream originalStdout = System.out; PrintStream originalStderr = System.err; try { System.setOut(new PrintStream(context.out)); System.setErr(new PrintStream(context.out)); Utils.printDeprecateMessage(sparkInterpreter.getSparkVersion(), context, properties); return super.interpret(st, context); } finally { System.setOut(originalStdout); System.setErr(originalStderr); } }
/**
 * Configures PySpark with a non-existent python binary and verifies that interpreting
 * fails with an InterpreterException whose stack trace points at the missing executable.
 */
@Override
@Test
public void testFailtoLaunchPythonProcess() throws InterpreterException {
  tearDown();
  intpGroup = new InterpreterGroup();
  Properties properties = new Properties();
  properties.setProperty(SparkStringConstants.APP_NAME_PROP_NAME, "Zeppelin Test");
  // Point pyspark at a binary that cannot exist so the process launch fails.
  properties.setProperty("spark.pyspark.python", "invalid_python");
  properties.setProperty("zeppelin.python.useIPython", "false");
  properties.setProperty("zeppelin.python.gatewayserver_address", "127.0.0.1");
  properties.setProperty("zeppelin.spark.maxResult", "3");
  interpreter = new LazyOpenInterpreter(new PySparkInterpreter(properties));
  interpreter.setInterpreterGroup(intpGroup);
  Interpreter sparkInterpreter = new LazyOpenInterpreter(new SparkInterpreter(properties));
  sparkInterpreter.setInterpreterGroup(intpGroup);
  LazyOpenInterpreter iPySparkInterpreter =
      new LazyOpenInterpreter(new IPySparkInterpreter(properties));
  iPySparkInterpreter.setInterpreterGroup(intpGroup);
  intpGroup.put("note", new LinkedList<Interpreter>());
  intpGroup.get("note").add(interpreter);
  intpGroup.get("note").add(sparkInterpreter);
  intpGroup.get("note").add(iPySparkInterpreter);
  InterpreterContext.set(getInterpreterContext());
  try {
    interpreter.interpret("1+1", getInterpreterContext());
    fail("Should fail to open PySparkInterpreter");
  } catch (InterpreterException e) {
    String stacktrace = ExceptionUtils.getStackTrace(e);
    assertTrue(stacktrace.contains("No such file or directory"), stacktrace);
  }
}
/**
 * Returns the requested optional feature of this distribution configuration. Only two
 * features are provided: Index (served by this instance) and DistributionLogging
 * (a fresh GoogleStorageLoggingFeature); any other type yields null.
 */
@Override
@SuppressWarnings("unchecked")
public <T> T getFeature(final Class<T> type, final Distribution.Method method) {
    if(type == Index.class) {
        return (T) this;
    }
    return type == DistributionLogging.class ? (T) new GoogleStorageLoggingFeature(session) : null;
}
/**
 * The website distribution configuration must expose Index and DistributionLogging
 * features but no Cname feature.
 */
@Test
public void testFeatures() {
    final DistributionConfiguration d = new GoogleStorageWebsiteDistributionConfiguration(session);
    assertNotNull(d.getFeature(Index.class, Distribution.WEBSITE));
    assertNotNull(d.getFeature(DistributionLogging.class, Distribution.WEBSITE));
    assertNull(d.getFeature(Cname.class, Distribution.WEBSITE));
}
/**
 * Creates a {@link UriTemplate} for the given template string using the supplied
 * charset for encoding.
 * NOTE(review): the hard-coded {@code true} flag's meaning is not visible here —
 * presumably an "encode" toggle; confirm against the UriTemplate constructor.
 */
public static UriTemplate create(String template, Charset charset) {
  return new UriTemplate(template, true, charset);
}
/**
 * Expanding a template that already contains a percent-encoded literal must not
 * double-encode it.
 */
@Test
void skipAlreadyEncodedLiteral() {
  String template = "https://www.example.com/A%20Team";
  UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8);
  String expandedTemplate = uriTemplate.expand(Collections.emptyMap());
  assertThat(expandedTemplate).isEqualToIgnoringCase("https://www.example.com/A%20Team");
}
/**
 * Resolves the default host name for the given network interface.
 *
 * <p>A null or "default" interface short-circuits to the cached local hostname, and a
 * "default" nameserver is treated the same as no nameserver. Otherwise the first entry
 * of the resolved host list for the interface is returned.
 *
 * @param strInterface          interface name, or null/"default" for the local hostname
 * @param nameserver            nameserver to query, null or "default" for none
 * @param tryfallbackResolution whether fallback resolution may be attempted
 * @throws UnknownHostException if host resolution fails
 */
public static String getDefaultHost(@Nullable String strInterface,
    @Nullable String nameserver, boolean tryfallbackResolution)
    throws UnknownHostException {
  if (strInterface == null || "default".equals(strInterface)) {
    return cachedHostname;
  }
  // "default".equals(null) is false, so no explicit null guard is needed here.
  if ("default".equals(nameserver)) {
    nameserver = null;
  }
  String[] hosts = getHosts(strInterface, nameserver, tryfallbackResolution);
  return hosts[0];
}
/**
 * Passing a null nameserver must behave exactly like the overload without one.
 */
@Test
public void testNullDnsServer() throws Exception {
  String host = DNS.getDefaultHost(getLoopbackInterface(), null);
  Assertions.assertThat(host)
      .isEqualTo(DNS.getDefaultHost(getLoopbackInterface()));
}
/**
 * Returns the event key, lazily deserializing {@code keyData} on first access when a
 * serialization service is available, and caching the result in {@code key}.
 */
public K getKey() {
  if (key != null || serializationService == null) {
    return key;
  }
  key = serializationService.toObject(keyData);
  return key;
}
/**
 * getKey() must deserialize the stored key data back into the original key object.
 */
@Test
public void testGetKey_withDataKey() {
  assertEquals("key", dataEvent.getKey());
}
/**
 * Builds per-EIP statistics for every route in {@code routeMap}, grouped by the EIP's
 * index within the route, and attaches the result to the corresponding entry of
 * {@code routeStatisticMap}.
 */
protected void generateEipStatistics() {
  // generate the statistics for each EIP within a route
  for (Route route : routeMap.values()) {
    Map<Integer, List<EipStatistic>> eipStatisticMap = new HashMap<>();
    Components components = route.getComponents();
    Map<String, List<EipAttribute>> eipAttributesMap = components.getAttributeMap();
    eipAttributesMap.forEach((key, eipAttributes) -> {
      // 'rest' is a route attribute, not an EIP, so it doesn't make sense to include it
      if (!key.equals(REST)) {
        eipAttributes.forEach(eipAttribute -> {
          EipStatistic eipStatistic = new EipStatistic();
          eipStatistic.setId(key);
          // An EIP counts as tested once it has processed at least one exchange.
          eipStatistic.setTested(eipAttribute.getExchangesTotal() > 0);
          eipStatistic.setTotalProcessingTime(eipAttribute.getTotalProcessingTime());
          eipStatistic.setProperties(eipAttribute.getProperties());
          // Recursively collect statistics for each child EIP, keyed by child key.
          eipAttribute.getChildEipMap().forEach((childKey, childEipList) -> {
            childEipList.forEach(childEip -> {
              ChildEipStatistic childEipStatistic = new ChildEipStatistic();
              childEipStatistic.setId(childEip.getId());
              generateChildEipStatistics(childEip, childEipStatistic);
              eipStatistic.getChildEipStatisticMap().put(childKey, childEipStatistic);
            });
          });
          // Group statistics by the EIP's index; several EIPs can share one index.
          List<EipStatistic> eipStatisticList;
          if (eipStatisticMap.containsKey(eipAttribute.getIndex())) {
            eipStatisticList = eipStatisticMap.get(eipAttribute.getIndex());
          } else {
            eipStatisticList = new ArrayList<>();
          }
          eipStatisticList.add(eipStatistic);
          eipStatisticMap.put(eipAttribute.getIndex(), eipStatisticList);
        });
      }
    });
    RouteStatistic routeStatistic = routeStatisticMap.get(route.getId());
    routeStatistic.setEipStatisticMap(eipStatisticMap);
    routeStatisticMap.put(route.getId(), routeStatistic);
  }
}
/**
 * End-to-end check of generateEipStatistics(): after parsing test results and generating
 * route statistics, the greetings route must carry three EIP statistics.
 */
@Test
public void testGenerateEipStatistics() throws IllegalAccessException, IOException {
  Mockito
      .doReturn(outputPath().getPath())
      .when(processor).writeReportIndex(any(String.class), any(File.class));
  // Reach into the processor's private map to observe the statistics it builds.
  @SuppressWarnings("unchecked")
  Map<String, RouteStatistic> routeStatisticMap = (Map<String, RouteStatistic>)
      FieldUtils.readDeclaredField(processor, "routeStatisticMap", true);
  assertAll(
      () -> assertNotNull(routeStatisticMap),
      () -> assertTrue(routeStatisticMap.isEmpty()));
  processor.parseAllTestResults(xmlPath());
  processor.gatherBestRouteCoverages();
  processor.generateRouteStatistics("test project", outputPath());
  processor.generateEipStatistics();
  assertAll(
      () -> assertNotNull(routeStatisticMap),
      () -> assertFalse(routeStatisticMap.isEmpty()));
  RouteStatistic result = routeStatisticMap.get(GREETINGS_ROUTE);
  assertAll(
      () -> assertNotNull(result),
      () -> assertNotNull(result.getEipStatisticMap()),
      () -> assertEquals(3, result.getEipStatisticMap().size()));
}
/**
 * Returns a FileSystem for the given key, creating and caching a new instance on a miss
 * and incrementing the reference count on a hit. The caller always receives a wrapper
 * whose close() participates in the cache's reference counting, never the raw instance.
 */
public FileSystem get(Key key) {
  // The whole lookup/insert must be atomic with respect to the ref count.
  synchronized (mLock) {
    Value value = mCacheMap.get(key);
    FileSystem fs;
    if (value == null) {
      // On cache miss, create and insert a new FileSystem instance,
      fs = FileSystem.Factory.create(FileSystemContext.create(key.mSubject, key.mConf));
      mCacheMap.put(key, new Value(fs, 1));
    } else {
      fs = value.mFileSystem;
      value.mRefCount.getAndIncrement();
    }
    return new InstanceCachingFileSystem(fs, key);
  }
}
/**
 * Closing one cached FileSystem wrapper twice must not close a sibling wrapper that
 * shares the same underlying instance.
 */
@Test
public void doubleClose() throws IOException {
  Key key1 = createTestFSKey("user1");
  FileSystem fs1 = mFileSystemCache.get(key1);
  FileSystem fs2 = mFileSystemCache.get(key1);
  // Both wrappers must delegate to the same cached instance.
  assertSame(getDelegatedFileSystem(fs1), getDelegatedFileSystem(fs2));
  fs1.close();
  assertTrue(fs1.isClosed());
  assertFalse(fs2.isClosed());
  // A second close of the same wrapper must be a harmless no-op for the sibling.
  fs1.close();
  assertTrue(fs1.isClosed());
  assertFalse(fs2.isClosed());
}
/**
 * Sets the initializer of the SCORE_DISTRIBUTIONS variable inside {@code toPopulate}:
 * a null literal when no score distributions were given, otherwise an
 * {@code Arrays.asList(...)} call whose arguments are the generated
 * KiePMMLScoreDistribution expressions (one per input, named scoreDistribution_0, _1, …).
 */
static void populateEvaluateNodeWithScoreDistributions(final BlockStmt toPopulate, final List<ScoreDistribution> scoreDistributionsParam) {
    final Expression scoreDistributionsExpression;
    if (scoreDistributionsParam == null) {
        scoreDistributionsExpression = new NullLiteralExpr();
    } else {
        final NodeList<Expression> asListArguments = new NodeList<>();
        for (int i = 0; i < scoreDistributionsParam.size(); i++) {
            // Positional name matches the original counter-based naming scheme.
            asListArguments.add(getKiePMMLScoreDistribution(String.format("scoreDistribution_%s", i), scoreDistributionsParam.get(i)));
        }
        final MethodCallExpr asListCall = new MethodCallExpr();
        asListCall.setScope(new NameExpr(Arrays.class.getSimpleName()));
        asListCall.setName("asList");
        asListCall.setArguments(asListArguments);
        scoreDistributionsExpression = asListCall;
    }
    CommonCodegenUtils.setVariableDeclaratorValue(toPopulate, SCORE_DISTRIBUTIONS, scoreDistributionsExpression);
}
/**
 * populateEvaluateNodeWithScoreDistributions must initialize the SCORE_DISTRIBUTIONS
 * variable declarator, both with and without probability values present.
 */
@Test
void populateEvaluateNodeWithScoreDistributions() {
    final BlockStmt toPopulate = new BlockStmt();
    final VariableDeclarator variableDeclarator = new VariableDeclarator();
    variableDeclarator.setType("List");
    variableDeclarator.setName(SCORE_DISTRIBUTIONS);
    toPopulate.addStatement(new VariableDeclarationExpr(variableDeclarator));
    // No initializer until the factory method runs.
    assertThat(variableDeclarator.getInitializer()).isNotPresent();
    // Without probability
    List<ScoreDistribution> scoreDistributions = getRandomPMMLScoreDistributions(false);
    KiePMMLNodeFactory.populateEvaluateNodeWithScoreDistributions(toPopulate, scoreDistributions);
    commonVerifyEvaluateNodeWithScoreDistributions(variableDeclarator, scoreDistributions);
    // With probability
    scoreDistributions = getRandomPMMLScoreDistributions(true);
    KiePMMLNodeFactory.populateEvaluateNodeWithScoreDistributions(toPopulate, scoreDistributions);
    commonVerifyEvaluateNodeWithScoreDistributions(variableDeclarator, scoreDistributions);
}
/**
 * Produces the next poll result for coordinator discovery: nothing once a coordinator
 * is known, a FindCoordinator request when the backoff allows one, or the remaining
 * backoff time otherwise.
 */
@Override
public NetworkClientDelegate.PollResult poll(final long currentTimeMs) {
    // A known coordinator means there is nothing to request.
    if (this.coordinator != null) {
        return EMPTY;
    }
    if (!coordinatorRequestState.canSendRequest(currentTimeMs)) {
        // Still backing off from a previous attempt; report the time left.
        return new NetworkClientDelegate.PollResult(coordinatorRequestState.remainingBackoffMs(currentTimeMs));
    }
    return new NetworkClientDelegate.PollResult(makeFindCoordinatorRequest(currentTimeMs));
}
/**
 * After a retriable find-coordinator failure, no new request may be sent until the
 * retry backoff has fully elapsed.
 */
@Test
public void testBackoffAfterRetriableFailure() {
    CoordinatorRequestManager coordinatorManager = setupCoordinatorManager(GROUP_ID);
    expectFindCoordinatorRequest(coordinatorManager, Errors.COORDINATOR_LOAD_IN_PROGRESS);
    verifyNoInteractions(backgroundEventHandler);
    // One millisecond before the backoff expires: still no request.
    time.sleep(RETRY_BACKOFF_MS - 1);
    assertEquals(Collections.emptyList(), coordinatorManager.poll(time.milliseconds()).unsentRequests);
    // Backoff elapsed: the retry goes out and succeeds.
    time.sleep(1);
    expectFindCoordinatorRequest(coordinatorManager, Errors.NONE);
}
/**
 * Determines whether two local assignments are equivalent: same topology id, same set
 * of executors (order-insensitive), and matching worker resources — either both unset,
 * or both set and equal under the custom resource comparison.
 *
 * @return true for two nulls or two equivalent assignments; false otherwise
 */
public static boolean areLocalAssignmentsEquivalent(LocalAssignment first, LocalAssignment second) {
    // Nulls: equivalent only when both are null.
    if (first == null || second == null) {
        return first == null && second == null;
    }
    if (!first.get_topology_id().equals(second.get_topology_id())) {
        return false;
    }
    // Executor lists are compared as sets, so ordering differences do not matter.
    if (!new HashSet<>(first.get_executors()).equals(new HashSet<>(second.get_executors()))) {
        return false;
    }
    boolean firstHasResources = first.is_set_resources();
    boolean secondHasResources = second.is_set_resources();
    if (!firstHasResources && !secondHasResources) {
        return true;
    }
    // Exactly one side having resources is a mismatch.
    if (firstHasResources && secondHasResources) {
        return customWorkerResourcesEquality(first.get_resources(), second.get_resources());
    }
    return false;
}
/**
 * Exercises areLocalAssignmentsEquivalent: nulls, identity, executor-order
 * insensitivity, resource differences, and zero-valued additional resources.
 */
@Test
public void testEquivalent() {
    LocalAssignment a = mkLocalAssignment("A", mkExecutorInfoList(1, 2, 3, 4, 5),
        mkWorkerResources(100.0, 100.0, 100.0));
    LocalAssignment aResized = mkLocalAssignment("A", mkExecutorInfoList(1, 2, 3, 4, 5),
        mkWorkerResources(100.0, 200.0, 100.0));
    LocalAssignment b = mkLocalAssignment("B", mkExecutorInfoList(1, 2, 3, 4, 5, 6),
        mkWorkerResources(100.0, 100.0, 100.0));
    LocalAssignment bReordered = mkLocalAssignment("B", mkExecutorInfoList(6, 5, 4, 3, 2, 1),
        mkWorkerResources(100.0, 100.0, 100.0));
    LocalAssignment c = mkLocalAssignment("C", mkExecutorInfoList(188, 261), mkWorkerResources(400.0, 10000.0, 0.0));
    // A zero-valued additional resource entry should not break equivalence.
    WorkerResources workerResources = mkWorkerResources(400.0, 10000.0, 0.0);
    Map<String, Double> additionalResources = workerResources.get_resources();
    if (additionalResources == null) additionalResources = new HashMap<>();
    additionalResources.put("network.resource.units", 0.0);
    workerResources.set_resources(additionalResources);
    LocalAssignment cReordered = mkLocalAssignment("C", mkExecutorInfoList(188, 261), workerResources);
    assertTrue(EquivalenceUtils.areLocalAssignmentsEquivalent(c, cReordered));
    assertTrue(EquivalenceUtils.areLocalAssignmentsEquivalent(null, null));
    assertTrue(EquivalenceUtils.areLocalAssignmentsEquivalent(a, a));
    assertTrue(EquivalenceUtils.areLocalAssignmentsEquivalent(b, bReordered));
    assertTrue(EquivalenceUtils.areLocalAssignmentsEquivalent(bReordered, b));
    assertFalse(EquivalenceUtils.areLocalAssignmentsEquivalent(a, aResized));
    assertFalse(EquivalenceUtils.areLocalAssignmentsEquivalent(aResized, a));
    assertFalse(EquivalenceUtils.areLocalAssignmentsEquivalent(a, null));
    assertFalse(EquivalenceUtils.areLocalAssignmentsEquivalent(null, b));
    assertFalse(EquivalenceUtils.areLocalAssignmentsEquivalent(a, b));
}
/**
 * Changes the phone number associated with {@code account} to {@code targetNumber},
 * optionally installing new PNI key material (identity key, signed and last-resort
 * pre-keys, registration ids) for each device.
 *
 * <p>Both numbers are locked for the duration of the update to serialize with any
 * concurrent change touching either one. Single-use pre-keys for both the old and new
 * phone-number identifiers are purged as part of the change.
 *
 * @return the updated account (or the original account for a no-op number "change")
 * @throws IllegalArgumentException  if the number is unchanged but a PNI identity key
 *                                   was supplied (callers should use updatePniKeys)
 * @throws MismatchedDevicesException if the supplied per-device key material does not
 *                                   match the account's devices
 */
public Account changeNumber(final Account account,
    final String targetNumber,
    @Nullable final IdentityKey pniIdentityKey,
    @Nullable final Map<Byte, ECSignedPreKey> pniSignedPreKeys,
    @Nullable final Map<Byte, KEMSignedPreKey> pniPqLastResortPreKeys,
    @Nullable final Map<Byte, Integer> pniRegistrationIds)
    throws InterruptedException, MismatchedDevicesException {

  final String originalNumber = account.getNumber();
  final UUID originalPhoneNumberIdentifier = account.getPhoneNumberIdentifier();

  // A no-op number "change" is only legal when no new PNI key material was supplied.
  if (originalNumber.equals(targetNumber)) {
    if (pniIdentityKey != null) {
      throw new IllegalArgumentException("change number must supply a changed phone number; otherwise use updatePniKeys");
    }
    return account;
  }

  validateDevices(account, pniSignedPreKeys, pniPqLastResortPreKeys, pniRegistrationIds);

  final AtomicReference<Account> updatedAccount = new AtomicReference<>();

  // Lock both numbers to serialize with concurrent changes touching either one.
  accountLockManager.withLock(List.of(account.getNumber(), targetNumber), () -> {
    redisDelete(account);

    // There are three possible states for accounts associated with the target phone number:
    //
    // 1. An account exists with the target number; the caller has proved ownership of the number, so delete the
    //    account with the target number. This will leave a "deleted account" record for the deleted account mapping
    //    the UUID of the deleted account to the target phone number. We'll then overwrite that so it points to the
    //    original number to facilitate switching back and forth between numbers.
    // 2. No account with the target number exists, but one has recently been deleted. In that case, add a "deleted
    //    account" record that maps the ACI of the recently-deleted account to the now-abandoned original phone number
    //    of the account changing its number (which facilitates ACI consistency in cases that a party is switching
    //    back and forth between numbers).
    // 3. No account with the target number exists at all, in which case no additional action is needed.
    final Optional<UUID> recentlyDeletedAci = accounts.findRecentlyDeletedAccountIdentifier(targetNumber);
    final Optional<Account> maybeExistingAccount = getByE164(targetNumber);
    final Optional<UUID> maybeDisplacedUuid;

    if (maybeExistingAccount.isPresent()) {
      delete(maybeExistingAccount.get()).join();
      maybeDisplacedUuid = maybeExistingAccount.map(Account::getUuid);
    } else {
      maybeDisplacedUuid = recentlyDeletedAci;
    }

    final UUID uuid = account.getUuid();
    final UUID phoneNumberIdentifier = phoneNumberIdentifiers.getPhoneNumberIdentifier(targetNumber);

    // Single-use pre-keys for both the old and new PNI are invalidated by the change.
    CompletableFuture.allOf(
            keysManager.deleteSingleUsePreKeys(phoneNumberIdentifier),
            keysManager.deleteSingleUsePreKeys(originalPhoneNumberIdentifier))
        .join();

    final Collection<TransactWriteItem> keyWriteItems =
        buildPniKeyWriteItems(uuid, phoneNumberIdentifier, pniSignedPreKeys, pniPqLastResortPreKeys);

    final Account numberChangedAccount = updateWithRetries(
        account,
        a -> {
          setPniKeys(account, pniIdentityKey, pniRegistrationIds);
          return true;
        },
        a -> accounts.changeNumber(a, targetNumber, phoneNumberIdentifier, maybeDisplacedUuid, keyWriteItems),
        () -> accounts.getByAccountIdentifier(uuid).orElseThrow(),
        AccountChangeValidator.NUMBER_CHANGE_VALIDATOR);

    updatedAccount.set(numberChangedAccount);
  }, accountLockExecutor);

  return updatedAccount.get();
}
/**
 * Changing a number must update the account's E.164, register a PNI for the new number,
 * and purge single-use pre-keys for both the old and new PNIs.
 */
@Test
void testChangePhoneNumber() throws InterruptedException, MismatchedDevicesException {
  final String originalNumber = "+14152222222";
  final String targetNumber = "+14153333333";
  final UUID uuid = UUID.randomUUID();
  final UUID originalPni = UUID.randomUUID();
  Account account = AccountsHelper.generateTestAccount(originalNumber, uuid, originalPni,
      new ArrayList<>(), new byte[UnidentifiedAccessUtil.UNIDENTIFIED_ACCESS_KEY_LENGTH]);
  account = accountsManager.changeNumber(account, targetNumber, null, null, null, null);
  assertEquals(targetNumber, account.getNumber());
  assertTrue(phoneNumberIdentifiersByE164.containsKey(targetNumber));
  verify(keysManager).deleteSingleUsePreKeys(originalPni);
  verify(keysManager).deleteSingleUsePreKeys(phoneNumberIdentifiersByE164.get(targetNumber));
}
/**
 * Dissects an encoded archive control request into human-readable text appended
 * to {@code builder}. The event code selects which SBE request decoder is
 * wrapped over the message; unknown codes append ": unknown command".
 *
 * @param eventCode archive control event identifying the request type
 * @param buffer    buffer holding the log header followed by the encoded message
 * @param offset    offset of the log header within {@code buffer}
 * @param builder   destination for the dissected text
 */
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
    final ArchiveEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    // Render the common log header first, then skip past it and the SBE message header.
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);

    HEADER_DECODER.wrap(buffer, offset + encodedLength);
    encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;

    // Each case wraps the matching flyweight decoder at the message body and
    // delegates field rendering to the corresponding append* helper.
    switch (eventCode)
    {
        case CMD_IN_CONNECT:
            CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendConnect(builder);
            break;

        case CMD_IN_CLOSE_SESSION:
            CLOSE_SESSION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendCloseSession(builder);
            break;

        case CMD_IN_START_RECORDING:
            START_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING:
            STOP_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecording(builder);
            break;

        case CMD_IN_REPLAY:
            REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplay(builder);
            break;

        case CMD_IN_STOP_REPLAY:
            STOP_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplay(builder);
            break;

        case CMD_IN_LIST_RECORDINGS:
            LIST_RECORDINGS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordings(builder);
            break;

        case CMD_IN_LIST_RECORDINGS_FOR_URI:
            LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingsForUri(builder);
            break;

        case CMD_IN_LIST_RECORDING:
            LIST_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecording(builder);
            break;

        case CMD_IN_EXTEND_RECORDING:
            EXTEND_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording(builder);
            break;

        case CMD_IN_RECORDING_POSITION:
            RECORDING_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendRecordingPosition(builder);
            break;

        case CMD_IN_TRUNCATE_RECORDING:
            TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTruncateRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
            STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingSubscription(builder);
            break;

        case CMD_IN_STOP_POSITION:
            STOP_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopPosition(builder);
            break;

        case CMD_IN_FIND_LAST_MATCHING_RECORD:
            FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendFindLastMatchingRecord(builder);
            break;

        case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
            LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingSubscriptions(builder);
            break;

        case CMD_IN_START_BOUNDED_REPLAY:
            BOUNDED_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartBoundedReplay(builder);
            break;

        case CMD_IN_STOP_ALL_REPLAYS:
            STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopAllReplays(builder);
            break;

        case CMD_IN_REPLICATE:
            REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate(builder);
            break;

        case CMD_IN_STOP_REPLICATION:
            STOP_REPLICATION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplication(builder);
            break;

        case CMD_IN_START_POSITION:
            START_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartPosition(builder);
            break;

        case CMD_IN_DETACH_SEGMENTS:
            DETACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDetachSegments(builder);
            break;

        case CMD_IN_DELETE_DETACHED_SEGMENTS:
            DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDeleteDetachedSegments(builder);
            break;

        case CMD_IN_PURGE_SEGMENTS:
            PURGE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeSegments(builder);
            break;

        case CMD_IN_ATTACH_SEGMENTS:
            ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAttachSegments(builder);
            break;

        case CMD_IN_MIGRATE_SEGMENTS:
            MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendMigrateSegments(builder);
            break;

        case CMD_IN_AUTH_CONNECT:
            AUTH_CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAuthConnect(builder);
            break;

        case CMD_IN_KEEP_ALIVE:
            KEEP_ALIVE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendKeepAlive(builder);
            break;

        case CMD_IN_TAGGED_REPLICATE:
            TAGGED_REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTaggedReplicate(builder);
            break;

        case CMD_IN_START_RECORDING2:
            START_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording2(builder);
            break;

        case CMD_IN_EXTEND_RECORDING2:
            EXTEND_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording2(builder);
            break;

        case CMD_IN_STOP_RECORDING_BY_IDENTITY:
            STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingByIdentity(builder);
            break;

        case CMD_IN_PURGE_RECORDING:
            PURGE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeRecording(builder);
            break;

        case CMD_IN_REPLICATE2:
            REPLICATE_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate2(builder);
            break;

        case CMD_IN_REQUEST_REPLAY_TOKEN:
            REPLAY_TOKEN_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplayToken(builder);
            break;

        default:
            builder.append(": unknown command");
    }
}
// Dissecting a CMD_IN_STOP_REPLAY request must render the log header
// (timestamp + context + event + captured/total length) followed by all
// request fields in the expected fixed format.
@Test
void controlRequestStopReplay() {
    internalEncodeLogHeader(buffer, 0, 90, 90, () -> 1_125_000_000L);
    final StopReplayRequestEncoder requestEncoder = new StopReplayRequestEncoder();
    requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
        .controlSessionId(5)
        .correlationId(42)
        .replaySessionId(66);

    dissectControlRequest(CMD_IN_STOP_REPLAY, buffer, 0, builder);

    assertEquals("[1.125000000] " + CONTEXT + ": " + CMD_IN_STOP_REPLAY.name() + " [90/90]:" +
        " controlSessionId=5" +
        " correlationId=42" +
        " replaySessionId=66", builder.toString());
}
/**
 * Returns the status of this event (e.g. "committed").
 *
 * @return the current status value; may be {@code null} if never set
 */
public String getStatus() {
    return status;
}
// getStatus must return the status the fixture event was constructed with.
@Test
public void testGetStatus() {
    // Test the getStatus method
    assertEquals("committed", event.getStatus());
}
/**
 * Decodes a replication payload into a WAL event. The raw bytes are read as a
 * UTF-8 string and routed to transaction-aware or transaction-ignoring parsing
 * depending on the {@code decodeWithTX} flag; the given log sequence number is
 * stamped onto the resulting event.
 */
@Override
public AbstractWALEvent decode(final ByteBuffer data, final BaseLogSequenceNumber logSequenceNumber) {
    final byte[] payload = new byte[data.remaining()];
    data.get(payload);
    final String payloadText = new String(payload, StandardCharsets.UTF_8);
    final AbstractWALEvent event = decodeWithTX ? decodeDataWithTX(payloadText) : decodeDataIgnoreTX(payloadText);
    event.setLogSequenceNumber(logSequenceNumber);
    return event;
}
// Decoding an INSERT of a money-typed column should strip the quoting and
// currency symbol, yielding the plain numeric string "1.08", and should
// carry through the log sequence number and the bare (schema-less) table name.
@Test
void assertDecodeWriteRowEventWithMoney() {
    MppTableData tableData = new MppTableData();
    tableData.setTableName("public.test");
    tableData.setOpType("INSERT");
    tableData.setColumnsName(new String[]{"data"});
    tableData.setColumnsType(new String[]{"money"});
    tableData.setColumnsVal(new String[]{"'$1.08'"});
    ByteBuffer data = ByteBuffer.wrap(JsonUtils.toJsonString(tableData).getBytes());
    WriteRowEvent actual = (WriteRowEvent) new MppdbDecodingPlugin(null, false, false).decode(data, logSequenceNumber);
    assertThat(actual.getLogSequenceNumber(), is(logSequenceNumber));
    assertThat(actual.getTableName(), is("test"));
    Object byteaObj = actual.getAfterRow().get(0);
    assertThat(byteaObj, is("1.08"));
}
/**
 * Resolves a SpEL expression against the intercepted method invocation.
 * <p>
 * Resolution order: empty expressions are returned as-is; {@code ${...}}
 * placeholders are resolved via the string value resolver (when available);
 * method-style expressions are evaluated against the invocation arguments;
 * bean-style expressions are additionally given a bean resolver. Anything
 * else is returned unchanged.
 *
 * @param method         the intercepted method
 * @param arguments      the invocation arguments
 * @param spelExpression the raw expression to resolve; may be empty
 * @return the resolved string, or the input unchanged if no rule matched
 */
@Override
public String resolve(Method method, Object[] arguments, String spelExpression) {
    if (StringUtils.isEmpty(spelExpression)) {
        return spelExpression;
    }
    if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) {
        return stringValueResolver.resolveStringValue(spelExpression);
    }
    if (spelExpression.matches(METHOD_SPEL_REGEX)) {
        return evaluate(method, arguments, spelExpression, false);
    }
    if (spelExpression.matches(BEAN_SPEL_REGEX)) {
        // Bean references need a resolver wired into the evaluation context.
        return evaluate(method, arguments, spelExpression, true);
    }
    return spelExpression;
}

/**
 * Evaluates {@code spelExpression} against a method-based evaluation context,
 * optionally registering a bean resolver for bean-reference expressions.
 */
private String evaluate(Method method, Object[] arguments, String spelExpression, boolean withBeanResolver) {
    SpelRootObject rootObject = new SpelRootObject(method, arguments);
    MethodBasedEvaluationContext evaluationContext =
        new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
    if (withBeanResolver) {
        evaluationContext.setBeanResolver(new BeanFactoryResolver(this.beanFactory));
    }
    Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);
    return (String) evaluated;
}
// The positional expression "#a0" must resolve to the method's first argument.
@Test
public void testA0() throws Exception {
    String testExpression = "#a0";
    String firstArgument = "test";
    DefaultSpelResolverTest target = new DefaultSpelResolverTest();
    Method testMethod = target.getClass().getMethod("testMethod", String.class);

    String result = sut.resolve(testMethod, new Object[]{firstArgument}, testExpression);

    assertThat(result).isEqualTo(firstArgument);
}
/**
 * Returns the top-level keys of the given map as a list.
 *
 * @param input the map to read keys from; may be {@code null}
 * @param <T>   the map's value type (values are ignored)
 * @return a new mutable list of the map's keys, or {@code null} when the
 *     input map is {@code null} (SQL null-in/null-out semantics)
 */
@Udf
public <T> List<String> mapKeys(final Map<String, T> input) {
    if (input == null) {
        return null;
    }
    // Copy into a plain ArrayList: callers get a mutable, standalone list —
    // same behaviour as Guava's Lists.newArrayList, without the dependency.
    return new java.util.ArrayList<>(input.keySet());
}
// mapKeys must return only the top-level keys, regardless of how deeply
// nested the map's value type is.
@Test
public void shouldHandleComplexValueTypes() {
    final Map<String, Map<String, List<Double>>> input = Maps.newHashMap();
    final Map<String, List<Double>> entry1 = Maps.newHashMap();
    entry1.put("apple", Arrays.asList(Double.valueOf(12.34), Double.valueOf(56.78)));
    entry1.put("banana", Arrays.asList(Double.valueOf(43.21), Double.valueOf(87.65)));
    input.put("foo", entry1);
    final Map<String, List<Double>> entry2 = Maps.newHashMap();
    entry2.put("cherry", Arrays.asList(Double.valueOf(12.34), Double.valueOf(56.78)));
    entry2.put("date", Arrays.asList(Double.valueOf(43.21), Double.valueOf(87.65)));
    input.put("bar", entry2);

    // Iteration order of HashMap is unspecified, so compare order-insensitively.
    assertThat(udf.mapKeys(input), containsInAnyOrder("foo", "bar"));
}
/**
 * Computes, per column, the action needed to reconcile the segment's physical
 * columns with the current schema: ADD_* for schema columns missing from the
 * segment, UPDATE_* for auto-generated columns whose data type, default value
 * or single-value flag changed, and REMOVE_* for auto-generated columns no
 * longer present in the schema. Columns that are not auto-generated are never
 * updated or removed here.
 *
 * @return map from column name to the action to apply; empty when nothing to do
 * @throws RuntimeException if an auto-generated column's field type no longer
 *     matches the schema — signals the caller to drop and re-download the segment
 */
@VisibleForTesting
Map<String, DefaultColumnAction> computeDefaultColumnActionMap() {
    Map<String, DefaultColumnAction> defaultColumnActionMap = new HashMap<>();

    // Compute ADD and UPDATE actions.
    for (FieldSpec fieldSpecInSchema : _schema.getAllFieldSpecs()) {
        // Virtual columns are never materialised in the segment, so skip them.
        if (fieldSpecInSchema.isVirtualColumn()) {
            continue;
        }
        String column = fieldSpecInSchema.getName();
        FieldSpec.FieldType fieldTypeInSchema = fieldSpecInSchema.getFieldType();
        ColumnMetadata columnMetadata = _segmentMetadata.getColumnMetadataFor(column);
        if (columnMetadata != null) {
            // Column exists in the segment, check if we need to update the value.
            // Only check for auto-generated column.
            if (!columnMetadata.isAutoGenerated()) {
                continue;
            }
            // Check the field type matches.
            FieldSpec fieldSpecInMetadata = columnMetadata.getFieldSpec();
            FieldSpec.FieldType fieldTypeInMetadata = fieldSpecInMetadata.getFieldType();
            if (fieldTypeInMetadata != fieldTypeInSchema) {
                String failureMessage = "Field type: " + fieldTypeInMetadata + " for auto-generated column: " + column
                    + " does not match field type: " + fieldTypeInSchema
                    + " in schema, throw exception to drop and re-download the segment.";
                throw new RuntimeException(failureMessage);
            }
            // Check the data type and default value matches.
            DataType dataTypeInMetadata = fieldSpecInMetadata.getDataType();
            DataType dataTypeInSchema = fieldSpecInSchema.getDataType();
            boolean isSingleValueInMetadata = fieldSpecInMetadata.isSingleValueField();
            boolean isSingleValueInSchema = fieldSpecInSchema.isSingleValueField();
            String defaultValueInMetadata = fieldSpecInMetadata.getDefaultNullValueString();
            String defaultValueInSchema = fieldSpecInSchema.getDefaultNullValueString();
            // Mismatch priority: data type, then default value, then cardinality.
            if (fieldTypeInMetadata == DIMENSION) {
                if (dataTypeInMetadata != dataTypeInSchema) {
                    defaultColumnActionMap.put(column, DefaultColumnAction.UPDATE_DIMENSION_DATA_TYPE);
                } else if (!defaultValueInSchema.equals(defaultValueInMetadata)) {
                    defaultColumnActionMap.put(column, DefaultColumnAction.UPDATE_DIMENSION_DEFAULT_VALUE);
                } else if (isSingleValueInMetadata != isSingleValueInSchema) {
                    defaultColumnActionMap.put(column, DefaultColumnAction.UPDATE_DIMENSION_NUMBER_OF_VALUES);
                }
            } else if (fieldTypeInMetadata == METRIC) {
                if (dataTypeInMetadata != dataTypeInSchema) {
                    defaultColumnActionMap.put(column, DefaultColumnAction.UPDATE_METRIC_DATA_TYPE);
                } else if (!defaultValueInSchema.equals(defaultValueInMetadata)) {
                    defaultColumnActionMap.put(column, DefaultColumnAction.UPDATE_METRIC_DEFAULT_VALUE);
                } else if (isSingleValueInMetadata != isSingleValueInSchema) {
                    defaultColumnActionMap.put(column, DefaultColumnAction.UPDATE_METRIC_NUMBER_OF_VALUES);
                }
            } else if (fieldTypeInMetadata == DATE_TIME) {
                // NOTE: date-time columns have no cardinality update action here.
                if (dataTypeInMetadata != dataTypeInSchema) {
                    defaultColumnActionMap.put(column, DefaultColumnAction.UPDATE_DATE_TIME_DATA_TYPE);
                } else if (!defaultValueInSchema.equals(defaultValueInMetadata)) {
                    defaultColumnActionMap.put(column, DefaultColumnAction.UPDATE_DATE_TIME_DEFAULT_VALUE);
                }
            }
        } else {
            // Column does not exist in the segment, add default value for it.
            switch (fieldTypeInSchema) {
                case DIMENSION:
                    defaultColumnActionMap.put(column, DefaultColumnAction.ADD_DIMENSION);
                    break;
                case METRIC:
                    defaultColumnActionMap.put(column, DefaultColumnAction.ADD_METRIC);
                    break;
                case DATE_TIME:
                    defaultColumnActionMap.put(column, DefaultColumnAction.ADD_DATE_TIME);
                    break;
                default:
                    LOGGER.warn("Skip adding default column for column: {} with field type: {}", column,
                        fieldTypeInSchema);
                    break;
            }
        }
    }

    // Compute REMOVE actions.
    for (ColumnMetadata columnMetadata : _segmentMetadata.getColumnMetadataMap().values()) {
        String column = columnMetadata.getColumnName();
        // Only remove auto-generated columns
        if (!_schema.hasColumn(column) && columnMetadata.isAutoGenerated()) {
            FieldSpec.FieldType fieldTypeInMetadata = columnMetadata.getFieldSpec().getFieldType();
            if (fieldTypeInMetadata == DIMENSION) {
                defaultColumnActionMap.put(column, DefaultColumnAction.REMOVE_DIMENSION);
            } else if (fieldTypeInMetadata == METRIC) {
                defaultColumnActionMap.put(column, DefaultColumnAction.REMOVE_METRIC);
            } else if (fieldTypeInMetadata == DATE_TIME) {
                defaultColumnActionMap.put(column, DefaultColumnAction.REMOVE_DATE_TIME);
            }
        }
    }
    return defaultColumnActionMap;
}
// Exercises computeDefaultColumnActionMap against a committed segment for all
// main cases: identical schema (no action), newly added dimension/metric/date-time
// columns (ADD_*), and schema changes to non-auto-generated columns (no action).
@Test
public void testComputeDefaultColumnActionMapForCommittedSegment() {
    // Dummy IndexLoadingConfig
    IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig();

    // Same schema
    Schema schema0 = new Schema.SchemaBuilder().setSchemaName("testTable")
        .addSingleValueDimension("column1", FieldSpec.DataType.INT)
        .addSingleValueDimension("column2", FieldSpec.DataType.INT)
        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column4", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column5", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column6", FieldSpec.DataType.INT)
        .addSingleValueDimension("column7", FieldSpec.DataType.INT)
        .addSingleValueDimension("column8", FieldSpec.DataType.INT)
        .addSingleValueDimension("column9", FieldSpec.DataType.INT)
        .addSingleValueDimension("column10", FieldSpec.DataType.INT)
        .addSingleValueDimension("column13", FieldSpec.DataType.INT)
        .addSingleValueDimension("count", FieldSpec.DataType.INT)
        .addSingleValueDimension("daysSinceEpoch", FieldSpec.DataType.INT)
        .addSingleValueDimension("weeksSinceEpochSunday", FieldSpec.DataType.INT).build();
    BaseDefaultColumnHandler defaultColumnHandler =
        new V3DefaultColumnHandler(_segmentDirectory, _committedSegmentMetadata, indexLoadingConfig, schema0,
            _writer);
    Assert.assertEquals(defaultColumnHandler.computeDefaultColumnActionMap(), Collections.EMPTY_MAP);

    // Add single-value dimension in the schema
    Schema schema1 = new Schema.SchemaBuilder().setSchemaName("testTable")
        .addSingleValueDimension("column1", FieldSpec.DataType.INT)
        .addSingleValueDimension("column2", FieldSpec.DataType.INT)
        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column4", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column5", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column6", FieldSpec.DataType.INT)
        .addSingleValueDimension("column7", FieldSpec.DataType.INT)
        .addSingleValueDimension("column8", FieldSpec.DataType.INT)
        .addSingleValueDimension("column9", FieldSpec.DataType.INT)
        .addSingleValueDimension("column10", FieldSpec.DataType.INT)
        .addSingleValueDimension("column11", FieldSpec.DataType.INT) // add column11
        .addSingleValueDimension("column13", FieldSpec.DataType.INT)
        .addSingleValueDimension("count", FieldSpec.DataType.INT)
        .addSingleValueDimension("daysSinceEpoch", FieldSpec.DataType.INT)
        .addSingleValueDimension("weeksSinceEpochSunday", FieldSpec.DataType.INT).build();
    defaultColumnHandler =
        new V3DefaultColumnHandler(_segmentDirectory, _committedSegmentMetadata, indexLoadingConfig, schema1,
            _writer);
    Assert.assertEquals(defaultColumnHandler.computeDefaultColumnActionMap(),
        ImmutableMap.of("column11", BaseDefaultColumnHandler.DefaultColumnAction.ADD_DIMENSION));

    // Add multi-value dimension in the schema
    Schema schema2 = new Schema.SchemaBuilder().setSchemaName("testTable")
        .addSingleValueDimension("column1", FieldSpec.DataType.INT)
        .addSingleValueDimension("column2", FieldSpec.DataType.INT)
        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column4", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column5", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column6", FieldSpec.DataType.INT)
        .addSingleValueDimension("column7", FieldSpec.DataType.INT)
        .addSingleValueDimension("column8", FieldSpec.DataType.INT)
        .addSingleValueDimension("column9", FieldSpec.DataType.INT)
        .addSingleValueDimension("column10", FieldSpec.DataType.INT)
        .addMultiValueDimension("column11", FieldSpec.DataType.INT) // add column11
        .addSingleValueDimension("column13", FieldSpec.DataType.INT)
        .addSingleValueDimension("count", FieldSpec.DataType.INT)
        .addSingleValueDimension("daysSinceEpoch", FieldSpec.DataType.INT)
        .addSingleValueDimension("weeksSinceEpochSunday", FieldSpec.DataType.INT).build();
    defaultColumnHandler =
        new V3DefaultColumnHandler(_segmentDirectory, _committedSegmentMetadata, indexLoadingConfig, schema2,
            _writer);
    Assert.assertEquals(defaultColumnHandler.computeDefaultColumnActionMap(),
        ImmutableMap.of("column11", BaseDefaultColumnHandler.DefaultColumnAction.ADD_DIMENSION));

    // Add metric in the schema
    Schema schema3 = new Schema.SchemaBuilder().setSchemaName("testTable")
        .addSingleValueDimension("column1", FieldSpec.DataType.INT)
        .addSingleValueDimension("column2", FieldSpec.DataType.INT)
        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column4", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column5", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column6", FieldSpec.DataType.INT)
        .addSingleValueDimension("column7", FieldSpec.DataType.INT)
        .addSingleValueDimension("column8", FieldSpec.DataType.INT)
        .addSingleValueDimension("column9", FieldSpec.DataType.INT)
        .addSingleValueDimension("column10", FieldSpec.DataType.INT)
        .addSingleValueDimension("column13", FieldSpec.DataType.INT)
        .addSingleValueDimension("count", FieldSpec.DataType.INT)
        .addSingleValueDimension("daysSinceEpoch", FieldSpec.DataType.INT)
        .addSingleValueDimension("weeksSinceEpochSunday", FieldSpec.DataType.INT)
        .addMetric("column11", FieldSpec.DataType.INT).build(); // add column11
    defaultColumnHandler =
        new V3DefaultColumnHandler(_segmentDirectory, _committedSegmentMetadata, indexLoadingConfig, schema3,
            _writer);
    Assert.assertEquals(defaultColumnHandler.computeDefaultColumnActionMap(),
        ImmutableMap.of("column11", BaseDefaultColumnHandler.DefaultColumnAction.ADD_METRIC));

    // Add date-time column in the schema
    Schema schema4 = new Schema.SchemaBuilder().setSchemaName("testTable")
        .addSingleValueDimension("column1", FieldSpec.DataType.INT)
        .addSingleValueDimension("column2", FieldSpec.DataType.INT)
        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column4", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column5", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column6", FieldSpec.DataType.INT)
        .addSingleValueDimension("column7", FieldSpec.DataType.INT)
        .addSingleValueDimension("column8", FieldSpec.DataType.INT)
        .addSingleValueDimension("column9", FieldSpec.DataType.INT)
        .addSingleValueDimension("column10", FieldSpec.DataType.INT)
        .addSingleValueDimension("column13", FieldSpec.DataType.INT)
        .addSingleValueDimension("count", FieldSpec.DataType.INT)
        .addSingleValueDimension("daysSinceEpoch", FieldSpec.DataType.INT)
        .addSingleValueDimension("weeksSinceEpochSunday", FieldSpec.DataType.INT)
        .addDateTime("column11", FieldSpec.DataType.INT, "1:HOURS:EPOCH", "1:HOURS").build(); // add column11
    defaultColumnHandler =
        new V3DefaultColumnHandler(_segmentDirectory, _committedSegmentMetadata, indexLoadingConfig, schema4,
            _writer);
    Assert.assertEquals(defaultColumnHandler.computeDefaultColumnActionMap(),
        ImmutableMap.of("column11", BaseDefaultColumnHandler.DefaultColumnAction.ADD_DATE_TIME));

    // Do not remove non-autogenerated column in the segmentMetadata
    Schema schema5 = new Schema.SchemaBuilder().setSchemaName("testTable")
        .addSingleValueDimension("column1", FieldSpec.DataType.INT)
        .addSingleValueDimension("column3", FieldSpec.DataType.STRING) // remove column2
        .addSingleValueDimension("column4", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column5", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column6", FieldSpec.DataType.INT)
        .addSingleValueDimension("column7", FieldSpec.DataType.INT)
        .addSingleValueDimension("column8", FieldSpec.DataType.INT)
        .addSingleValueDimension("column9", FieldSpec.DataType.INT)
        .addSingleValueDimension("column10", FieldSpec.DataType.INT)
        .addSingleValueDimension("column13", FieldSpec.DataType.INT)
        .addSingleValueDimension("count", FieldSpec.DataType.INT)
        .addSingleValueDimension("daysSinceEpoch", FieldSpec.DataType.INT)
        .addSingleValueDimension("weeksSinceEpochSunday", FieldSpec.DataType.INT).build();
    defaultColumnHandler =
        new V3DefaultColumnHandler(_segmentDirectory, _committedSegmentMetadata, indexLoadingConfig, schema5,
            _writer);
    Assert.assertEquals(defaultColumnHandler.computeDefaultColumnActionMap(), Collections.EMPTY_MAP);

    // Do not update non-autogenerated column in the schema
    Schema schema6 = new Schema.SchemaBuilder().setSchemaName("testTable")
        .addSingleValueDimension("column1", FieldSpec.DataType.INT)
        .addSingleValueDimension("column2", FieldSpec.DataType.STRING) // update datatype
        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column4", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column5", FieldSpec.DataType.STRING)
        .addSingleValueDimension("column6", FieldSpec.DataType.INT)
        .addSingleValueDimension("column7", FieldSpec.DataType.INT)
        .addSingleValueDimension("column8", FieldSpec.DataType.INT)
        .addSingleValueDimension("column9", FieldSpec.DataType.INT)
        .addSingleValueDimension("column10", FieldSpec.DataType.INT)
        .addSingleValueDimension("column13", FieldSpec.DataType.INT)
        .addSingleValueDimension("count", FieldSpec.DataType.INT)
        .addSingleValueDimension("daysSinceEpoch", FieldSpec.DataType.INT)
        .addSingleValueDimension("weeksSinceEpochSunday", FieldSpec.DataType.INT).build();
    defaultColumnHandler =
        new V3DefaultColumnHandler(_segmentDirectory, _committedSegmentMetadata, indexLoadingConfig, schema6,
            _writer);
    Assert.assertEquals(defaultColumnHandler.computeDefaultColumnActionMap(), Collections.EMPTY_MAP);
}
@Override public int compare(String version1, String version2) { if(ObjectUtil.equal(version1, version2)) { return 0; } if (version1 == null && version2 == null) { return 0; } else if (version1 == null) {// null或""视为最小版本,排在前 return -1; } else if (version2 == null) { return 1; } return CompareUtil.compare(Version.of(version1), Version.of(version2)); }
// null must compare as the smallest version; swapping the operands must
// flip the sign (antisymmetry).
@Test
public void versionComparatorTest3() {
    int compare = VersionComparator.INSTANCE.compare(null, "1.12.1c");
    assertTrue(compare < 0);

    // antisymmetry check: reversed arguments yield the opposite sign
    compare = VersionComparator.INSTANCE.compare("1.12.1c", null);
    assertTrue(compare > 0);
}
/**
 * Queues a synchronisation task on the currently applicable managed-instance
 * service delegate, if one is found; no-op otherwise.
 */
@Override
public void queueSynchronisationTask() {
    findManagedInstanceService()
        .ifPresent(ManagedInstanceService::queueSynchronisationTask);
}
// Only the delegate that reports the instance as managed should receive the
// synchronisation task; the non-managing delegate must never be called.
@Test
public void queueSynchronisationTask_whenManagedInstanceServices_shouldDelegatesToRightService() {
    NeverManagedInstanceService neverManagedInstanceService = spy(new NeverManagedInstanceService());
    AlwaysManagedInstanceService alwaysManagedInstanceService = spy(new AlwaysManagedInstanceService());
    Set<ManagedInstanceService> delegates = Set.of(neverManagedInstanceService, alwaysManagedInstanceService);
    DelegatingManagedServices managedInstanceService = new DelegatingManagedServices(delegates);

    managedInstanceService.queueSynchronisationTask();

    verify(neverManagedInstanceService, never()).queueSynchronisationTask();
    verify(alwaysManagedInstanceService).queueSynchronisationTask();
}
/**
 * Converts an Iceberg filter expression into an ORC {@link SearchArgument}
 * using the read schema to map Iceberg field IDs to ORC column names.
 */
static SearchArgument convert(Expression expr, TypeDescription readSchema) {
    SearchArgument.Builder sargBuilder = SearchArgumentFactory.newBuilder();
    Map<Integer, String> orcNamesById = ORCSchemaUtil.idToOrcName(ORCSchemaUtil.convert(readSchema));
    ExpressionVisitors.visit(expr, new ExpressionToSearchArgument(sargBuilder, orcNamesById)).invoke();
    return sargBuilder.build();
}
// After schema evolution (rename + added column), the pushed-down predicate
// must reference the file's original column name for the renamed field, and
// the _r<ID>-suffixed name for a column not present in the file.
@Test
public void testEvolvedSchema() {
    Schema fileSchema =
        new Schema(
            required(1, "int", Types.IntegerType.get()),
            optional(2, "long_to_be_dropped", Types.LongType.get()));

    Schema evolvedSchema =
        new Schema(
            required(1, "int_renamed", Types.IntegerType.get()),
            optional(3, "float_added", Types.FloatType.get()));

    TypeDescription readSchema =
        ORCSchemaUtil.buildOrcProjection(evolvedSchema, ORCSchemaUtil.convert(fileSchema));

    Expression expr = equal("int_renamed", 1);
    Expression boundFilter = Binder.bind(evolvedSchema.asStruct(), expr, true);

    SearchArgument expected =
        SearchArgumentFactory.newBuilder().equals("`int`", Type.LONG, 1L).build();

    SearchArgument actual = ExpressionToSearchArgument.convert(boundFilter, readSchema);
    assertThat(actual.toString()).isEqualTo(expected.toString());

    // for columns not in the file, buildOrcProjection will append field names with _r<ID>
    // this will be passed down to ORC, but ORC will handle such cases and return a TruthValue
    // during evaluation
    expr = equal("float_added", 1);
    boundFilter = Binder.bind(evolvedSchema.asStruct(), expr, true);

    expected =
        SearchArgumentFactory.newBuilder().equals("`float_added_r3`", Type.FLOAT, 1.0).build();

    actual = ExpressionToSearchArgument.convert(boundFilter, readSchema);
    assertThat(actual.toString()).isEqualTo(expected.toString());
}
/**
 * Authorization endpoint: validates the incoming authorization request,
 * opens a session, and renders the identity-provider selection form in the
 * locale negotiated from the Accept-Language header (default de-DE).
 * The session id is returned to the client in a cookie.
 *
 * @param scope          requested scope (e.g. "openid")
 * @param state          opaque client state echoed back later
 * @param responseType   OAuth response type (e.g. "code")
 * @param clientId       requesting client's identifier
 * @param redirectUri    client redirect URI; parsed and validated up front
 * @param nonce          replay-protection nonce
 * @param acceptLanguage Accept-Language header used for locale negotiation
 * @return 200 HTML response with the IdP selection form and a session cookie
 */
@GET
@Produces(MediaType.TEXT_HTML)
public Response auth(
    @QueryParam("scope") String scope,
    @QueryParam("state") String state,
    @QueryParam("response_type") String responseType,
    @QueryParam("client_id") String clientId,
    @QueryParam("redirect_uri") String redirectUri,
    @QueryParam("nonce") String nonce,
    @HeaderParam("Accept-Language") @DefaultValue("de-DE") String acceptLanguage) {

    // Reject malformed redirect URIs before touching the auth service.
    var uri = mustParse(redirectUri);
    var res =
        authService.auth(
            new AuthorizationRequest(scope, state, responseType, clientId, uri, nonce));

    var locale = getNegotiatedLocale(acceptLanguage);
    var form = pages.selectIdpForm(res.identityProviders(), locale);

    return Response.ok(form, MediaType.TEXT_HTML_TYPE)
        .cookie(createSessionCookie(res.sessionId()))
        .build();
}
// A valid authorization request should return 200 and set the session_id
// cookie to the session created by the auth service.
@Test
void auth_success() {
    var sessionId = IdGenerator.generateID();
    var authService = mock(AuthService.class);
    when(authService.auth(any())).thenReturn(new AuthorizationResponse(List.of(), sessionId));
    var sut = new AuthEndpoint(authService);

    var scope = "openid";
    var state = UUID.randomUUID().toString();
    var nonce = UUID.randomUUID().toString();
    var responseType = "code";
    var clientId = "myapp";
    var language = "de-DE";

    // when
    try (var res = sut.auth(scope, state, responseType, clientId, REDIRECT_URI, nonce, language)) {
        // then
        assertEquals(Status.OK.getStatusCode(), res.getStatus());
        var sessionCookie = res.getCookies().get("session_id");
        assertEquals(sessionId, sessionCookie.getValue());
    }
}
/**
 * Identifies the backing data source for this mapper.
 *
 * @return the MySQL data source constant
 */
@Override
public String getDataSource() {
    return DataSourceConstant.MYSQL;
}
// The mapper must report the MySQL data source constant.
@Test
void testGetDataSource() {
    String dataSource = configInfoAggrMapperByMySql.getDataSource();
    assertEquals(DataSourceConstant.MYSQL, dataSource);
}
/**
 * Sums the sizes of all files across the given partitions, adding
 * {@code openCost} as a fixed per-file weight. The per-partition work is
 * parallelised when the configured thread count is greater than one.
 *
 * @param partitions partitions whose files are measured
 * @param jobConf    Hive job conf carrying the calculate-partition-size
 *                   thread-num option (NOTE(review): a missing value would make
 *                   Integer.parseInt throw — presumably a default is always
 *                   populated upstream; confirm)
 * @param openCost   fixed cost added per file to weight small files
 * @return total file bytes plus per-file open cost across all partitions
 * @throws IOException if any partition's size calculation fails or is interrupted
 */
@VisibleForTesting
static long calculateFilesSizeWithOpenCost(
        List<HiveTablePartition> partitions, JobConf jobConf, long openCost)
        throws IOException {
    long totalBytesWithWeight = 0;
    int calPartitionSizeThreadNum =
            Integer.parseInt(
                    jobConf.get(
                            HiveOptions.TABLE_EXEC_HIVE_CALCULATE_PARTITION_SIZE_THREAD_NUM
                                    .key()));
    ExecutorService executorService = null;
    try {
        // Single-threaded case runs inline on a direct executor to avoid
        // spawning a pool for nothing.
        executorService =
                calPartitionSizeThreadNum == 1
                        ? newDirectExecutorService()
                        : Executors.newFixedThreadPool(calPartitionSizeThreadNum);
        List<Future<Long>> partitionFilesSizeFutures = new ArrayList<>();
        for (HiveTablePartition partition : partitions) {
            partitionFilesSizeFutures.add(
                    executorService.submit(
                            new PartitionFilesSizeCalculator(partition, openCost, jobConf)));
        }
        for (Future<Long> fileSizeFuture : partitionFilesSizeFutures) {
            try {
                totalBytesWithWeight += fileSizeFuture.get();
            } catch (InterruptedException | ExecutionException e) {
                throw new IOException("Fail to calculate total files' size.", e);
            }
        }
    } finally {
        // All futures are joined above, so a plain shutdown is sufficient here.
        if (executorService != null) {
            executorService.shutdown();
        }
    }
    return totalBytesWithWeight;
}
// The total must equal partitionCount * (fileSize + openCost), and must be the
// same whether computed single-threaded or with a thread pool.
@Test
public void testCalculateFilesSize() throws Exception {
    String baseFilePath =
            Objects.requireNonNull(this.getClass().getResource("/orc/test.orc")).getPath();
    long fileSize = Paths.get(baseFilePath).toFile().length();
    File wareHouse = temporaryFolder.newFolder("testCalculateFilesSize");
    int partitionNum = 10;
    long openCost = 1;
    List<HiveTablePartition> hiveTablePartitions = new ArrayList<>();
    for (int i = 0; i < partitionNum; i++) {
        // create partition directory
        Path partitionPath = Paths.get(wareHouse.getPath(), "p_" + i);
        Files.createDirectory(partitionPath);
        // copy file to the partition directory
        Files.copy(Paths.get(baseFilePath), Paths.get(partitionPath.toString(), "t.orc"));
        StorageDescriptor sd = new StorageDescriptor();
        sd.setLocation(partitionPath.toString());
        hiveTablePartitions.add(new HiveTablePartition(sd, new Properties()));
    }
    // test calculation with one single thread
    JobConf jobConf = new JobConf();
    jobConf.set(HiveOptions.TABLE_EXEC_HIVE_CALCULATE_PARTITION_SIZE_THREAD_NUM.key(), "1");
    long totalSize =
            HiveSourceFileEnumerator.calculateFilesSizeWithOpenCost(
                    hiveTablePartitions, jobConf, openCost);
    long expectedSize = partitionNum * (fileSize + openCost);
    assertThat(totalSize).isEqualTo(expectedSize);
    // test calculation with multiple threads
    jobConf.set(HiveOptions.TABLE_EXEC_HIVE_CALCULATE_PARTITION_SIZE_THREAD_NUM.key(), "3");
    totalSize =
            HiveSourceFileEnumerator.calculateFilesSizeWithOpenCost(
                    hiveTablePartitions, jobConf, openCost);
    assertThat(totalSize).isEqualTo(expectedSize);
}
// No-argument constructor; presumably required for configuration/provenance
// deserialisation — TODO confirm against the framework's requirements.
private MIM() {}
@Test
public void mimTest() {
    // Builds a tiny two-class dataset in which feature E matches the label exactly,
    // then checks that MIM ranks all five features with the expected scores (E first).
    LabelFactory lblFactory = new LabelFactory();
    MutableDataset<Label> dataset = new MutableDataset<>(new SimpleDataSourceProvenance("Test",lblFactory),lblFactory);
    String[] featureNames = new String[]{"A","B","C","D","E"};
    Label one = new Label("ONE");
    Label two = new Label("TWO");
    Example<Label> ex = new ArrayExample<>(one,featureNames,new double[]{0,0,0,0,0});
    dataset.add(ex);
    ex = new ArrayExample<>(one,featureNames,new double[]{1,0,1,0,0});
    dataset.add(ex);
    ex = new ArrayExample<>(one,featureNames,new double[]{0,0,0,1,0});
    dataset.add(ex);
    ex = new ArrayExample<>(one,featureNames,new double[]{1,1,0,0,0});
    dataset.add(ex);
    ex = new ArrayExample<>(two,featureNames,new double[]{0,0,1,0,1});
    dataset.add(ex);
    ex = new ArrayExample<>(two,featureNames,new double[]{1,1,1,0,1});
    dataset.add(ex);
    ex = new ArrayExample<>(two,featureNames,new double[]{0,1,1,1,1});
    dataset.add(ex);
    ex = new ArrayExample<>(two,featureNames,new double[]{1,1,0,1,1});
    dataset.add(ex);
    // k=2 selector — yet all 5 features are reported with scores, ordered by rank.
    MIM mim = new MIM(2);
    SelectedFeatureSet sfs = mim.select(dataset);
    List<String> names = sfs.featureNames();
    List<Double> scores = sfs.featureScores();
    assertTrue(sfs.isOrdered());
    assertEquals(5,names.size());
    assertEquals(5,scores.size());
    assertEquals(Arrays.asList("E","B","C","D","A"),sfs.featureNames());
    assertEquals(Arrays.asList(1.0, 0.1887218755408671, 0.1887218755408671, 0.0487949406953985, 0.0),sfs.featureScores());
}
/** Builds the map-to-fields transform from its parsed configuration. */
@Override
protected SchemaTransform from(Configuration configuration) {
    return new JavaMapToFieldsTransform(configuration);
}
@Test
@Category(NeedsRunner.class)
public void testAppendAndDropFields() {
    // With append=true the computed "sum" field is added alongside the input fields,
    // and drop=["b"] removes the original "b" column from the output schema.
    Schema inputSchema =
        Schema.of(
            Schema.Field.of("a", Schema.FieldType.INT32),
            Schema.Field.of("b", Schema.FieldType.DOUBLE));
    PCollection<Row> input =
        pipeline
            .apply(
                Create.of(
                    Row.withSchema(inputSchema).addValues(2, 0.5).build(),
                    Row.withSchema(inputSchema).addValues(4, 0.25).build()))
            .setRowSchema(inputSchema);
    PCollection<Row> renamed =
        PCollectionRowTuple.of(JavaMapToFieldsTransformProvider.INPUT_ROWS_TAG, input)
            .apply(
                new JavaMapToFieldsTransformProvider()
                    .from(
                        JavaMapToFieldsTransformProvider.Configuration.builder()
                            .setLanguage("java")
                            .setAppend(true)
                            .setDrop(Collections.singletonList("b"))
                            .setFields(
                                ImmutableMap.of(
                                    "sum",
                                    JavaRowUdf.Configuration.builder()
                                        .setExpression("a+b")
                                        .build()))
                            .build()))
            .get(JavaMapToFieldsTransformProvider.OUTPUT_ROWS_TAG);
    Schema outputSchema = renamed.getSchema();
    PAssert.that(renamed)
        .containsInAnyOrder(
            Row.withSchema(outputSchema).withFieldValue("a", 2).withFieldValue("sum", 2.5).build(),
            Row.withSchema(outputSchema)
                .withFieldValue("a", 4)
                .withFieldValue("sum", 4.25)
                .build());
    pipeline.run();
}
/**
 * Reads a NUL-terminated string from the payload and decodes it with the
 * configured charset. The terminator is consumed but not included.
 */
public String readStringNul() {
    byte[] raw = readStringNulByBytes();
    return new String(raw, charset);
}
@Test
void assertReadStringNul() {
    // An immediate NUL terminator yields the empty string; the terminator byte is skipped.
    when(byteBuf.bytesBefore((byte) 0)).thenReturn(0);
    assertThat(new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).readStringNul(), is(""));
    verify(byteBuf).skipBytes(1);
}
/** Single-argument overload: rounds {@code n} with scale 0 (to an integer value). */
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
    return invoke(n, BigDecimal.ZERO);
}
@Test
void invokeRoundingDown() {
    // NOTE(review): the method name says "RoundingDown" but both assertions pin
    // round-UP behaviour (10.24 -> 11; 10.24 at scale 1 -> 10.3). Consider renaming.
    FunctionTestUtil.assertResult(roundUpFunction.invoke(BigDecimal.valueOf(10.24)), BigDecimal.valueOf(11));
    FunctionTestUtil.assertResult(roundUpFunction.invoke(BigDecimal.valueOf(10.24), BigDecimal.ONE), BigDecimal.valueOf(10.3));
}
/**
 * Two-argument overload: generates an inclusive integer series from {@code start}
 * to {@code end}, stepping +1 when ascending and -1 when descending.
 */
@Udf
public List<Integer> generateSeriesInt(
    @UdfParameter(description = "The beginning of the series") final int start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final int end
) {
  // Derive direction by comparison rather than from (end - start), whose int
  // subtraction can overflow for extreme bounds and silently flip the step sign.
  return generateSeriesInt(start, end, end >= start ? 1 : -1);
}
@Test
public void shouldThrowOnStepZeroInt() {
    // When: a zero step is passed to the three-argument overload
    final Exception e = assertThrows(
        KsqlFunctionException.class,
        () -> rangeUdf.generateSeriesInt(0, 10, 0)
    );
    // Then: the error message identifies the invalid step
    assertThat(e.getMessage(), containsString(
        "GENERATE_SERIES step cannot be zero"));
}
/**
 * Validates that {@code parentId} is an acceptable parent for menu {@code childId}.
 * Throws a service exception when the parent is the menu itself, does not exist,
 * or is not a directory/menu type node.
 */
@VisibleForTesting
void validateParentMenu(Long parentId, Long childId) {
    // Root or absent parent requires no validation.
    if (parentId == null || ID_ROOT.equals(parentId)) {
        return;
    }
    // A menu must not be set as its own parent.
    if (parentId.equals(childId)) {
        throw exception(MENU_PARENT_ERROR);
    }
    MenuDO menu = menuMapper.selectById(parentId);
    // The parent menu must exist.
    if (menu == null) {
        throw exception(MENU_PARENT_NOT_EXISTS);
    }
    // The parent must be of directory or menu type.
    if (!MenuTypeEnum.DIR.getType().equals(menu.getType())
        && !MenuTypeEnum.MENU.getType().equals(menu.getType())) {
        throw exception(MENU_PARENT_NOT_DIR_OR_MENU);
    }
}
@Test
public void testValidateParentMenu_canNotSetSelfToBeParent() {
    // Invoke with parentId == childId and assert the "menu cannot be its own parent" error.
    assertServiceException(() -> menuService.validateParentMenu(1L, 1L), MENU_PARENT_ERROR);
}
/** Returns basic information about the Global Policy Generator as JSON or XML. */
@GET
@Path("/info")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public GpgInfo getGPGInfo() {
    return new GpgInfo(this.gpgGenerator.getGPGContext());
}
@Test
public void testGetGPGInfo() throws JSONException, Exception {
    // Smoke test: /ws/v1/gpg/info must respond with a JSON document.
    WebResource r = resource();
    JSONObject json = r.path("ws").path("v1").path("gpg").path("info")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    assertNotNull(json);
}
/**
 * Stores a deep copy of the given properties so later mutation of the caller's
 * object cannot affect this instance.
 */
public void setProperties(JSONObject properties) {
    try {
        this.properties = JSONUtils.cloneJsonObject(properties);
    } catch (Exception e) {
        // Best-effort: log and keep the previous value if cloning fails.
        SALog.printStackTrace(e);
    }
}
@Test
public void setProperties() throws JSONException {
    // Values present on the source object before the call must be readable afterwards.
    SAExposureData exposureData = new SAExposureData("ExposeEvent");
    JSONObject jsonObject = new JSONObject();
    jsonObject.put("expose", "expose");
    exposureData.setProperties(jsonObject);
    Assert.assertEquals(exposureData.getProperties().optString("expose"), "expose");
}
/** Returns overall history server information as JSON or XML. */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public HistoryInfo get() {
    return getHistoryInfo();
}
@Test
public void testInvalidUri() throws JSONException, Exception {
    // An unknown path under /ws/v1/history must produce 404 and an empty error body.
    WebResource r = resource();
    String responseStr = "";
    try {
        responseStr = r.path("ws").path("v1").path("history").path("bogus")
            .accept(MediaType.APPLICATION_JSON).get(String.class);
        fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
        ClientResponse response = ue.getResponse();
        assertResponseStatusCode(Status.NOT_FOUND, response.getStatusInfo());
        WebServicesTestUtils.checkStringMatch(
            "error string exists and shouldn't", "", responseStr);
    }
}
/**
 * Registers this instance with the Nacos naming service, substituting the
 * configured internal communication address when one is required.
 */
@Override
public void registerRemote(RemoteInstance remoteInstance) throws ServiceRegisterException {
    if (needUsingInternalAddr()) {
        remoteInstance = new RemoteInstance(
            new Address(config.getInternalComHost(), config.getInternalComPort(), true));
    }
    Address address = remoteInstance.getAddress();
    this.selfAddress = address;
    try {
        namingService.registerInstance(config.getServiceName(), address.getHost(), address.getPort());
        healthChecker.health();
    } catch (Throwable e) {
        // Any registration failure marks the checker unhealthy before propagating.
        healthChecker.unHealth(e);
        throw new ServiceRegisterException(e.getMessage());
    }
}
@Test
public void registerRemote() throws NacosException {
    // Delegates to a shared test helper (defined elsewhere in this class) with the
    // default, non-internal remote address — presumably it asserts the registration.
    registerRemote(remoteAddress);
}
/**
 * Converts this persisted node entity into its immutable {@code DataNodeDto}
 * representation, copying every field one-to-one.
 */
@Override
public DataNodeDto toDto() {
    return DataNodeDto.Builder.builder()
            .setObjectId(this.getObjectId().toHexString())
            .setId(this.getNodeId())
            .setTransportAddress(this.getTransportAddress())
            .setLastSeen(this.getLastSeen())
            .setHostname(this.getHostname())
            .setLeader(this.isLeader())
            .setClusterAddress(this.getClusterAddress())
            .setDataNodeStatus(this.getDataNodeStatus())
            .setRestApiAddress(this.getRestApiAddress())
            .setActionQueue(this.getActionQueue())
            .setCertValidUntil(this.getCertValidTill())
            .setDatanodeVersion(this.getDatanodeVersion())
            .build();
}
@Test
void serialize() throws Exception {
    // Round-trip check: the JSON rendering of a DataNodeEntity exposes the expected
    // fields (including derived ones like short_node_id), and toDto() reproduces
    // every stored value.
    final ZonedDateTime lastSeen = Instant.ofEpochSecond(1).atZone(ZoneOffset.UTC);
    final String nodeId = "2d4cff7a-b9c4-440c-9c62-89ba1fb06211";
    final String transportAddress = "http://127.0.0.1:9200";
    final String clusterAddress = "http://127.0.0.1:9300";
    final String restApiAddress = "http://127.0.0.1:8999";
    final String hostname = "graylog.local";
    final Map<String, Object> fields = Maps.newHashMap();
    fields.put("last_seen", (int) lastSeen.toEpochSecond());
    fields.put("node_id", nodeId);
    fields.put("is_leader", true);
    fields.put("transport_address", transportAddress);
    fields.put("rest_api_address", restApiAddress);
    fields.put("hostname", hostname);
    fields.put("datanode_status", DataNodeStatus.AVAILABLE);
    fields.put("cluster_address", clusterAddress);
    final String id = "61b9c2861448530c3e061283";
    final DataNodeEntity node = new DataNodeEntity(new ObjectId(id), fields);
    final JsonNode jsonNode = mapper.readTree(mapper.writeValueAsString(node));
    assertThat(ZonedDateTime.parse(jsonNode.path("last_seen").asText())).isEqualTo(lastSeen);
    assertThat(jsonNode.path("node_id").asText()).isEqualTo(nodeId);
    assertThat(jsonNode.path("is_leader").asBoolean()).isEqualTo(true);
    assertThat(jsonNode.path("transport_address").asText()).isEqualTo(transportAddress);
    assertThat(jsonNode.path("hostname").asText()).isEqualTo(hostname);
    assertThat(jsonNode.path("id").asText()).isEqualTo(id);
    assertThat(jsonNode.path("is_master").asBoolean()).isEqualTo(true);
    assertThat(jsonNode.path("short_node_id").asText()).isEqualTo("2d4cff7a");
    assertThat(jsonNode.path("data_node_status").asText()).isEqualTo(DataNodeStatus.AVAILABLE.name());
    assertThat(node.toDto()).isEqualTo(DataNodeDto.Builder.builder()
            .setLastSeen(new DateTime(lastSeen.toEpochSecond() * 1000, DateTimeZone.UTC))
            .setId(nodeId)
            .setLeader(true)
            .setTransportAddress(transportAddress)
            .setHostname(hostname)
            .setDataNodeStatus(DataNodeStatus.AVAILABLE)
            .setClusterAddress(clusterAddress)
            .setRestApiAddress(restApiAddress)
            .setObjectId(id)
            .build());
}
/**
 * Converts a YARN {@code JobId} into the classic MapReduce {@code JobID},
 * deriving the job-tracker identifier from the application's cluster timestamp.
 */
public static org.apache.hadoop.mapred.JobID fromYarn(JobId id) {
    final long clusterTimestamp = id.getAppId().getClusterTimestamp();
    final String identifier = fromClusterTimeStamp(clusterTimestamp);
    return new org.apache.hadoop.mapred.JobID(identifier, id.getId());
}
@Test
public void testEnums() throws Exception {
    // Every enum constant of each YARN type must convert without throwing.
    for (YarnApplicationState applicationState : YarnApplicationState.values()) {
        TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
    }
    // ad hoc test of NEW_SAVING, which is newly added
    assertEquals(State.PREP, TypeConverter.fromYarn(
        YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));
    for (TaskType taskType : TaskType.values()) {
        TypeConverter.fromYarn(taskType);
    }
    for (JobState jobState : JobState.values()) {
        TypeConverter.fromYarn(jobState);
    }
    for (QueueState queueState : QueueState.values()) {
        TypeConverter.fromYarn(queueState);
    }
    for (TaskState taskState : TaskState.values()) {
        TypeConverter.fromYarn(taskState);
    }
}
/**
 * Stops the stopwatch by recording the current instant, but only while it is
 * still running (per {@code isRunning()}); otherwise the state is left untouched.
 *
 * @return this stopwatch, for chaining
 */
public Stopwatch stop() {
    if (isRunning()) {
        stopTime = Instant.now();
    }
    return this;
}
@Test
public void addSubstract() {
    // NOTE(review): method name has a typo ("Substract" -> "Subtract").
    // Adding then subtracting the same stopwatch duration must return the original instant.
    Instant i1 = Instant.now();
    Instant i2 = i1.plus(stopwatch.stop());
    Instant i3 = i2.minus(stopwatch);
    assertEquals(i1, i3);
    assertTrue(i2.compareTo(i1) >= 0);
    assertTrue(i3.compareTo(i2) <= 0);
}
/**
 * Scans broker load statistics and decides which bundles to unload (and, when
 * transfer mode is enabled, to which destination broker), repeatedly moving load
 * from the most-loaded broker toward the least-loaded one until the load standard
 * deviation meets the target or no transferable broker/bundle remains.
 *
 * @param recentlyUnloadedBundles bundles to skip (bundle name -> unload timestamp)
 * @param recentlyUnloadedBrokers brokers to exclude from the stat update
 * @return the cached set of unload decisions made in this pass (may be empty)
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
                                                   Map<String, Long> recentlyUnloadedBundles,
                                                   Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(),
                        TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);

        // Refresh aggregate load stats; a present skipReason aborts this pass.
        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG
                        + " Skipped the load stat update. Reason:{}.",
                        skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);
        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }

        // skip metrics
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;

        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Shedding only starts after the imbalance condition holds for more than
        // the configured number of consecutive invocations (debounce).
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }
        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                        + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount,
                        conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }

        // Main loop: repeatedly pick the current max-loaded broker and offload
        // roughly half of the max-min load gap toward the min-loaded broker.
        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers.");
                }
                break;
            }
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                            + "load std:%.2f <= targetStd:%.2f. "
                            + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                            + "load std:%.2f <= targetStd:%.2f. "
                            + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                            + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                            + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }

            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            // Target: offload half of the weighted-load gap, expressed as throughput.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;

            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100, targetStd,
                        offload * 100,
                        offloadThroughput / KB
                ));
            }

            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;

            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }

            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            // A broker with a single bundle cannot be helped by unloading; splitting is needed.
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }
            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter =
                    minBundlesLoadData.isPresent()
                            ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null;

            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }

            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                // Skip bundles the max broker no longer owns, recently-unloaded
                // bundles, and bundles whose transfer would violate (anti-)affinity.
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                if (remainingTopBundles <= 1) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                + " less than or equal to 1.", bundle, maxBroker));
                    }
                    break;
                }

                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                    + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                    + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }

                Unload unload;
                if (transfer) {
                    if (swap) {
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                            // NOTE(review): trafficMarkedToGain is accumulated only when
                            // debugMode is enabled — the accounting update appears to
                            // belong OUTSIDE the debug guard; confirm and fix upstream.
                            trafficMarkedToGain += minBrokerBundleSwapThroughput;
                        }
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;

                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                            + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                            + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB, trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB,
                            offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                // Reflect the planned offload back into the stats so the next loop
                // iteration reasons about the post-transfer load distribution.
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. "
                        + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.", maxBroker));
            }
        } // while end

        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }

        if (decisionCache.isEmpty()) {
            // Record why nothing was decided; reset the debounce counter on success.
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            unloadConditionHitCount = 0;
        }

    } catch (Throwable e) {
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
@Test
public void testRecentlyUnloadedBundles() {
    // Bundles unloaded moments ago must be skipped, leaving only the namespaceC
    // bundle on broker3 as a transfer candidate toward broker1.
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = new TransferShedder(counter);
    var ctx = setupContext();
    Map<String, Long> recentlyUnloadedBundles = new HashMap<>();
    var now = System.currentTimeMillis();
    recentlyUnloadedBundles.put(bundleE1, now);
    recentlyUnloadedBundles.put(bundleE2, now);
    recentlyUnloadedBundles.put(bundleD1, now);
    recentlyUnloadedBundles.put(bundleD2, now);
    var res = transferShedder.findBundlesForUnloading(ctx, recentlyUnloadedBundles, Map.of());

    var expected = new HashSet<UnloadDecision>();
    expected.add(new UnloadDecision(new Unload("broker3:8080", "my-tenant/my-namespaceC/0x00000000_0x0FFFFFFF",
            Optional.of("broker1:8080")), Success, Overloaded));
    assertEquals(res, expected);
    assertEquals(counter.getLoadAvg(), setupLoadAvg);
    assertEquals(counter.getLoadStd(), setupLoadStd);
}
/** Returns the logger named after the given class, i.e. {@code getLogger(clazz.getName())}. */
public final Logger getLogger(final Class<?> clazz) {
    return getLogger(clazz.getName());
}
@Test
public void loggerNameEndingInDotOrDollarShouldWork() {
    // Logger names ending in '.' or '$' must be preserved verbatim by the context.
    {
        String loggerName = "toto.x.";
        Logger logger = lc.getLogger(loggerName);
        assertEquals(loggerName, logger.getName());
    }
    {
        String loggerName = "toto.x$";
        Logger logger = lc.getLogger(loggerName);
        assertEquals(loggerName, logger.getName());
    }
}
/**
 * Wraps every configured measure computer, validates their metric declarations,
 * and publishes the list (ordered by {@code sortComputers} — presumably a
 * dependency ordering; confirm) to the holder.
 */
@Override
public void execute(Context context) {
    List<MeasureComputerWrapper> wrappers = Arrays.stream(measureComputers).map(ToMeasureWrapper.INSTANCE).toList();
    validateMetrics(wrappers);
    measureComputersHolder.setMeasureComputers(sortComputers(wrappers));
}
@Test
public void support_plugin_metrics_as_input_metrics() {
    // A computer reading a plugin-provided metric (NEW_METRIC_1) and producing another
    // (NEW_METRIC_2) must validate successfully and be published to the holder.
    MeasureComputer[] computers = new MeasureComputer[] {newMeasureComputer(array(NEW_METRIC_1), array(NEW_METRIC_2))};
    ComputationStep underTest = new LoadMeasureComputersStep(holder, array(new TestMetrics()), computers);
    underTest.execute(new TestComputationStepContext());
    assertThat(holder.getMeasureComputers()).hasSize(1);
}
/**
 * Returns the date format strings declared in the message bundle
 * ({@code Const.DateFormat.Count} entries named {@code Const.DateFormat1..N}),
 * building them lazily on first use.
 *
 * <p>The array is built into a local variable and published in one assignment, so a
 * concurrent reader can never observe a partially-filled cache (worst case under a
 * race is a benign duplicate build). Note the cached array itself is still shared
 * with callers, matching the historical contract — callers must not mutate it.
 */
public static String[] getDateFormats() {
    String[] formats = dateFormats;
    if ( formats == null ) {
        int dateFormatsCount = toInt( BaseMessages.getString( PKG, "Const.DateFormat.Count" ), 0 );
        formats = new String[dateFormatsCount];
        for ( int i = 1; i <= dateFormatsCount; i++ ) {
            formats[i - 1] = BaseMessages.getString( PKG, "Const.DateFormat" + i );
        }
        dateFormats = formats;
    }
    return formats;
}
@Test
public void testGetDateFormats() {
    // The bundle must define at least one date format, and none may be null or blank.
    final String[] formats = Const.getDateFormats();
    assertTrue( formats.length > 0 );
    for ( String format : formats ) {
        assertTrue( format != null && !format.isEmpty() );
    }
}
/**
 * Returns {@code true} when the input parses as valid, non-missing JSON;
 * {@code false} for {@code null} input or any parse failure.
 */
@Udf
public boolean check(@UdfParameter(description = "The input JSON string") final String input) {
  if (input == null) {
    return false;
  }
  try {
    final boolean missing = UdfJsonMapper.parseJson(input).isMissingNode();
    return !missing;
  } catch (final KsqlFunctionException e) {
    // Unparseable input is simply "not JSON", never an error to the caller.
    return false;
  }
}
@Test
public void shouldInterpretObject() {
    // A well-formed JSON object literal is accepted.
    assertTrue(udf.check("{\"1\": 2}"));
}
/**
 * Creates a {@code DecimalNum} from its string representation.
 *
 * @param val the decimal string; must not represent NaN
 * @return the parsed value
 * @throws NumberFormatException if {@code val} is "NaN" (any case) — BigDecimal has
 *                               no NaN representation — or otherwise unparseable
 */
public static DecimalNum valueOf(String val) {
    if (val.equalsIgnoreCase("NAN")) {
        // Include a message so the failure is diagnosable; previously thrown bare.
        throw new NumberFormatException("NaN is not a valid DecimalNum value");
    }
    return new DecimalNum(val);
}
@Test(expected = NumberFormatException.class)
public void testValueOfForDoubleNaNShouldThrowNumberFormatException() {
    // Double.NaN presumably dispatches to a numeric valueOf overload that also
    // rejects NaN — confirm against the class's overload set.
    DecimalNum.valueOf(Double.NaN);
}
/** Advances to the next row by delegating to the underlying merged result. */
@Override
public boolean next() throws SQLException {
    return mergedResult.next();
}
@Test
void assertNext() throws SQLException {
    // With no rows in the underlying merged result, next() must report false.
    assertFalse(new EncryptMergedResult(database, encryptRule, selectStatementContext, mergedResult).next());
}
/**
 * Resolves secret placeholders in the given string via the context's resolver.
 *
 * @deprecated call {@code context.getSecretSourceResolver().resolve(...)} directly.
 */
@Deprecated
@Restricted(DoNotUse.class)
public static String resolve(ConfigurationContext context, String toInterpolate) {
    return context.getSecretSourceResolver().resolve(toInterpolate);
}
@Test
public void resolve_preservesPrefixesNotCoveredBySubstitutors() {
    // A "${...}" expression whose colon-laden name matches no substitutor prefix
    // must resolve against the plain environment instead of being mangled.
    final String secret = "foobar";
    environment.set("arn:aws:secretsmanager:eu-central-1:123456789012:secret:my-secret", secret);
    assertThat(resolve("${arn:aws:secretsmanager:eu-central-1:123456789012:secret:my-secret}"), equalTo(secret));
}
/**
 * Synchronously checks which of the given SHA digests are cached on the server,
 * blocking on the async variant.
 */
@Override
public List<Boolean> scriptExists(String... shaDigests) {
    return commandExecutor.get(scriptExistsAsync(shaDigests));
}
@Test
public void testScriptExists() {
    // Loading a script returns its SHA1; existence reflects the cache, and a
    // SCRIPT FLUSH makes the same digest report as absent.
    RScript s = redisson.getScript();
    String r = s.scriptLoad("return redis.call('get', 'foo')");
    Assertions.assertEquals("282297a0228f48cd3fc6a55de6316f31422f5d17", r);

    List<Boolean> r1 = s.scriptExists(r);
    Assertions.assertEquals(1, r1.size());
    Assertions.assertTrue(r1.get(0));

    s.scriptFlush();

    List<Boolean> r2 = s.scriptExists(r);
    Assertions.assertEquals(1, r2.size());
    Assertions.assertFalse(r2.get(0));
}
/**
 * Rolls back the transaction branch, translating any {@code XAException} from the
 * delegate through {@code mapXAException} before rethrowing.
 */
@Override
public void rollback(final Xid xid) throws XAException {
    try {
        delegate.rollback(xid);
    } catch (final XAException ex) {
        throw mapXAException(ex);
    }
}
@Test
void assertRollback() throws XAException {
    // rollback must be forwarded to the wrapped XAResource with the same Xid.
    singleXAResource.rollback(xid);
    verify(xaResource).rollback(xid);
}
/**
 * Casts this decimal literal to the target type. DecimalV3 targets mutate this
 * literal in place (overflow check, then HALF_UP rounding to the target scale);
 * other numeric/string targets return a new literal of the matching kind.
 */
@Override
public Expr uncheckedCastTo(Type targetType) throws AnalysisException {
    if (targetType.getPrimitiveType().isDecimalV3Type()) {
        this.type = targetType;
        // Overflow is checked before rounding so out-of-range values fail fast.
        checkLiteralOverflowInBinaryStyle(this.value, (ScalarType) targetType);
        // round
        int realScale = getRealScale(value);
        int scale = ((ScalarType) targetType).getScalarScale();
        if (scale <= realScale) {
            this.value = this.value.setScale(scale, RoundingMode.HALF_UP);
        }
        return this;
    } else if (targetType.getPrimitiveType().isDecimalV2Type()) {
        // DecimalV2 keeps the value unchanged; only the declared type is updated.
        this.type = targetType;
        return this;
    } else if (targetType.isFloatingPointType()) {
        return new FloatLiteral(value.doubleValue(), targetType);
    } else if (targetType.isIntegerType()) {
        // longValue() truncates the fractional part toward zero.
        return new IntLiteral(value.longValue(), targetType);
    } else if (targetType.isStringType()) {
        return new StringLiteral(value.toString());
    }
    return super.uncheckedCastTo(targetType);
}
@Test(expected = Throwable.class)
public void testDealWithSingularDecimalLiteralAbnormal3() throws AnalysisException {
    // 92233720368547758.08 exceeds DECIMAL64(9,2) precision, so the cast must fail.
    Type type = ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL64, 9, 2);
    DecimalLiteral decimalLiteral = new DecimalLiteral("92233720368547758.08");
    decimalLiteral.uncheckedCastTo(type);
}
/**
 * Two paths are equal iff their relative forms are equal; any non-{@code Path}
 * argument (including {@code null}) compares unequal.
 */
@Override
public boolean equals(Object other) {
    if (!(other instanceof Path)) {
        return false;
    }
    Path that = (Path) other;
    return getRelative().equals(that.getRelative());
}
@Test
public void testEquals() {
    // Absolute, relative, slash-normalised and appended forms must all be mutually
    // equal; "one" differs from each of them. Equality is asserted symmetrically.
    assertEquals(getAbsolutePath(), getAbsolutePath());
    assertEquals(getAbsolutePath(), getRelativePath());
    assertEquals(getAbsolutePath(), getWithSlashes());
    assertEquals(getAbsolutePath(), getAppended());
    assertNotEquals(getAbsolutePath(), getOne());
    assertEquals(getRelativePath(), getAbsolutePath());
    assertEquals(getRelativePath(), getRelativePath());
    assertEquals(getRelativePath(), getWithSlashes());
    assertEquals(getRelativePath(), getAppended());
    assertNotEquals(getRelativePath(), getOne());
    assertEquals(getWithSlashes(), getAbsolutePath());
    assertEquals(getWithSlashes(), getRelativePath());
    assertEquals(getWithSlashes(), getWithSlashes());
    assertEquals(getWithSlashes(), getAppended());
    assertNotEquals(getWithSlashes(), getOne());
    assertEquals(getAppended(), getAbsolutePath());
    assertEquals(getAppended(), getRelativePath());
    assertEquals(getAppended(), getWithSlashes());
    assertEquals(getAppended(), getAppended());
    assertNotEquals(getAppended(), getOne());
    assertNotEquals(getOne(), getAbsolutePath());
    assertNotEquals(getOne(), getRelativePath());
    assertNotEquals(getOne(), getWithSlashes());
    assertNotEquals(getOne(), getAppended());
    assertEquals(getOne(), getOne());
}
/**
 * Converts a {@code SlowPeerReports} snapshot into its protobuf representation,
 * one {@code SlowPeerReportProto} per reported peer.
 */
public static List<SlowPeerReportProto> convertSlowPeerInfo(
    SlowPeerReports slowPeers) {
  Map<String, OutlierMetrics> peers = slowPeers.getSlowPeers();
  if (peers.size() == 0) {
    return Collections.emptyList();
  }

  List<SlowPeerReportProto> reports = new ArrayList<>(peers.size());
  for (Map.Entry<String, OutlierMetrics> entry : peers.entrySet()) {
    OutlierMetrics metrics = entry.getValue();
    reports.add(SlowPeerReportProto.newBuilder()
        .setDataNodeId(entry.getKey())
        .setAggregateLatency(metrics.getActualLatency())
        .setMedian(metrics.getMedian())
        .setMad(metrics.getMad())
        .setUpperLimitLatency(metrics.getUpperLimitLatency())
        .build());
  }
  return reports;
}
@Test
public void testSlowPeerInfoPBHelper() {
    // Round-trip (report -> proto list -> report) must preserve both a populated
    // report and the empty report.
    // Test with a map that has a few slow peer entries.
    OutlierMetrics outlierMetrics1 = new OutlierMetrics(0.0, 0.0, 0.0, 0.0);
    OutlierMetrics outlierMetrics2 = new OutlierMetrics(0.0, 0.0, 0.0, 1.0);
    OutlierMetrics outlierMetrics3 = new OutlierMetrics(0.0, 0.0, 0.0, 2.0);
    final SlowPeerReports slowPeers = SlowPeerReports.create(
        ImmutableMap.of(
            "peer1", outlierMetrics1,
            "peer2", outlierMetrics2,
            "peer3", outlierMetrics3));
    SlowPeerReports slowPeersConverted1 =
        PBHelper.convertSlowPeerInfo(PBHelper.convertSlowPeerInfo(slowPeers));
    assertTrue(
        "Expected map:" + slowPeers + ", got map:" + slowPeersConverted1.getSlowPeers(),
        slowPeersConverted1.equals(slowPeers));

    // Test with an empty map.
    SlowPeerReports slowPeersConverted2 =
        PBHelper.convertSlowPeerInfo(PBHelper.convertSlowPeerInfo(SlowPeerReports.EMPTY_REPORT));
    assertTrue(
        "Expected empty map:" + ", got map:" + slowPeersConverted2,
        slowPeersConverted2.equals(SlowPeerReports.EMPTY_REPORT));
}
/**
 * Packs an address string into a long: the four IPv4 octets occupy bits
 * 40-47 / 32-39 / 24-31 / 16-23 and the port occupies the low 16 bits.
 */
public static long toLong(String address) {
    InetSocketAddress socketAddress = toInetSocketAddress(address);
    String[] octets = socketAddress.getAddress().getHostAddress().split("\\.");
    long packed = Long.parseLong(octets[0]) << 40;
    packed |= Long.parseLong(octets[1]) << 32;
    packed |= Long.parseLong(octets[2]) << 24;
    packed |= Long.parseLong(octets[3]) << 16;
    packed |= socketAddress.getPort();
    return packed;
}
/**
 * Recomputes the octet bit-packing (shifts 40/32/24/16) independently and
 * checks it against {@code NetUtil.toLong}; no port is given, so the low
 * 16 bits are expected to be zero — TODO confirm the default-port behavior
 * of {@code toInetSocketAddress}.
 */
@Test
public void testToLong1() {
    String[] split = "127.0.0.1".split("\\.");
    long r = 0;
    r = r | (Long.parseLong(split[0]) << 40);
    r = r | (Long.parseLong(split[1]) << 32);
    r = r | (Long.parseLong(split[2]) << 24);
    r = r | (Long.parseLong(split[3]) << 16);
    assertThat(NetUtil.toLong("127.0.0.1")).isEqualTo(r);
}
/**
 * Lists step metadata for every registered pipeline step for which
 * {@code isWrapper} holds; steps whose metadata cannot be built are skipped.
 *
 * @return metadata for all wrapper steps (possibly empty, never null)
 */
@GET
@TreeResponse
public ExportedPipelineStep[] doWrapperMetadata() {
    List<ExportedPipelineStep> wrappers = new ArrayList<>();
    for (StepDescriptor d : StepDescriptor.all()) {
        if (isWrapper(d)) {
            ExportedPipelineStep step = getStepMetadata(d);
            if (step != null) {
                wrappers.add(step);
            }
        }
    }
    // Preferred toArray idiom: pass a zero-length array and let the JDK size it.
    return wrappers.toArray(new ExportedPipelineStep[0]);
}
/**
 * doWrapperMetadata() must return a non-empty collection that contains the
 * standard "timeout" wrapper step.
 */
@Test
public void wrappers() throws Exception {
    PipelineMetadataService svc = new PipelineMetadataService();
    List<ExportedPipelineStep> wrappers = new ArrayList<>(Arrays.asList(svc.doWrapperMetadata()));
    assertFalse(wrappers.isEmpty());
    ExportedPipelineStep w = null;
    for (ExportedPipelineStep s : wrappers) {
        if (s.getFunctionName().equals("timeout")) {
            w = s;
        }
    }
    assertNotNull(w);
}
@Override public List<KsqlPartitionLocation> locate( final List<KsqlKey> keys, final RoutingOptions routingOptions, final RoutingFilterFactory routingFilterFactory, final boolean isRangeScan ) { if (isRangeScan && keys.isEmpty()) { throw new IllegalStateException("Query is range scan but found no range keys."); } final ImmutableList.Builder<KsqlPartitionLocation> partitionLocations = ImmutableList.builder(); final Set<Integer> filterPartitions = routingOptions.getPartitions(); final Optional<Set<KsqlKey>> keySet = keys.isEmpty() ? Optional.empty() : Optional.of(Sets.newHashSet(keys)); // Depending on whether this is a key-based lookup, determine which metadata method to use. // If we don't have keys, find the metadata for all partitions since we'll run the query for // all partitions of the state store rather than a particular one. //For issue #7174. Temporarily turn off metadata finding for a partition with keys //if there are more than one key. final List<PartitionMetadata> metadata; if (keys.size() == 1 && keys.get(0).getKey().size() == 1 && !isRangeScan) { metadata = getMetadataForKeys(keys, filterPartitions); } else { metadata = getMetadataForAllPartitions(filterPartitions, keySet); } if (metadata.isEmpty()) { final MaterializationException materializationException = new MaterializationException( "Cannot determine which host contains the required partitions to serve the pull query. \n" + "The underlying persistent query may be restarting (e.g. as a result of " + "ALTER SYSTEM) view the status of your by issuing <DESCRIBE foo>."); LOG.debug(materializationException.getMessage()); throw materializationException; } // Go through the metadata and group them by partition. 
for (PartitionMetadata partitionMetadata : metadata) { LOG.debug("Handling pull query for partition {} of state store {}.", partitionMetadata.getPartition(), storeName); final HostInfo activeHost = partitionMetadata.getActiveHost(); final Set<HostInfo> standByHosts = partitionMetadata.getStandbyHosts(); final int partition = partitionMetadata.getPartition(); final Optional<Set<KsqlKey>> partitionKeys = partitionMetadata.getKeys(); LOG.debug("Active host {}, standby {}, partition {}.", activeHost, standByHosts, partition); // For a given partition, find the ordered, filtered list of hosts to consider final List<KsqlNode> filteredHosts = getFilteredHosts(routingOptions, routingFilterFactory, activeHost, standByHosts, partition); partitionLocations.add(new PartitionLocation(partitionKeys, partition, filteredHosts)); } return partitionLocations.build(); }
/**
 * With the active host excluded for liveness and (per the test name) standby2
 * filtered out by the lag threshold, only standby1 should remain selected.
 */
@Test
public void shouldReturnStandBy1WhenActiveDownStandby2ExceedsLag() {
    // Given:
    getActiveAndStandbyMetadata();
    when(livenessFilter.filter(eq(ACTIVE_HOST)))
        .thenReturn(Host.exclude(ACTIVE_HOST, "liveness"));

    // When:
    final List<KsqlPartitionLocation> result = locator.locate(
        ImmutableList.of(KEY), routingOptions, routingFilterFactoryLag, false);

    // Then:
    List<KsqlNode> nodeList = result.get(0).getNodes().stream()
        .filter(node -> node.getHost().isSelected())
        .collect(Collectors.toList());
    assertThat(nodeList.size(), is(1));
    assertThat(nodeList, containsInAnyOrder(standByNode1));
}
/**
 * Handles a completed client response by delegating to {@code handleFinish},
 * which finishes the given span.
 *
 * @param response the client response
 * @param span the span started for the corresponding request
 */
public void handleReceive(HttpClientResponse response, Span span) {
    handleFinish(response, span);
}
/**
 * The span must be finished even when the response mock unwraps to nothing;
 * exactly the expected span interactions occur and no more.
 */
@Test
void handleReceive_finishesSpanEvenIfUnwrappedNull() {
    brave.Span span = mock(brave.Span.class);
    when(span.context()).thenReturn(context);
    when(span.customizer()).thenReturn(span);

    handler.handleReceive(mock(HttpClientResponse.class), span);

    verify(span).isNoop();
    verify(span).context();
    verify(span).customizer();
    verify(span).finish();
    verifyNoMoreInteractions(span);
}
/**
 * Serializes a resource key to its string form, choosing the v2 or v1
 * serialization depending on the Rest.li protocol version.
 *
 * @param key the key to serialize
 * @param escaping URL escaping mode
 * @param componentType URI component being built (used by the v2 path)
 * @param full whether to produce the full representation
 * @param version protocol version that selects the wire format
 */
public static String keyToString(Object key, URLEscaper.Escaping escaping, UriComponent.Type componentType,
                                 boolean full, ProtocolVersion version)
{
  final boolean useV2 =
      version.compareTo(AllProtocolVersions.RESTLI_PROTOCOL_2_0_0.getProtocolVersion()) >= 0;
  return useV2
      ? keyToStringV2(key, escaping, componentType, full)
      : keyToStringV1(key, escaping, full);
}
/**
 * Serializes a three-part compound key (string, int, enum) and checks the
 * version-dependent string form supplied by the data provider.
 */
@Test(dataProvider = TestConstants.RESTLI_PROTOCOL_1_2_PREFIX + "compoundKey")
public void testCompoundKeyToString(ProtocolVersion version, String expected) {
    CompoundKey compoundKey = new CompoundKey();
    compoundKey.append("key1", "stringVal");
    compoundKey.append("key2", 5);
    compoundKey.append("key3", TestEnum.VALUE_1);
    String compoundKeyString = URIParamUtils.keyToString(compoundKey, NO_ESCAPING, null, true, version);
    Assert.assertEquals(compoundKeyString, expected);
}
/**
 * Computes the clustering coefficient of the graph.
 *
 * <p>Directed graphs use the brute-force average computation; undirected
 * graphs use the triangle-counting path, which also fills the caller-supplied
 * working arrays.
 *
 * @return result map keyed by metric name ("clusteringCoefficient" at least)
 */
public HashMap<String, Double> computeClusteringCoefficient(Graph graph, ArrayWrapper[] currentNetwork,
        int[] currentTriangles, double[] currentNodeClustering, boolean directed) {
    if (directed) {
        // Directed case: single averaged value from the brute-force scan.
        HashMap<String, Double> resultValues = new HashMap<>();
        resultValues.put("clusteringCoefficient", bruteForce(graph));
        return resultValues;
    }
    // Undirected case: initialise per-node state, then count triangles.
    initStartValues(graph);
    return computeTriangles(graph, currentNetwork, currentTriangles, currentNodeClustering, directed);
}
/**
 * Builds a wheel-like 7-node undirected graph (hub node1 connected to a
 * 6-cycle) and checks per-node clustering: 0.4 for the hub, ~0.667 for a
 * rim node.
 */
@Test
public void testSpecial1UndirectedGraphClusteringCoefficient() {
    GraphModel graphModel = GraphModel.Factory.newInstance();
    UndirectedGraph undirectedGraph = graphModel.getUndirectedGraph();
    Node node1 = graphModel.factory().newNode("0");
    Node node2 = graphModel.factory().newNode("1");
    Node node3 = graphModel.factory().newNode("2");
    Node node4 = graphModel.factory().newNode("3");
    Node node5 = graphModel.factory().newNode("4");
    Node node6 = graphModel.factory().newNode("5");
    Node node7 = graphModel.factory().newNode("6");
    undirectedGraph.addNode(node1);
    undirectedGraph.addNode(node2);
    undirectedGraph.addNode(node3);
    undirectedGraph.addNode(node4);
    undirectedGraph.addNode(node5);
    undirectedGraph.addNode(node6);
    undirectedGraph.addNode(node7);
    // Spokes from the hub (node1) to every rim node.
    Edge edge12 = graphModel.factory().newEdge(node1, node2, false);
    Edge edge13 = graphModel.factory().newEdge(node1, node3, false);
    Edge edge14 = graphModel.factory().newEdge(node1, node4, false);
    Edge edge15 = graphModel.factory().newEdge(node1, node5, false);
    Edge edge16 = graphModel.factory().newEdge(node1, node6, false);
    Edge edge17 = graphModel.factory().newEdge(node1, node7, false);
    // Rim cycle 2-3-4-5-6-7-2.
    Edge edge23 = graphModel.factory().newEdge(node2, node3, false);
    Edge edge34 = graphModel.factory().newEdge(node3, node4, false);
    Edge edge45 = graphModel.factory().newEdge(node4, node5, false);
    Edge edge56 = graphModel.factory().newEdge(node5, node6, false);
    Edge edge67 = graphModel.factory().newEdge(node6, node7, false);
    Edge edge72 = graphModel.factory().newEdge(node7, node2, false);
    undirectedGraph.addEdge(edge12);
    undirectedGraph.addEdge(edge13);
    undirectedGraph.addEdge(edge14);
    undirectedGraph.addEdge(edge15);
    undirectedGraph.addEdge(edge16);
    undirectedGraph.addEdge(edge17);
    undirectedGraph.addEdge(edge23);
    undirectedGraph.addEdge(edge34);
    undirectedGraph.addEdge(edge45);
    undirectedGraph.addEdge(edge56);
    undirectedGraph.addEdge(edge67);
    undirectedGraph.addEdge(edge72);
    Graph graph = graphModel.getGraph();
    ClusteringCoefficient cc = new ClusteringCoefficient();
    ArrayWrapper[] network = new ArrayWrapper[7];
    int[] triangles = new int[7];
    double[] nodeClustering = new double[7];
    HashMap<String, Double> results = cc.computeClusteringCoefficient(graph, network, triangles, nodeClustering, false);
    double cl1 = nodeClustering[0];
    double cl3 = nodeClustering[2];
    double res3 = 0.667;
    double diff = 0.01;
    assertEquals(cl1, 0.4);
    assertTrue(Math.abs(cl3 - res3) < diff);
}
/**
 * Applies the aggregate-metrics recommendation: only REALTIME and HYBRID
 * tables get the flag computed; other table types are left untouched.
 *
 * @throws InvalidInputException propagated from the input accessors
 */
@Override
public void run() throws InvalidInputException {
    final String tableType = _input.getTableType();
    final boolean isRealtimeOrHybrid =
        tableType.equalsIgnoreCase(REALTIME) || tableType.equalsIgnoreCase(HYBRID);
    if (isRealtimeOrHybrid) {
        _output.setAggregateMetrics(shouldAggregate(_input));
    }
}
/**
 * OFFLINE tables must not get the aggregate-metrics flag, even when the
 * sample query aggregates every metric.
 */
@Test
public void testRunOfflineTable() throws Exception {
    Set<String> metrics = ImmutableSet.of("a", "b", "c");
    InputManager input = createInput(metrics, "select sum(a), sum(b), sum(c) from tableT");
    input.setTableType("OFFLINE");
    ConfigManager output = new ConfigManager();
    AggregateMetricsRule rule = new AggregateMetricsRule(input, output);
    rule.run();
    assertFalse(output.isAggregateMetrics());
}
/**
 * Returns this byte sequence fitted to the given bit width by delegating to
 * {@code doFit}.
 *
 * @param bitWidth target width in bits
 * @throws ByteSequenceTrimException if the value does not fit in bitWidth bits
 */
public ImmutableByteSequence fit(int bitWidth) throws ByteSequenceTrimException {
    return doFit(this, bitWidth);
}
/**
 * For every power of two up to 2^31, fitting to (msb+1) bits must succeed
 * while fitting to msb bits must fail (value 1 already fits in 1 bit, so
 * the illegal case is skipped there).
 */
@Test
public void testFit() throws ImmutableByteSequence.ByteSequenceTrimException {
    // Test fit by forcing a given MSB index.
    for (int msbIndex = 0; msbIndex < 32; msbIndex++) {
        long value = (long) Math.pow(2, msbIndex);
        ImmutableByteSequence bytes = ImmutableByteSequence.copyFrom(value);
        checkLegalFit(bytes, msbIndex + 1);
        if (msbIndex != 0) {
            checkIllegalFit(bytes, msbIndex);
        }
    }
}
/**
 * Registers the multi-column list-partition values for a partition.
 *
 * @param partitionId id of the partition being defined
 * @param multiValues one entry per value tuple; each inner list presumably
 *                    holds one literal per partition column — confirm with
 *                    callers
 */
public void setMultiValues(long partitionId, List<List<String>> multiValues) {
    this.idToMultiValues.put(partitionId, multiValues);
}
/**
 * Builds an OlapTableSink over a table with a multi-column LIST partition
 * (dt, province) and verifies the sink initializes, completes and serializes
 * to Thrift.
 */
@Test
public void testMultiListPartition(@Injectable OlapTable dstTable) throws UserException {
    TDescriptorTable descTbl;  // NOTE(review): unused in original? keep structure below unchanged.
    DescriptorTable descTable = new DescriptorTable();
    TupleDescriptor tuple = descTable.createTupleDescriptor("DstTable");
    // k1
    SlotDescriptor k1 = descTable.addSlotDescriptor(tuple);
    k1.setColumn(new Column("k1", Type.BIGINT));
    k1.setIsMaterialized(true);
    // k2
    SlotDescriptor k2 = descTable.addSlotDescriptor(tuple);
    k2.setColumn(new Column("k2", ScalarType.createVarchar(25)));
    k2.setIsMaterialized(true);
    // v1
    SlotDescriptor v1 = descTable.addSlotDescriptor(tuple);
    v1.setColumn(new Column("v1", ScalarType.createVarchar(25)));
    v1.setIsMaterialized(true);
    // v2
    SlotDescriptor v2 = descTable.addSlotDescriptor(tuple);
    v2.setColumn(new Column("v2", Type.BIGINT));
    v2.setIsMaterialized(true);
    ListPartitionInfo listPartitionInfo = new ListPartitionInfo(PartitionType.LIST,
            Lists.newArrayList(new Column("dt", Type.STRING), new Column("province", Type.STRING)));
    List<String> multiItems = Lists.newArrayList("dt", "shanghai");
    List<List<String>> multiValues = new ArrayList<>();
    multiValues.add(multiItems);
    listPartitionInfo.setMultiValues(1, multiValues);
    listPartitionInfo.setReplicationNum(1, (short) 3);
    MaterializedIndex index = new MaterializedIndex(1, MaterializedIndex.IndexState.NORMAL);
    HashDistributionInfo distInfo = new HashDistributionInfo(
            3, Lists.newArrayList(new Column("id", Type.BIGINT)));
    Partition partition = new Partition(1, "p1", index, distInfo);
    Map<ColumnId, Column> idToColumn = Maps.newTreeMap(ColumnId.CASE_INSENSITIVE_ORDER);
    idToColumn.put(ColumnId.create("dt"), new Column("dt", Type.STRING));
    idToColumn.put(ColumnId.create("province"), new Column("province", Type.STRING));
    // Stub the destination table's metadata accessors.
    new Expectations() {{
        dstTable.getId(); result = 1;
        dstTable.getPartitions(); result = Lists.newArrayList(partition);
        dstTable.getPartition(1L); result = partition;
        dstTable.getPartitionInfo(); result = listPartitionInfo;
        dstTable.getIdToColumn(); result = idToColumn;
    }};
    OlapTableSink sink = new OlapTableSink(dstTable, tuple, Lists.newArrayList(1L),
            TWriteQuorumType.MAJORITY, false, false, false);
    sink.init(new TUniqueId(1, 2), 3, 4, 1000);
    sink.complete();
    Assert.assertTrue(sink.toThrift() instanceof TDataSink);
}
/**
 * Bulk-indexes the given messages by delegating to the three-argument
 * overload with defaults ({@code false}, {@code null}); see that overload for
 * the meaning of the extra parameters.
 */
public IndexingResults bulkIndex(final List<MessageWithIndex> messageList) {
    return bulkIndex(messageList, false, null);
}
/**
 * The combined message size (17 + 23 + 42 = 82) must be accounted exactly
 * once as output traffic and never as system traffic.
 */
@Test
public void bulkIndexingShouldAccountMessageSizes() throws IOException {
    when(messagesAdapter.bulkIndex(any())).thenReturn(IndexingResults.empty());
    final IndexSet indexSet = mock(IndexSet.class);
    final List<MessageWithIndex> messageList = List.of(
            new MessageWithIndex(wrap(messageWithSize(17)), indexSet),
            new MessageWithIndex(wrap(messageWithSize(23)), indexSet),
            new MessageWithIndex(wrap(messageWithSize(42)), indexSet)
    );
    when(messagesAdapter.bulkIndex(any())).thenReturn(IndexingResults.create(createSuccessFromMessages(messageList), List.of()));
    messages.bulkIndex(messageList);
    verify(trafficAccounting, times(1)).addOutputTraffic(82);
    verify(trafficAccounting, never()).addSystemTraffic(anyLong());
}
/**
 * Returns the mean of this distribution (the {@code mu} parameter).
 */
@Override
public double mean() {
    return mu;
}
/**
 * mean() must return the mu used at construction for several (mu, sigma)
 * pairs; the rand() calls ensure sampling does not perturb the reported mean.
 */
@Test
public void testMean() {
    System.out.println("mean");
    GaussianDistribution instance = new GaussianDistribution(0.0, 1.0);
    instance.rand();
    assertEquals(0.0, instance.mean(), 1E-7);
    instance = new GaussianDistribution(1.0, 2.0);
    instance.rand();
    assertEquals(1.0, instance.mean(), 1E-7);
    instance = new GaussianDistribution(2.0, 0.5);
    instance.rand();
    assertEquals(2.0, instance.mean(), 1E-7);
    instance = new GaussianDistribution(3.0, 3.8);
    instance.rand();
    assertEquals(3.0, instance.mean(), 1E-7);
}
/**
 * Returns the value of the /Title entry of the document information
 * dictionary, as produced by the underlying dictionary lookup.
 */
public String getTitle() {
    return info.getString( COSName.TITLE );
}
/**
 * Regression test for PDFBOX-3068: the title of the fixture document must be
 * readable from the information dictionary.
 */
@Test
void testPDFBox3068() throws Exception {
    try (PDDocument doc = Loader.loadPDF(RandomAccessReadBuffer.createBufferFromStream(
            TestPDDocumentInformation.class.getResourceAsStream("PDFBOX-3068.pdf"))))
    {
        PDDocumentInformation documentInformation = doc.getDocumentInformation();
        assertEquals("Title", documentInformation.getTitle());
    }
}
/**
 * Maps a telemetry data provider to the metric set matching its dimension.
 *
 * @param provider the provider to map
 * @return the metrics for the provider's dimension
 * @throws IllegalArgumentException for dimensions without a mapping yet
 */
public static Set<Metric> mapFromDataProvider(TelemetryDataProvider<?> provider) {
  // Switch expression: exhaustive-by-default and no per-case return boilerplate.
  return switch (provider.getDimension()) {
    case INSTALLATION -> mapInstallationMetric(provider);
    case PROJECT -> mapProjectMetric(provider);
    case USER -> mapUserMetric(provider);
    case LANGUAGE -> mapLanguageMetric(provider);
    default -> throw new IllegalArgumentException(
        "Dimension: " + provider.getDimension() + " not yet implemented.");
  };
}
/**
 * A PROJECT-dimension provider must be mapped to ProjectMetric instances
 * carrying the expected key/type/uuid/value/granularity tuples.
 */
@Test
void mapFromDataProvider_whenProjectProvider() {
    TelemetryDataProvider<String> provider = new TestTelemetryBean(Dimension.PROJECT);
    Set<Metric> metrics = TelemetryMetricsMapper.mapFromDataProvider(provider);
    List<ProjectMetric> list = retrieveList(metrics);
    assertThat(list)
        .extracting(ProjectMetric::getKey, ProjectMetric::getType, ProjectMetric::getProjectUuid,
            ProjectMetric::getValue, ProjectMetric::getGranularity)
        .containsExactlyInAnyOrder(
            expected()
        );
}
/**
 * Checks that the subject's elements are in natural order, delegating to the
 * comparator-based overload with {@code Ordering.natural()}.
 */
public void isInOrder() {
    isInOrder(Ordering.natural());
}
/**
 * Empty, singleton and duplicate-containing lists must all satisfy the
 * comparator-based order check when compared as decimal numbers.
 */
@Test
public void iterableIsInOrderWithComparator() {
    Iterable<String> emptyStrings = asList();
    assertThat(emptyStrings).isInOrder(COMPARE_AS_DECIMAL);
    assertThat(asList("1")).isInOrder(COMPARE_AS_DECIMAL);
    assertThat(asList("1", "1", "2", "10", "10", "10", "20")).isInOrder(COMPARE_AS_DECIMAL);
}
/**
 * Reports whether this depth limit sits strictly above the given component
 * type, consulting the views-side or report-side maximum depth depending on
 * which family the type belongs to.
 *
 * @throws UnsupportedOperationException for types in neither family
 */
public boolean isHigherThan(Component.Type otherType) {
    if (otherType.isViewsType()) {
        // A null views depth means no views limit is configured: never higher.
        return viewsMaxDepth != null && viewsMaxDepth.isHigherThan(otherType);
    }
    if (otherType.isReportType()) {
        return reportMaxDepth != null && reportMaxDepth.isHigherThan(otherType);
    }
    throw new UnsupportedOperationException(UNSUPPORTED_TYPE_UOE_MSG);
}
/**
 * PROJECT is not higher than itself, but must be higher than every other
 * report-side type.
 */
@Test
public void PROJECT_isHigher_than_all_report_types_but_PROJECT() {
    assertThat(CrawlerDepthLimit.PROJECT.isHigherThan(Type.PROJECT)).isFalse();
    for (Type reportType : from(REPORT_TYPES).filter(not(equalTo(Type.PROJECT)))) {
        assertThat(CrawlerDepthLimit.PROJECT.isHigherThan(reportType)).as("isHigherThan(%s)", reportType).isTrue();
    }
}
/**
 * Dispatches the records buffered in {@code toSend} to Kafka.
 *
 * <p>For each record: apply the transformation chain, convert to a producer
 * record, create the topic if needed, and send asynchronously. The send
 * callback acks/fails the submitted-record bookkeeping. On a retriable send
 * failure the unsent tail of the batch is kept in {@code toSend} and
 * {@code false} is returned so the caller retries; otherwise {@code toSend}
 * is cleared and {@code true} is returned.
 *
 * @return true if the whole batch was dispatched, false if a retriable
 *         failure left records to be re-sent
 */
boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    // Counter is null only when the batch is empty (the loop then never runs).
    final SourceRecordWriteCounter counter =
            toSend.isEmpty() ? null : new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        ProcessingContext<SourceRecord> context = new ProcessingContext<>(preTransformRecord);
        final SourceRecord record = transformationChain.apply(context, preTransformRecord);
        final ProducerRecord<byte[], byte[]> producerRecord = convertTransformedRecord(context, record);
        if (producerRecord == null || context.failed()) {
            // Dropped by a transform or by error tolerance during conversion.
            counter.skipRecord();
            recordDropped(preTransformRecord);
            processed++;
            continue;
        }
        log.trace("{} Appending record to the topic {} with key {}, value {}",
                this, record.topic(), record.key(), record.value());
        Optional<SubmittedRecords.SubmittedRecord> submittedRecord = prepareToSendRecord(preTransformRecord, producerRecord);
        try {
            final String topic = producerRecord.topic();
            maybeCreateTopic(topic);
            producer.send(
                    producerRecord,
                    (recordMetadata, e) -> {
                        if (e != null) {
                            if (producerClosed) {
                                log.trace("{} failed to send record to {}; this is expected as the producer has already been closed",
                                        AbstractWorkerSourceTask.this, topic, e);
                            } else {
                                log.error("{} failed to send record to {}: ", AbstractWorkerSourceTask.this, topic, e);
                            }
                            log.trace("{} Failed record: {}", AbstractWorkerSourceTask.this, preTransformRecord);
                            producerSendFailed(context, false, producerRecord, preTransformRecord, e);
                            if (retryWithToleranceOperator.getErrorToleranceType() == ToleranceType.ALL) {
                                // Tolerated failure: account the skip and ack so offsets can advance.
                                counter.skipRecord();
                                submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
                            }
                        } else {
                            counter.completeRecord();
                            log.trace("{} Wrote record successfully: topic {} partition {} offset {}",
                                    AbstractWorkerSourceTask.this,
                                    recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                            recordSent(preTransformRecord, producerRecord, recordMetadata);
                            submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
                            if (topicTrackingEnabled) {
                                recordActiveTopic(producerRecord.topic());
                            }
                        }
                    });
            // Note that this will cause retries to take place within a transaction
        } catch (RetriableException | org.apache.kafka.common.errors.RetriableException e) {
            log.warn("{} Failed to send record to topic '{}' and partition '{}'. Backing off before retrying: ",
                    this, producerRecord.topic(), producerRecord.partition(), e);
            // Keep only the unsent tail so the next call resumes where we stopped.
            toSend = toSend.subList(processed, toSend.size());
            submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::drop);
            counter.retryRemaining();
            return false;
        } catch (ConnectException e) {
            log.warn("{} Failed to send record to topic '{}' and partition '{}' due to an unrecoverable exception: ",
                    this, producerRecord.topic(), producerRecord.partition(), e);
            log.trace("{} Failed to send {} with unrecoverable exception: ", this, producerRecord, e);
            throw e;
        } catch (KafkaException e) {
            producerSendFailed(context, true, producerRecord, preTransformRecord, e);
        }
        processed++;
        recordDispatched(preTransformRecord);
    }
    toSend = null;
    batchDispatched();
    return true;
}
/**
 * A source record carrying timestamp -1 must be sent with no (null) producer
 * timestamp.
 */
@Test
public void testSendRecordsNoTimestamp() {
    final Long timestamp = -1L;
    createWorkerTask();
    expectSendRecord(emptyHeaders());
    expectApplyTransformationChain();
    expectTopicCreation(TOPIC);
    workerTask.toSend = Collections.singletonList(
            new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp)
    );
    workerTask.sendRecords();
    ArgumentCaptor<ProducerRecord<byte[], byte[]>> sent = verifySendRecord();
    assertNull(sent.getValue().timestamp());
    verifyTaskGetTopic();
    verifyTopicCreation();
}
/**
 * Validates that a plugin class exposes every method required for its
 * declared plugin type.
 *
 * @param type the plugin category being validated
 * @param pluginClass the candidate plugin implementation
 * @return true if the class exposes all methods required for the type
 * @throws IllegalStateException for an unrecognized plugin type
 */
public static boolean validatePlugin(PluginLookup.PluginType type, Class<?> pluginClass) {
    // Enum identity comparison; exactly one branch runs, as in the switch form.
    if (type == PluginLookup.PluginType.INPUT) {
        return containsAllMethods(inputMethods, pluginClass.getMethods());
    }
    if (type == PluginLookup.PluginType.FILTER) {
        return containsAllMethods(filterMethods, pluginClass.getMethods());
    }
    if (type == PluginLookup.PluginType.CODEC) {
        return containsAllMethods(codecMethods, pluginClass.getMethods());
    }
    if (type == PluginLookup.PluginType.OUTPUT) {
        return containsAllMethods(outputMethods, pluginClass.getMethods());
    }
    throw new IllegalStateException("Unknown plugin type for validation: " + type);
}
/**
 * The Line codec class must pass CODEC plugin validation.
 */
@Test
public void testValidCodecPlugin() {
    Assert.assertTrue(PluginValidator.validatePlugin(PluginLookup.PluginType.CODEC, Line.class));
}
/**
 * Returns a batched record iterator for the given value and direction,
 * delegating to the cursor-based overload with a {@code null} cursor.
 */
@Override
public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) {
    return getSqlRecordIteratorBatch(value, descending, null);
}
/**
 * Ascending cursor iteration from value 0 with the left bound included must
 * yield the fixture rows in this interleaved order.
 */
@Test
public void getSqlRecordIteratorBatchCursorLeftIncludedAscending() {
    var expectedOrder = List.of(0, 3, 6, 1, 4, 7);
    performCursorTest(3, expectedOrder, cursor -> store.getSqlRecordIteratorBatch(0, true, 2, false, false, cursor));
}
static @Nullable Value lookupDocumentValue(Document document, String fieldPath) { OrderByFieldPath resolvedPath = OrderByFieldPath.fromString(fieldPath); // __name__ is a special field and doesn't exist in (top-level) valueMap (see // https://firebase.google.com/docs/firestore/reference/rest/v1/projects.databases.documents#Document). if (resolvedPath.isDocumentName()) { return Value.newBuilder().setReferenceValue(document.getName()).build(); } return findMapValue(new ArrayList<>(resolvedPath.getSegments()), document.getFieldsMap()); }
/**
 * An empty field path is invalid and must be rejected with
 * IllegalArgumentException.
 */
@Test
public void lookupDocumentValue_invalidThrows() {
    assertThrows(
        IllegalArgumentException.class, () -> QueryUtils.lookupDocumentValue(testDocument, ""));
}
/**
 * Bean-copies {@code source} into {@code target} using the default converter,
 * skipping the property names listed in {@code ignore}.
 *
 * @return the target instance
 */
public static <T, S> T copy(S source, T target, String... ignore) {
    return copy(source, target, DEFAULT_CONVERT, ignore);
}
/**
 * Copying a map whose "colors" entry is a List of strings must populate the
 * target's array property, converting "RED" to the Color.RED enum constant.
 */
@Test
public void testMapArray() {
    Map<String, Object> data = new HashMap<>();
    data.put("colors", Arrays.asList("RED"));
    Target target = new Target();
    FastBeanCopier.copy(data, target);
    System.out.println(target);
    Assert.assertNotNull(target.getColors());
    Assert.assertSame(target.getColors()[0], Color.RED);
}
/**
 * Builds a MultiLabel from the supplied value: a collection is parsed
 * element-by-element into (label, flag) pairs, anything else is parsed from
 * its string form.
 */
@Override
public <V> MultiLabel generateOutput(V label) {
    if (!(label instanceof Collection)) {
        // Scalar input: single string parse.
        return MultiLabel.parseString(label.toString());
    }
    List<Pair<String,Boolean>> parsedPairs = new ArrayList<>();
    for (Object element : (Collection<?>) label) {
        parsedPairs.add(MultiLabel.parseElement(element.toString()));
    }
    return MultiLabel.createFromPairList(parsedPairs);
}
/**
 * A collection of explicitly-false label elements must produce an empty
 * MultiLabel (no labels, empty label string).
 */
@Test
public void testGenerateOutput_allFalse() {
    MultiLabelFactory factory = new MultiLabelFactory();
    MultiLabel output = factory.generateOutput(new HashSet<>(Arrays.asList("a=false", "b=false", "c=false")));
    assertEquals(0, output.getLabelSet().size());
    assertEquals("", output.getLabelString());
}
/**
 * Determines whether a path exists in S3.
 *
 * <p>Buckets are probed via an accessibility check; files/placeholders via an
 * attribute lookup; directories via a one-entry common-prefix listing.
 * NotfoundException means false; access-denied means the object exists but is
 * not readable by the current user (still true), except for retriable
 * access-denied which is rethrown as a server error.
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        // The root always exists.
        return true;
    }
    try {
        if(containerService.isContainer(file)) {
            try {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Test if bucket %s is accessible", file));
                }
                return session.getClient().isBucketAccessible(containerService.getContainer(file).getName());
            }
            catch(ServiceException e) {
                throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
            }
        }
        if(file.isFile() || file.isPlaceholder()) {
            // Throws NotfoundException (caught below) when the object is missing.
            attributes.find(file, listener);
            return true;
        }
        else {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Search for common prefix %s", file));
            }
            // Check for common prefix
            try {
                new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
                return true;
            }
            catch(ListCanceledException l) {
                // Found common prefix
                return true;
            }
            catch(NotfoundException e) {
                throw e;
            }
        }
    }
    catch(NotfoundException e) {
        return false;
    }
    catch(RetriableAccessDeniedException e) {
        // Must fail with server error
        throw e;
    }
    catch(AccessDeniedException e) {
        // Object is inaccessible to current user, but does exist.
        return true;
    }
}
/**
 * A random, never-created key inside an existing bucket must not be found.
 */
@Test
public void testFindNotFound() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final S3FindFeature f = new S3FindFeature(session, new S3AccessControlListFeature(session));
    assertFalse(f.find(test));
}
/**
 * Exports the pipeline identified by the descriptor as a content-pack entity.
 *
 * @return the exported entity, or empty if no pipeline exists for the id
 */
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) {
    final String pipelineId = entityDescriptor.id().id();
    try {
        final PipelineDao pipelineDao = pipelineService.load(pipelineId);
        return Optional.of(exportNativeEntity(pipelineDao, entityDescriptorIds));
    } catch (NotFoundException e) {
        // Missing pipelines are expected during exports; log at debug only.
        LOG.debug("Couldn't find pipeline {}", entityDescriptor, e);
        return Optional.empty();
    }
}
/**
 * Exporting a pipeline with a stream connection must produce an EntityV1
 * whose data round-trips to a PipelineEntity with the original title,
 * description, source and connected-stream reference.
 */
@Test
public void exportEntity() {
    final PipelineDao pipeline = PipelineDao.builder()
            .id("pipeline-1234")
            .title("title")
            .description("description")
            .source("pipeline \"Test\"\nstage 0 match either\nrule \"debug\"\nend")
            .build();
    final PipelineConnections connections = PipelineConnections.create("id", "stream-1234", Collections.singleton("pipeline-1234"));
    connectionsService.save(connections);
    final EntityDescriptor descriptor = EntityDescriptor.create(pipeline.id(), ModelTypes.PIPELINE_V1);
    final EntityDescriptor streamDescriptor = EntityDescriptor.create("stream-1234", ModelTypes.STREAM_V1);
    final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor, streamDescriptor);
    final Entity entity = facade.exportNativeEntity(pipeline, entityDescriptorIds);

    assertThat(entity).isInstanceOf(EntityV1.class);
    assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
    assertThat(entity.type()).isEqualTo(ModelTypes.PIPELINE_V1);

    final EntityV1 entityV1 = (EntityV1) entity;
    final PipelineEntity pipelineEntity = objectMapper.convertValue(entityV1.data(), PipelineEntity.class);
    assertThat(pipelineEntity.title()).isEqualTo(ValueReference.of("title"));
    assertThat(pipelineEntity.description()).isEqualTo(ValueReference.of("description"));
    assertThat(pipelineEntity.source().asString(Collections.emptyMap())).startsWith("pipeline \"Test\"");
    assertThat(pipelineEntity.connectedStreams()).containsOnly(ValueReference.of(entityDescriptorIds.get(streamDescriptor).orElse(null)));
}
/**
 * Checks every non-target mining field against the request parameters and
 * applies the field's invalid-value treatment; parameters flagged for removal
 * (e.g. treatment AS_MISSING) are removed from the request.
 *
 * @param notTargetMiningFields the non-target mining fields to validate
 * @param requestData the request whose parameters are validated and mutated
 */
static void verifyFixInvalidValues(final List<KiePMMLMiningField> notTargetMiningFields,
                                   final PMMLRequestData requestData) {
    logger.debug("verifyInvalidValues {} {}", notTargetMiningFields, requestData);
    final Collection<ParameterInfo> requestParams = requestData.getRequestParams();
    final List<ParameterInfo> toRemove = new ArrayList<>();
    notTargetMiningFields.forEach(miningField -> {
        ParameterInfo parameterInfo = requestParams.stream()
                .filter(paramInfo -> miningField.getName().equals(paramInfo.getName()))
                .findFirst()
                .orElse(null);
        if (parameterInfo != null) {
            boolean match = isMatching(parameterInfo, miningField);
            if (!match) {
                manageInvalidValues(miningField, parameterInfo, toRemove);
            }
        }
    });
    // Fix: remove once, after the scan. The original removed inside the loop,
    // re-issuing removals for previously collected parameters on every field.
    toRemove.forEach(requestData::removeRequestParam);
}
/**
 * With AS_MISSING treatment, every out-of-range parameter (outside allowed
 * values or intervals) must be removed from the request, leaving it empty.
 */
@Test
void verifyFixInvalidValuesInvalidAsMissing() {
    KiePMMLMiningField miningField0 = KiePMMLMiningField.builder("FIELD-0", null)
            .withDataType(DATA_TYPE.STRING)
            .withInvalidValueTreatmentMethod(INVALID_VALUE_TREATMENT_METHOD.AS_MISSING)
            .withAllowedValues(Arrays.asList("123", "124", "125"))
            .build();
    KiePMMLMiningField miningField1 = KiePMMLMiningField.builder("FIELD-1", null)
            .withDataType(DATA_TYPE.DOUBLE)
            .withInvalidValueTreatmentMethod(INVALID_VALUE_TREATMENT_METHOD.AS_MISSING)
            .withAllowedValues(Arrays.asList("1.23", "12.4", "1.25"))
            .build();
    List<KiePMMLInterval> intervals = Arrays.asList(new KiePMMLInterval(0.0, 12.4, CLOSURE.CLOSED_CLOSED),
            new KiePMMLInterval(12.6, 14.5, CLOSURE.OPEN_CLOSED));
    KiePMMLMiningField miningField2 = KiePMMLMiningField.builder("FIELD-2", null)
            .withDataType(DATA_TYPE.DOUBLE)
            .withInvalidValueTreatmentMethod(INVALID_VALUE_TREATMENT_METHOD.AS_MISSING)
            .withIntervals(intervals)
            .build();
    List<KiePMMLMiningField> miningFields = Arrays.asList(miningField0, miningField1, miningField2);
    PMMLRequestData pmmlRequestData = new PMMLRequestData("123", "modelName");
    // All three values are invalid for their respective fields.
    pmmlRequestData.addRequestParam("FIELD-0", "122");
    pmmlRequestData.addRequestParam("FIELD-1", 12.5);
    pmmlRequestData.addRequestParam("FIELD-2", 14.6);
    PreProcess.verifyFixInvalidValues(miningFields, pmmlRequestData);
    assertThat(pmmlRequestData.getRequestParams()).isEmpty();
}
/**
 * Alters the routine characteristics of a SQL function inside a transaction.
 *
 * <p>Loads the matching function(s), checks the name/parameter selection is
 * unique and exists, applies the requested characteristic changes, and — only
 * if the definition actually changed — inserts a new version. The function
 * cache is refreshed afterwards.
 *
 * @param functionName fully-qualified function name
 * @param parameterTypes optional parameter-type filter; empty matches by name
 * @param alterRoutineCharacteristics the characteristic changes to apply
 */
@Override
public void alterFunction(QualifiedObjectName functionName, Optional<List<TypeSignature>> parameterTypes,
                          AlterRoutineCharacteristics alterRoutineCharacteristics) {
    checkCatalog(functionName);
    jdbi.useTransaction(handle -> {
        FunctionNamespaceDao transactionDao = handle.attach(functionNamespaceDaoClass);
        List<SqlInvokedFunction> functions = getSqlFunctions(transactionDao, functionName, parameterTypes);
        checkUnique(functions, functionName);
        checkExists(functions, functionName, parameterTypes);

        SqlInvokedFunction latest = functions.get(0);
        RoutineCharacteristics.Builder routineCharacteristics = RoutineCharacteristics.builder(latest.getRoutineCharacteristics());
        alterRoutineCharacteristics.getNullCallClause().ifPresent(routineCharacteristics::setNullCallClause);
        SqlInvokedFunction altered = new SqlInvokedFunction(
                latest.getFunctionId().getFunctionName(),
                latest.getParameters(),
                latest.getSignature().getReturnType(),
                latest.getDescription(),
                routineCharacteristics.build(),
                latest.getBody(),
                notVersioned());
        // No-op alters do not create a new version.
        if (!altered.hasSameDefinitionAs(latest)) {
            checkState(latest.hasVersion(), "Function version missing: %s", latest.getFunctionId());
            insertSqlInvokedFunction(transactionDao, altered, getLongVersion(latest) + 1);
        }
    });
    refreshFunctionsCache(functionName);
}
/**
 * Covers alterFunction: altering by name + parameter types bumps the version,
 * altering by name alone works after disambiguation by drop, and a no-op
 * alter leaves the version unchanged.
 */
@Test
public void testAlterFunction() {
    createFunction(FUNCTION_POWER_TOWER_DOUBLE, false);
    createFunction(FUNCTION_POWER_TOWER_INT, false);
    createFunction(FUNCTION_TANGENT, false);

    // Alter a specific function by name and parameter types
    alterFunction(POWER_TOWER, Optional.of(ImmutableList.of(parseTypeSignature(DOUBLE))),
            new AlterRoutineCharacteristics(Optional.of(RETURNS_NULL_ON_NULL_INPUT)));
    assertGetFunctions(
            POWER_TOWER,
            FUNCTION_POWER_TOWER_INT.withVersion("1"),
            new SqlInvokedFunction(
                    POWER_TOWER,
                    ImmutableList.of(new Parameter("x", parseTypeSignature(DOUBLE))),
                    parseTypeSignature(DOUBLE),
                    "power tower",
                    RoutineCharacteristics.builder().setDeterminism(DETERMINISTIC).setNullCallClause(RETURNS_NULL_ON_NULL_INPUT).build(),
                    "RETURN pow(x, x)",
                    withVersion("2")));

    // Drop function and alter function by name
    dropFunction(POWER_TOWER, Optional.of(ImmutableList.of(parseTypeSignature(DOUBLE))), false);
    alterFunction(POWER_TOWER, Optional.empty(), new AlterRoutineCharacteristics(Optional.of(CALLED_ON_NULL_INPUT)));

    // Alter function by name
    alterFunction(TANGENT, Optional.empty(), new AlterRoutineCharacteristics(Optional.of(CALLED_ON_NULL_INPUT)));
    SqlInvokedFunction tangentV2 = new SqlInvokedFunction(
            TANGENT,
            FUNCTION_TANGENT.getParameters(),
            FUNCTION_TANGENT.getSignature().getReturnType(),
            FUNCTION_TANGENT.getDescription(),
            RoutineCharacteristics.builder().setDeterminism(DETERMINISTIC).build(),
            FUNCTION_TANGENT.getBody(),
            withVersion("2"));
    assertGetFunctions(TANGENT, tangentV2);

    // Alter function with no change
    alterFunction(TANGENT, Optional.empty(), new AlterRoutineCharacteristics(Optional.of(CALLED_ON_NULL_INPUT)));
    assertGetFunctions(TANGENT, tangentV2);
}
/**
 * Decodes a projection mask from a StringBuilder.
 *
 * @deprecated Use the {@code String} overload directly; this method merely
 *             converts the builder to a string and delegates.
 */
@Deprecated
public static MaskTree decodeMaskUriFormat(StringBuilder toparse) throws IllegalMaskException {
    return decodeMaskUriFormat(toparse.toString());
}
/**
 * Whitespace (spaces and tabs) inside a projection mask must be tolerated and
 * not alter the decoded structure.
 */
@Test
public void uriDecodeWithWhitespaces() throws IllegalMaskException, IOException {
    MaskTree tree = URIMaskUtil.decodeMaskUriFormat("a ,\tb:($*:(c), $start:2,$count :4)");
    DataMap dataMap = tree.getDataMap();
    assertEquals(dataMap, dataMapFromString(
        "{'a': 1, 'b': { '$*': { 'c': 1 }, '$start': 2, '$count': 4 } }".replace('\'', '"')));
}
public static InternalSchema pruneInternalSchemaByID(InternalSchema schema, List<Integer> fieldIds, List<Integer> topParentFieldIds) { Types.RecordType recordType = (Types.RecordType)pruneType(schema.getRecord(), fieldIds); // reorder top parent fields, since the recordType.fields() produced by pruneType maybe out of order. List<Types.Field> newFields = new ArrayList<>(); if (topParentFieldIds != null && !topParentFieldIds.isEmpty()) { for (int id : topParentFieldIds) { Types.Field f = recordType.field(id); if (f != null) { newFields.add(f); } else { throw new HoodieSchemaException(String.format("cannot find pruned id %s in currentSchema %s", id, schema.toString())); } } } return new InternalSchema(newFields.isEmpty() ? recordType : Types.RecordType.get(newFields)); }
/**
 * Pruning a flat schema by out-of-order field ids must yield fields sorted by
 * their original position; also smoke-tests pruning of a nested schema.
 */
@Test
public void testPruneSchema() {
    Types.RecordType record = getSimpleRecordType();
    InternalSchema originSchema = new InternalSchema(Types.RecordType.get(record.fields()));
    // Ids supplied in shuffled order; pruning is expected to normalize ordering.
    List<Integer> prunedCols = new ArrayList<>();
    prunedCols.add(4);
    prunedCols.add(3);
    prunedCols.add(0);
    prunedCols.add(2);
    InternalSchema prunedSchema = InternalSchemaUtils.pruneInternalSchemaByID(originSchema, prunedCols, null);
    InternalSchema checkedSchema = new InternalSchema(Types.RecordType.get(Arrays.asList(new Types.Field[] {
            Types.Field.get(0, "bool", Types.BooleanType.get()),
            Types.Field.get(2, "long", Types.LongType.get()),
            Types.Field.get(3, "float", Types.FloatType.get()),
            Types.Field.get(4, "double", Types.DoubleType.get())
    })));
    assertEquals(prunedSchema, checkedSchema);

    // nest schema: only checks that pruning does not throw for nested ids.
    Types.RecordType nestRecord = getNestRecordType();
    InternalSchema originNestSchema = new InternalSchema(Types.RecordType.get(nestRecord.fields()));
    List<Integer> prunedNestCols = new ArrayList<>();
    prunedNestCols.add(0);
    prunedNestCols.add(1);
    prunedNestCols.add(5);
    prunedNestCols.add(11);
    InternalSchema prunedNestSchema = InternalSchemaUtils.pruneInternalSchemaByID(originNestSchema, prunedNestCols, null);
}
/**
 * Serializes the list as every prefix followed by a trailing separator,
 * e.g. {@code "a;b;"}. An empty list yields the empty string.
 */
@Override
public String toString() {
    final StringBuilder joined = new StringBuilder();
    for (final String prefix : mInnerList) {
        joined.append(prefix).append(';');
    }
    return joined.toString();
}
/**
 * Covers PrefixList.toString() for null/empty/separator-only input, whitespace
 * trimming around entries, and idempotence of the trailing separator.
 */
@Test
public void toStringTest() {
    // Null and empty sources serialize to the empty string.
    PrefixList prefixList = new PrefixList(null, ";");
    assertEquals(prefixList.toString(), "");

    prefixList = new PrefixList("", ";");
    assertEquals(prefixList.toString(), "");

    // A bare separator contributes no entries.
    prefixList = new PrefixList(";", ";");
    assertEquals(prefixList.toString(), "");

    // Surrounding whitespace and empty segments are dropped.
    prefixList = new PrefixList(" a ; ; b ", ";");
    assertEquals(prefixList.toString(), "a;b;");

    prefixList = new PrefixList("a/b;c", ";");
    assertEquals(prefixList.toString(), "a/b;c;");

    // An existing trailing separator does not produce an extra entry.
    prefixList = new PrefixList("a/b;c;", ";");
    assertEquals(prefixList.toString(), "a/b;c;");
}
/**
 * Pulls up to {@code batchSize} messages from {@code subscription} via the
 * Pubsub REST API and adapts each one to an {@link IncomingMessage}.
 *
 * @param requestTimeMsSinceEpoch wall-clock time of this request, recorded on each message
 * @param subscription subscription to pull from
 * @param batchSize maximum number of messages to request
 * @param returnImmediately whether the server should answer without waiting for messages
 * @return the received messages, or an empty list if none were available
 * @throws IOException if the underlying HTTP request fails
 */
@Override
@SuppressWarnings("ProtoFieldNullComparison")
public List<IncomingMessage> pull(
        long requestTimeMsSinceEpoch,
        SubscriptionPath subscription,
        int batchSize,
        boolean returnImmediately)
        throws IOException {
    PullRequest request =
            new PullRequest().setReturnImmediately(returnImmediately).setMaxMessages(batchSize);
    PullResponse response =
            pubsub.projects().subscriptions().pull(subscription.getPath(), request).execute();
    if (response.getReceivedMessages() == null || response.getReceivedMessages().isEmpty()) {
        return ImmutableList.of();
    }
    List<IncomingMessage> incomingMessages = new ArrayList<>(response.getReceivedMessages().size());
    for (ReceivedMessage message : response.getReceivedMessages()) {
        PubsubMessage pubsubMessage = message.getMessage();
        // The REST model returns null for an absent attribute map; normalize to empty.
        Map<String, String> attributes;
        if (pubsubMessage.getAttributes() != null) {
            attributes = pubsubMessage.getAttributes();
        } else {
            attributes = new HashMap<>();
        }

        // Payload. Absent data is normalized to a zero-length byte array.
        byte[] elementBytes = pubsubMessage.getData() == null ? null : pubsubMessage.decodeData();
        if (elementBytes == null) {
            elementBytes = new byte[0];
        }

        // Timestamp. Taken from the configured attribute when set, otherwise from
        // the server-assigned publish time.
        long timestampMsSinceEpoch;
        if (Strings.isNullOrEmpty(timestampAttribute)) {
            timestampMsSinceEpoch = parseTimestampAsMsSinceEpoch(message.getMessage().getPublishTime());
        } else {
            timestampMsSinceEpoch = extractTimestampAttribute(timestampAttribute, attributes);
        }

        // Ack id.
        String ackId = message.getAckId();
        checkState(!Strings.isNullOrEmpty(ackId));

        // Record id, if any.
        @Nullable String recordId = null;
        if (idAttribute != null) {
            recordId = attributes.get(idAttribute);
        }
        if (Strings.isNullOrEmpty(recordId)) {
            // Fall back to the Pubsub provided message id.
            recordId = pubsubMessage.getMessageId();
        }

        com.google.pubsub.v1.PubsubMessage.Builder protoMessage =
                com.google.pubsub.v1.PubsubMessage.newBuilder();
        protoMessage.setData(ByteString.copyFrom(elementBytes));
        protoMessage.putAllAttributes(attributes);
        // PubsubMessage uses `null` to represent no ordering key where we want a default of "".
        if (pubsubMessage.getOrderingKey() != null) {
            protoMessage.setOrderingKey(pubsubMessage.getOrderingKey());
        } else {
            protoMessage.setOrderingKey("");
        }
        incomingMessages.add(
                IncomingMessage.of(
                        protoMessage.build(),
                        timestampMsSinceEpoch,
                        requestTimeMsSinceEpoch,
                        ackId,
                        recordId));
    }
    return incomingMessages;
}
/**
 * Pulling a message whose payload is absent must yield an IncomingMessage with
 * an empty (zero-length) data array rather than a null payload.
 */
@Test
public void pullOneMessageWithNoData() throws IOException {
    String expectedSubscription = SUBSCRIPTION.getPath();
    PullRequest expectedRequest = new PullRequest().setReturnImmediately(true).setMaxMessages(10);
    // Message carries only id, publish time and attributes - deliberately no data field.
    PubsubMessage expectedPubsubMessage =
        new PubsubMessage()
            .setMessageId(MESSAGE_ID)
            .setPublishTime(String.valueOf(PUB_TIME))
            .setAttributes(
                ImmutableMap.of(
                    TIMESTAMP_ATTRIBUTE, String.valueOf(MESSAGE_TIME), ID_ATTRIBUTE, RECORD_ID));
    ReceivedMessage expectedReceivedMessage =
        new ReceivedMessage().setMessage(expectedPubsubMessage).setAckId(ACK_ID);
    PullResponse expectedResponse =
        new PullResponse().setReceivedMessages(ImmutableList.of(expectedReceivedMessage));
    Mockito.when(
            (Object)
                mockPubsub
                    .projects()
                    .subscriptions()
                    .pull(expectedSubscription, expectedRequest)
                    .execute())
        .thenReturn(expectedResponse);
    // NOTE(review): "acutalMessages" is a typo for "actualMessages" - local name only.
    List<IncomingMessage> acutalMessages = client.pull(REQ_TIME, SUBSCRIPTION, 10, true);
    assertEquals(1, acutalMessages.size());
    IncomingMessage actualMessage = acutalMessages.get(0);
    assertArrayEquals(new byte[0], actualMessage.message().getData().toByteArray());
}
@Override public void flush(ChannelHandlerContext ctx) throws Exception { if (readInProgress) { // If there is still a read in progress we are sure we will see a channelReadComplete(...) call. Thus // we only need to flush if we reach the explicitFlushAfterFlushes limit. if (++flushPendingCount == explicitFlushAfterFlushes) { flushNow(ctx); } } else if (consolidateWhenNoReadInProgress) { // Flush immediately if we reach the threshold, otherwise schedule if (++flushPendingCount == explicitFlushAfterFlushes) { flushNow(ctx); } else { scheduleFlush(ctx); } } else { // Always flush directly flushNow(ctx); } }
/**
 * With consolidation-when-no-read-in-progress enabled, explicit flushes must
 * not pass through immediately; the single real flush happens only when the
 * scheduled async task runs.
 */
@Test
public void testFlushViaScheduledTask() {
    final AtomicInteger flushCount = new AtomicInteger();
    EmbeddedChannel channel = newChannel(flushCount, true);
    // Flushes should not go through immediately, as they're scheduled as an async task
    // To ensure we not run the async task directly we will call trigger the flush() via the pipeline.
    channel.pipeline().flush();
    assertEquals(0, flushCount.get());
    channel.pipeline().flush();
    assertEquals(0, flushCount.get());
    // Trigger the execution of the async task
    channel.runPendingTasks();
    assertEquals(1, flushCount.get());
    assertFalse(channel.finish());
}
/**
 * Removes every character matched by the {@code XML_WHITESPACE} pattern from
 * the given string.
 *
 * @param string the input to strip; assumed non-null - TODO confirm callers never pass null
 * @return the input with all XML whitespace removed
 */
public static String deleteXmlWhitespace(String string) {
    return XML_WHITESPACE.matcher(string).replaceAll("");
}
/**
 * deleteXmlWhitespace must strip spaces, tabs, carriage returns and line feeds,
 * and leave strings without whitespace untouched.
 * NOTE(review): method name "testeDeleteXmlWhitespace" contains a typo; kept as-is
 * since renaming a public test method is an interface change.
 */
@Test
public void testeDeleteXmlWhitespace() {
    String noWhitespace = StringUtils.deleteXmlWhitespace("  foo\nbar  ");
    assertEquals("foobar", noWhitespace);

    noWhitespace = StringUtils.deleteXmlWhitespace(" \tbaz\rbarz\t ");
    assertEquals("bazbarz", noWhitespace);

    // No whitespace: input must come back unchanged.
    noWhitespace = StringUtils.deleteXmlWhitespace("SNAFU");
    assertEquals("SNAFU", noWhitespace);
}
/**
 * Resolves the SpecificData model to use for the given Avro schema.
 * <p>
 * For a RECORD or UNION schema, looks up the generated Avro class and returns the
 * model stored in its {@code MODEL$} field via reflection. Returns {@code null}
 * (caller falls back to the default SpecificData model) when the schema is not a
 * record/union, the generated class is not on the classpath, the class has no
 * {@code MODEL$} field, or reflective access is denied. On Avro 1.7/1.8 runtimes
 * the logical-type conversions are additionally registered on the model.
 */
static SpecificData getModelForSchema(Schema schema) {
    final Class<?> clazz;
    if (schema != null && (schema.getType() == Schema.Type.RECORD || schema.getType() == Schema.Type.UNION)) {
        clazz = SpecificData.get().getClass(schema);
    } else {
        return null;
    }

    // If clazz == null, the underlying Avro class for the schema is not on the classpath
    if (clazz == null) {
        return null;
    }

    final SpecificData model;
    try {
        // Generated Avro classes declare a static MODEL$ field holding their SpecificData.
        final Field modelField = clazz.getDeclaredField("MODEL$");
        modelField.setAccessible(true);
        model = (SpecificData) modelField.get(null);
    } catch (NoSuchFieldException e) {
        LOG.info(String.format("Generated Avro class %s did not contain a MODEL$ field. ", clazz)
                + "Parquet will use default SpecificData model for reading and writing.");
        return null;
    } catch (IllegalAccessException e) {
        LOG.warn(
                String.format("Field `MODEL$` in class %s was inaccessible. ", clazz)
                        + "Parquet will use default SpecificData model for reading and writing.",
                e);
        return null;
    }

    final String avroVersion = getRuntimeAvroVersion();
    // Avro 1.7 and 1.8 don't include conversions in the MODEL$ field by default
    if (avroVersion != null && (avroVersion.startsWith("1.8.") || avroVersion.startsWith("1.7."))) {
        try {
            addLogicalTypeConversion(model, schema, new HashSet<>());
        } catch (IllegalAccessException e) {
            LOG.warn(
                    String.format("Logical-type conversions were inaccessible for %s", clazz)
                            + "Parquet will use default SpecificData model for reading and writing.",
                    e);
            return null;
        }
    }
    return model;
}
/**
 * A schema with no matching generated class on the classpath must resolve to a
 * null model (i.e. the default SpecificData fallback is used).
 */
@Test
public void testModelForGenericRecord() {
    SpecificData model = AvroRecordConverter.getModelForSchema(Schema.createRecord(
            "someSchema",
            "doc",
            "some.namespace",
            false,
            Lists.newArrayList(new Schema.Field("strField", Schema.create(Schema.Type.STRING)))));

    // There is no class "someSchema" on the classpath, so should return null
    assertNull(model);
}
@Override protected int poll() throws Exception { // must reset for each poll shutdownRunningTask = null; pendingExchanges = 0; List<software.amazon.awssdk.services.sqs.model.Message> messages = pollingTask.call(); // okay we have some response from aws so lets mark the consumer as ready forceConsumerAsReady(); Queue<Exchange> exchanges = createExchanges(messages); return processBatch(CastUtils.cast(exchanges)); }
/**
 * One poll against a queue holding two messages must issue exactly one receive
 * request (for one message) and return only the first message.
 */
@Test
void shouldRequestSingleMessageWithSingleReceiveRequest() throws Exception {
    // given: two messages are waiting on the mocked queue
    sqsClientMock.addMessage(message("A"));
    sqsClientMock.addMessage(message("B"));

    try (var tested = createConsumer(0)) {
        // when
        var polledMessagesCount = tested.poll();

        // then: only "A" was fetched, via a single one-message receive request
        assertThat(polledMessagesCount).isOne();
        assertThat(receiveMessageBodies()).containsExactly("A");
        assertThat(sqsClientMock.getReceiveRequests()).containsExactlyInAnyOrder(expectedReceiveRequest(1));
        assertThat(sqsClientMock.getQueues()).isEmpty();
    }
}
/**
 * Parses an incoming Alert Level packet. A valid packet is exactly one byte
 * whose value does not exceed {@code ALERT_HIGH}; it is reported through
 * {@code onAlertLevelChanged}. Anything else is reported as invalid data.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    // Guard: an alert level packet is exactly one byte long.
    if (data.size() != 1) {
        onInvalidDataReceived(device, data);
        return;
    }
    final Integer alertLevel = data.getIntValue(Data.FORMAT_UINT8, 0);
    // Guard: the byte must parse and stay within the defined alert range.
    if (alertLevel == null || alertLevel > AlertLevelCallback.ALERT_HIGH) {
        onInvalidDataReceived(device, data);
        return;
    }
    onAlertLevelChanged(device, alertLevel);
}
/**
 * A "no alert" (0x00) packet must be delivered through onAlertLevelChanged with
 * level ALERT_NONE and never be classified as invalid data.
 */
@Test
public void onAlertLevelChanged_none() {
    final DataReceivedCallback callback = new AlertLevelDataCallback() {
        @Override
        public void onAlertLevelChanged(@NonNull final BluetoothDevice device, final int level) {
            assertEquals("No alert received", AlertLevelCallback.ALERT_NONE, level);
        }

        @Override
        public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
            super.onInvalidDataReceived(device, data);
            // A well-formed single-byte packet must never end up here.
            fail("Correct data reported as invalid");
        }
    };
    final Data data = AlertLevelData.noAlert();
    callback.onDataReceived(null, data);
    assertEquals("Correct value", 0x00, AlertLevelCallback.ALERT_NONE);
}
/**
 * Creates a new mutable map containing a single entry.
 *
 * @param key1 the entry key
 * @param value1 the entry value
 * @param <K> key type
 * @param <V> value type
 * @return a mutable {@link HashMap} holding exactly the given mapping
 */
public static <K, V> Map<K, V> mapOf(K key1, V value1) {
    final Map<K, V> map = new HashMap<>();
    map.put(key1, value1);
    return map;
}
/**
 * The four-argument mapOf overload must store both key/value pairs.
 */
@Test
void testMapOf2() {
    final Map<String, String> map = mapOf(
            "key1", "value1",
            "key2", "value2"
    );

    assertThat(map)
            .containsEntry("key1", "value1")
            .containsEntry("key2", "value2");
}
/**
 * SQL scalar function {@code value_at_quantile(qdigest(bigint), q)}: deserializes
 * the digest from its slice representation and returns the value at quantile q.
 * Fails with INVALID_FUNCTION_ARGUMENT when q is outside [0, 1].
 */
@ScalarFunction("value_at_quantile")
@Description("Given an input q between [0, 1], find the value whose rank in the sorted sequence of the n values represented by the qdigest is qn.")
@SqlType(StandardTypes.BIGINT)
public static long valueAtQuantileBigint(@SqlType("qdigest(bigint)") Slice input, @SqlType(StandardTypes.DOUBLE) double quantile) {
    checkCondition(quantile >= 0 && quantile <= 1, INVALID_FUNCTION_ARGUMENT, "Quantile should be within bounds [0, 1], was: " + quantile);
    return new QuantileDigest(input).getQuantile(quantile);
}
/**
 * For a digest over 0..9, the median (q = 0.5) must be 5.
 */
@Test
public void testValueAtQuantileBigint() {
    // maxError = 1 keeps the digest exact for this small input.
    QuantileDigest qdigest = new QuantileDigest(1);
    addAll(qdigest, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
    functionAssertions.assertFunction(format("value_at_quantile(CAST(X'%s' AS qdigest(bigint)), 0.5)", toHexString(qdigest)), BIGINT, 5L);
}
/**
 * Validates that the given JsonMapper serializes jobs correctly by running a
 * reference job through it and checking time-field format, field-based (not
 * getter-based) serialization, polymorphism support, and round-tripping.
 *
 * @param jsonMapper the mapper to validate
 * @return the same mapper, for fluent use
 * @throws IllegalArgumentException if any of the serialization checks fail;
 *         the failing check is attached as the cause
 */
public static JsonMapper validateJsonMapper(JsonMapper jsonMapper) {
    try {
        final String serializedJob = jsonMapper.serialize(getJobForTesting());
        testTimeFields(serializedJob);
        testUseFieldsNotMethods(serializedJob);
        testUsePolymorphism(serializedJob);
        testCanConvertBackToJob(jsonMapper, serializedJob);
        return jsonMapper;
    } catch (Exception e) {
        throw new IllegalArgumentException("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way.", e);
    }
}
/**
 * A Jackson mapper configured to serialize via getters/setters instead of
 * fields must be rejected, with the root cause naming the field requirement.
 */
@Test
void testInvalidJacksonJsonMapperPropertiesInsteadOfFields() {
    assertThatThrownBy(() -> validateJsonMapper(new InvalidJacksonJsonMapper(new ObjectMapper()
            .registerModule(new Jdk8Module())
            .registerModule(new JavaTimeModule())
            .setDateFormat(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ"))
    ))
    )
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way.")
            .hasRootCauseMessage("Job Serialization should use fields and not getters/setters.");
}
/**
 * Converts an array of Spark predicates into a single Iceberg expression by
 * AND-ing the individual conversions together. An empty array yields
 * {@code alwaysTrue()}.
 *
 * @param predicates the Spark predicates to convert
 * @return the conjunction of all converted predicates
 * @throws IllegalArgumentException if any predicate has no Iceberg equivalent
 */
public static Expression convert(Predicate[] predicates) {
    Expression conjunction = Expressions.alwaysTrue();
    for (Predicate predicate : predicates) {
        Expression translated = convert(predicate);
        Preconditions.checkArgument(
                translated != null,
                "Cannot convert Spark predicate to Iceberg expression: %s",
                predicate);
        conjunction = Expressions.and(conjunction, translated);
    }
    return conjunction;
}
/**
 * A Spark "&lt;&gt;" predicate against NaN must convert to Iceberg's notNaN(),
 * regardless of whether the column reference appears first or second.
 */
@Test
public void testNotEqualToNaN() {
    String col = "col";
    NamedReference namedReference = FieldReference.apply(col);
    LiteralValue value = new LiteralValue(Float.NaN, DataTypes.FloatType);
    // Both operand orders: (col <> NaN) and (NaN <> col).
    org.apache.spark.sql.connector.expressions.Expression[] attrAndValue =
            new org.apache.spark.sql.connector.expressions.Expression[] {namedReference, value};
    org.apache.spark.sql.connector.expressions.Expression[] valueAndAttr =
            new org.apache.spark.sql.connector.expressions.Expression[] {value, namedReference};

    Predicate notEqNaN1 = new Predicate("<>", attrAndValue);
    Expression expectedNotEqNaN = Expressions.notNaN(col);
    Expression actualNotEqNaN1 = SparkV2Filters.convert(notEqNaN1);
    assertThat(actualNotEqNaN1.toString()).isEqualTo(expectedNotEqNaN.toString());

    Predicate notEqNaN2 = new Predicate("<>", valueAndAttr);
    Expression actualNotEqNaN2 = SparkV2Filters.convert(notEqNaN2);
    assertThat(actualNotEqNaN2.toString()).isEqualTo(expectedNotEqNaN.toString());
}
/**
 * Supplies a new processor instance; each call obtains a fresh transformer
 * from the configured supplier so processors do not share transformer state.
 */
@Override
public Processor<K, Change<V>, K, Change<VOut>> get() {
    return new KTableTransformValuesProcessor(transformerSupplier.get());
}
/**
 * When transformValues is materialized, downstream aggregation must receive
 * correct old values (for subtraction) even though the transformer is stateful,
 * and the materialized store must hold the latest transformed value.
 */
@Test
public void shouldCalculateCorrectOldValuesIfMaterializedEvenIfStateful() {
    builder
        .table(INPUT_TOPIC, CONSUMED)
        .transformValues(
            new StatefulTransformerSupplier(),
            Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as(QUERYABLE_NAME)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.Integer()))
        // Force the old value to be forwarded so the reduce sees add/subtract pairs.
        .groupBy(toForceSendingOfOldValues(), Grouped.with(Serdes.String(), Serdes.Integer()))
        .reduce(MockReducer.INTEGER_ADDER, MockReducer.INTEGER_SUBTRACTOR)
        .mapValues(mapBackToStrings())
        .toStream()
        .process(capture);

    driver = new TopologyTestDriver(builder.build(), props());
    final TestInputTopic<String, String> inputTopic =
        driver.createInputTopic(INPUT_TOPIC, new StringSerializer(), new StringSerializer());
    // Values are ignored by the stateful transformer; it emits a running count.
    inputTopic.pipeInput("A", "ignored", 5L);
    inputTopic.pipeInput("A", "ignored1", 15L);
    inputTopic.pipeInput("A", "ignored2", 10L);

    assertThat(output(), equalTo(Arrays.asList(new KeyValueTimestamp<>("A", "1", 5),
        new KeyValueTimestamp<>("A", "2", 15),
        new KeyValueTimestamp<>("A", "3", 15))));

    final KeyValueStore<String, Integer> keyValueStore = driver.getKeyValueStore(QUERYABLE_NAME);
    assertThat(keyValueStore.get("A"), is(3));
}