focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Looks up the conventional JavaBeans setter for the given property on {@code clazz}.
 *
 * @return the setter {@link Method}, or {@code null} when no matching public, non-static
 *         setter exists or its return type is not void/Void/the declaring class (fluent setter)
 */
@Nullable
public static Method findPropertySetter(
        @Nonnull Class<?> clazz, @Nonnull String propertyName, @Nonnull Class<?> propertyType) {
    // "age" -> "setAge"
    String setterName = "set" + Character.toUpperCase(propertyName.charAt(0)) + propertyName.substring(1);
    final Method candidate;
    try {
        candidate = clazz.getMethod(setterName, propertyType);
    } catch (NoSuchMethodException e) {
        return null; // no setter with that exact signature
    }
    int mods = candidate.getModifiers();
    if (!Modifier.isPublic(mods) || Modifier.isStatic(mods)) {
        return null;
    }
    Class<?> returnType = candidate.getReturnType();
    boolean acceptableReturn =
            returnType == void.class || returnType == Void.class || returnType == clazz;
    return acceptableReturn ? candidate : null;
}
// Verifies that findPropertySetter returns null for a property name that does not exist on the class.
@Test public void when_findPropertySetter_nonExistent_then_returnsNull() { assertNull(findPropertySetter(JavaProperties.class, "nonExistentField", int.class)); }
/**
 * Appends the percent-encoded form of a single byte ("%XY", uppercase hex) to {@code bos}.
 */
private static void pctEncode(byte data, ByteArrayOutputStream bos) {
    bos.write('%');
    int highNibble = (data >> 4) & 0xF;
    int lowNibble = data & 0xF;
    bos.write(Character.toUpperCase(Character.forDigit(highNibble, 16)));
    bos.write(Character.toUpperCase(Character.forDigit(lowNibble, 16)));
}
// Encodes a query-parameter value containing reserved characters (= ; & ?) and checks the
// percent-encoded output; comparison is case-insensitive so hex-digit case does not matter.
@Test void pctEncode() { String queryParameterValue = "firstName=James;lastName=Bond;location=England&Britain?"; assertThat(UriUtils.encode(queryParameterValue, UTF_8)) .isEqualToIgnoringCase( "firstName%3DJames%3BlastName%3DBond%3Blocation%3DEngland%26Britain%3F"); }
/**
 * Resolves a node id to the node's display name.
 *
 * @return the node name, or empty when the node id is unknown or its JSON entry has no "name" field
 */
@Override
public Optional<String> nodeIdToName(String nodeId) {
    // Split the map: Optional.map yields empty when get("name") returns null (field absent),
    // instead of letting the subsequent asText() throw a NullPointerException.
    return nodeById(nodeId)
            .map(jsonNode -> jsonNode.get("name"))
            .map(nameNode -> nameNode.asText());
}
// With the mocked nodes response in place, an unknown node id resolves to an empty Optional.
@Test void returnsEmptyOptionalForMissingNodeId() throws Exception { mockNodesResponse(); assertThat(this.clusterAdapter.nodeIdToName("foobar")).isEmpty(); }
public static boolean isFastStatsSame(Partition oldPart, Partition newPart) { // requires to calculate stats if new and old have different fast stats if ((oldPart != null) && oldPart.isSetParameters() && newPart != null && newPart.isSetParameters()) { for (String stat : StatsSetupConst.FAST_STATS) { if (oldPart.getParameters().containsKey(stat) && newPart.getParameters().containsKey(stat)) { Long oldStat = Long.parseLong(oldPart.getParameters().get(stat)); String newStat = newPart.getParameters().get(stat); if (newStat == null || !oldStat.equals(Long.parseLong(newStat))) { return false; } } else { return false; } } return true; } return false; }
// Both partitions get identical values for every fast stat, so isFastStatsSame must return true.
@Test public void isFastStatsSameMatching() { Partition oldPartition = new Partition(); Partition newPartition = new Partition(); Map<String, String> stats = new HashMap<>(); Map<String, String> oldParams = new HashMap<>(); Map<String, String> newParams = new HashMap<>(); long testVal = 1; for (String key : FAST_STATS) { oldParams.put(key, String.valueOf(testVal)); newParams.put(key, String.valueOf(testVal)); } oldPartition.setParameters(oldParams); newPartition.setParameters(newParams); assertTrue(MetaStoreServerUtils.isFastStatsSame(oldPartition, newPartition)); }
// Persists an X.509 certificate after verification. The very first CSCA certificate for a
// document type cannot be chain-verified (there is no anchor yet), so it is only checked for a
// valid self-signature and logged for a manual trust decision; later certificates go through
// full verification (optionally tolerating expiry via allowAddingExpired). Encoding failures
// become RuntimeException; verification failures become BadRequestException.
public Certificate add(X509Certificate cert) { final Certificate db; try { db = Certificate.from(cert); } catch (CertificateEncodingException e) { logger.error("Encoding error in certificate", e); throw new RuntimeException("Encoding error in certificate", e); } try { // Special case for first CSCA certificate for this document type if (repository.countByDocumentType(db.getDocumentType()) == 0) { cert.verify(cert.getPublicKey()); logger.warn("Added first CSCA certificate for {}, set trusted flag manually", db.getDocumentType()); } else { verify(cert, allowAddingExpired ? cert.getNotAfter() : null); } } catch (GeneralSecurityException | VerificationException e) { logger.error( String.format("Could not verify certificate of %s issued by %s", cert.getSubjectX500Principal(), cert.getIssuerX500Principal() ), e ); throw new BadRequestException("Could not verify certificate", e); } return repository.saveAndFlush(db); }
// Seeds the repository with a trusted certificate, then checks the matching CRL can be added.
@Test public void shouldAllowToAddCRL() throws Exception { certificateRepo.saveAndFlush(loadCertificate("rdw/02.cer", true)); service.add(readCRL("rdw/02.crl")); }
// Parses the comma-separated console server config into Endpoint entries. Each segment may carry
// an http:// or https:// scheme (defaults: HTTP/80, HTTPS/443) and an optional ":port" suffix.
// Blank segments and segments starting with ':' (no host) are skipped; a bad port logs a warning
// and skips the segment.
// NOTE(review): the guard "port <= 1 || port >= 65535" also rejects the valid ports 1 and 65535 —
// looks like it was meant to be "port < 1 || port > 65535"; confirm before changing, the tests
// only exercise 0/-1/80000.
public static List<Endpoint> getConsoleServerList() { String config = SentinelConfig.getConfig(CONSOLE_SERVER); List<Endpoint> list = new ArrayList<Endpoint>(); if (StringUtil.isBlank(config)) { return list; } int pos = -1; int cur = 0; while (true) { pos = config.indexOf(',', cur); if (cur < config.length() - 1 && pos < 0) { // for single segment, pos move to the end pos = config.length(); } if (pos < 0) { break; } if (pos <= cur) { cur ++; continue; } // parsing String ipPortStr = config.substring(cur, pos); cur = pos + 1; if (StringUtil.isBlank(ipPortStr)) { continue; } ipPortStr = ipPortStr.trim(); int port = 80; Protocol protocol = Protocol.HTTP; if (ipPortStr.startsWith("http://")) { ipPortStr = ipPortStr.substring(7); } else if (ipPortStr.startsWith("https://")) { ipPortStr = ipPortStr.substring(8); port = 443; protocol = Protocol.HTTPS; } int index = ipPortStr.indexOf(":"); if (index == 0) { // skip continue; } String host = ipPortStr; if (index >= 0) { try { port = Integer.parseInt(ipPortStr.substring(index + 1)); if (port <= 1 || port >= 65535) { throw new RuntimeException("Port number [" + port + "] over range"); } } catch (Exception e) { RecordLog.warn("Parse port of dashboard server failed: " + ipPortStr, e); // skip continue; } host = ipPortStr.substring(0, index); } list.add(new Endpoint(protocol, host, port)); } return list; }
// Exhaustive parsing cases for getConsoleServerList: empty config, single ip/domain,
// explicit ports, mixed lists, malformed ports (0, -1, missing host/port, out of range),
// and explicit http/https schemes with their default ports.
@Test public void testGetConsoleServerList() { // empty SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, ""); List<Endpoint> list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(0, list.size()); // single ip SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, "112.13.223.3"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(1, list.size()); assertEquals("112.13.223.3", list.get(0).getHost()); assertEquals(80, list.get(0).getPort()); // single domain SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, "www.dashboard.org"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(1, list.size()); assertEquals("www.dashboard.org", list.get(0).getHost()); assertEquals(80, list.get(0).getPort()); // single ip including port SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, "www.dashboard.org:81"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(1, list.size()); assertEquals("www.dashboard.org", list.get(0).getHost()); assertEquals(81, list.get(0).getPort()); // mixed SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, "www.dashboard.org:81,112.13.223.3,112.13.223.4:8080,www.dashboard.org"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(4, list.size()); assertEquals("www.dashboard.org", list.get(0).getHost()); assertEquals(81, list.get(0).getPort()); assertEquals("112.13.223.3", list.get(1).getHost()); assertEquals(80, list.get(1).getPort()); assertEquals("112.13.223.4", list.get(2).getHost()); assertEquals(8080, list.get(2).getPort()); assertEquals("www.dashboard.org", list.get(3).getHost()); assertEquals(80, list.get(3).getPort()); // malformed SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, "www.dashboard.org:0"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(0, list.size()); SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, 
"www.dashboard.org:-1"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(0, list.size()); SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, ":80"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(0, list.size()); SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, "www.dashboard.org:"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(0, list.size()); SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, "www.dashboard.org:80000"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(0, list.size()); SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, "www.dashboard.org:80000,www.dashboard.org:81,:80"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(1, list.size()); assertEquals("www.dashboard.org", list.get(0).getHost()); assertEquals(81, list.get(0).getPort()); SentinelConfig.setConfig(TransportConfig.CONSOLE_SERVER, "https://www.dashboard.org,http://www.dashboard.org:8080,www.dashboard.org,www.dashboard.org:8080"); list = TransportConfig.getConsoleServerList(); assertNotNull(list); assertEquals(4, list.size()); assertEquals(Protocol.HTTPS, list.get(0).getProtocol()); assertEquals(Protocol.HTTP, list.get(1).getProtocol()); assertEquals(Protocol.HTTP, list.get(2).getProtocol()); assertEquals(Protocol.HTTP, list.get(3).getProtocol()); assertEquals(443, list.get(0).getPort()); assertEquals(80, list.get(2).getPort()); }
/**
 * Satisfied when exactly one of the two wrapped rules is satisfied (logical XOR).
 */
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
    boolean firstSatisfied = rule1.isSatisfied(index, tradingRecord);
    boolean secondSatisfied = rule2.isSatisfied(index, tradingRecord);
    // For booleans, != is exactly XOR.
    final boolean satisfied = firstSatisfied != secondSatisfied;
    traceIsSatisfied(index, satisfied);
    return satisfied;
}
// XOR truth table in both operand orders: true only when exactly one rule is satisfied.
@Test public void isSatisfied() { assertTrue(satisfiedRule.xor(BooleanRule.FALSE).isSatisfied(0)); assertTrue(BooleanRule.FALSE.xor(satisfiedRule).isSatisfied(0)); assertFalse(unsatisfiedRule.xor(BooleanRule.FALSE).isSatisfied(0)); assertFalse(BooleanRule.FALSE.xor(unsatisfiedRule).isSatisfied(0)); assertFalse(satisfiedRule.xor(BooleanRule.TRUE).isSatisfied(10)); assertFalse(BooleanRule.TRUE.xor(satisfiedRule).isSatisfied(10)); assertTrue(unsatisfiedRule.xor(BooleanRule.TRUE).isSatisfied(10)); assertTrue(BooleanRule.TRUE.xor(unsatisfiedRule).isSatisfied(10)); }
/**
 * Delegates to the managed-instance service when one is available; otherwise reports
 * every requested group as non-managed.
 *
 * @return map from group UUID to whether that group is externally managed
 */
@Override
public Map<String, Boolean> getGroupUuidToManaged(DbSession dbSession, Set<String> groupUuids) {
    // orElseGet: build the all-non-managed fallback lazily. The original orElse(...) evaluated
    // returnNonManagedForAll eagerly on every call, even when a delegate service answered.
    return findManagedInstanceService()
        .map(managedInstanceService -> managedInstanceService.getGroupUuidToManaged(dbSession, groupUuids))
        .orElseGet(() -> returnNonManagedForAll(groupUuids));
}
// With two candidate services registered, the call must be routed to the one that answers
// and its response propagated unchanged.
@Test public void getGroupUuidToManaged_delegatesToRightService_andPropagateAnswer() { Set<String> groupUuids = Set.of("a", "b"); Map<String, Boolean> serviceResponse = Map.of("a", false, "b", true); ManagedInstanceService anotherManagedInstanceService = getManagedInstanceService(groupUuids, serviceResponse); DelegatingManagedServices managedInstanceService = new DelegatingManagedServices(Set.of(new NeverManagedInstanceService(), anotherManagedInstanceService)); Map<String, Boolean> groupUuidToManaged = managedInstanceService.getGroupUuidToManaged(dbSession, groupUuids); assertThat(groupUuidToManaged).containsExactlyInAnyOrderEntriesOf(serviceResponse); }
// Delegates snapshot creation to the underlying histogram.
@Override public Snapshot getSnapshot() { return histogram.getSnapshot(); }
// The timer must surface the exact Snapshot instance produced by its reservoir.
@Test public void returnsTheSnapshotFromTheReservoir() throws Exception { final Snapshot snapshot = mock(Snapshot.class); when(reservoir.getSnapshot()).thenReturn(snapshot); assertThat(timer.getSnapshot()) .isEqualTo(snapshot); }
/**
 * Current wall-clock time in microseconds: the epoch base plus the elapsed time since the
 * base monotonic tick, converted from nanoseconds to microseconds.
 */
@Override
public long currentTimeMicroseconds() {
    long elapsedNanos = platform.nanoTime() - baseTickNanos;
    return baseEpochMicros + elapsedNanos / 1000;
}
// With base epoch 1000us and base tick 0ns, a nanoTime of 1000ns (1us) yields 1001us.
@Test void relativeTimestamp_incrementsAccordingToNanoTick() { TickClock clock = new TickClock(platform, 1000L /* 1ms */, 0L /* 0ns */); when(platform.nanoTime()).thenReturn(1000L); // 1 microsecond = 1000 nanoseconds assertThat(clock.currentTimeMicroseconds()).isEqualTo(1001L); // 1ms + 1us }
// Parses a Continuous Glucose Measurement packet, which may contain several records back to back.
// Per record: a 1-byte size, a flags byte selecting optional fields (trend, quality, three sensor
// status octets), then the 6-byte fixed part, the optional fields, and an optional trailing
// E2E-CRC (size == dataSize + 2). Records failing the size/flag consistency check are reported
// via onInvalidDataReceived; a CRC mismatch goes to onContinuousGlucoseMeasurementReceivedWithCrcError.
// The "offset += 2" after the CRC check advances past the size and flags bytes before reading the
// glucose concentration.
// NOTE(review): data.getIntValue/getFloatValue presumably return boxed values that could be null
// for a truncated packet — the size check above should prevent that, but confirm.
@Override public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { super.onDataReceived(device, data); if (data.size() < 1) { onInvalidDataReceived(device, data); return; } int offset = 0; while (offset < data.size()) { // Packet size final int size = data.getIntValue(Data.FORMAT_UINT8, offset); if (size < 6 || offset + size > data.size()) { onInvalidDataReceived(device, data); return; } // Flags final int flags = data.getIntValue(Data.FORMAT_UINT8, offset + 1); final boolean cgmTrendInformationPresent = (flags & 0x01) != 0; final boolean cgmQualityInformationPresent = (flags & 0x02) != 0; final boolean sensorWarningOctetPresent = (flags & 0x20) != 0; final boolean sensorCalTempOctetPresent = (flags & 0x40) != 0; final boolean sensorStatusOctetPresent = (flags & 0x80) != 0; final int dataSize = 6 + (cgmTrendInformationPresent ? 2 : 0) + (cgmQualityInformationPresent ? 2 : 0) + (sensorWarningOctetPresent ? 1 : 0) + (sensorCalTempOctetPresent ? 1 : 0) + (sensorStatusOctetPresent ? 
1 : 0); if (size != dataSize && size != dataSize + 2) { onInvalidDataReceived(device, data); return; } final boolean crcPresent = size == dataSize + 2; if (crcPresent) { final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, offset + dataSize); final int actualCrc = CRC16.MCRF4XX(data.getValue(), offset, dataSize); if (expectedCrc != actualCrc) { onContinuousGlucoseMeasurementReceivedWithCrcError(device, data); return; } } offset += 2; // Glucose concentration final float glucoseConcentration = data.getFloatValue(Data.FORMAT_SFLOAT, offset); offset += 2; // Time offset (in minutes since Session Start) final int timeOffset = data.getIntValue(Data.FORMAT_UINT16_LE, offset); offset += 2; // Sensor Status Annunciation int warningStatus = 0; int calibrationTempStatus = 0; int sensorStatus = 0; CGMStatus status = null; if (sensorWarningOctetPresent) { warningStatus = data.getIntValue(Data.FORMAT_UINT8, offset++); } if (sensorCalTempOctetPresent) { calibrationTempStatus = data.getIntValue(Data.FORMAT_UINT8, offset++); } if (sensorStatusOctetPresent) { sensorStatus = data.getIntValue(Data.FORMAT_UINT8, offset++); } if (sensorWarningOctetPresent || sensorCalTempOctetPresent || sensorStatusOctetPresent) { status = new CGMStatus(warningStatus, calibrationTempStatus, sensorStatus); } // CGM Trend Information Float trend = null; if (cgmTrendInformationPresent) { trend = data.getFloatValue(Data.FORMAT_SFLOAT, offset); offset += 2; } // CGM Quality Information Float quality = null; if (cgmQualityInformationPresent) { quality = data.getFloatValue(Data.FORMAT_SFLOAT, offset); offset += 2; } // E2E-CRC if (crcPresent) { offset += 2; } onContinuousGlucoseMeasurementReceived(device, glucoseConcentration, trend, quality, status, timeOffset, crcPresent); } }
// Feeds a 5-byte buffer whose declared record size (6) exceeds the actual data; only the
// onInvalidDataReceived callback may fire — the other callbacks fail the test if invoked.
@Test public void onInvalidDataReceived_tooShort() { final DataReceivedCallback callback = new ContinuousGlucoseMeasurementDataCallback() { @Override public void onContinuousGlucoseMeasurementReceived(@NonNull final BluetoothDevice device, final float glucoseConcentration, @Nullable final Float cgmTrend, @Nullable final Float cgmQuality, final CGMStatus status, final int timeOffset, final boolean secured) { assertEquals("Measurement reported despite invalid data", 1, 2); } @Override public void onContinuousGlucoseMeasurementReceivedWithCrcError(@NonNull final BluetoothDevice device, @NonNull final Data data) { assertEquals("Invalid data reported as CRC error", 1, 2); } @Override public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { assertEquals("Invalid data", 1, 1); } }; final MutableData data = new MutableData(new byte[5]); // Size assertTrue(data.setValue(6, Data.FORMAT_UINT8, 0)); // Flags assertTrue(data.setByte(0b11100011, 1)); // Glucose Concentration assertTrue(data.setValue(12, 1, Data.FORMAT_SFLOAT, 2)); // Time offset assertFalse(data.setValue(6, Data.FORMAT_UINT16_LE, 4)); callback.onDataReceived(null, data); }
// Computes the output row metadata for the dimension-lookup step: normalizes all input fields
// (storage type NORMAL, trim NONE) to avoid lazy-conversion/compare issues, appends the mandatory
// technical key (renamed if configured, length 9), and — in lookup mode only (!update) — resolves
// the configured return fields against the target database table, renaming them to their stream
// names where given. Throws KettleStepException when the technical key is missing, a return field
// cannot be found, or no database connection is configured; the connection is always closed.
@Override public void getFields( RowMetaInterface row, String name, RowMetaInterface[] info, StepMeta nextStep, VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { // Change all the fields to normal storage, this is the fastest way to handle lazy conversion. // It doesn't make sense to use it in the SCD context but people try it anyway // for ( ValueMetaInterface valueMeta : row.getValueMetaList() ) { valueMeta.setStorageType( ValueMetaInterface.STORAGE_TYPE_NORMAL ); // Also change the trim type to "None" as this can cause trouble // during compare of the data when there are leading/trailing spaces in the target table // valueMeta.setTrimType( ValueMetaInterface.TRIM_TYPE_NONE ); } // technical key can't be null if ( Utils.isEmpty( keyField ) ) { String message = BaseMessages.getString( PKG, "DimensionLookupMeta.Error.NoTechnicalKeySpecified" ); logError( message ); throw new KettleStepException( message ); } ValueMetaInterface v = new ValueMetaInteger( keyField ); if ( keyRename != null && keyRename.length() > 0 ) { v.setName( keyRename ); } v.setLength( 9 ); v.setPrecision( 0 ); v.setOrigin( name ); row.addValueMeta( v ); // retrieve extra fields on lookup? // Don't bother if there are no return values specified. if ( !update && fieldLookup.length > 0 ) { Database db = null; try { // Get the rows from the table... 
if ( databaseMeta != null ) { db = createDatabaseObject(); RowMetaInterface extraFields = getDatabaseTableFields( db, schemaName, tableName ); for ( int i = 0; i < fieldLookup.length; i++ ) { v = extraFields.searchValueMeta( fieldLookup[i] ); if ( v == null ) { String message = BaseMessages.getString( PKG, "DimensionLookupMeta.Exception.UnableToFindReturnField", fieldLookup[i] ); logError( message ); throw new KettleStepException( message ); } // If the field needs to be renamed, rename if ( fieldStream[i] != null && fieldStream[i].length() > 0 ) { v.setName( fieldStream[i] ); } v.setOrigin( name ); row.addValueMeta( v ); } } else { String message = BaseMessages.getString( PKG, "DimensionLookupMeta.Exception.UnableToRetrieveDataTypeOfReturnField" ); logError( message ); throw new KettleStepException( message ); } } catch ( Exception e ) { String message = BaseMessages.getString( PKG, "DimensionLookupMeta.Exception.UnableToRetrieveDataTypeOfReturnField2" ); logError( message ); throw new KettleStepException( message, e ); } finally { if ( db != null ) { db.disconnect(); } } } }
// With no technical key configured, getFields must fail with the NoTechnicalKeySpecified message.
@Test public void testGetFields() throws Exception { RowMeta extraFields = new RowMeta(); extraFields.addValueMeta( new ValueMetaString( "field1" ) ); DatabaseMeta dbMeta = mock( DatabaseMeta.class ); DimensionLookupMeta meta = spy( new DimensionLookupMeta() ); meta.setUpdate( false ); meta.setKeyField( null ); meta.setFieldLookup( new String[] { "field1" } ); meta.setFieldStream( new String[] { "" } ); meta.setDatabaseMeta( dbMeta ); doReturn( extraFields ).when( meta ).getDatabaseTableFields( any(), anyString(), anyString() ); doReturn( mock( LogChannelInterface.class ) ).when( meta ).getLog(); RowMeta row = new RowMeta(); try { meta.getFields( row, "DimensionLookupMetaTest", new RowMeta[] { row }, null, null, null, null ); } catch ( Throwable e ) { Assert.assertTrue( e.getMessage().contains( BaseMessages.getString( DimensionLookupMeta.class, "DimensionLookupMeta.Error.NoTechnicalKeySpecified" ) ) ); } }
/**
 * Resolves the job staging directory for the current login user.
 *
 * @throws IOException on filesystem access failure
 * @throws InterruptedException if interrupted while resolving the user
 */
public static Path getStagingDir(Cluster cluster, Configuration conf)
    throws IOException, InterruptedException {
  // Delegate to the user-aware overload with the login user.
  return getStagingDir(cluster, conf, UserGroupInformation.getLoginUser());
}
// When both the staging-dir owner and the caller use short names and they match, the staging
// path from the cluster is returned as-is.
@Test public void testGetStagingDirWhenShortFileOwnerNameAndShortUserName() throws IOException, InterruptedException { Cluster cluster = mock(Cluster.class); Configuration conf = new Configuration(); String stagingDirOwner = USER_1_SHORT_NAME; Path stagingPath = mock(Path.class); UserGroupInformation user = UserGroupInformation .createUserForTesting(USER_1_SHORT_NAME, GROUP_NAMES); assertEquals(USER_1_SHORT_NAME, user.getUserName()); FileSystem fs = new FileSystemTestHelper.MockFileSystem(); FileStatus fileStatus = new FileStatus(1, true, 1, 1, 100L, 100L, FsPermission.getDefault(), stagingDirOwner, stagingDirOwner, stagingPath); when(stagingPath.getFileSystem(conf)).thenReturn(fs); when(fs.getFileStatus(stagingPath)).thenReturn(fileStatus); when(cluster.getStagingAreaDir()).thenReturn(stagingPath); assertEquals(stagingPath, JobSubmissionFiles.getStagingDir(cluster, conf, user)); }
/**
 * Builds a PolarisRouterContext from the router-label HTTP header: the first header value is
 * URL-decoded and deserialized as a JSON map of labels. A missing header yields an empty label map.
 */
PolarisRouterContext buildRouterContext(HttpHeaders headers) {
    Collection<String> labelHeaderValues = headers.get(RouterConstant.ROUTER_LABEL_HEADER);
    if (CollectionUtils.isEmpty(labelHeaderValues)) {
        labelHeaderValues = new ArrayList<>();
    }
    PolarisRouterContext routerContext = new PolarisRouterContext();
    Map<String, String> labelHeaderValuesMap = new HashMap<>();
    try {
        // Only the first header value carries the label payload.
        Optional<String> labelHeaderValuesOptional = labelHeaderValues.stream().findFirst();
        if (labelHeaderValuesOptional.isPresent()) {
            String labelHeaderValuesContent = labelHeaderValuesOptional.get();
            labelHeaderValuesMap.putAll(
                    JacksonUtils.deserialize2Map(URLDecoder.decode(labelHeaderValuesContent, UTF_8)));
        }
    }
    catch (UnsupportedEncodingException e) {
        // Preserve the original exception as the cause; the previous code discarded it.
        throw new RuntimeException("unsupported charset exception " + UTF_8, e);
    }
    routerContext.putLabels(RouterConstant.ROUTER_LABELS, labelHeaderValuesMap);
    return routerContext;
}
// Without the router-label header a non-null context is still produced; with a JSON label header
// the decoded key/value pair must be retrievable from the context.
@Test public void buildRouterContext() { PolarisRouterServiceInstanceListSupplier polarisSupplier = new PolarisRouterServiceInstanceListSupplier( delegate, routerAPI, requestInterceptors, null, new PolarisInstanceTransformer()); HttpHeaders headers = new HttpHeaders(); PolarisRouterContext context = polarisSupplier.buildRouterContext(headers); assertThat(context).isNotNull(); // mock try (MockedStatic<ApplicationContextAwareUtils> mockedApplicationContextAwareUtils = Mockito.mockStatic(ApplicationContextAwareUtils.class)) { mockedApplicationContextAwareUtils.when(() -> ApplicationContextAwareUtils.getProperties(anyString())) .thenReturn("mock-value"); MetadataContextHolder.set(new MetadataContext()); headers = new HttpHeaders(); headers.add(RouterConstant.ROUTER_LABEL_HEADER, "{\"k1\":\"v1\"}"); PolarisRouterContext routerContext = polarisSupplier.buildRouterContext(headers); assertThat(routerContext.getLabel("k1")).isEqualTo("v1"); } }
// REST endpoint: DELETE /connectors/{connector}/offsets. Asks the herder to reset the connector's
// offsets via a callback future; completeOrForwardRequest either waits for the local herder or
// forwards the request to the leader (controlled by the "forward" query param). Returns the
// resulting Message as a 200 response.
@DELETE @Path("/{connector}/offsets") @Operation(summary = "Reset the offsets for the specified connector") public Response resetConnectorOffsets(final @Parameter(hidden = true) @QueryParam("forward") Boolean forward, final @Context HttpHeaders headers, final @PathParam("connector") String connector) throws Throwable { FutureCallback<Message> cb = new FutureCallback<>(); herder.resetConnectorOffsets(connector, cb); Message msg = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/offsets", "DELETE", headers, null, new TypeReference<Message>() { }, new IdentityTranslator<>(), forward); return Response.ok().entity(msg).build(); }
// A NotFoundException delivered through the herder callback must propagate out of the REST call.
@Test public void testResetOffsetsConnectorNotFound() { final ArgumentCaptor<Callback<Message>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackException(cb, new NotFoundException("Connector not found")) .when(herder).resetConnectorOffsets(eq(CONNECTOR_NAME), cb.capture()); assertThrows(NotFoundException.class, () -> connectorsResource.resetConnectorOffsets(null, NULL_HEADERS, CONNECTOR_NAME)); }
@CheckForNull public String get() { // branches will be empty in CE if (branchConfiguration.isPullRequest() || branches.isEmpty()) { return null; } return Optional.ofNullable(getFromProperties()).orElseGet(this::loadWs); }
// Configuring the new-code reference branch to be the analysed branch itself must be rejected.
@Test public void getFromProperties_throws_ISE_if_reference_is_the_same_as_branch() { when(branchConfiguration.branchType()).thenReturn(BranchType.BRANCH); when(branchConfiguration.branchName()).thenReturn(BRANCH_KEY); when(configuration.get("sonar.newCode.referenceBranch")).thenReturn(Optional.of(BRANCH_KEY)); assertThatThrownBy(referenceBranchSupplier::getFromProperties).isInstanceOf(IllegalStateException.class); }
/**
 * Accepts only the literals "true" and "false", case-insensitively.
 *
 * @throws RuntimeException when the value is not a boolean literal
 */
@Override
public void verify(String value) {
    checkNotNull(value);
    boolean isBooleanLiteral =
            "true".equalsIgnoreCase(value) || "false".equalsIgnoreCase(value);
    if (!isBooleanLiteral) {
        throw new RuntimeException("boolean attribute format is wrong.");
    }
}
// Non-boolean literals — arbitrary text, numeric flags, empty string — must all be rejected.
@Test public void testVerify_InvalidValue_ExceptionThrown() { assertThrows(RuntimeException.class, () -> booleanAttribute.verify("invalid")); assertThrows(RuntimeException.class, () -> booleanAttribute.verify("1")); assertThrows(RuntimeException.class, () -> booleanAttribute.verify("0")); assertThrows(RuntimeException.class, () -> booleanAttribute.verify("")); }
// Thread-safe delegate: produces the description from the internal topology builder.
public synchronized TopologyDescription describe() { return internalTopologyBuilder.describe(); }
// A sink using a dynamic TopicNameExtractor must surface the extractor's toString() in the
// rendered topology description instead of a fixed topic name.
@Test public void topologyWithDynamicRoutingShouldDescribeExtractorClass() { final StreamsBuilder builder = new StreamsBuilder(); final TopicNameExtractor<Object, Object> topicNameExtractor = new TopicNameExtractor<Object, Object>() { @Override public String extract(final Object key, final Object value, final RecordContext recordContext) { return recordContext.topic() + "-" + key; } @Override public String toString() { return "anonymous topic name extractor. topic is [recordContext.topic()]-[key]"; } }; builder.stream("input-topic").to(topicNameExtractor); final TopologyDescription describe = builder.build().describe(); assertEquals( "Topologies:\n" + "   Sub-topology: 0\n" + "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" + "      --> KSTREAM-SINK-0000000001\n" + "    Sink: KSTREAM-SINK-0000000001 (extractor class: anonymous topic name extractor. topic is [recordContext.topic()]-[key])\n" + "      <-- KSTREAM-SOURCE-0000000000\n\n", describe.toString()); }
// ABI-encodes a single parameter by dispatching on its concrete Type. The instanceof order
// matters: NumericType/Bytes subtypes must be matched before their supertypes, a StaticArray of
// dynamic structs needs special offset handling, and PrimitiveType is converted to its Solidity
// equivalent and re-encoded recursively. Unknown types are rejected with
// UnsupportedOperationException.
@SuppressWarnings("unchecked") public static String encode(Type parameter) { if (parameter instanceof NumericType) { return encodeNumeric(((NumericType) parameter)); } else if (parameter instanceof Address) { return encodeAddress((Address) parameter); } else if (parameter instanceof Bool) { return encodeBool((Bool) parameter); } else if (parameter instanceof Bytes) { return encodeBytes((Bytes) parameter); } else if (parameter instanceof DynamicBytes) { return encodeDynamicBytes((DynamicBytes) parameter); } else if (parameter instanceof Utf8String) { return encodeString((Utf8String) parameter); } else if (parameter instanceof StaticArray) { if (DynamicStruct.class.isAssignableFrom( ((StaticArray) parameter).getComponentType())) { return encodeStaticArrayWithDynamicStruct((StaticArray) parameter); } else { return encodeArrayValues((StaticArray) parameter); } } else if (parameter instanceof DynamicStruct) { return encodeDynamicStruct((DynamicStruct) parameter); } else if (parameter instanceof DynamicArray) { return encodeDynamicArray((DynamicArray) parameter); } else if (parameter instanceof PrimitiveType) { return encode(((PrimitiveType) parameter).toSolidityType()); } else { throw new UnsupportedOperationException( "Type cannot be encoded: " + parameter.getClass()); } }
// Primitive ints are encoded as 32-byte two's-complement words: zero, MIN_VALUE (sign-extended
// with ff), and MAX_VALUE.
@Test public void testPrimitiveInt() { assertEquals( encode(new org.web3j.abi.datatypes.primitive.Int(0)), ("0000000000000000000000000000000000000000000000000000000000000000")); assertEquals( encode(new org.web3j.abi.datatypes.primitive.Int(Integer.MIN_VALUE)), ("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000")); assertEquals( encode(new org.web3j.abi.datatypes.primitive.Int(Integer.MAX_VALUE)), ("000000000000000000000000000000000000000000000000000000007fffffff")); }
// Parses the configured roll interval ("<number> [unit]") into milliseconds. A missing unit
// defaults to hours; accepted units are minute/hour/day spellings. Rejects unparseable values,
// unknown units, and intervals under one minute with MetricsException.
// NOTE(review): the Pattern is recompiled on every call; it could be hoisted to a static final
// field of the enclosing class.
@VisibleForTesting protected long getRollInterval() { String rollInterval = properties.getString(ROLL_INTERVAL_KEY, DEFAULT_ROLL_INTERVAL); Pattern pattern = Pattern.compile("^\\s*(\\d+)\\s*([A-Za-z]*)\\s*$"); Matcher match = pattern.matcher(rollInterval); long millis; if (match.matches()) { String flushUnit = match.group(2); int rollIntervalInt; try { rollIntervalInt = Integer.parseInt(match.group(1)); } catch (NumberFormatException ex) { throw new MetricsException("Unrecognized flush interval: " + rollInterval + ". Must be a number followed by an optional " + "unit. The unit must be one of: minute, hour, day", ex); } if ("".equals(flushUnit)) { millis = TimeUnit.HOURS.toMillis(rollIntervalInt); } else { switch (flushUnit.toLowerCase()) { case "m": case "min": case "minute": case "minutes": millis = TimeUnit.MINUTES.toMillis(rollIntervalInt); break; case "h": case "hr": case "hour": case "hours": millis = TimeUnit.HOURS.toMillis(rollIntervalInt); break; case "d": case "day": case "days": millis = TimeUnit.DAYS.toMillis(rollIntervalInt); break; default: throw new MetricsException("Unrecognized unit for flush interval: " + flushUnit + ". Must be one of: minute, hour, day"); } } } else { throw new MetricsException("Unrecognized flush interval: " + rollInterval + ". Must be a number followed by an optional unit." + " The unit must be one of: minute, hour, day"); } if (millis < 60000) { throw new MetricsException("The flush interval property must be " + "at least 1 minute. Value was " + rollInterval); } return millis; }
// Checks every accepted unit spelling for minutes/hours/days, the hour default when no unit is
// given, and that every unrecognized single-letter unit is rejected with MetricsException.
@Test public void testGetRollInterval() { doTestGetRollInterval(1, new String[] {"m", "min", "minute", "minutes"}, 60 * 1000L); doTestGetRollInterval(1, new String[] {"h", "hr", "hour", "hours"}, 60 * 60 * 1000L); doTestGetRollInterval(1, new String[] {"d", "day", "days"}, 24 * 60 * 60 * 1000L); ConfigBuilder builder = new ConfigBuilder(); SubsetConfiguration conf = builder.add("sink.roll-interval", "1").subset("sink"); // We can reuse the same sink evry time because we're setting the same // property every time. RollingFileSystemSink sink = new RollingFileSystemSink(); sink.init(conf); assertEquals(3600000L, sink.getRollInterval()); for (char c : "abcefgijklnopqrtuvwxyz".toCharArray()) { builder = new ConfigBuilder(); conf = builder.add("sink.roll-interval", "90 " + c).subset("sink"); try { sink.init(conf); sink.getRollInterval(); fail("Allowed flush interval with bad units: " + c); } catch (MetricsException ex) { // Expected } } }
/**
 * Returns {@code str} with every whitespace character (per {@link Character#isWhitespace(char)})
 * removed. Null and empty inputs are returned unchanged, as is an input that contains no
 * whitespace (the original instance is handed back to avoid an allocation).
 */
public static String deleteWhitespace(String str) {
    if (isEmpty(str)) {
        return str;
    }
    int length = str.length();
    StringBuilder kept = new StringBuilder(length);
    for (int i = 0; i < length; i++) {
        char c = str.charAt(i);
        if (!Character.isWhitespace(c)) {
            kept.append(c);
        }
    }
    // Nothing was removed: return the original string instance.
    if (kept.length() == length) {
        return str;
    }
    return kept.toString();
}
// Null passes through, empty stays empty, whitespace-free input is unchanged, and embedded
// plus surrounding whitespace is stripped.
@Test void testDeleteWhitespace() { assertNull(StringUtils.deleteWhitespace(null)); assertEquals(StringUtils.EMPTY, StringUtils.deleteWhitespace("")); assertEquals("abc", StringUtils.deleteWhitespace("abc")); assertEquals("abc", StringUtils.deleteWhitespace("   ab  c  ")); }
// Flexes component container counts for a YARN service: builds a Service payload in FLEX state
// with one Component per requested count, serializes it to JSON, and PUTs it to the service REST
// endpoint. Any failure (including bad counts or transport errors) is logged and mapped to
// EXIT_EXCEPTION_THROWN rather than rethrown.
@Override public int actionFlex(String appName, Map<String, String> componentCounts) throws IOException, YarnException { int result = EXIT_SUCCESS; try { Service service = new Service(); service.setName(appName); service.setState(ServiceState.FLEX); for (Map.Entry<String, String> entry : componentCounts.entrySet()) { Component component = new Component(); component.setName(entry.getKey()); Long numberOfContainers = Long.parseLong(entry.getValue()); component.setNumberOfContainers(numberOfContainers); service.addComponent(component); } String buffer = jsonSerDeser.toJson(service); ClientResponse response = getApiClient(getServicePath(appName)) .put(ClientResponse.class, buffer); result = processResponse(response); } catch (Exception e) { LOG.error("Fail to flex application: ", e); result = EXIT_EXCEPTION_THROWN; } return result; }
// Flexing an unknown application through the bad client must return EXIT_EXCEPTION_THROWN
// instead of throwing.
@Test void testBadFlex() { String appName = "unknown_app"; HashMap<String, String> componentCounts = new HashMap<String, String>(); try { int result = badAsc.actionFlex(appName, componentCounts); assertEquals(EXIT_EXCEPTION_THROWN, result); } catch (IOException | YarnException e) { fail(); } }
// Convenience overload: delegates to the full variant with skipIncompatibleTypes=false
// (the trailing boolean argument).
public static void getSemanticPropsSingleFromString( SingleInputSemanticProperties result, String[] forwarded, String[] nonForwarded, String[] readSet, TypeInformation<?> inType, TypeInformation<?> outType) { getSemanticPropsSingleFromString( result, forwarded, nonForwarded, readSet, inType, outType, false); }
// Declaring the same field as both forwarded and non-forwarded is contradictory and must raise
// InvalidSemanticAnnotationException.
@Test void testForwardedNonForwardedCheck() { String[] forwarded = {"1"}; String[] nonForwarded = {"1"}; assertThatThrownBy( () -> SemanticPropUtil.getSemanticPropsSingleFromString( new SingleInputSemanticProperties(), forwarded, nonForwarded, null, threeIntTupleType, threeIntTupleType)) .isInstanceOf(InvalidSemanticAnnotationException.class); }
/**
 * Fails validation when TLS is enabled for the deployment but the endpoint certificate has not
 * yet been provisioned.
 *
 * @throws CertificateNotReadyException when a certificate is expected but still missing
 */
@Override
public void validate(Context context) {
    // Evaluate the accessor chain once; the original re-walked
    // context.deployState().endpointCertificateSecrets() four times.
    var secrets = context.deployState().endpointCertificateSecrets();
    if (secrets.isPresent() && secrets.get().isMissing()) {
        throw new CertificateNotReadyException(
                "TLS enabled, but could not yet retrieve certificate version %s for application %s"
                        .formatted(secrets.get().version(),
                                   context.deployState().getProperties().applicationId().serializedForm()));
    }
}
/** Validation must pass when no endpoint certificate has been requested at all. */
@Test
void validation_succeeds_without_certificate() throws Exception {
  DeployState deployState = deployState(servicesXml(), deploymentXml(), Optional.empty());
  VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
  ValidationTester.validate(new EndpointCertificateSecretsValidator(), model, deployState);
}
/**
 * Converts a Parquet {@link MessageType} into an Arrow {@link Schema} and
 * returns the mapping relating the two.
 */
public SchemaMapping fromParquet(MessageType parquetSchema) {
  final List<TypeMapping> typeMappings = fromParquet(parquetSchema.getFields());
  final Schema arrowSchema = new Schema(fields(typeMappings));
  return new SchemaMapping(arrowSchema, parquetSchema, typeMappings);
}
/** Parquet INT64 annotated TIME(MICROS) must map to Arrow Time(MICROSECOND, 64). */
@Test
public void testParquetInt64TimeMicrosToArrow() {
  MessageType parquet = Types.buildMessage()
      .addField(Types.optional(INT64)
          .as(LogicalTypeAnnotation.timeType(false, MICROS))
          .named("a"))
      .named("root");
  Schema expected = new Schema(asList(field("a", new ArrowType.Time(TimeUnit.MICROSECOND, 64))));
  Assert.assertEquals(expected, converter.fromParquet(parquet).getArrowSchema());
}
/**
 * Returns a shared iterator over zero elements.
 *
 * <p>The unchecked cast is safe: the empty iterator never produces a value,
 * so it may be typed as any {@code T}.
 */
public static <T> PrefetchableIterator<T> emptyIterator() {
  return (PrefetchableIterator<T>) EMPTY_ITERATOR;
}
/** The shared empty iterator yields no elements and always reports ready. */
@Test
public void testEmpty() {
  verifyIterator(PrefetchableIterators.emptyIterator());
  verifyIsAlwaysReady(PrefetchableIterators.emptyIterator());
}
/**
 * Patches the latest valid snapshot in {@code clusterDir} so that its
 * next/log service-session ids are consistent with the pending service
 * message count. A no-op when the snapshot is already consistent.
 *
 * @param clusterDir cluster directory containing the recording log
 * @return true if a patched snapshot replaced the original, false if no change was required
 * @throws IllegalArgumentException if {@code clusterDir} is not an existing directory
 * @throws ClusterException if no valid snapshot exists or patching fails part-way
 */
public boolean execute(final File clusterDir)
{
    if (!clusterDir.exists() || !clusterDir.isDirectory())
    {
        throw new IllegalArgumentException("invalid cluster directory: " + clusterDir.getAbsolutePath());
    }

    final RecordingLog.Entry entry = ClusterTool.findLatestValidSnapshot(clusterDir);
    if (null == entry)
    {
        throw new ClusterException("no valid snapshot found");
    }

    final long recordingId = entry.recordingId;
    final ClusterNodeControlProperties properties = ClusterTool.loadControlProperties(clusterDir);
    final RecordingSignalCapture recordingSignalCapture = new RecordingSignalCapture();
    try (Aeron aeron = Aeron.connect(new Aeron.Context().aeronDirectoryName(properties.aeronDirectoryName));
        AeronArchive archive = AeronArchive.connect(new AeronArchive.Context()
            .controlRequestChannel(archiveLocalRequestChannel)
            .controlRequestStreamId(archiveLocalRequestStreamId)
            .controlResponseChannel(IPC_CHANNEL)
            .recordingSignalConsumer(recordingSignalCapture)
            .aeron(aeron)))
    {
        // Replay the snapshot to extract its session-id bookkeeping fields.
        final SnapshotReader snapshotReader = new SnapshotReader();
        replayLocalSnapshotRecording(aeron, archive, recordingId, snapshotReader);

        // Target values are derived so that
        // targetNext - 1 - pendingCount == targetLog holds by construction.
        final long targetNextServiceSessionId = max(
            max(snapshotReader.nextServiceSessionId, snapshotReader.maxClusterSessionId + 1),
            snapshotReader.logServiceSessionId + 1 + snapshotReader.pendingServiceMessageCount);
        final long targetLogServiceSessionId =
            targetNextServiceSessionId - 1 - snapshotReader.pendingServiceMessageCount;

        // Patch only if the stored ids differ from the targets, or the pending
        // message cluster-session-id range does not line up with them.
        if (targetNextServiceSessionId != snapshotReader.nextServiceSessionId ||
            targetLogServiceSessionId != snapshotReader.logServiceSessionId ||
            0 != snapshotReader.pendingServiceMessageCount &&
            (targetLogServiceSessionId + 1 != snapshotReader.minClusterSessionId ||
            targetNextServiceSessionId - 1 != snapshotReader.maxClusterSessionId))
        {
            // Write a corrected copy of the snapshot into a temporary recording.
            final long tempRecordingId = createNewSnapshotRecording(
                aeron, archive, recordingId, targetLogServiceSessionId, targetNextServiceSessionId);

            final long stopPosition = awaitRecordingStopPosition(archive, recordingId);
            final long newStopPosition = awaitRecordingStopPosition(archive, tempRecordingId);
            if (stopPosition != newStopPosition)
            {
                throw new ClusterException("new snapshot recording incomplete: expectedStopPosition=" +
                    stopPosition + ", actualStopPosition=" + newStopPosition);
            }

            // Truncate the original and replicate the corrected copy over it.
            recordingSignalCapture.reset();
            archive.truncateRecording(recordingId, 0);
            recordingSignalCapture.awaitSignalForRecordingId(archive, recordingId, RecordingSignal.DELETE);

            final long replicationId = archive.replicate(
                tempRecordingId, recordingId, archive.context().controlRequestStreamId(), IPC_CHANNEL, null);
            recordingSignalCapture.reset();
            recordingSignalCapture.awaitSignalForCorrelationId(archive, replicationId, RecordingSignal.SYNC);
            final long replicatedStopPosition = recordingSignalCapture.position();
            if (stopPosition != replicatedStopPosition)
            {
                throw new ClusterException("incomplete replication of the new recording: expectedStopPosition=" +
                    stopPosition + ", replicatedStopPosition=" + replicatedStopPosition);
            }

            // Clean up the temporary recording.
            recordingSignalCapture.reset();
            archive.purgeRecording(tempRecordingId);
            recordingSignalCapture.awaitSignalForRecordingId(archive, tempRecordingId, RecordingSignal.DELETE);

            return true;
        }
    }

    return false;
}
/**
 * End-to-end check that the pending-service-messages patch is a no-op on an
 * already-consistent snapshot: execute() returns false and the snapshot's
 * session-id state and pending messages are unchanged afterwards.
 */
@Test
@SlowTest
@InterruptAfter(60)
void executeIsANoOpIfTheSnapshotIsValid()
{
    final IntFunction<TestNode.TestService[]> servicesSupplier = (i) -> new TestNode.TestService[]
    {
        new TestNode.MessageTrackingService(1, i),
        new TestNode.MessageTrackingService(2, i)
    };
    final TestCluster cluster = aCluster()
        .withStaticNodes(3)
        .withTimerServiceSupplier(new PriorityHeapTimerServiceSupplier())
        .withServiceSupplier(servicesSupplier)
        .start();
    systemTestWatcher.cluster(cluster);

    final int serviceCount = cluster.node(0).services().length;
    final TestNode leader = cluster.awaitLeader();
    cluster.connectClient();

    // Delay processing so messages accumulate before the snapshot is taken.
    TestNode.MessageTrackingService.delaySessionMessageProcessing(true);
    int messageCount = 0;
    final ExpandableArrayBuffer msgBuffer = cluster.msgBuffer();
    for (int i = 0; i < NUM_MESSAGES; i++)
    {
        msgBuffer.putInt(0, ++messageCount, LITTLE_ENDIAN);
        cluster.pollUntilMessageSent(SIZE_OF_INT);
    }
    cluster.takeSnapshot(leader);
    cluster.awaitSnapshotCount(1);
    TestNode.MessageTrackingService.delaySessionMessageProcessing(false);
    cluster.awaitResponseMessageCount(messageCount * serviceCount);
    awaitServiceMessages(cluster, serviceCount, messageCount);
    stopConsensusModulesAndServices(cluster);

    final File leaderClusterDir = leader.consensusModule().context().clusterDir();
    final RecordingLog.Entry leaderSnapshot = ClusterTool.findLatestValidSnapshot(leaderClusterDir);
    assertNotNull(leaderSnapshot);

    // Capture the snapshot's session-id state before running the patch.
    final MutableLong mutableNextSessionId = new MutableLong(NULL_SESSION_ID);
    final MutableLong mutableNextServiceSessionId = new MutableLong(NULL_SESSION_ID);
    final MutableLong mutableLogServiceSessionId = new MutableLong(NULL_SESSION_ID);
    final LongArrayList pendingMessageClusterSessionIds = new LongArrayList();
    final ConsensusModuleSnapshotListener stateReader = new NoOpConsensusModuleSnapshotListener()
    {
        public void onLoadConsensusModuleState(
            final long nextSessionId,
            final long nextServiceSessionId,
            final long logServiceSessionId,
            final int pendingMessageCapacity,
            final DirectBuffer buffer,
            final int offset,
            final int length)
        {
            mutableNextSessionId.set(nextSessionId);
            mutableNextServiceSessionId.set(nextServiceSessionId);
            mutableLogServiceSessionId.set(logServiceSessionId);
        }

        public void onLoadPendingMessage(
            final long clusterSessionId, final DirectBuffer buffer, final int offset, final int length)
        {
            pendingMessageClusterSessionIds.add(clusterSessionId);
        }
    };
    readSnapshotRecording(leader, leaderSnapshot.recordingId, stateReader);
    final long beforeNextSessionId = mutableNextSessionId.get();
    final long beforeNextServiceSessionId = mutableNextServiceSessionId.get();
    final long beforeLogServiceSessionId = mutableLogServiceSessionId.get();
    final long[] beforeClusterSessionIds = pendingMessageClusterSessionIds.toLongArray();
    assertNotEquals(NULL_SESSION_ID, beforeNextSessionId);
    assertNotEquals(NULL_SESSION_ID, beforeNextServiceSessionId);
    assertNotEquals(NULL_SESSION_ID, beforeLogServiceSessionId);
    assertNotEquals(beforeNextSessionId, beforeNextServiceSessionId);
    assertNotEquals(beforeNextSessionId, beforeLogServiceSessionId);
    assertNotEquals(beforeNextServiceSessionId, beforeLogServiceSessionId);
    assertNotEquals(0, beforeClusterSessionIds.length);

    final ConsensusModuleSnapshotPendingServiceMessagesPatch snapshotPatch =
        new ConsensusModuleSnapshotPendingServiceMessagesPatch(
        leader.archive().context().localControlChannel(), leader.archive().context().localControlStreamId());
    // A valid snapshot must not be modified.
    assertFalse(snapshotPatch.execute(leaderClusterDir));

    // Re-read the snapshot and verify nothing changed.
    mutableNextSessionId.set(NULL_SESSION_ID);
    mutableNextServiceSessionId.set(NULL_SESSION_ID);
    mutableLogServiceSessionId.set(NULL_SESSION_ID);
    pendingMessageClusterSessionIds.clear();
    readSnapshotRecording(leader, leaderSnapshot.recordingId, stateReader);
    assertEquals(beforeNextSessionId, mutableNextSessionId.get());
    assertEquals(beforeNextServiceSessionId, mutableNextServiceSessionId.get());
    assertEquals(beforeLogServiceSessionId, mutableLogServiceSessionId.get());
    assertArrayEquals(beforeClusterSessionIds, pendingMessageClusterSessionIds.toLongArray());
}
/**
 * Runs a push-notification experiment over the crawled accounts: for each
 * eligible device, records an initial state sample and applies either the
 * experimental or the control treatment. Honors the dry-run flag and caps
 * concurrency at each stage; blocks until the whole flux completes.
 */
@Override
protected void crawlAccounts(final Flux<Account> accounts) {
  final int maxConcurrency = getNamespace().getInt(MAX_CONCURRENCY_ARGUMENT);
  final boolean dryRun = getNamespace().getBoolean(DRY_RUN_ARGUMENT);

  final PushNotificationExperiment<T> experiment =
      experimentFactory.buildExperiment(getCommandDependencies(), getConfiguration());

  final PushNotificationExperimentSamples pushNotificationExperimentSamples =
      getCommandDependencies().pushNotificationExperimentSamples();

  log.info("Starting \"{}\" with max concurrency: {}", experiment.getExperimentName(), maxConcurrency);

  accounts
      // Fan out to (account, device) pairs.
      .flatMap(account -> Flux.fromIterable(account.getDevices()).map(device -> Tuples.of(account, device)))
      .doOnNext(ignored -> DEVICE_INSPECTED_COUNTER.increment())
      // Keep only devices the experiment deems eligible.
      .flatMap(accountAndDevice ->
          Mono.fromFuture(() -> experiment.isDeviceEligible(accountAndDevice.getT1(), accountAndDevice.getT2()))
              .mapNotNull(eligible -> eligible ? accountAndDevice : null),
          maxConcurrency)
      .flatMap(accountAndDevice -> {
        final UUID accountIdentifier = accountAndDevice.getT1().getIdentifier(IdentityType.ACI);
        final byte deviceId = accountAndDevice.getT2().getId();

        // Record the device's initial state (skipped when dry-running),
        // retrying with backoff before giving up on this device.
        final Mono<Boolean> recordInitialSampleMono = dryRun
            ? Mono.just(true)
            : Mono.fromFuture(() -> {
                  try {
                    return pushNotificationExperimentSamples.recordInitialState(
                        accountIdentifier,
                        deviceId,
                        experiment.getExperimentName(),
                        isInExperimentGroup(accountIdentifier, deviceId, experiment.getExperimentName()),
                        experiment.getState(accountAndDevice.getT1(), accountAndDevice.getT2()));
                  } catch (final JsonProcessingException e) {
                    // Serialization failure of the state payload is unrecoverable here.
                    throw new UncheckedIOException(e);
                  }
                })
                .retryWhen(Retry.backoff(3, Duration.ofSeconds(1))
                    .onRetryExhaustedThrow(((backoffSpec, retrySignal) -> retrySignal.failure())));

        return recordInitialSampleMono.mapNotNull(stateStored -> {
              Metrics.counter(RECORD_INITIAL_SAMPLE_COUNTER_NAME,
                      DRY_RUN_TAG_NAME, String.valueOf(dryRun),
                      "initialSampleAlreadyExists", String.valueOf(!stateStored))
                  .increment();

              // Drop devices whose sample already existed.
              return stateStored ? accountAndDevice : null;
            })
            .onErrorResume(throwable -> {
              // Failures are logged and the device skipped; the crawl continues.
              log.warn("Failed to record initial sample for {}:{} in experiment {}",
                  accountIdentifier, deviceId, experiment.getExperimentName(), throwable);

              return Mono.empty();
            });
      }, maxConcurrency)
      .flatMap(accountAndDevice -> {
        final Account account = accountAndDevice.getT1();
        final Device device = accountAndDevice.getT2();

        final boolean inExperimentGroup = isInExperimentGroup(account.getIdentifier(IdentityType.ACI),
            device.getId(), experiment.getExperimentName());

        // Apply the treatment (skipped when dry-running); failures are logged
        // and swallowed so one device cannot abort the crawl.
        final Mono<Void> applyTreatmentMono = dryRun
            ? Mono.empty()
            : Mono.fromFuture(() -> inExperimentGroup
                  ? experiment.applyExperimentTreatment(account, device)
                  : experiment.applyControlTreatment(account, device))
                .onErrorResume(throwable -> {
                  log.warn("Failed to apply {} treatment for {}:{} in experiment {}",
                      inExperimentGroup ? "experimental" : " control",
                      account.getIdentifier(IdentityType.ACI), device.getId(), experiment.getExperimentName(),
                      throwable);

                  return Mono.empty();
                });

        return applyTreatmentMono
            .doOnSuccess(ignored -> Metrics.counter(APPLY_TREATMENT_COUNTER_NAME,
                DRY_RUN_TAG_NAME, String.valueOf(dryRun),
                "treatment", inExperimentGroup ? "experiment" : "control").increment());
      }, maxConcurrency)
      .then()
      .block();
}
/** A failing experiment treatment must be swallowed, not propagate out of the crawl. */
@Test
void crawlAccountsExperimentException() {
  final Device device = mock(Device.class);
  when(device.getId()).thenReturn(DEVICE_ID);

  final Account account = mock(Account.class);
  when(account.getIdentifier(IdentityType.ACI)).thenReturn(ACCOUNT_IDENTIFIER);
  when(account.getDevices()).thenReturn(List.of(device));

  when(experiment.applyExperimentTreatment(account, device))
      .thenReturn(CompletableFuture.failedFuture(new RuntimeException()));

  assertDoesNotThrow(() -> startPushNotificationExperimentCommand.crawlAccounts(Flux.just(account)));
  verify(experiment).applyExperimentTreatment(account, device);
}
/**
 * Rewrites SQL without sharding routing and runs it through the translator.
 * Falls back to the protocol database type when no storage units exist.
 */
public GenericSQLRewriteResult rewrite(final SQLRewriteContext sqlRewriteContext, final QueryContext queryContext) {
    final Map<String, StorageUnit> storageUnits = database.getResourceMetaData().getStorageUnits();
    final DatabaseType storageType;
    if (storageUnits.isEmpty()) {
        storageType = database.getProtocolType();
    } else {
        storageType = storageUnits.values().iterator().next().getStorageType();
    }
    final String composedSql =
            new DefaultSQLBuilder(sqlRewriteContext.getSql(), sqlRewriteContext.getSqlTokens()).toSQL();
    final SQLTranslatorContext translated = translatorRule.translate(composedSql,
            sqlRewriteContext.getParameterBuilder().getParameters(), queryContext, storageType, database, globalRuleMetaData);
    return new GenericSQLRewriteResult(new SQLRewriteUnit(translated.getSql(), translated.getParameters()));
}
/** With no storage units registered, rewriting must pass the SQL through unchanged. */
@Test
void assertRewriteStorageTypeIsEmpty() {
  SQLTranslatorRule rule = new SQLTranslatorRule(new DefaultSQLTranslatorRuleConfigurationBuilder().build());
  ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
  when(database.getName()).thenReturn(DefaultDatabase.LOGIC_NAME);
  when(database.getSchemas()).thenReturn(Collections.singletonMap("test", mock(ShardingSphereSchema.class)));
  // Empty storage units drive the protocol-type fallback path.
  when(database.getResourceMetaData().getStorageUnits()).thenReturn(Collections.emptyMap());
  CommonSQLStatementContext sqlStatementContext = mock(CommonSQLStatementContext.class);
  DatabaseType databaseType = mock(DatabaseType.class);
  when(sqlStatementContext.getDatabaseType()).thenReturn(databaseType);
  QueryContext queryContext = mock(QueryContext.class, RETURNS_DEEP_STUBS);
  when(queryContext.getSqlStatementContext()).thenReturn(sqlStatementContext);
  GenericSQLRewriteResult actual = new GenericSQLRewriteEngine(rule, database, mock(RuleMetaData.class))
      .rewrite(new SQLRewriteContext(database, sqlStatementContext, "SELECT 1", Collections.emptyList(),
          mock(ConnectionContext.class), new HintValueContext()), queryContext);
  assertThat(actual.getSqlRewriteUnit().getSql(), is("SELECT 1"));
  assertThat(actual.getSqlRewriteUnit().getParameters(), is(Collections.emptyList()));
}
/**
 * Confirms the final schedule of a meeting. Only the host may confirm, and
 * only while the meeting is locked, not yet confirmed, and the requested
 * range fits the meeting's allowed times and dates.
 */
@Transactional
public MeetingConfirmResponse create(String uuid, long attendeeId, MeetingConfirmRequest request) {
    LocalDateTime start = request.toStartDateTime();
    LocalDateTime end = request.toEndDateTime();

    Meeting meeting = meetingRepository.findByUuid(uuid)
            .orElseThrow(() -> new MomoException(MeetingErrorCode.INVALID_UUID));
    Attendee attendee = attendeeRepository.findByIdAndMeeting(attendeeId, meeting)
            .orElseThrow(() -> new MomoException(AttendeeErrorCode.INVALID_ATTENDEE));

    // All preconditions must hold before the confirmation is persisted.
    validateHostPermission(attendee);
    validateNotAlreadyConfirmed(meeting);
    validateMeetingLocked(meeting);
    validateTimeRange(meeting, start, end);
    validateDateRange(meeting, start, end);

    ConfirmedMeeting confirmed = new ConfirmedMeeting(meeting, start, end);
    confirmedMeetingRepository.save(confirmed);
    return MeetingConfirmResponse.from(confirmed);
}
// Confirming with an unknown meeting UUID must raise INVALID_UUID.
@DisplayName("주최자가 잠겨있는 약속 일정을 확정할 때, UUID가 유효하지 않으면 예외가 발생한다.")
@Test
void confirmScheduleThrowsExceptionWhen_InvalidMeeting() {
  String invalidUuid = "invalidUuid";

  assertThatThrownBy(() -> meetingConfirmService.create(invalidUuid, attendee.getId(), validRequest))
      .isInstanceOf(MomoException.class)
      .hasMessage(MeetingErrorCode.INVALID_UUID.message());
}
/**
 * Lists a directory's contents, paging through results in chunks of the
 * configured {@code box.listing.chunksize} preference.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final int chunksize = new HostPreferences(session.getHost()).getInteger("box.listing.chunksize");
    return this.list(directory, listener, chunksize);
}
/** Root listing must be non-empty and each entry's attributes must be fully populated. */
@Test
public void testListRoot() throws Exception {
  final BoxFileidProvider fileid = new BoxFileidProvider(session);
  final Path directory = new Path("/", EnumSet.of(AbstractPath.Type.directory, Path.Type.volume));
  final AttributedList<Path> list = new BoxListService(session, fileid).list(
      directory, new DisabledListProgressListener());
  assertFalse(list.isEmpty());
  for (Path f : list) {
    // Listing attributes must match a fresh per-file attribute lookup.
    assertEquals(f.attributes(), new BoxAttributesFinderFeature(session, fileid).find(f, new DisabledListProgressListener()));
    assertSame(directory, f.getParent());
    assertNotEquals(TransferStatus.UNKNOWN_LENGTH, f.attributes().getSize());
    assertNotNull(f.attributes().getFileId());
    if (f.isFile()) {
      assertNotEquals(Checksum.NONE, f.attributes().getChecksum());
    }
    assertTrue(f.attributes().getModificationDate() > 0);
    assertTrue(f.attributes().getCreationDate() > 0);
  }
}
/**
 * Determines whether {@code subject} is stale: its last receipt lies more
 * than {@code maxAgeHours} in the past. The user's own device is never
 * stale, and a null receipt date never marks staleness.
 */
static boolean isStale(OmemoDevice userDevice, OmemoDevice subject, Date lastReceipt, int maxAgeHours) {
    if (userDevice.equals(subject) || lastReceipt == null) {
        return false;
    }
    final long elapsedMillis = new Date().getTime() - lastReceipt.getTime();
    return elapsedMillis > MILLIS_PER_HOUR * maxAgeHours;
}
/** Staleness edge cases: an old foreign device, one's own device, and a null receipt date. */
@Test
public void isStaleDeviceTest() throws XmppStringprepException {
  OmemoDevice user = new OmemoDevice(JidCreate.bareFrom("alice@wonderland.lit"), 123);
  OmemoDevice other = new OmemoDevice(JidCreate.bareFrom("bob@builder.tv"), 444);

  Date now = new Date();
  Date deleteMe = new Date(now.getTime() - ((DELETE_STALE + 1) * ONE_HOUR));

  // Devices one hour "older" than max ages are stale
  assertTrue(OmemoService.isStale(user, other, deleteMe, DELETE_STALE));

  // Own device is never stale, no matter how old
  assertFalse(OmemoService.isStale(user, user, deleteMe, DELETE_STALE));

  // Always return false if date is null.
  assertFalse(OmemoService.isStale(user, other, null, DELETE_STALE));
}
/**
 * Asserts that the given collection contains at least one element, as judged
 * by {@code CollectionUtil.isEmpty}.
 *
 * @throws IllegalArgumentException with {@code message} when the check fails
 */
public static void notEmpty(Collection<?> collection, String message) {
    if (!CollectionUtil.isEmpty(collection)) {
        return;
    }
    throw new IllegalArgumentException(message);
}
// NOTE(review): this exercises the single-argument String overload of
// Assert.notEmpty, not the (Collection, String) overload above — confirm intent.
@Test(expected = IllegalArgumentException.class)
public void assertNotEmptyByStringAndMessageIsNull() {
  Assert.notEmpty("");
}
/**
 * Dispatches an rmadmin CLI command: handles -help, HA-only commands,
 * federation sub-cluster-id parsing, and each refresh/get/update/label
 * command, translating failures into a non-zero exit code.
 *
 * @return 0 on success, -1 on usage error or command failure
 */
@Override
public int run(String[] args) throws Exception {
  YarnConfiguration yarnConf = getConf() == null ? new YarnConfiguration() : new YarnConfiguration(getConf());
  boolean isHAEnabled = yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED,
      YarnConfiguration.DEFAULT_RM_HA_ENABLED);

  if (args.length < 1) {
    printUsage("", isHAEnabled);
    return -1;
  }

  int exitCode = -1;
  int i = 0;
  String cmd = args[i++];

  exitCode = 0;
  if ("-help".equals(cmd)) {
    if (i < args.length) {
      printUsage(args[i], isHAEnabled);
    } else {
      printHelp("", isHAEnabled);
    }
    return exitCode;
  }

  // HA-administration commands (those in USAGE) are delegated to the parent
  // CLI and are only valid when ResourceManager HA is enabled.
  if (USAGE.containsKey(cmd)) {
    if (isHAEnabled) {
      return super.run(args);
    }
    System.out.println("Cannot run " + cmd + " when ResourceManager HA is not enabled");
    return -1;
  }

  //
  // verify that we have enough command line parameters
  //
  String subClusterId = StringUtils.EMPTY;
  if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) ||
      "-refreshNodesResources".equals(cmd) || "-refreshServiceAcl".equals(cmd) ||
      "-refreshUserToGroupsMappings".equals(cmd) ||
      "-refreshSuperUserGroupsConfiguration".equals(cmd) ||
      "-refreshClusterMaxPriority".equals(cmd)) {
    subClusterId = parseSubClusterId(args, isHAEnabled);
    // If we enable Federation mode, the number of args may be either one or three.
    // Example: -refreshQueues or -refreshQueues -subClusterId SC-1
    if (isYarnFederationEnabled(getConf()) && args.length != 1 && args.length != 3) {
      printUsage(cmd, isHAEnabled);
      return exitCode;
    } else if (!isYarnFederationEnabled(getConf()) && args.length != 1) {
      // If Federation mode is not enabled, then the number of args can only be one.
      // Example: -refreshQueues
      printUsage(cmd, isHAEnabled);
      return exitCode;
    }
  }

  // If it is federation mode, we will print federation mode information
  if (isYarnFederationEnabled(getConf())) {
    System.out.println("Using YARN Federation mode.");
  }

  try {
    if ("-refreshQueues".equals(cmd)) {
      exitCode = refreshQueues(subClusterId);
    } else if ("-refreshNodes".equals(cmd)) {
      exitCode = handleRefreshNodes(args, cmd, isHAEnabled);
    } else if ("-refreshNodesResources".equals(cmd)) {
      exitCode = refreshNodesResources(subClusterId);
    } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
      exitCode = refreshUserToGroupsMappings(subClusterId);
    } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
      exitCode = refreshSuperUserGroupsConfiguration(subClusterId);
    } else if ("-refreshAdminAcls".equals(cmd)) {
      exitCode = refreshAdminAcls(subClusterId);
    } else if ("-refreshServiceAcl".equals(cmd)) {
      exitCode = refreshServiceAcls(subClusterId);
    } else if ("-refreshClusterMaxPriority".equals(cmd)) {
      exitCode = refreshClusterMaxPriority(subClusterId);
    } else if ("-getGroups".equals(cmd)) {
      // Remaining args are the user names to resolve.
      String[] usernames = Arrays.copyOfRange(args, i, args.length);
      exitCode = getGroups(usernames);
    } else if ("-updateNodeResource".equals(cmd)) {
      exitCode = handleUpdateNodeResource(args, cmd, isHAEnabled, subClusterId);
    } else if ("-addToClusterNodeLabels".equals(cmd)) {
      exitCode = handleAddToClusterNodeLabels(args, cmd, isHAEnabled);
    } else if ("-removeFromClusterNodeLabels".equals(cmd)) {
      exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled);
    } else if ("-replaceLabelsOnNode".equals(cmd)) {
      exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled);
    } else {
      exitCode = -1;
      System.err.println(cmd.substring(1) + ": Unknown command");
      printUsage("", isHAEnabled);
    }
  } catch (IllegalArgumentException arge) {
    exitCode = -1;
    System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
    printUsage(cmd, isHAEnabled);
  } catch (RemoteException e) {
    //
    // This is a error returned by hadoop server. Print
    // out the first line of the error message, ignore the stack trace.
    exitCode = -1;
    try {
      String[] content;
      content = e.getLocalizedMessage().split("\n");
      System.err.println(cmd.substring(1) + ": " + content[0]);
    } catch (Exception ex) {
      System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage());
    }
  } catch (Exception e) {
    exitCode = -1;
    System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage());
  }

  // Stop the locally-instantiated node labels manager, if one was created.
  if (null != localNodeLabelsManager) {
    localNodeLabelsManager.stop();
  }

  return exitCode;
}
/** Replacing labels on a node with more than one label must fail (non-zero exit). */
@Test
public void testReplaceMultipleLabelsOnSingleNode() throws Exception {
  // "node1,x,y" maps two labels onto one node — the CLI rejects this.
  dummyNodeLabelsManager.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
  String[] args =
      { "-replaceLabelsOnNode", "node1,x,y", "-directlyAccessNodeLabelStore" };
  assertTrue(0 != rmAdminCLI.run(args));
}
/**
 * Returns a defensive copy of the live components' keys, in the underlying
 * map's iteration order, so callers cannot mutate the live key set.
 */
List<String> liveKeysAsOrderedList() {
  // Diamond operator: type argument is inferred from the declared return type.
  return new ArrayList<>(liveMap.keySet());
}
/** Stale components are purged after the timeout and can be re-created afterwards. */
@Test
public void empty1() {
  long now = 3000;
  assertNotNull(tracker.getOrCreate(key, now++));
  // Advance past the default timeout so the component becomes stale.
  now += ComponentTracker.DEFAULT_TIMEOUT + 1000;
  tracker.removeStaleComponents(now);
  assertEquals(0, tracker.liveKeysAsOrderedList().size());
  assertEquals(0, tracker.getComponentCount());
  assertNotNull(tracker.getOrCreate(key, now++));
}
/**
 * Writes an unsigned 16-bit value, low byte first.
 *
 * @throws ExceptionWithContext if the value does not fit in 16 unsigned bits
 */
public void writeUshort(int value) throws IOException {
    // Masking check: any negative or >0xFFFF value differs from its low 16 bits.
    if ((value & 0xFFFF) != value) {
        throw new ExceptionWithContext("Unsigned short value out of range: %d", value);
    }
    write(value);
    write(value >> 8);
}
/** Negative values do not fit in an unsigned short and must be rejected. */
@Test(expected=ExceptionWithContext.class)
public void testWriteUshortOutOfBounds() throws IOException {
  writer.writeUshort(-1);
}
/**
 * Returns a copy of {@code schema} without the named fields. Relative order
 * of the remaining fields is preserved; names not present are ignored.
 */
static Schema removeFields(Schema schema, String... fields) {
  // Arrays.asList wraps the varargs array directly — no stream/collect needed.
  List<String> exclude = Arrays.asList(fields);
  Schema.Builder builder = Schema.builder();
  for (Field field : schema.getFields()) {
    if (exclude.contains(field.getName())) {
      continue;
    }
    builder.addField(field);
  }
  return builder.build();
}
/** Timestamp extraction with and without the event-timestamp field present. */
@Test
public void testPubsubRowToMessageParDo_timestamp() {
  Instant timestamp = Instant.now();
  Row withTimestamp =
      Row.withSchema(NON_USER_WITH_BYTES_PAYLOAD)
          .addValues(ImmutableMap.of(), timestamp, new byte[] {})
          .build();
  // Field present: the row's own timestamp is used verbatim.
  assertEquals(
      timestamp.toString(),
      doFn(NON_USER_WITH_BYTES_PAYLOAD, null).timestamp(withTimestamp).toString());

  Schema withoutTimestampSchema =
      removeFields(NON_USER_WITH_BYTES_PAYLOAD, DEFAULT_EVENT_TIMESTAMP_KEY_NAME);
  Row withoutTimestamp =
      Row.withSchema(withoutTimestampSchema).addValues(ImmutableMap.of(), new byte[] {}).build();
  // Field absent: some non-null timestamp must still be produced.
  ReadableDateTime actual = doFn(withoutTimestampSchema, null).timestamp(withoutTimestamp);
  assertNotNull(actual);
}
/**
 * Handles a job submission: validates the multipart upload, loads the
 * serialized JobGraph, uploads jars/artifacts, and submits to the dispatcher.
 *
 * @return future completing with the URL of the submitted job
 * @throws RestHandlerException if uploaded file names collide or the job graph file name is missing
 */
@Override
protected CompletableFuture<JobSubmitResponseBody> handleRequest(
        @Nonnull HandlerRequest<JobSubmitRequestBody> request, @Nonnull DispatcherGateway gateway)
        throws RestHandlerException {
    final Collection<File> uploadedFiles = request.getUploadedFiles();
    // Duplicate file names collapse in the map; detected below as a bad request.
    final Map<String, Path> nameToFile =
            uploadedFiles.stream().collect(Collectors.toMap(File::getName, Path::fromLocalFile));

    if (uploadedFiles.size() != nameToFile.size()) {
        throw new RestHandlerException(
                String.format(
                        "The number of uploaded files was %s than the expected count. Expected: %s Actual %s",
                        uploadedFiles.size() < nameToFile.size() ? "lower" : "higher",
                        nameToFile.size(),
                        uploadedFiles.size()),
                HttpResponseStatus.BAD_REQUEST);
    }

    final JobSubmitRequestBody requestBody = request.getRequestBody();

    if (requestBody.jobGraphFileName == null) {
        throw new RestHandlerException(
                String.format(
                        "The %s field must not be omitted or be null.",
                        JobSubmitRequestBody.FIELD_NAME_JOB_GRAPH),
                HttpResponseStatus.BAD_REQUEST);
    }

    CompletableFuture<JobGraph> jobGraphFuture = loadJobGraph(requestBody, nameToFile);

    Collection<Path> jarFiles = getJarFilesToUpload(requestBody.jarFileNames, nameToFile);

    Collection<Tuple2<String, Path>> artifacts =
            getArtifactFilesToUpload(requestBody.artifactFileNames, nameToFile);

    // Attach jars/artifacts to the graph before handing it to the dispatcher.
    CompletableFuture<JobGraph> finalizedJobGraphFuture =
            uploadJobGraphFiles(gateway, jobGraphFuture, jarFiles, artifacts, configuration);

    CompletableFuture<Acknowledge> jobSubmissionFuture =
            finalizedJobGraphFuture.thenCompose(jobGraph -> gateway.submitJob(jobGraph, timeout));

    return jobSubmissionFuture.thenCombine(
            jobGraphFuture,
            (ack, jobGraph) -> new JobSubmitResponseBody("/jobs/" + jobGraph.getJobID()));
}
/** A dispatcher submission failure must surface through the returned future with its message. */
@TestTemplate
void testFailedJobSubmission() throws Exception {
  final String errorMessage = "test";
  DispatcherGateway mockGateway =
      TestingDispatcherGateway.newBuilder()
          .setSubmitFunction(
              jobgraph -> FutureUtils.completedExceptionally(new Exception(errorMessage)))
          .build();
  JobSubmitHandler handler =
      new JobSubmitHandler(
          () -> CompletableFuture.completedFuture(mockGateway),
          RpcUtils.INF_TIMEOUT,
          Collections.emptyMap(),
          Executors.directExecutor(),
          configuration);

  final Path jobGraphFile = TempDirUtils.newFile(temporaryFolder).toPath();

  // Serialize an empty job graph to disk as the uploaded "file".
  JobGraph jobGraph = JobGraphTestUtils.emptyJobGraph();
  try (ObjectOutputStream objectOut = new ObjectOutputStream(Files.newOutputStream(jobGraphFile))) {
    objectOut.writeObject(jobGraph);
  }
  JobSubmitRequestBody request =
      new JobSubmitRequestBody(
          jobGraphFile.getFileName().toString(), Collections.emptyList(), Collections.emptyList());

  assertThatFuture(
          handler.handleRequest(
              HandlerRequest.create(
                  request,
                  EmptyMessageParameters.getInstance(),
                  Collections.singletonList(jobGraphFile.toFile())),
              mockGateway))
      .eventuallyFailsWith(Exception.class)
      .withMessageContaining(errorMessage);
}
/**
 * Creates a permission via the factory registered for {@code serviceName}.
 *
 * @throws IllegalArgumentException if no factory is registered for the service
 */
public static Permission getPermission(String name, String serviceName, String... actions) {
    final PermissionFactory factory = PERMISSION_FACTORY_MAP.get(serviceName);
    if (factory != null) {
        return factory.create(name, actions);
    }
    throw new IllegalArgumentException("No permissions found for service: " + serviceName);
}
/** A MultiMapPermission factory must be registered for the MultiMap service. */
@Test
public void getPermission_MultiMap() {
  Permission permission = ActionConstants.getPermission("foo", MultiMapService.SERVICE_NAME);
  assertNotNull(permission);
  assertTrue(permission instanceof MultiMapPermission);
}
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) { CharStream input = CharStreams.fromString(source); FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input ); CommonTokenStream tokens = new CommonTokenStream( lexer ); FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens ); ParserHelper parserHelper = new ParserHelper(eventsManager); additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol())); parser.setHelper(parserHelper); parser.setErrorHandler( new FEELErrorHandler() ); parser.removeErrorListeners(); // removes the error listener that prints to the console parser.addErrorListener( new FEELParserErrorListener( eventsManager ) ); // pre-loads the parser with symbols defineVariables( inputVariableTypes, inputVariables, parser ); if (typeRegistry != null) { parserHelper.setTypeRegistry(typeRegistry); } return parser; }
/** "10 * x" parses to an InfixOpNode: NUMBER result, number literal left, name ref right. */
@Test
void multiplication() {
  String inputExpression = "10 * x";
  BaseNode infix = parse(inputExpression, mapOf(entry("x", BuiltInType.NUMBER)));

  assertThat(infix).isInstanceOf(InfixOpNode.class);
  assertThat(infix.getResultType()).isEqualTo(BuiltInType.NUMBER);
  assertThat(infix.getText()).isEqualTo(inputExpression);

  InfixOpNode mult = (InfixOpNode) infix;
  assertThat(mult.getLeft()).isInstanceOf(NumberNode.class);
  assertThat(mult.getLeft().getText()).isEqualTo("10");
  assertThat(mult.getOperator()).isEqualTo(InfixOperator.MULT);
  assertThat(mult.getRight()).isInstanceOf(NameRefNode.class);
  assertThat(mult.getRight().getText()).isEqualTo("x");
}
/** Evaluates the wrapped predicate against the given map entry. */
@Override
public boolean apply(Map.Entry mapEntry) {
  return predicate.apply(mapEntry);
}
/** An SQL predicate combining AND/OR over a Record's fields must match a matching entry. */
@Test
public void testRecordPredicate() {
  Record record = new Record("ONE", "TWO", "THREE");
  SqlPredicate predicate =
      new SqlPredicate("str1 = 'ONE' AND str2 = 'TWO' AND (str3 = 'THREE' OR str3 = 'three')");
  Map.Entry entry = createEntry("1", record);
  assertTrue(predicate.apply(entry));
}
/**
 * Returns the window expression, rewritten for EMIT FINAL queries to carry
 * FINAL refinement and a zero grace period when none was declared. Returned
 * unchanged for non-windowed queries or EMIT CHANGES refinement.
 *
 * @throws KsqlException if the window is not HOPPING, TUMBLING, or SESSION
 */
@Override
public Optional<WindowExpression> getWindowExpression() {
    final Optional<WindowExpression> windowExpression = original.getWindowExpression();
    final Optional<RefinementInfo> refinementInfo = original.getRefinementInfo();

    // we only need to rewrite if we have a window expression and if we use emit final
    if (!windowExpression.isPresent()
        || !refinementInfo.isPresent()
        || refinementInfo.get().getOutputRefinement() == OutputRefinement.CHANGES) {
      return original.getWindowExpression();
    }

    // EMIT FINAL defaults to a zero grace period when none was declared.
    final Optional<WindowTimeClause> gracePeriod;
    if (!windowExpression.get().getKsqlWindowExpression().getGracePeriod().isPresent()) {
      gracePeriod = Optional.of(zeroGracePeriod);
    } else {
      gracePeriod = windowExpression.get().getKsqlWindowExpression().getGracePeriod();
    }

    final WindowExpression window = original.getWindowExpression().get();

    final KsqlWindowExpression ksqlWindowNew;
    final KsqlWindowExpression ksqlWindowOld = window.getKsqlWindowExpression();
    final Optional<NodeLocation> location = ksqlWindowOld.getLocation();
    final Optional<WindowTimeClause> retention = ksqlWindowOld.getRetention();

    // Rebuild the concrete window type with FINAL refinement baked in.
    if (ksqlWindowOld instanceof HoppingWindowExpression) {
      ksqlWindowNew = new HoppingWindowExpression(
          location,
          ((HoppingWindowExpression) ksqlWindowOld).getSize(),
          ((HoppingWindowExpression) ksqlWindowOld).getAdvanceBy(),
          retention,
          gracePeriod,
          Optional.of(OutputRefinement.FINAL)
      );
    } else if (ksqlWindowOld instanceof TumblingWindowExpression) {
      ksqlWindowNew = new TumblingWindowExpression(
          location,
          ((TumblingWindowExpression) ksqlWindowOld).getSize(),
          retention,
          gracePeriod,
          Optional.of(OutputRefinement.FINAL)
      );
    } else if (ksqlWindowOld instanceof SessionWindowExpression) {
      ksqlWindowNew = new SessionWindowExpression(
          location,
          ((SessionWindowExpression) ksqlWindowOld).getGap(),
          retention,
          gracePeriod,
          Optional.of(OutputRefinement.FINAL)
      );
    } else {
      throw new KsqlException("WINDOW type must be HOPPING, TUMBLING, or SESSION");
    }

    return Optional.of(new WindowExpression(
        original.getWindowExpression().get().getWindowName(),
        ksqlWindowNew
    ));
}
/** A window type other than hopping/tumbling/session must raise a KsqlException. */
@Test
public void shouldThrowIfUnsupportedWindowType() {
  // Given:
  when(windowExpression.getKsqlWindowExpression()).thenReturn(unsupportedWindowType);
  when(unsupportedWindowType.getGracePeriod()).thenReturn(gracePeriodOptional);
  when(gracePeriodOptional.isPresent()).thenReturn(false);

  // When:
  final Exception e = assertThrows(
      KsqlException.class,
      () -> rewrittenAnalysis.getWindowExpression()
  );

  // Then
  assertThat(e.getMessage(), containsString("WINDOW type must be HOPPING, TUMBLING, or SESSION"));
}
// Sends a VERIFY request for the given distro data to a remote cluster member.
// Returns true when the target no longer exists (nothing to verify) or the
// remote acknowledges; false on an unhealthy target, a failed response, or an
// RPC error (which is logged, not rethrown).
@Override
public boolean syncVerifyData(DistroData verifyData, String targetServer) {
    if (isNoExistTarget(targetServer)) {
        return true;
    }
    // replace target server as self server so that can callback.
    verifyData.getDistroKey().setTargetServer(memberManager.getSelf().getAddress());
    DistroDataRequest request = new DistroDataRequest(verifyData, DataOperation.VERIFY);
    Member member = memberManager.find(targetServer);
    if (checkTargetServerStatusUnhealthy(member)) {
        Loggers.DISTRO
            .warn("[DISTRO] Cancel distro verify caused by target server {} unhealthy, key: {}", targetServer,
                verifyData.getDistroKey());
        return false;
    }
    try {
        Response response = clusterRpcClientProxy.sendRequest(member, request);
        return checkResponse(response);
    } catch (NacosException e) {
        Loggers.DISTRO.error("[DISTRO-FAILED] Verify distro data failed! key: {} ", verifyData.getDistroKey(), e);
    }
    return false;
}
// Happy path of the callback variant of syncVerifyData: a known, UP member
// with a running RPC client must trigger the success callback.
@Test
void testSyncVerifyDataWithCallbackSuccess() throws NacosException {
    DistroData verifyData = new DistroData();
    verifyData.setDistroKey(new DistroKey());
    when(memberManager.hasMember(member.getAddress())).thenReturn(true);
    when(memberManager.find(member.getAddress())).thenReturn(member);
    member.setState(NodeState.UP);
    when(clusterRpcClientProxy.isRunning(member)).thenReturn(true);
    transportAgent.syncVerifyData(verifyData, member.getAddress(), distroCallback);
    verify(distroCallback).onSuccess();
}
// Merges incoming double-column statistics into the running aggregate:
// min/max bounds, null counts, the NDV sketch (distinct-value estimator),
// and the KLL histogram estimator. The aggregate object is updated in place.
@Override public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats); DoubleColumnStatsDataInspector aggregateData = doubleInspectorFromStats(aggregateColStats); DoubleColumnStatsDataInspector newData = doubleInspectorFromStats(newColStats); Double lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData)); if (lowValue != null) { aggregateData.setLowValue(lowValue); } Double highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData)); if (highValue != null) { aggregateData.setHighValue(highValue); } aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls())); NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator(); NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator(); List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst); aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(), ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs())); aggregateData.setNdvEstimator(ndvEstimatorsList.get(0)); KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator(); KllHistogramEstimator newKllEst = newData.getHistogramEstimator(); aggregateData.setHistogramEstimator(mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst)); aggregateColStats.getStatsData().setDoubleStats(aggregateData); }
// Merging populated stats with stats whose low/high are null must keep the
// existing bounds and sketches while still summing the null counts.
@Test public void testMergeNonNullWithNullValues() { ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(double.class) .low(1d) .high(3d) .numNulls(4) .numDVs(2) .hll(1d, 3d, 3d) .kll(1d, 3d, 3d) .build()); ColumnStatisticsObj newObj = createColumnStatisticsObj(new ColStatsBuilder<>(double.class) .low(null) .high(null) .numNulls(2) .numDVs(0) .build()); merger.merge(aggrObj, newObj); ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(double.class) .low(1d) .high(3d) .numNulls(6) .numDVs(2) .hll(1d, 3d, 3d) .kll(1d, 3d, 3d) .build(); assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData()); }
// Creates an unbounded counting source that emits consecutive longs starting
// at {@code start} (stride 1), stamped with the current wall-clock time.
static UnboundedCountingSource createUnboundedFrom(long start) {
    return new UnboundedCountingSource(start, 1, 1L, Duration.ZERO, new NowTimestampFn());
}
// Rate-limited unbounded source: all elements arrive with identical
// value-derived timestamps, and the total run time respects the rate
// (numElements * period) as a lower bound.
@Test @Category(NeedsRunner.class) public void testUnboundedSourceWithRate() { Duration period = Duration.millis(5); long numElements = 1000L; PCollection<Long> input = p.apply( Read.from( CountingSource.createUnboundedFrom(0) .withTimestampFn(new ValueAsTimestampFn()) .withRate(1, period)) .withMaxNumRecords(numElements)); addCountingAsserts(input, numElements); PCollection<Long> diffs = input .apply("TimestampDiff", ParDo.of(new ElementValueDiff())) .apply("DistinctTimestamps", Distinct.create()); // This assert also confirms that diffs only has one unique value. PAssert.thatSingleton(diffs).isEqualTo(0L); Instant started = Instant.now(); p.run(); Instant finished = Instant.now(); Duration expectedDuration = period.multipliedBy((int) numElements); assertThat(started.plus(expectedDuration).isBefore(finished), is(true)); }
/**
 * Callback fired when a workflow reaches a terminal state: traces the
 * transition and increments the listener-callback counter, tagged with this
 * callback's name and the workflow's terminal status.
 */
@Override
public void onWorkflowTerminated(Workflow workflow) {
    LOG.trace(
        "Workflow {} is terminated with status {}", workflow.getWorkflowId(), workflow.getStatus());
    final String terminalStatus = workflow.getStatus().name();
    metrics.counter(
        MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
        getClass(),
        TYPE_TAG,
        "onWorkflowTerminated",
        MetricConstants.STATUS_TAG,
        terminalStatus);
}
// A TIMED_OUT workflow must bump the status-listener callback counter with
// type=onWorkflowTerminated and status=TIMED_OUT.
@Test
public void testWorkflowTimedOut() {
    when(workflow.getStatus()).thenReturn(Workflow.WorkflowStatus.TIMED_OUT);
    statusListener.onWorkflowTerminated(workflow);
    Assert.assertEquals(
        1L,
        metricRepo
            .getCounter(
                MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
                MaestroWorkflowStatusListener.class,
                "type",
                "onWorkflowTerminated",
                "status",
                "TIMED_OUT")
            .count());
}
// Handles an ADD_PARTITION_TO_TXN command: verifies the connection state and
// txn ownership, registers the produced partitions with the transaction
// coordinator, and replies with success or a mapped client error code.
// On failure the coordinator is notified via handleOpFail.
@Override protected void handleAddPartitionToTxn(CommandAddPartitionToTxn command) { checkArgument(state == State.Connected); final TxnID txnID = new TxnID(command.getTxnidMostBits(), command.getTxnidLeastBits()); final TransactionCoordinatorID tcId = TransactionCoordinatorID.get(command.getTxnidMostBits()); final long requestId = command.getRequestId(); final List<String> partitionsList = command.getPartitionsList(); if (log.isDebugEnabled()) { partitionsList.forEach(partition -> log.debug("Receive add published partition to txn request {} " + "from {} with txnId {}, topic: [{}]", requestId, remoteAddress, txnID, partition)); } if (!checkTransactionEnableAndSendError(requestId)) { return; } TransactionMetadataStoreService transactionMetadataStoreService = service.pulsar().getTransactionMetadataStoreService(); verifyTxnOwnership(txnID) .thenCompose(isOwner -> { if (!isOwner) { return failedFutureTxnNotOwned(txnID); } return transactionMetadataStoreService .addProducedPartitionToTxn(txnID, partitionsList); }) .whenComplete((v, ex) -> { if (ex == null) { if (log.isDebugEnabled()) { log.debug("Send response success for add published partition to txn request {}", requestId); } writeAndFlush(Commands.newAddPartitionToTxnResponse(requestId, txnID.getLeastSigBits(), txnID.getMostSigBits())); } else { ex = handleTxnException(ex, BaseCommand.Type.ADD_PARTITION_TO_TXN.name(), requestId); writeAndFlush(Commands.newAddPartitionToTxnResponse(requestId, txnID.getLeastSigBits(), txnID.getMostSigBits(), BrokerServiceException.getClientErrorCode(ex), ex.getMessage())); transactionMetadataStoreService.handleOpFail(ex, tcId); } }); }
// handleAddPartitionToTxn must reject a connection that is not in the
// Connected state with an IllegalArgumentException (checkArgument fires
// before the command packet is ever dereferenced, so null is a safe input).
@Test(expectedExceptions = IllegalArgumentException.class)
public void shouldFailHandleAddPartitionToTxn() throws Exception {
    // Real method bodies on a mock so the state precondition actually runs.
    ServerCnx serverCnx = mock(ServerCnx.class, CALLS_REAL_METHODS);
    Field stateUpdater = ServerCnx.class.getDeclaredField("state");
    stateUpdater.setAccessible(true);
    stateUpdater.set(serverCnx, ServerCnx.State.Failed);
    // Fix: Mockito's any() must not be called outside stubbing/verification —
    // it pushes a matcher onto Mockito's internal stack and can corrupt later
    // tests. Pass null directly; the state check throws before it is used.
    serverCnx.handleAddPartitionToTxn(null);
}
/**
 * Appends the low-order byte of {@code b} to the buffer, growing the backing
 * array first if needed.
 */
@Override
public void write(int b) {
    // Make sure one more byte fits before appending.
    ensureAvailable(1);
    buffer[pos] = (byte) b;
    pos++;
}
// write(byte[], int, int) must reject a null source buffer with an NPE.
@Test(expected = NullPointerException.class)
public void testWrite_whenBufferIsNull() {
    out.write(null, 0, 0);
}
/**
 * Factory for a mount target; rejects a null or empty target path.
 *
 * @param target the URI path to mount under; must be non-null and non-empty
 * @return a {@link MountTo} wrapping the validated target
 */
public static MountTo to(final String target) {
    final String checkedTarget = checkNotNullOrEmpty(target, "Target should not be null");
    return new MountTo(checkedTarget);
}
// A mount combined with a response handler must both serve the file content
// and apply the configured Content-Type header.
@Test
public void should_mount_with_other_handler() throws Exception {
    server.mount(MOUNT_DIR, to("/dir")).response(header(HttpHeaders.CONTENT_TYPE, "text/plain"));
    running(server, () -> {
        ClassicHttpResponse httpResponse = helper.getResponse(remoteUrl("/dir/dir.response"));
        String value = httpResponse.getFirstHeader(HttpHeaders.CONTENT_TYPE).getValue();
        assertThat(value, is("text/plain"));
        String content = CharStreams.toString(new InputStreamReader(httpResponse.getEntity().getContent()));
        assertThat(content, is("response from dir"));
    });
}
// Validates the shape required of a @SelfValidation-annotated method:
// void return, exactly one ViolationCollector parameter, public visibility.
// Invalid methods are logged and skipped (returns false) rather than failing.
boolean isMethodCorrect(ResolvedMethod m) {
    // NOTE(review): ResolvedMethod#getReturnType() appears to yield null for
    // void methods (classmate semantics) — confirm; non-null means "not void".
    if (m.getReturnType()!=null) {
        log.error("The method {} is annotated with @SelfValidation but does not return void. It is ignored", m.getRawMember());
        return false;
    } else if (m.getArgumentCount() != 1 || !m.getArgumentType(0).getErasedType().equals(ViolationCollector.class)) {
        log.error("The method {} is annotated with @SelfValidation but does not have a single parameter of type {}", m.getRawMember(), ViolationCollector.class);
        return false;
    } else if (!m.isPublic()) {
        log.error("The method {} is annotated with @SelfValidation but is not public", m.getRawMember());
        return false;
    }
    return true;
}
// A @SelfValidation method with parameters beyond the ViolationCollector must
// be rejected and the rejection logged with the expected message.
@Test
@SuppressWarnings("Slf4jFormatShouldBeConst")
void additionalParametersAreNotAccepted() throws NoSuchMethodException {
    assertThat(selfValidatingValidator.isMethodCorrect(
        getMethod("validateFailAdditionalParameters", ViolationCollector.class, int.class)))
        .isFalse();
    verify(log).error("The method {} is annotated with @SelfValidation but does not have a single parameter of type {}",
        InvalidExample.class.getMethod("validateFailAdditionalParameters", ViolationCollector.class, int.class),
        ViolationCollector.class);
}
// Translates a private member address to its public counterpart.
// Returns the input unchanged when public addressing is disabled; on a cache
// miss the mapping is refreshed once via getAddresses before giving up
// (returning null for a still-unknown address).
@Override
public Address translate(Address address) throws Exception {
    if (address == null) {
        return null;
    }
    // if it is inside cloud, return private address otherwise we need to translate it.
    if (!usePublic) {
        return address;
    }
    Address publicAddress = privateToPublic.get(address);
    if (publicAddress != null) {
        return publicAddress;
    }
    // Cache miss: the mapping may be stale — re-fetch and try once more.
    privateToPublic = getAddresses.call();
    return privateToPublic.get(address);
}
// An address absent from the private->public mapping translates to null.
@Test
public void testTranslate_whenNotFound_thenReturnNull() throws Exception {
    RemoteAddressProvider provider = new RemoteAddressProvider(() -> expectedAddresses, true);
    Address notAvailableAddress = new Address("127.0.0.3", 5701);
    Address actual = provider.translate(notAvailableAddress);
    assertNull(actual);
}
// Returns the fixed column header used when printing quota listings.
public static String getHeader() {
    return QUOTA_HEADER;
}
// Pins the exact (whitespace-sensitive) quota listing header string.
@Test public void testGetHeader() { String header = " QUOTA REM_QUOTA SPACE_QUOTA " + "REM_SPACE_QUOTA "; assertEquals(header, QuotaUsage.getHeader()); }
/**
 * Joins the directory into a cluster invoker, optionally wrapping it with the
 * configured cluster interceptor/filter chain.
 *
 * @param directory        the invoker directory to join
 * @param buildFilterChain whether to wrap the joined invoker with interceptors
 * @return the (possibly wrapped) cluster invoker
 * @throws RpcException if joining fails
 */
@Override
public <T> Invoker<T> join(Directory<T> directory, boolean buildFilterChain) throws RpcException {
    final Invoker<T> joined = doJoin(directory);
    return buildFilterChain ? buildClusterInterceptors(joined) : joined;
}
// join(..., true) must wrap the cluster invoker in the filter chain:
// a ClusterFilterInvoker whose inner invoker is the callback-registration one.
@Test void testBuildClusterInvokerChain() { Map<String, String> parameters = new HashMap<>(); parameters.put(INTERFACE_KEY, DemoService.class.getName()); parameters.put("registry", "zookeeper"); parameters.put(REFERENCE_FILTER_KEY, "demo"); ServiceConfigURL url = new ServiceConfigURL( "registry", "127.0.0.1", 2181, "org.apache.dubbo.registry.RegistryService", parameters); URL consumerUrl = new ServiceConfigURL("dubbo", "127.0.0.1", 20881, DemoService.class.getName(), parameters); consumerUrl = consumerUrl.setScopeModel(ApplicationModel.defaultModel().getInternalModule()); Directory<?> directory = mock(Directory.class); when(directory.getUrl()).thenReturn(url); when(directory.getConsumerUrl()).thenReturn(consumerUrl); DemoCluster demoCluster = new DemoCluster(); Invoker<?> invoker = demoCluster.join(directory, true); Assertions.assertTrue(invoker instanceof AbstractCluster.ClusterFilterInvoker); Assertions.assertTrue( ((AbstractCluster.ClusterFilterInvoker<?>) invoker).getFilterInvoker() instanceof FilterChainBuilder.ClusterCallbackRegistrationInvoker); }
/**
 * Null-safe, case-sensitive string equality: two nulls are equal, a single
 * null is not, otherwise delegates to {@link String#equals(Object)}.
 *
 * @param s1 first string, may be null
 * @param s2 second string, may be null
 * @return true if both are null or both are equal strings
 */
public static boolean isEquals(String s1, String s2) {
    return s1 == null ? s2 == null : s1.equals(s2);
}
// Null-safety and case-sensitivity of StringUtils.isEquals.
@Test
void testIsEquals() throws Exception {
    // Null handling: both null are equal; a single null (either side) is not.
    assertTrue(StringUtils.isEquals(null, null));
    assertFalse(StringUtils.isEquals(null, ""));
    assertFalse(StringUtils.isEquals("", null));
    // Empty strings are equal to each other.
    assertTrue(StringUtils.isEquals("", ""));
    // Comparison is case-sensitive.
    assertTrue(StringUtils.isEquals("abc", "abc"));
    assertFalse(StringUtils.isEquals("abc", "ABC"));
}
// Parses a stream-management <failed/> element: collects an optional stanza
// error condition and any error <text/> children (with xml:lang), stopping at
// the matching </failed> end tag. The parser must be positioned at the start
// tag on entry and is left at the end tag on return.
public static Failed failed(XmlPullParser parser) throws XmlPullParserException, IOException { ParserUtils.assertAtStartTag(parser); String name; StanzaError.Condition condition = null; List<StanzaErrorTextElement> textElements = new ArrayList<>(4); outerloop: while (true) { XmlPullParser.Event event = parser.next(); switch (event) { case START_ELEMENT: name = parser.getName(); String namespace = parser.getNamespace(); if (StanzaError.ERROR_CONDITION_AND_TEXT_NAMESPACE.equals(namespace)) { if (name.equals(AbstractTextElement.ELEMENT)) { String lang = ParserUtils.getXmlLang(parser); String text = parser.nextText(); StanzaErrorTextElement stanzaErrorTextElement = new StanzaErrorTextElement(text, lang); textElements.add(stanzaErrorTextElement); } else { condition = StanzaError.Condition.fromString(name); } } break; case END_ELEMENT: name = parser.getName(); if (Failed.ELEMENT.equals(name)) { break outerloop; } break; default: // Catch all for incomplete switch (MissingCasesInEnumSwitch) statement. break; } } ParserUtils.assertAtEndTag(parser); return new Failed(condition, textElements); }
// A bare <failed xmlns='urn:xmpp:sm:3'/> must parse to a Failed packet with
// no stanza error condition.
// NOTE(review): assertTrue(x == null) would read better as assertNull(x) —
// only safe if assertNull is already statically imported in this file; confirm.
@Test
public void testParseFailed() throws Exception {
    String failedStanza = XMLBuilder.create("failed")
        .a("xmlns", "urn:xmpp:sm:3")
        .asString(outputProperties);
    StreamManagement.Failed failedPacket = ParseStreamManagement.failed(
        PacketParserUtils.getParserFor(failedStanza));
    assertNotNull(failedPacket);
    assertTrue(failedPacket.getStanzaErrorCondition() == null);
}
// Submits each SMPP short-message segment built from the exchange, collecting
// the returned message ids. Any submission error is wrapped in SmppException.
// On success the result message carries the id list and sent-segment count.
@Override public void execute(Exchange exchange) throws SmppException { SubmitSm[] submitSms = createSubmitSm(exchange); List<String> messageIDs = new ArrayList<>(submitSms.length); String messageID = null; for (int i = 0; i < submitSms.length; i++) { SubmitSm submitSm = submitSms[i]; messageID = null; if (log.isDebugEnabled()) { log.debug("Sending short message {} for exchange id '{}'...", i, exchange.getExchangeId()); } try { SubmitSmResult result = session.submitShortMessage( submitSm.getServiceType(), TypeOfNumber.valueOf(submitSm.getSourceAddrTon()), NumberingPlanIndicator.valueOf(submitSm.getSourceAddrNpi()), submitSm.getSourceAddr(), TypeOfNumber.valueOf(submitSm.getDestAddrTon()), NumberingPlanIndicator.valueOf(submitSm.getDestAddrNpi()), submitSm.getDestAddress(), new ESMClass(submitSm.getEsmClass()), submitSm.getProtocolId(), submitSm.getPriorityFlag(), submitSm.getScheduleDeliveryTime(), submitSm.getValidityPeriod(), new RegisteredDelivery(submitSm.getRegisteredDelivery()), submitSm.getReplaceIfPresent(), DataCodings.newInstance(submitSm.getDataCoding()), (byte) 0, submitSm.getShortMessage(), submitSm.getOptionalParameters()); if (result != null) { messageID = result.getMessageId(); } } catch (Exception e) { throw new SmppException(e); } if (messageID != null) { messageIDs.add(messageID); } } if (log.isDebugEnabled()) { log.debug("Sent short message for exchange id '{}' and received message ids '{}'", exchange.getExchangeId(), messageIDs); } Message message = ExchangeHelper.getResultMessage(exchange); message.setHeader(SmppConstants.ID, messageIDs); message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size()); }
// A body longer than one SMS segment must be split into two UDH-prefixed
// segments, each submitted separately, yielding two message ids.
// NOTE(review): @Disabled carries no reason — add one once the cause of the
// disablement is known.
@Test @Disabled() public void executeLongBody() throws Exception { byte[] firstSM = new byte[] { 5, 0, 3, 1, 2, 1, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51 }; byte[] secondSM = new byte[] { 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48 }; Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut); exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitSm"); exchange.getIn().setHeader(SmppConstants.ID, "1"); exchange.getIn().setBody( "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901"); when(session.submitShortMessage(eq("CMT"), eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"), eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1717"), eq(new ESMClass()), eq((byte) 0), eq((byte) 1), (String) isNull(), (String) isNull(), eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)), eq(ReplaceIfPresentFlag.DEFAULT.value()), eq(DataCodings.newInstance((byte) 0)), eq((byte) 0), eq(firstSM))) .thenReturn(new SubmitSmResult(new MessageId("1"), null)); when(session.submitShortMessage(eq("CMT"), eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"), eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1717"), eq(new ESMClass()), eq((byte) 0), eq((byte) 1), (String) isNull(),
(String) isNull(), eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)), eq(ReplaceIfPresentFlag.DEFAULT.value()), eq(DataCodings.newInstance((byte) 0)), eq((byte) 0), eq(secondSM))) .thenReturn(new SubmitSmResult(new MessageId("2"), null)); command.execute(exchange); assertEquals(Arrays.asList("1", "2"), exchange.getMessage().getHeader(SmppConstants.ID)); assertEquals(2, exchange.getMessage().getHeader(SmppConstants.SENT_MESSAGE_COUNT)); }
/**
 * Looks up a flavor by name. Explicitly configured flavors take precedence;
 * any other name is interpreted as a legacy resource-spec name and converted
 * into a synthetic flavor on the fly.
 *
 * @param name configured flavor name or legacy resource-spec name
 * @return the resolved flavor (always present)
 */
public Optional<Flavor> getFlavor(String name) {
    if (configuredFlavors.containsKey(name)) {
        return Optional.of(configuredFlavors.get(name));
    }
    final NodeResources nodeResources = NodeResources.fromLegacyName(name);
    return Optional.of(new Flavor(nodeResources));
}
// Parses a FlavorsConfig with two flavors and checks cost plus the
// cpuSpeedup-adjusted vcpu (10 cores * 1.3 speedup = 13).
@Test
void testConfigParsing() {
    FlavorsConfig.Builder builder = new FlavorsConfig.Builder();
    List<FlavorsConfig.Flavor.Builder> flavorBuilderList = new ArrayList<>();
    {
        FlavorsConfig.Flavor.Builder flavorBuilder = new FlavorsConfig.Flavor.Builder();
        flavorBuilder.name("strawberry").cost(2);
        flavorBuilderList.add(flavorBuilder);
    }
    {
        FlavorsConfig.Flavor.Builder flavorBuilder = new FlavorsConfig.Flavor.Builder();
        flavorBuilder.minCpuCores(10);
        flavorBuilder.cpuSpeedup(1.3);
        flavorBuilder.name("banana").cost(3);
        flavorBuilderList.add(flavorBuilder);
    }
    builder.flavor(flavorBuilderList);
    FlavorsConfig config = new FlavorsConfig(builder);
    NodeFlavors nodeFlavors = new NodeFlavors(config);
    Flavor banana = nodeFlavors.getFlavor("banana").get();
    assertEquals(3, banana.cost());
    // Fix: the same vcpu assertion appeared twice; keep the single
    // message-carrying variant.
    assertEquals(13, banana.resources().vcpu(), delta, "10 * 1.3");
}
// Advances the superstep counter and wakes every thread blocked on the
// monitor. Throws IllegalStateException once the latch has been terminated.
public void triggerNextSuperstep() {
    synchronized (monitor) {
        if (terminated) {
            throw new IllegalStateException("Already terminated.");
        }
        superstepNumber++;
        monitor.notifyAll();
    }
}
// Waiting for a superstep that has already passed must raise
// IllegalStateException in the waiter; a watchdog guards against hangs.
@Test public void testWaitIncorrectAsync() { try { SuperstepKickoffLatch latch = new SuperstepKickoffLatch(); latch.triggerNextSuperstep(); latch.triggerNextSuperstep(); Waiter w = new Waiter(latch, 2); Thread waiter = new Thread(w); waiter.setDaemon(true); waiter.start(); WatchDog wd = new WatchDog(waiter, 2000); wd.start(); Thread.sleep(100); wd.join(); if (wd.getError() != null) { throw wd.getError(); } if (w.getError() != null) { if (!(w.getError() instanceof IllegalStateException)) { throw new Exception("wrong exception type " + w.getError()); } } else { Assert.fail("should cause exception"); } } catch (Throwable t) { t.printStackTrace(); Assert.fail("Error: " + t.getMessage()); } }
/**
 * Creates a pipeline data source configuration for the given type constant.
 *
 * @param type  data source configuration type
 * @param param serialized configuration parameter
 * @return the matching configuration instance
 * @throws UnsupportedSQLOperationException for an unrecognized type
 */
public static PipelineDataSourceConfiguration newInstance(final String type, final String param) {
    if (type.equals(StandardPipelineDataSourceConfiguration.TYPE)) {
        return new StandardPipelineDataSourceConfiguration(param);
    }
    if (type.equals(ShardingSpherePipelineDataSourceConfiguration.TYPE)) {
        return new ShardingSpherePipelineDataSourceConfiguration(param);
    }
    throw new UnsupportedSQLOperationException(String.format("Unsupported data source type `%s`", type));
}
// An unknown data source type must raise UnsupportedSQLOperationException.
@Test
void assertNewInstanceForUnsupportedType() {
    assertThrows(UnsupportedSQLOperationException.class, () -> PipelineDataSourceConfigurationFactory.newInstance("Invalid", ""));
}
// String form of this path; delegates to the URI representation.
@Override
public String toString() {
    return toUri().toString();
}
// NOTE(review): despite its name this test asserts path(), not toString();
// consider also asserting toString() once its URI format is confirmed.
@Test
public void testToString() {
    LocalUri hpath = LocalUri.Root.append("example").append("some-id").append("instances").append("some-instance-id");
    assertEquals("/example/some-id/instances/some-instance-id", hpath.path());
}
// GET /{id}: returns the device key encoded as JSON, or a 404 (via
// nullIsNotFound) when the id is unknown.
@GET
@Path("{id}")
@Produces(MediaType.APPLICATION_JSON)
public Response getDeviceKey(@PathParam("id") String id) {
    DeviceKey deviceKey = nullIsNotFound(get(DeviceKeyService.class).getDeviceKey(DeviceKeyId.deviceKeyId(id)),
        DEVICE_KEY_NOT_FOUND);
    return ok(codec(DeviceKey.class).encode(deviceKey, this)).build();
}
// GET of an existing key id must return a JSON object matching the stored key.
@Test
public void testGetDeviceKeyById() {
    deviceKeySet.add(deviceKey1);
    expect(mockDeviceKeyService.getDeviceKey(DeviceKeyId.deviceKeyId(deviceKeyId1)))
        .andReturn(deviceKey1)
        .anyTimes();
    replay(mockDeviceKeyService);
    WebTarget wt = target();
    String response = wt.path("keys/" + deviceKeyId1).request().get(String.class);
    final JsonObject result = Json.parse(response).asObject();
    assertThat(result, notNullValue());
    assertThat(result, matchesDeviceKey(deviceKey1));
    verify(mockDeviceKeyService);
}
// Returns the Streams config augmented with the application id under the
// metrics resource-label prefix; the result is unmodifiable.
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) {
    final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps());
    map.put(
        MetricCollectors.RESOURCE_LABEL_PREFIX + StreamsConfig.APPLICATION_ID_CONFIG,
        applicationId
    );
    // Streams client metrics aren't used in Confluent deployment
    possiblyConfigureConfluentTelemetry(map);
    return Collections.unmodifiableMap(map);
}
// A "ksql.streams.topic."-prefixed setting must surface only under the Streams
// topic prefix, never as a bare topic config key.
@Test
public void shouldSetStreamsConfigKsqlTopicPrefixedProperties() {
    final KsqlConfig ksqlConfig = new KsqlConfig(
        Collections.singletonMap(
            KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.TOPIC_PREFIX + TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, 2));
    assertThat(ksqlConfig.getKsqlStreamConfigProps()
        .get(StreamsConfig.TOPIC_PREFIX + TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG), equalTo(2));
    assertThat(ksqlConfig.getKsqlStreamConfigProps()
        .get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG), is(nullValue()));
}
/**
 * Null-safe membership test: false when either the collection or the element
 * is null, otherwise delegates to {@link Collection#contains(Object)}.
 *
 * @param collection collection to search, may be null
 * @param element    element to look for, may be null
 * @return true only when both arguments are non-null and the element is present
 */
public static <T> boolean containsElement(@Nullable Collection<T> collection, @Nullable T element) {
    if (collection == null || element == null) {
        return false;
    }
    return collection.contains(element);
}
// Null-safety and membership behavior of PublicApiUtils.containsElement.
@Test
void containsElement() {
    assertThat(PublicApiUtils.containsElement(null, null)).isFalse();
    assertThat(PublicApiUtils.containsElement(null, "test")).isFalse();
    assertThat(PublicApiUtils.containsElement(List.of("test"), null)).isFalse();
    assertThat(PublicApiUtils.containsElement(List.of("test"), "test")).isTrue();
    assertThat(PublicApiUtils.containsElement(List.of("test"), "test1")).isFalse();
}
/**
 * Merges the given collections, optionally sorts the merged list with the
 * comparator (skipped when null), and returns the requested page.
 *
 * @param pageNo     page number to return
 * @param pageSize   number of elements per page
 * @param comparator sort order for the merged elements, may be null
 * @param colls      collections to merge
 * @return the elements of the requested page
 */
@SafeVarargs
public static <T> List<T> sortPageAll(int pageNo, int pageSize, Comparator<T> comparator, Collection<T>... colls) {
    final List<T> merged = new ArrayList<>(pageNo * pageSize);
    for (final Collection<T> coll : colls) {
        merged.addAll(coll);
    }
    if (comparator != null) {
        merged.sort(comparator);
    }
    return page(pageNo, pageSize, merged);
}
// Page 1 (size 5) of the reverse-sorted merge of 1..9 is the tail [4, 3, 2, 1].
// NOTE(review): the expectation implies zero-based page numbering — confirm.
@Test
public void sortPageAllTest() {
    final List<Integer> list = CollUtil.newArrayList(1, 2, 3, 4, 5, 6, 7, 8, 9);
    final List<Integer> sortPageAll = CollUtil.sortPageAll(1, 5, Comparator.reverseOrder(), list);
    assertEquals(CollUtil.newArrayList(4, 3, 2, 1), sortPageAll);
}
// Maps a GCS bucket ACL to a POSIX-style owner mode for the given user id by
// OR-ing mode bits per matching grant: READ -> r-x, WRITE -> -w-,
// FULL_CONTROL -> rwx. Grants not matching the user contribute nothing.
public static short translateBucketAcl(GSAccessControlList acl, String userId) {
    short mode = (short) 0;
    for (GrantAndPermission gp : acl.getGrantAndPermissions()) {
        Permission perm = gp.getPermission();
        GranteeInterface grantee = gp.getGrantee();
        if (perm.equals(Permission.PERMISSION_READ)) {
            if (isUserIdInGrantee(grantee, userId)) {
                // If the bucket is readable by the user, add r and x to the owner mode.
                mode |= (short) 0500;
            }
        } else if (perm.equals(Permission.PERMISSION_WRITE)) {
            if (isUserIdInGrantee(grantee, userId)) {
                // If the bucket is writable by the user, +w to the owner mode.
                mode |= (short) 0200;
            }
        } else if (perm.equals(Permission.PERMISSION_FULL_CONTROL)) {
            if (isUserIdInGrantee(grantee, userId)) {
                // If the user has full control to the bucket, +rwx to the owner mode.
                mode |= (short) 0700;
            }
        }
    }
    return mode;
}
// READ granted to AllUsers must map to owner mode 0500 for any caller identity.
@Test
public void translateEveryoneReadPermission() {
    GroupGrantee allUsersGrantee = GroupGrantee.ALL_USERS;
    mAcl.grantPermission(allUsersGrantee, Permission.PERMISSION_READ);
    assertEquals((short) 0500, GCSUtils.translateBucketAcl(mAcl, ID));
    assertEquals((short) 0500, GCSUtils.translateBucketAcl(mAcl, OTHER_ID));
}
// Detects the from/to Kafka versions for this reconciliation (from the pods
// and the Kafka resource) and computes the required version change.
public Future<KafkaVersionChange> reconcile() {
    return getPods()
        .compose(this::detectToAndFromVersions)
        .compose(i -> prepareVersionChange());
}
// A cluster whose pods already run the default version and whose Kafka CR
// declares no explicit version must resolve to a no-op change at the default
// version (from == to == default, default metadata version).
@Test
public void testExistingClusterWithoutVersions(VertxTestContext context) {
    VersionChangeCreator vcc = mockVersionChangeCreator(
        mockKafka(null, VERSIONS.defaultVersion().metadataVersion(), null),
        mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
    );
    Checkpoint async = context.checkpoint();
    vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
        assertThat(c.from(), is(VERSIONS.defaultVersion()));
        assertThat(c.to(), is(VERSIONS.defaultVersion()));
        assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion()));
        async.flag();
    })));
}
// Narrows the generic statement accessor to the SHOW CREATE TABLE type.
@Override
public ShowCreateTableStatement getSqlStatement() {
    return (ShowCreateTableStatement) super.getSqlStatement();
}
// A ShowCreateTableStatementContext must expose its statement and the single
// table name parsed from the statement's table segment.
@Test
void assertNewInstance() {
    MySQLShowCreateTableStatement sqlStatement = new MySQLShowCreateTableStatement();
    sqlStatement.setTable(new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("tbl_1"))));
    ShowCreateTableStatementContext actual = new ShowCreateTableStatementContext(sqlStatement, DefaultDatabase.LOGIC_NAME);
    assertThat(actual, instanceOf(CommonSQLStatementContext.class));
    assertThat(actual.getSqlStatement(), is(sqlStatement));
    assertThat(actual.getTablesContext().getSimpleTables().stream().map(each -> each.getTableName().getIdentifier().getValue()).collect(Collectors.toList()), is(Collections.singletonList("tbl_1")));
}
// Resets a theme's setting ConfigMap back to the default values declared in
// its Setting definition. Completes empty when the theme is missing or has
// no setting name.
@Override
public Mono<ConfigMap> resetSettingConfig(String name) {
    return client.fetch(Theme.class, name)
        .filter(theme -> StringUtils.isNotBlank(theme.getSpec().getSettingName()))
        .flatMap(theme -> {
            String configMapName = theme.getSpec().getConfigMapName();
            String settingName = theme.getSpec().getSettingName();
            return client.fetch(Setting.class, settingName)
                .map(SettingUtils::settingDefinedDefaultValueMap)
                .flatMap(data -> updateConfigMapData(configMapName, data));
        });
}
// Resetting a theme's config must rebuild the ConfigMap data from the Setting
// form-schema defaults and persist it via client.update exactly once.
@Test void resetSettingConfig() { Theme theme = new Theme(); theme.setMetadata(new Metadata()); theme.getMetadata().setName("fake-theme"); theme.setSpec(new Theme.ThemeSpec()); theme.getSpec().setSettingName("fake-setting"); theme.getSpec().setConfigMapName("fake-config"); theme.getSpec().setDisplayName("Hello"); when(client.fetch(Theme.class, "fake-theme")) .thenReturn(Mono.just(theme)); Setting setting = new Setting(); setting.setMetadata(new Metadata()); setting.getMetadata().setName("fake-setting"); setting.setSpec(new Setting.SettingSpec()); var formSchemaItem = Map.of("name", "email", "value", "example@exmple.com"); Setting.SettingForm settingForm = new Setting.SettingForm(); settingForm.setGroup("basic"); settingForm.setFormSchema(List.of(formSchemaItem)); setting.getSpec().setForms(List.of(settingForm)); when(client.fetch(eq(Setting.class), eq("fake-setting"))) .thenReturn(Mono.just(setting)); ConfigMap configMap = new ConfigMap(); configMap.setMetadata(new Metadata()); configMap.getMetadata().setName("fake-config"); when(client.fetch(eq(ConfigMap.class), eq("fake-config"))) .thenReturn(Mono.just(configMap)); when(client.update(any(ConfigMap.class))) .thenAnswer((Answer<Mono<ConfigMap>>) invocation -> { ConfigMap argument = invocation.getArgument(0); JSONAssert.assertEquals(""" { "data": { "basic": "{\\"email\\":\\"example@exmple.com\\"}" }, "apiVersion": "v1alpha1", "kind": "ConfigMap", "metadata": { "name": "fake-config" } } """, JsonUtils.objectToJson(argument), true); return Mono.just(invocation.getArgument(0)); }); themeService.resetSettingConfig("fake-theme") .as(StepVerifier::create) .consumeNextWith(next -> { assertThat(next).isNotNull(); }) .verifyComplete(); verify(client, times(1)) .fetch(eq(Setting.class), eq(setting.getMetadata().getName())); verify(client, times(1)).fetch(eq(ConfigMap.class), eq("fake-config")); verify(client, times(1)).update(any(ConfigMap.class)); }
// Base directory into which fetched user artifacts are placed.
public String getUserArtifactsBaseDir() {
    return flinkConfig.getString(ArtifactFetchOptions.BASE_DIR);
}
// BASE_DIR set in the Flink config must be returned verbatim.
@Test
public void testGetUserArtifactsBaseDir() {
    flinkConfig.set(ArtifactFetchOptions.BASE_DIR, "/opt/job/artifacts");
    assertThat(kubernetesJobManagerParameters.getUserArtifactsBaseDir())
        .isEqualTo("/opt/job/artifacts");
}
// Builds the executor for a PostgreSQL command packet. Plain packets map
// 1:1 to an executor; aggregated packets become an aggregated executor —
// using the batched-statement path when the aggregate contains batches,
// otherwise one executor per inner packet.
public static CommandExecutor newInstance(final PostgreSQLCommandPacketType commandPacketType, final PostgreSQLCommandPacket commandPacket, final ConnectionSession connectionSession, final PortalContext portalContext) throws SQLException { if (commandPacket instanceof SQLReceivedPacket) { log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL()); } else { log.debug("Execute packet type: {}", commandPacketType); } if (!(commandPacket instanceof PostgreSQLAggregatedCommandPacket)) { return getCommandExecutor(commandPacketType, commandPacket, connectionSession, portalContext); } PostgreSQLAggregatedCommandPacket aggregatedCommandPacket = (PostgreSQLAggregatedCommandPacket) commandPacket; if (aggregatedCommandPacket.isContainsBatchedStatements()) { return new PostgreSQLAggregatedCommandExecutor(getExecutorsOfAggregatedBatchedStatements(aggregatedCommandPacket, connectionSession, portalContext)); } List<CommandExecutor> result = new ArrayList<>(aggregatedCommandPacket.getPackets().size()); for (PostgreSQLCommandPacket each : aggregatedCommandPacket.getPackets()) { result.add(getCommandExecutor((PostgreSQLCommandPacketType) each.getIdentifier(), each, connectionSession, portalContext)); } return new PostgreSQLAggregatedCommandExecutor(result); }
// Each PostgreSQL command packet type must map to its dedicated executor class.
@Test void assertNewInstance() throws SQLException { Collection<InputOutput> inputOutputs = Arrays.asList( new InputOutput(PostgreSQLCommandPacketType.SIMPLE_QUERY, PostgreSQLComQueryPacket.class, PostgreSQLComQueryExecutor.class), new InputOutput(PostgreSQLCommandPacketType.PARSE_COMMAND, PostgreSQLComParsePacket.class, PostgreSQLComParseExecutor.class), new InputOutput(PostgreSQLCommandPacketType.BIND_COMMAND, PostgreSQLComBindPacket.class, PostgreSQLComBindExecutor.class), new InputOutput(PostgreSQLCommandPacketType.DESCRIBE_COMMAND, PostgreSQLComDescribePacket.class, PostgreSQLComDescribeExecutor.class), new InputOutput(PostgreSQLCommandPacketType.EXECUTE_COMMAND, PostgreSQLComExecutePacket.class, PostgreSQLComExecuteExecutor.class), new InputOutput(PostgreSQLCommandPacketType.SYNC_COMMAND, PostgreSQLComSyncPacket.class, PostgreSQLComSyncExecutor.class), new InputOutput(PostgreSQLCommandPacketType.CLOSE_COMMAND, PostgreSQLComClosePacket.class, PostgreSQLComCloseExecutor.class), new InputOutput(PostgreSQLCommandPacketType.TERMINATE, PostgreSQLComTerminationPacket.class, PostgreSQLComTerminationExecutor.class), new InputOutput(PostgreSQLCommandPacketType.FLUSH_COMMAND, PostgreSQLComFlushPacket.class, PostgreSQLComFlushExecutor.class)); for (InputOutput each : inputOutputs) { Class<? extends PostgreSQLCommandPacket> commandPacketClass = each.getCommandPacketClass(); if (null == commandPacketClass) { commandPacketClass = PostgreSQLCommandPacket.class; } PostgreSQLCommandPacket packet = preparePacket(commandPacketClass); CommandExecutor actual = PostgreSQLCommandExecutorFactory.newInstance(each.getCommandPacketType(), packet, connectionSession, portalContext); assertThat(actual, instanceOf(each.getResultClass())); } }
// Applies the optional endpoint override (e.g. for S3-compatible stores) to
// the client builder; no-op when no endpoint was configured.
public <T extends S3ClientBuilder> void applyEndpointConfigurations(T builder) {
    if (endpoint != null) {
        builder.endpointOverride(URI.create(endpoint));
    }
}
// Confirms that a configured S3 endpoint is pushed onto the client builder as an endpoint override.
@Test public void testApplyEndpointConfiguration() { Map<String, String> properties = Maps.newHashMap(); properties.put(S3FileIOProperties.ENDPOINT, "endpoint"); S3FileIOProperties s3FileIOProperties = new S3FileIOProperties(properties); S3ClientBuilder mockS3ClientBuilder = Mockito.mock(S3ClientBuilder.class); s3FileIOProperties.applyEndpointConfigurations(mockS3ClientBuilder); Mockito.verify(mockS3ClientBuilder).endpointOverride(Mockito.any(URI.class)); }
// Wires up the config server RPC front end: worker-thread pool (sized from config, or a
// derived default when numRpcThreads is 0), delayed-response machinery, file distribution
// handlers, and the metric updater. The work queue is bounded by maxgetconfigclients.
@Inject public RpcServer(ConfigserverConfig config, SuperModelRequestHandler superModelRequestHandler, MetricUpdaterFactory metrics, HostRegistry hostRegistry, FileServer fileServer, RpcAuthorizer rpcAuthorizer, RpcRequestHandlerProvider handlerProvider) { this.superModelRequestHandler = superModelRequestHandler; metricUpdaterFactory = metrics; supervisor.setMaxOutputBufferSize(config.maxoutputbuffersize()); this.metrics = metrics.getOrCreateMetricUpdater(Map.of()); BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(config.maxgetconfigclients()); int rpcWorkerThreads = (config.numRpcThreads() == 0) ? threadsToUse() : config.numRpcThreads(); executorService = new ThreadPoolExecutor(rpcWorkerThreads, rpcWorkerThreads, 0, TimeUnit.SECONDS, workQueue, ThreadFactoryFactory.getDaemonThreadFactory(THREADPOOL_NAME)); delayedConfigResponses = new DelayedConfigResponses(this, config.numDelayedResponseThreads()); spec = new Spec(null, config.rpcport()); this.hostRegistry = hostRegistry; this.useRequestVersion = config.useVespaVersionInRequest(); this.hostedVespa = config.hostedVespa(); this.canReturnEmptySentinelConfig = config.canReturnEmptySentinelConfig(); this.fileServer = fileServer; this.rpcAuthorizer = rpcAuthorizer; downloader = fileServer.downloader(); handlerProvider.setInstance(this); setUpFileDistributionHandlers(); }

// End-to-end smoke test: deploys an application, then exercises several RPC paths in sequence.
@Test public void testRpcServer() throws IOException, SAXException, InterruptedException { try (RpcTester tester = new RpcTester(applicationId, temporaryFolder)) { ApplicationRepository applicationRepository = tester.applicationRepository(); applicationRepository.deploy(testApp, new PrepareParams.Builder().applicationId(applicationId).build()); testPrintStatistics(tester); testGetConfig(tester); testEnabled(tester); testApplicationNotLoadedErrorWhenAppDeleted(tester); } }
// Convenience overload: fetches the URL with null values for the two optional arguments
// (presumably conditional-request fields such as last-modified/etag — confirm with the 3-arg overload).
public HttpResult getBinary(String url) throws IOException, NotModifiedException { return getBinary(url, null, null); }

// Verifies that an oversized response without a Content-Length header is rejected with an IOException.
@Test void largeFeedWithoutContentLengthHeader() { byte[] bytes = new byte[100000]; Arrays.fill(bytes, (byte) 1); this.mockServerClient.when(HttpRequest.request().withMethod("GET")) .respond(HttpResponse.response() .withBody(bytes) .withConnectionOptions(ConnectionOptions.connectionOptions().withSuppressContentLengthHeader(true))); IOException e = Assertions.assertThrows(IOException.class, () -> getter.getBinary(this.feedUrl)); Assertions.assertEquals("Response size exceeds the maximum allowed size (10000 bytes)", e.getMessage()); }
// Builds the default rule for a context path: the rule name is the context path itself and
// the match path is the decorated form of it.
protected RuleDTO buildContextPathDefaultRuleDTO(final String selectorId, final MetaDataRegisterDTO metaDataDTO, final String ruleHandler) { String contextPath = metaDataDTO.getContextPath(); return buildRuleDTO(selectorId, ruleHandler, contextPath, PathUtils.decoratorPath(contextPath)); }

// Checks the rule built from a context path: name, enabled/loged flags, sort order and handle.
@Test public void testBuildContextPathDefaultRuleDTO() { MetaDataRegisterDTO metaDataRegisterDTO = MetaDataRegisterDTO.builder().build(); metaDataRegisterDTO.setContextPath("Context_Path"); RuleDTO ruleDTO = abstractShenyuClientRegisterService.buildContextPathDefaultRuleDTO("Selector_Id", metaDataRegisterDTO, "Rule_Handler"); assertEquals("Selector_Id", ruleDTO.getSelectorId()); assertEquals("Context_Path", ruleDTO.getName()); assertTrue(ruleDTO.getEnabled()); assertTrue(ruleDTO.getLoged()); assertEquals(1, ruleDTO.getSort()); assertEquals("Rule_Handler", ruleDTO.getHandle()); }
// Chooses the transfer action for a copy transfer. Resume forces comparison; reload and default
// actions come from preferences. When the action is "callback", the user is prompted only for
// roots whose mapped target already exists (empty existing directories are skipped); if nothing
// exists yet, overwrite is used without prompting.
@Override public TransferAction action(final Session<?> source, final Session<?> destination, boolean resumeRequested, boolean reloadRequested, final TransferPrompt prompt, final ListProgressListener listener) throws BackgroundException { if(log.isDebugEnabled()) { log.debug(String.format("Find transfer action with prompt %s", prompt)); } if(resumeRequested) { return TransferAction.comparison; } final TransferAction action; if(reloadRequested) { action = TransferAction.forName( PreferencesFactory.get().getProperty("queue.copy.reload.action")); } else { // Use default action = TransferAction.forName( PreferencesFactory.get().getProperty("queue.copy.action")); } if(action.equals(TransferAction.callback)) { for(TransferItem upload : roots) { final Path copy = mapping.get(upload.remote); final Find find = destination.getFeature(Find.class); if(find.find(copy)) { // Found remote file if(copy.isDirectory()) { // List files in target directory if(this.list(destination, copy, null, listener).isEmpty()) { // Do not prompt for existing empty directories continue; } } // Prompt user to choose a filter return prompt.prompt(upload); } } // No files exist yet therefore it is most straightforward to use the overwrite action return TransferAction.overwrite; } return action; }

// Verifies that a reload request resolves to the comparison action via the prompt.
@Test public void testActionPrompt() throws Exception { final Path test = new Path("t", EnumSet.of(Path.Type.file)); CopyTransfer t = new CopyTransfer(new Host(new TestProtocol(), "t"), new Host(new TestProtocol(), "t"), Collections.singletonMap(test, new Path("d", EnumSet.of(Path.Type.file))), new BandwidthThrottle(BandwidthThrottle.UNLIMITED)); final NullSession session = new NullTransferSession(new Host(new TestProtocol(), "t")); assertEquals(TransferAction.comparison, t.action(session, session, false, true, new DisabledTransferPrompt() { @Override public TransferAction prompt(final TransferItem file) { return TransferAction.comparison; } }, new DisabledListProgressListener())); }
// Validates this logical type against the schema, then attaches it (both as the logicalType
// property and via setLogicalType) and returns the same, now-mutated, schema instance.
public Schema addToSchema(Schema schema) { validate(schema); schema.addProp(LOGICAL_TYPE_PROP, name); schema.setLogicalType(this); return schema; }

// Verifies a negative decimal scale is rejected and the invalid logical type is not attached.
@Test void decimalFailsWithNegativeScale() { final Schema schema = Schema.createFixed("aDecimal", null, null, 4); assertThrows("Should reject precision", IllegalArgumentException.class, "Invalid decimal scale: -2 (must be positive)", () -> { LogicalTypes.decimal(9, -2).addToSchema(schema); return null; }); assertNull(LogicalTypes.fromSchemaIgnoreInvalid(schema), "Invalid logical type should not be set on schema"); }
// No-op implementation: this variant never tracks a screen URL, so always returns null.
@Override public String getLastScreenUrl() { return null; }

// Confirms the no-op implementation reports no last screen URL.
@Test public void getLastScreenUrl() { Assert.assertNull(mSensorsAPI.getLastScreenUrl()); }
// Changes the file mode on Unix hosts. Silently ignored on non-Unix systems or when mask is -1
// (the sentinel for "do not change").
public void chmod(final int mask) throws IOException, InterruptedException { if (!isUnix() || mask == -1) return; act(new Chmod(mask)); }

// Round-trips several permission masks on a temp file (skipped on Windows, where chmod is a no-op).
@Test public void chmod() throws Exception { assumeFalse(Functions.isWindows()); File f = temp.newFile("file"); FilePath fp = new FilePath(f); int prevMode = fp.mode(); assertEquals(0400, chmodAndMode(fp, 0400)); assertEquals(0412, chmodAndMode(fp, 0412)); assertEquals(0777, chmodAndMode(fp, 0777)); assertEquals(prevMode, chmodAndMode(fp, prevMode)); }
// The partition values Paimon treats as null: its default-partition marker plus the literal "null".
@Override public List<String> nullPartitionValueList() { return ImmutableList.of("__DEFAULT_PARTITION__", "null"); }

// Pins the exact contents and order of the null-partition-value list.
@Test public void testInit() { PaimonPartitionKey paimonPartitionKey = new PaimonPartitionKey(); assertEquals(paimonPartitionKey.nullPartitionValueList(), ImmutableList.of("__DEFAULT_PARTITION__", "null")); }
/**
 * Reads {@code configProp} from the configuration and coerces it to an int.
 * <p>
 * Accepts an {@link Integer} value directly and parses a {@link String} value;
 * a missing property, an unparsable string, or any other type falls back to
 * {@code defaultValue}. Never throws for bad input — failures are logged.
 *
 * @param conf         service configuration to read from
 * @param configProp   property name to look up
 * @param defaultValue value returned when the property is absent or invalid
 * @return the resolved int value, or {@code defaultValue}
 */
static int getConfigValueAsInt(ServiceConfiguration conf, String configProp, int defaultValue) {
    Object value = conf.getProperty(configProp);
    if (value instanceof Integer) {
        log.info("Configuration for [{}] is [{}]", configProp, value);
        return (Integer) value;
    } else if (value instanceof String) {
        try {
            return Integer.parseInt((String) value);
        } catch (NumberFormatException numberFormatException) {
            // Fixed message: this method parses an int, not a long (previous text said "a long").
            log.error("Expected configuration for [{}] to be an int, but got [{}]. Using default value: [{}]",
                    configProp, value, defaultValue, numberFormatException);
            return defaultValue;
        }
    } else {
        log.info("Configuration for [{}] is using the default value: [{}]", configProp, defaultValue);
        return defaultValue;
    }
}
// Verifies that a non-numeric string property falls back to the supplied default value.
@Test public void testGetConfigValueAsIntegerReturnsDefaultIfNAN() { Properties props = new Properties(); props.setProperty("prop1", "non-a-number"); ServiceConfiguration config = new ServiceConfiguration(); config.setProperties(props); int actual = ConfigUtils.getConfigValueAsInt(config, "prop1", 9); assertEquals(9, actual); }
// Lookup without recording hit/miss statistics; delegates to the stats-aware overload.
@Override public @Nullable V get(Object key) { return getIfPresent(key, /* recordStats */ false); }

// Race test: starts a reload while the initial load's success callback is still running, then
// asserts both futures complete and the cache ends up holding the reloaded value.
@Test(dataProvider = "caches", groups = "isolated") @CacheSpec(population = Population.EMPTY, executor = CacheExecutor.THREADED, compute = Compute.ASYNC, stats = Stats.DISABLED) public void refresh_startReloadBeforeLoadCompletion(CacheContext context) { var beganLoadSuccess = new AtomicBoolean(); var endLoadSuccess = new CountDownLatch(1); var beganReloading = new AtomicBoolean(); var beganLoading = new AtomicBoolean(); var endReloading = new AtomicBoolean(); var endLoading = new AtomicBoolean(); StatsCounter stats = Mockito.mock(); context.ticker().setAutoIncrementStep(Duration.ofSeconds(1)); context.caffeine().recordStats(() -> stats); var asyncCache = context.buildAsync(new CacheLoader<Int, Int>() { @Override public Int load(Int key) { beganLoading.set(true); await().untilTrue(endLoading); return new Int(ThreadLocalRandom.current().nextInt()); } @Override public Int reload(Int key, Int oldValue) { beganReloading.set(true); await().untilTrue(endReloading); return new Int(ThreadLocalRandom.current().nextInt()); } }); Answer<?> answer = invocation -> { beganLoadSuccess.set(true); endLoadSuccess.await(); return null; }; doAnswer(answer).when(stats).recordLoadSuccess(anyLong()); // Start load var future1 = asyncCache.get(context.absentKey()); await().untilTrue(beganLoading); // Complete load; start load callback endLoading.set(true); await().untilTrue(beganLoadSuccess); // Start reload var refresh = asyncCache.synchronous().refresh(context.absentKey()); await().untilTrue(beganReloading); // Complete load callback endLoadSuccess.countDown(); await().untilAsserted(() -> assertThat(future1.getNumberOfDependents()).isEqualTo(0)); // Complete reload callback endReloading.set(true); await().untilAsserted(() -> assertThat(refresh.getNumberOfDependents()).isEqualTo(0)); // Assert new value await().untilAsserted(() -> assertThat(asyncCache.get(context.absentKey())).succeedsWith(refresh.get())); }
// Synchronously asks the name server at addr to delete the topic; any non-SUCCESS response code
// is surfaced as an MQClientException carrying the code and remark.
public void deleteTopicInNameServer(final String addr, final String topic, final long timeoutMillis) throws RemotingException, InterruptedException, MQClientException { DeleteTopicFromNamesrvRequestHeader requestHeader = new DeleteTopicFromNamesrvRequestHeader(); requestHeader.setTopic(topic); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.DELETE_TOPIC_IN_NAMESRV, requestHeader); RemotingCommand response = this.remotingClient.invokeSync(addr, request, timeoutMillis); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { return; } default: break; } throw new MQClientException(response.getCode(), response.getRemark()); }

// Smoke test: both deleteTopicInNameServer overloads succeed against a mocked invokeSync.
@Test public void testDeleteTopicInNameServer() throws RemotingException, InterruptedException, MQClientException, MQBrokerException { mockInvokeSync(); mqClientAPI.deleteTopicInNameServer(defaultNsAddr, defaultTopic, defaultTimeout); mqClientAPI.deleteTopicInNameServer(defaultNsAddr, clusterName, defaultTopic, defaultTimeout); }
// Creates an OSS under-file-system for the given path, requiring OSS credentials in the
// configuration; missing credentials or instantiation failures are propagated as runtime exceptions.
@Override public UnderFileSystem create(String path, UnderFileSystemConfiguration conf) { Preconditions.checkNotNull(path, "Unable to create UnderFileSystem instance:" + " URI path should not be null"); if (checkOSSCredentials(conf)) { try { return OSSUnderFileSystem.createInstance(new AlluxioURI(path), conf); } catch (Exception e) { throw Throwables.propagate(e); } } String err = "OSS Credentials not available, cannot create OSS Under File System."; throw Throwables.propagate(new IOException(err)); }

// Verifies the factory produces a non-null OSSUnderFileSystem for an OSS path.
@Test public void createInstanceWithPath() { UnderFileSystem ufs = mFactory.create(mOssPath, mConf); Assert.assertNotNull(ufs); Assert.assertTrue(ufs instanceof OSSUnderFileSystem); }
/**
 * Percent-escapes characters in a partition path segment that need escaping.
 * Returns the original string unchanged (no allocation) when nothing needs escaping.
 *
 * @throws TableException if the path is null or empty
 */
static String escapePathName(String path) {
    if (path == null || path.length() == 0) {
        throw new TableException("Path should not be null or empty: " + path);
    }
    StringBuilder escaped = null; // allocated lazily on the first char that needs escaping
    for (int idx = 0; idx < path.length(); idx++) {
        char current = path.charAt(idx);
        if (needsEscaping(current)) {
            if (escaped == null) {
                escaped = new StringBuilder(path.length() + 2);
                escaped.append(path, 0, idx); // copy the untouched prefix in one call
            }
            escapeChar(current, escaped);
        } else if (escaped != null) {
            escaped.append(current);
        }
    }
    return escaped == null ? path : escaped.toString();
}
// Round-trip test: escaping "[00:00]" yields percent-encoded output and unescaping restores it.
@Test void testEscapePathName() { String origin = "[00:00]"; String expected = "%5B00%3A00%5D"; String actual = PartitionPathUtils.escapePathName(origin); assertThat(actual).isEqualTo(expected); assertThat(PartitionPathUtils.unescapePathName(actual)).isEqualTo(origin); }
// Resolves the connector class (by name or alias) and instantiates it via the plugin machinery.
public Connector newConnector(String connectorClassOrAlias) { Class<? extends Connector> klass = connectorClass(connectorClassOrAlias); return newPlugin(klass); }

// Verifies the connector is instantiated under its plugin classloader (constructor sample recorded).
@Test public void newConnectorShouldInstantiateWithPluginClassLoader() { Connector plugin = plugins.newConnector(TestPlugin.SAMPLING_CONNECTOR.className()); assertInstanceOf(SamplingTestPlugin.class, plugin, "Cannot collect samples"); Map<String, SamplingTestPlugin> samples = ((SamplingTestPlugin) plugin).flatten(); assertTrue(samples.containsKey("<init>")); // constructor was called assertPluginClassLoaderAlwaysActive(plugin); }
// Simple accessor for the log file name; may be null if never set.
public String getLogFileName() { return logFileName; }

// Confirms a freshly constructed LogHeader has no log file name.
@Test public void getLogFileNameOutputNull() { // Arrange final LogHeader objectUnderTest = new LogHeader(0); // Act final String actual = objectUnderTest.getLogFileName(); // Assert result Assert.assertNull(actual); }
// Looks up the data source group rule by logical data source name; empty when not configured.
public Optional<ReadwriteSplittingDataSourceGroupRule> findDataSourceGroupRule(final String dataSourceName) { return Optional.ofNullable(dataSourceRuleGroups.get(dataSourceName)); }

// Verifies Groovy/inline/literal expressions in the rule configuration expand to the expected
// group name, write source, read sources and load balancer.
@Test void assertCreateReadwriteSplittingRuleWithRowValueExpressionImpl() { ReadwriteSplittingDataSourceGroupRuleConfiguration config = new ReadwriteSplittingDataSourceGroupRuleConfiguration( "<GROOVY>${['readwrite']}_ds", "<GROOVY>${['write']}_ds", Arrays.asList("<GROOVY>read_ds_${['0']}", "read_ds_${['1']}", "read_ds_2", "<LITERAL>read_ds_3"), "random"); ReadwriteSplittingRule readwriteSplittingRule = new ReadwriteSplittingRule("logic_db", new ReadwriteSplittingRuleConfiguration( Collections.singleton(config), Collections.singletonMap("random", new AlgorithmConfiguration("RANDOM", new Properties()))), mock(ComputeNodeInstanceContext.class)); Optional<ReadwriteSplittingDataSourceGroupRule> actual = readwriteSplittingRule.findDataSourceGroupRule("readwrite_ds"); assertTrue(actual.isPresent()); assertThat(actual.get().getName(), is("readwrite_ds")); assertThat(actual.get().getReadwriteSplittingGroup().getWriteDataSource(), is("write_ds")); assertThat(actual.get().getReadwriteSplittingGroup().getReadDataSources(), is(Arrays.asList("read_ds_0", "read_ds_1", "read_ds_2", "read_ds_3"))); assertThat(actual.get().getLoadBalancer().getType(), is("RANDOM")); }
// Serializes this object to JSON, wrapping Jackson failures in a RuntimeException.
// NOTE(review): an ObjectMapper is constructed on every call — ObjectMapper is thread-safe and
// could be cached as a static final field for performance.
public String toJsonString() { ObjectMapper objectMapper = new ObjectMapper(); try { return objectMapper.writeValueAsString(this); } catch (JsonProcessingException e) { throw new RuntimeException(e); } }

// Compares toJsonString output against a direct ObjectMapper serialization of the same node.
@Test void toJsonString() throws JsonProcessingException { ObjectMapper objectMapper = new ObjectMapper(); NamingServerNode node = new NamingServerNode(); Map<String,Object> map = new HashMap<>(); map.put("k","v"); node.setMetadata(map); node.setGroup("group"); node.setControl(new Node.Endpoint("1.1.1.1",888)); node.setTransaction(new Node.Endpoint("2.2.2.2",999)); System.out.println(node.toJsonString()); assertEquals(node.toJsonString(),objectMapper.writeValueAsString(node)); }
// Converts an input (plus its extractors) into a content-pack EntityV1: static fields and
// configuration become value references, extractors are encoded individually, and version
// constraints for the input are attached.
@VisibleForTesting Entity exportNativeEntity(InputWithExtractors inputWithExtractors, EntityDescriptorIds entityDescriptorIds) { final Input input = inputWithExtractors.input(); // TODO: Create independent representation of entity? final Map<String, ValueReference> staticFields = input.getStaticFields().entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, kv -> ValueReference.of(kv.getValue()))); final ReferenceMap configuration = toReferenceMap(input.getConfiguration()); final List<ExtractorEntity> extractors = inputWithExtractors.extractors().stream() .map(this::encodeExtractor) .collect(Collectors.toList()); final InputEntity inputEntity = InputEntity.create( ValueReference.of(input.getTitle()), configuration, staticFields, ValueReference.of(input.getType()), ValueReference.of(input.isGlobal()), extractors); final JsonNode data = objectMapper.convertValue(inputEntity, JsonNode.class); final Set<Constraint> constraints = versionConstraints(input); return EntityV1.builder() .id(ModelId.of(entityDescriptorIds.getOrThrow(input.getId(), ModelTypes.INPUT_V1))) .type(ModelTypes.INPUT_V1) .data(data) .constraints(ImmutableSet.copyOf(constraints)) .build(); }

// Exports a raw UDP input with no extractors and checks id, type, and the round-tripped entity data.
@Test public void exportNativeEntity() { final ImmutableMap<String, Object> fields = ImmutableMap.of( MessageInput.FIELD_TITLE, "Input Title", MessageInput.FIELD_TYPE, "org.graylog2.inputs.raw.udp.RawUDPInput", MessageInput.FIELD_CONFIGURATION, Collections.emptyMap() ); final InputImpl input = new InputImpl(fields); final ImmutableList<Extractor> extractors = ImmutableList.of(); final InputWithExtractors inputWithExtractors = InputWithExtractors.create(input, extractors); final EntityDescriptor descriptor = EntityDescriptor.create(input.getId(), ModelTypes.INPUT_V1); final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor); final Entity entity = facade.exportNativeEntity(inputWithExtractors, entityDescriptorIds); assertThat(entity).isInstanceOf(EntityV1.class); assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null))); assertThat(entity.type()).isEqualTo(ModelTypes.INPUT_V1); final EntityV1 entityV1 = (EntityV1) entity; final InputEntity inputEntity = objectMapper.convertValue(entityV1.data(), InputEntity.class); assertThat(inputEntity.title()).isEqualTo(ValueReference.of("Input Title")); assertThat(inputEntity.type()).isEqualTo(ValueReference.of("org.graylog2.inputs.raw.udp.RawUDPInput")); assertThat(inputEntity.configuration()).isEmpty(); }
// A list coder is consistent with equals exactly when its element coder is.
@Override public boolean consistentWithEquals() { return getElemCoder().consistentWithEquals(); }

// Verifies consistency-with-equals propagates from the VarInt element coder.
@Test public void testConsistentWithEquals() { ListCoder<Integer> coder = ListCoder.of(VarIntCoder.of()); assertTrue(coder.consistentWithEquals()); }
// Visitor dispatch helper: evaluates the expression against this visitor instance.
private <T> T accept(Expression<T> expr) { return expr.accept(this); }

// Checks that the constant-true expression (inline and loaded from JSON) evaluates to true.
@Test public void testTrue() throws Exception { assertThat(Expr.True.create().accept(new BooleanNumberConditionsVisitor())).isTrue(); assertThat(loadCondition("condition-true.json").accept(new BooleanNumberConditionsVisitor())) .isTrue(); }
// Extracts the max-weight limit from the way's weight tags, then also honors HGV conditional
// restrictions of the form "no @ (weight > X)". The indexOf("@") < 6 check admits only the
// "no"/"none" prefixes immediately followed by "@" (with at most a few characters between).
@Override public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) { OSMValueExtractor.extractTons(edgeId, edgeIntAccess, way, weightEncoder, MAX_WEIGHT_TAGS); // vehicle:conditional no @ (weight > 7.5) for (String restriction : HGV_RESTRICTIONS) { String value = way.getTag(restriction, ""); if (value.startsWith("no") && value.indexOf("@") < 6) { // no,none[ ]@ double dec = OSMValueExtractor.conditionalWeightToTons(value); if (!Double.isNaN(dec)) weightEncoder.setDecimal(false, edgeId, edgeIntAccess, dec); } } }

// Exercises several conditional tag spellings (no/none, with/without units, weight/maxweight)
// and asserts the parsed tonnage each time.
@Test public void testConditionalTags() { EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1); ReaderWay readerWay = new ReaderWay(1); readerWay.setTag("highway", "primary"); readerWay.setTag("hgv:conditional", "no @ (weight > 7.5)"); parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags); assertEquals(7.5, mwEnc.getDecimal(false, edgeId, edgeIntAccess), .01); edgeIntAccess = new ArrayEdgeIntAccess(1); readerWay.setTag("hgv:conditional", "none @ (weight > 10t)"); parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags); assertEquals(10, mwEnc.getDecimal(false, edgeId, edgeIntAccess), .01); edgeIntAccess = new ArrayEdgeIntAccess(1); readerWay.setTag("hgv:conditional", "no@ (weight > 7)"); parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags); assertEquals(7, mwEnc.getDecimal(false, edgeId, edgeIntAccess), .01); edgeIntAccess = new ArrayEdgeIntAccess(1); readerWay.setTag("hgv:conditional", "no @ (maxweight > 6)"); // allow different tagging parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags); assertEquals(6, mwEnc.getDecimal(false, edgeId, edgeIntAccess), .01); }
// Builds a Statement from the parse tree, first resolving the sources referenced by it.
public Statement buildStatement(final ParserRuleContext parseTree) { return build(Optional.of(getSources(parseTree)), parseTree); }

// Parses an ASSERT NOT EXISTS TOPIC statement with configs and a timeout, and checks every field.
@Test public void shouldBuildAssertNotExistsTopicWithConfigsAndTimeout() { // Given: final SingleStatementContext stmt = givenQuery("ASSERT NOT EXISTS TOPIC 'a-b-c' WITH (REPLICAS=1, partitions=1) TIMEOUT 10 SECONDS;"); // When: final AssertTopic assertTopic = (AssertTopic) builder.buildStatement(stmt); // Then: assertThat(assertTopic.getTopic(), is("a-b-c")); assertThat(assertTopic.getConfig().get("REPLICAS").getValue(), is(1)); assertThat(assertTopic.getConfig().get("PARTITIONS").getValue(), is(1)); assertThat(assertTopic.getTimeout().get().getTimeUnit(), is(TimeUnit.SECONDS)); assertThat(assertTopic.getTimeout().get().getValue(), is(10L)); assertThat(assertTopic.checkExists(), is(false)); }
// Adds a batching strategy to the registered set.
public void registerStrategy(BatchingStrategy<?, ?, ?> strategy) { _strategies.add(strategy); }

// Verifies a registered strategy stays idle when the task plan contains no batchable work.
@Test public void testNothingToDoForStrategy() { RecordingStrategy<Integer, Integer, String> strategy = new RecordingStrategy<>((key, promise) -> promise.done(String.valueOf(key)), key -> 0); _batchingSupport.registerStrategy(strategy); Task<String> task = Task.par(Task.value("0"), Task.value("1")) .map("concat", (s0, s1) -> s0 + s1); String result = runAndWait("TestBatchingSupport.testNothingToDoForStrategy", task); assertEquals(result, "01"); assertEquals(strategy.getClassifiedKeys().size(), 0); assertEquals(strategy.getExecutedBatches().size(), 0); assertEquals(strategy.getExecutedSingletons().size(), 0); }
// Wraps each column in a SortingProperty with the given sort order, preserving column order.
public static <T> List<LocalProperty<T>> sorted(Collection<T> columns, SortOrder order) { return columns.stream().map(column -> new SortingProperty<>(column, order)).collect(toImmutableList()); }

// Checks that two different sorts on the same column normalize down to the first sort only.
@Test public void testNormalizeDifferentSorts() { List<LocalProperty<String>> localProperties = builder() .sorted("a", SortOrder.ASC_NULLS_FIRST) .sorted("a", SortOrder.DESC_NULLS_LAST) .build(); assertNormalize( localProperties, Optional.of(sorted("a", SortOrder.ASC_NULLS_FIRST)), Optional.empty()); assertNormalizeAndFlatten( localProperties, sorted("a", SortOrder.ASC_NULLS_FIRST)); }
// Convenience overload: parses the key file with the third argument fixed to true
// (its meaning is defined by the 3-arg overload — not visible here).
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeySpecException, InvalidAlgorithmParameterException, KeyException, IOException { return toPrivateKey(keyFile, keyPassword, true); }

// Verifies that reading a DES3-encrypted PKCS#1 DSA key with no password fails with InvalidKeySpecException.
@Test public void testPkcs1Des3EncryptedDsaNoPassword() throws Exception { assertThrows(InvalidKeySpecException.class, new Executable() { @Override public void execute() throws Throwable { SslContext.toPrivateKey(new File(getClass().getResource("dsa_pkcs1_des3_encrypted.key") .getFile()), null); } }); }
// Produces the single external REST Service resource, typed per the configured exposed type.
@Override public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException { final Service service = kubernetesJobManagerParameters .getRestServiceExposedType() .serviceType() .buildUpExternalRestService(kubernetesJobManagerParameters); return Collections.singletonList(service); }

// Checks that the generated Service's spec type follows the configured exposed type (NodePort, ClusterIP).
@Test void testSetServiceExposedType() throws IOException { this.flinkConfig.set( KubernetesConfigOptions.REST_SERVICE_EXPOSED_TYPE, KubernetesConfigOptions.ServiceExposedType.NodePort); final List<HasMetadata> resources = this.externalServiceDecorator.buildAccompanyingKubernetesResources(); assertThat(((Service) resources.get(0)).getSpec().getType()) .isEqualTo(KubernetesConfigOptions.ServiceExposedType.NodePort.name()); this.flinkConfig.set( KubernetesConfigOptions.REST_SERVICE_EXPOSED_TYPE, KubernetesConfigOptions.ServiceExposedType.ClusterIP); final List<HasMetadata> servicesWithClusterIP = this.externalServiceDecorator.buildAccompanyingKubernetesResources(); assertThat(((Service) servicesWithClusterIP.get(0)).getSpec().getType()) .isEqualTo(KubernetesConfigOptions.ServiceExposedType.ClusterIP.name()); }
/**
 * Filters the given metadata in place; implementations may add, change or remove entries.
 *
 * @param metadata metadata to mutate
 * @throws TikaException on filtering failure
 */
public abstract void filter(Metadata metadata) throws TikaException;

// Verifies the mime-clearing filter wipes metadata for listed mime types and leaves others intact.
@Test public void testMimeClearingFilter() throws Exception { Metadata metadata = new Metadata(); metadata.set(Metadata.CONTENT_TYPE, MediaType.image("jpeg").toString()); metadata.set("author", "author"); MetadataFilter filter = new ClearByMimeMetadataFilter(set("image/jpeg", "application/pdf")); filter.filter(metadata); assertEquals(0, metadata.size()); metadata.set(Metadata.CONTENT_TYPE, MediaType.text("plain").toString()); metadata.set("author", "author"); filter.filter(metadata); assertEquals(2, metadata.size()); assertEquals("author", metadata.get("author")); }
// Accessor for the stream response carried by this exception.
public StreamResponse getResponse() { return _response; }

// Verifies that with writable stacktrace disabled the outer exception has an empty stack trace
// while the cause keeps its own message and trace.
@Test public void testWritableStacktraceDisabled() { Throwable throwable = new Exception("Inner exception message"); StreamResponse resposne = new StreamResponseBuilder().build(EntityStreams.emptyStream()); StreamException exception = new StreamException(resposne, "Outer exception message", throwable, WRITABLE_STACKTRACE_DISABLED); Assert.assertEquals(exception.getMessage(), "Outer exception message"); Assert.assertEquals(exception.getStackTrace().length, 0); Assert.assertEquals(exception.getResponse().getStatus(), 200); Assert.assertNotNull(exception.getCause()); Assert.assertSame(exception.getCause(), throwable); Assert.assertTrue(exception.getCause().getStackTrace().length > 0); Assert.assertEquals(exception.getCause().getMessage(), "Inner exception message"); }
// Returns the connection's variable space, or a default one when the connection has none.
@NonNull private static VariableSpace getSpace( @NonNull ConnectionDetails connectionDetails ) { VariableSpace space = connectionDetails.getSpace(); return space != null ? space : Variables.getADefaultVariableSpace(); }

// Verifies that environment variables in the root path are substituted during resolution.
@Test public void testGetResolvedRootPathSubstitutesEnvironmentVariables() throws Exception { String rootPath = "C:\\Users"; System.setProperty( "folder_location", rootPath ); when( vfsConnectionDetails.getSpace() ).thenReturn( Variables.getADefaultVariableSpace() ); assertGetResolvedRootPath( "${folder_location}", "C:/Users" ); }