focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/** Identity parser: returns the input unchanged (including null). */
@Override public Object parse(Object input) { return input; }
@Test public void passThrough_null() { // WHEN Object arguments = parser.parse(null); // THEN assertThat(arguments).isNull(); }
public static String prepareUrl(@NonNull String url) { url = url.trim(); String lowerCaseUrl = url.toLowerCase(Locale.ROOT); // protocol names are case insensitive if (lowerCaseUrl.startsWith("feed://")) { Log.d(TAG, "Replacing feed:// with http://"); return prepareUrl(url.substring("feed://".length())); } else if (lowerCaseUrl.startsWith("pcast://")) { Log.d(TAG, "Removing pcast://"); return prepareUrl(url.substring("pcast://".length())); } else if (lowerCaseUrl.startsWith("pcast:")) { Log.d(TAG, "Removing pcast:"); return prepareUrl(url.substring("pcast:".length())); } else if (lowerCaseUrl.startsWith("itpc")) { Log.d(TAG, "Replacing itpc:// with http://"); return prepareUrl(url.substring("itpc://".length())); } else if (lowerCaseUrl.startsWith(AP_SUBSCRIBE)) { Log.d(TAG, "Removing antennapod-subscribe://"); return prepareUrl(url.substring(AP_SUBSCRIBE.length())); } else if (lowerCaseUrl.contains(AP_SUBSCRIBE_DEEPLINK)) { Log.d(TAG, "Removing " + AP_SUBSCRIBE_DEEPLINK); String query = Uri.parse(url).getQueryParameter("url"); try { return prepareUrl(URLDecoder.decode(query, "UTF-8")); } catch (UnsupportedEncodingException e) { return prepareUrl(query); } } else if (!(lowerCaseUrl.startsWith("http://") || lowerCaseUrl.startsWith("https://"))) { Log.d(TAG, "Adding http:// at the beginning of the URL"); return "http://" + url; } else { return url; } }
/** An already well-formed https URL must be returned untouched. */
@Test
public void testCorrectURLHttps() {
    final String url = "https://example.com";
    assertEquals(url, UrlChecker.prepareUrl(url));
}
/**
 * Blocking retainAll: delegates to the async variant and waits for completion.
 *
 * @return true if this set changed as a result of the call
 */
@Override public boolean retainAll(Collection<?> c) { return get(retainAllAsync(c)); }
/** retainAll on a large set must keep only the requested elements and report a change. */
@Test
public void testRetainAll() {
    final Set<Integer> set = redisson.getSet("set");
    for (int value = 0; value < 20000; value++) {
        set.add(value);
    }
    final boolean changed = set.retainAll(Arrays.asList(1, 2));
    Assertions.assertTrue(changed);
    assertThat(set).containsOnly(1, 2);
    Assertions.assertEquals(2, set.size());
}
/**
 * Returns the serial ports backing this driver.
 * NOTE(review): this exposes the internal list directly; a caller could mutate
 * driver state. Consider returning an unmodifiable view — confirm no caller mutates it.
 */
@Override public List<UsbSerialPort> getPorts() { return mPorts; }
/**
 * Composite CDC device whose data interface id 1 has two alternate settings and
 * which contains multiple USB_CLASS_CDC_DATA interfaces: the driver must use the
 * Interface Association Descriptors from the raw descriptors to pair the ACM
 * control interface (id 2) with the correct data interface (id 3).
 */
@Test
public void compositeAlternateSettingDevice() throws Exception {
    // Mocks for the connection, device, its five interfaces and three endpoints.
    UsbDeviceConnection usbDeviceConnection = mock(UsbDeviceConnection.class);
    UsbDevice usbDevice = mock(UsbDevice.class);
    UsbInterface ethernetControlInterface = mock(UsbInterface.class);
    UsbInterface ethernetDummyInterface = mock(UsbInterface.class);
    UsbInterface ethernetDataInterface = mock(UsbInterface.class);
    UsbInterface controlInterface = mock(UsbInterface.class);
    UsbInterface dataInterface = mock(UsbInterface.class);
    UsbEndpoint controlEndpoint = mock(UsbEndpoint.class);
    UsbEndpoint readEndpoint = mock(UsbEndpoint.class);
    UsbEndpoint writeEndpoint = mock(UsbEndpoint.class);
    // has multiple USB_CLASS_CDC_DATA interfaces => get correct with IAD
    // Raw config descriptors: ECM function (interfaces 0+1, alt settings) followed
    // by the ACM function (interfaces 2+3), each introduced by an IAD (0B ...).
    when(usbDeviceConnection.getRawDescriptors()).thenReturn(HexDump.hexStringToByteArray(
            "12 01 00 02 EF 02 01 40 FE CA 02 40 00 01 01 02 03 01\n" +
            "09 02 9A 00 04 01 00 80 32\n" +
            "08 0B 00 02 02 06 00 00\n" +
            "09 04 00 00 01 02 06 00 04\n" +
            "05 24 00 20 01\n" +
            "05 24 06 00 01\n" +
            "0D 24 0F 04 00 00 00 00 DC 05 00 00 00\n" +
            "07 05 81 03 08 00 01\n" +
            "09 04 01 00 00 0A 00 00 00\n" +
            "09 04 01 01 02 0A 00 00 00\n" +
            "07 05 82 02 40 00 00\n" +
            "07 05 02 02 40 00 00\n" +
            "08 0B 02 02 02 02 00 00\n" +
            "09 04 02 00 01 02 02 00 04\n" +
            "05 24 00 20 01\n" +
            "05 24 01 00 03\n" +
            "04 24 02 02\n" +
            "05 24 06 02 03\n" +
            "07 05 83 03 08 00 10\n" +
            "09 04 03 00 02 0A 00 00 00\n" +
            "07 05 04 02 40 00 00\n" +
            "07 05 84 02 40 00 00"));
    // Only the ACM control/data interfaces are expected to be claimed.
    when(usbDeviceConnection.claimInterface(controlInterface,true)).thenReturn(true);
    when(usbDeviceConnection.claimInterface(dataInterface,true)).thenReturn(true);
    // Android enumerates alternate settings as separate interfaces => 5 entries.
    when(usbDevice.getInterfaceCount()).thenReturn(5);
    when(usbDevice.getInterface(0)).thenReturn(ethernetControlInterface);
    when(usbDevice.getInterface(1)).thenReturn(ethernetDummyInterface);
    when(usbDevice.getInterface(2)).thenReturn(ethernetDataInterface);
    when(usbDevice.getInterface(3)).thenReturn(controlInterface);
    when(usbDevice.getInterface(4)).thenReturn(dataInterface);
    // ECM function: COMM control interface + CDC_DATA interface with two alt settings.
    when(ethernetControlInterface.getId()).thenReturn(0);
    when(ethernetControlInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_COMM);
    when(ethernetControlInterface.getInterfaceSubclass()).thenReturn(6);
    when(ethernetDummyInterface.getId()).thenReturn(1);
    when(ethernetDummyInterface.getAlternateSetting()).thenReturn(0);
    when(ethernetDummyInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_CDC_DATA);
    when(ethernetDataInterface.getId()).thenReturn(1);
    when(ethernetDataInterface.getAlternateSetting()).thenReturn(1);
    when(ethernetDataInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_CDC_DATA);
    // ACM function: control interface id 2, data interface id 3.
    when(controlInterface.getId()).thenReturn(2);
    when(controlInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_COMM);
    when(controlInterface.getInterfaceSubclass()).thenReturn(USB_SUBCLASS_ACM);
    when(dataInterface.getId()).thenReturn(3);
    when(dataInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_CDC_DATA);
    when(controlInterface.getEndpointCount()).thenReturn(1);
    when(controlInterface.getEndpoint(0)).thenReturn(controlEndpoint);
    when(dataInterface.getEndpointCount()).thenReturn(2);
    when(dataInterface.getEndpoint(0)).thenReturn(writeEndpoint);
    when(dataInterface.getEndpoint(1)).thenReturn(readEndpoint);
    // Endpoint directions/types: interrupt IN control, bulk IN read, bulk OUT write.
    when(controlEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_IN);
    when(controlEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_INT);
    when(readEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_IN);
    when(readEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_BULK);
    when(writeEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_OUT);
    when(writeEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_BULK);
    // Open the port and verify the driver picked the ACM data endpoints.
    CdcAcmSerialDriver driver = new CdcAcmSerialDriver(usbDevice);
    CdcAcmSerialDriver.CdcAcmSerialPort port = (CdcAcmSerialDriver.CdcAcmSerialPort) driver.getPorts().get(0);
    port.mConnection = usbDeviceConnection;
    port.openInt();
    assertEquals(readEndpoint, port.mReadEndpoint);
    assertEquals(writeEndpoint, port.mWriteEndpoint);
}
/**
 * Builds Kafka consumer {@link Properties} from the raw options, then layers
 * key-side and value-side serde settings on top of the same instance.
 */
static Properties resolveConsumerProperties(Map<String, String> options, Object keySchema, Object valueSchema) {
    Properties consumerProperties = from(options);
    // Key serde first, then value serde - each call augments consumerProperties in place.
    withSerdeConsumerProperties(true, options, keySchema, consumerProperties);
    withSerdeConsumerProperties(false, options, valueSchema, consumerProperties);
    return consumerProperties;
}
/**
 * When the key (resp. value) format is Avro, the resolver must select the
 * Hazelcast Avro deserializer and attach the provided schema for that side only.
 */
@Test
public void test_consumerProperties_avro() {
    // key
    assertThat(PropertiesResolver.resolveConsumerProperties(Map.of(
            OPTION_KEY_FORMAT, AVRO_FORMAT
    ), DUMMY_SCHEMA, null)).containsExactlyInAnyOrderEntriesOf(Map.of(
            KEY_DESERIALIZER, HazelcastKafkaAvroDeserializer.class.getCanonicalName(),
            OPTION_KEY_AVRO_SCHEMA, DUMMY_SCHEMA
    ));
    // value
    assertThat(PropertiesResolver.resolveConsumerProperties(Map.of(
            OPTION_KEY_FORMAT, UNKNOWN_FORMAT,
            OPTION_VALUE_FORMAT, AVRO_FORMAT
    ), null, DUMMY_SCHEMA)).containsExactlyInAnyOrderEntriesOf(Map.of(
            VALUE_DESERIALIZER, HazelcastKafkaAvroDeserializer.class.getCanonicalName(),
            OPTION_VALUE_AVRO_SCHEMA, DUMMY_SCHEMA
    ));
}
/**
 * Serializes {@code datum} with this writer's root schema.
 * Tracing exceptions are summarized against the root schema so the thrown
 * exception carries the path of the offending field.
 *
 * @throws IOException if the encoder fails
 * @throws NullPointerException if {@code out} is null
 */
public void write(D datum, Encoder out) throws IOException {
    Objects.requireNonNull(out, "Encoder cannot be null");
    try {
        write(root, datum, out);
    } catch (TracingNullPointException | TracingClassCastException | TracingAvroTypeException e) {
        throw e.summarize(root);
    }
}
/**
 * Mutating a GenericArray while it is being written must surface as a
 * ConcurrentModificationException. Two latches sequence the race: the writer
 * thread signals after the array size is encoded, the test adds an element,
 * then lets the writer continue.
 */
@Test
void arrayConcurrentModification() throws Exception {
    String json = "{\"type\": \"array\", \"items\": \"int\" }";
    Schema s = new Schema.Parser().parse(json);
    final GenericArray<Integer> a = new GenericData.Array<>(1, s);
    ByteArrayOutputStream bao = new ByteArrayOutputStream();
    final GenericDatumWriter<GenericArray<Integer>> w = new GenericDatumWriter<>(s);
    CountDownLatch sizeWrittenSignal = new CountDownLatch(1);
    CountDownLatch eltAddedSignal = new CountDownLatch(1);
    // TestEncoder pauses between writing the size and the elements.
    final TestEncoder e = new TestEncoder(EncoderFactory.get().directBinaryEncoder(bao, null),
            sizeWrittenSignal, eltAddedSignal);
    // call write in another thread
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<Void> result = executor.submit(() -> {
        w.write(a, e);
        return null;
    });
    sizeWrittenSignal.await();
    // size has been written so now add an element to the array
    a.add(7);
    // and signal for the element to be written
    eltAddedSignal.countDown();
    try {
        result.get();
        fail("Expected ConcurrentModificationException");
    } catch (ExecutionException ex) {
        // The CME is wrapped by the Future.
        assertTrue(ex.getCause() instanceof ConcurrentModificationException);
    }
}
/** Creates a fresh grouped variant of the geometry aggregation state. */
@Override public GeometryState createGroupedState() { return new GroupedGeometryState(); }
/** A freshly created grouped state holds no geometry but still reports a positive footprint. */
@Test
public void testCreateGroupedStateEmpty() {
    final GeometryState state = factory.createGroupedState();
    assertNull(state.getGeometry());
    assertTrue(state.getEstimatedSize() > 0,
            format("Estimated memory size was %d", state.getEstimatedSize()));
}
/**
 * Request filter that installs a Shiro-backed SecurityContext.
 * If a Basic Authorization header is present, decodes and splits the
 * credentials and builds an authenticated context; otherwise builds an
 * anonymous one. Always unbinds any previously bound subject first.
 *
 * @throws BadRequestException if Basic credentials lack a ':' separator
 */
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
    // Drop any subject left over from a previous request on this thread.
    ThreadContext.unbindSubject();
    final boolean secure = requestContext.getSecurityContext().isSecure();
    final MultivaluedMap<String, String> headers = requestContext.getHeaders();
    final Map<String, Cookie> cookies = requestContext.getCookies();
    final Request grizzlyRequest = grizzlyRequestProvider.get();
    // Resolve the effective client address, honoring trusted proxies.
    final String host = RestTools.getRemoteAddrFromRequest(grizzlyRequest, trustedProxies);
    final String authHeader = headers.getFirst(HttpHeaders.AUTHORIZATION);
    final Set<Class<?>> matchedResources = requestContext.getUriInfo().getMatchedResources().stream()
            .map(Object::getClass).collect(Collectors.toSet());
    final SecurityContext securityContext;
    if (authHeader != null && authHeader.startsWith("Basic")) {
        // "Basic <base64(user:pass)>" - take everything after the first space.
        final String base64UserPass = authHeader.substring(authHeader.indexOf(' ') + 1);
        final String userPass = decodeBase64(base64UserPass);
        // Split on the first ':' only, so passwords may contain colons.
        final String[] split = userPass.split(":", 2);
        if (split.length != 2) {
            throw new BadRequestException("Invalid credentials in Authorization header");
        }
        securityContext = createSecurityContext(split[0], split[1], secure, SecurityContext.BASIC_AUTH,
                host, grizzlyRequest.getRemoteAddr(), headers, cookies, matchedResources);
    } else {
        // No usable credentials: anonymous context.
        securityContext = createSecurityContext(null, null, secure, null,
                host, grizzlyRequest.getRemoteAddr(), headers, cookies, matchedResources);
    }
    requestContext.setSecurityContext(securityContext);
}
/**
 * "user_pass" contains no ':' separator, so the decoded Basic credentials
 * cannot be split into user and password and the filter must reject with 400.
 */
@Test(expected = BadRequestException.class)
public void filterWithBasicAuthAndMalformedCredentialsShouldThrowBadRequestException() throws Exception {
    final MultivaluedHashMap<String, String> headers = new MultivaluedHashMap<>();
    final String credentials = Base64.getEncoder().encodeToString("user_pass".getBytes(StandardCharsets.US_ASCII));
    headers.putSingle(HttpHeaders.AUTHORIZATION, "Basic " + credentials);
    when(requestContext.getHeaders()).thenReturn(headers);
    filter.filter(requestContext);
}
/**
 * Fetches partition metadata for a JDBC table. Each result row may carry a
 * comma-separated list of quoted partition names in NAME plus a MODIFIED_TIME
 * timestamp; one Partition is emitted per name.
 *
 * FIX: the ResultSet is now closed via try-with-resources (it previously
 * leaked until statement close), and the dead {@code rs == null} branch was
 * removed - per the JDBC spec executeQuery never returns null.
 *
 * @throws StarRocksConnectorException wrapping any SQL or null-pointer failure
 */
public List<Partition> getPartitions(Connection connection, Table table) {
    JDBCTable jdbcTable = (JDBCTable) table;
    String query = getPartitionQuery(table);
    try (PreparedStatement ps = connection.prepareStatement(query)) {
        ps.setString(1, jdbcTable.getDbName());
        ps.setString(2, jdbcTable.getJdbcTable());
        try (ResultSet rs = ps.executeQuery()) {
            ImmutableList.Builder<Partition> list = ImmutableList.builder();
            while (rs.next()) {
                // NAME holds quoted, comma-separated partition names, e.g. "'p1','p2'".
                String[] partitionNames = rs.getString("NAME").
                        replace("'", "").split(",");
                long createTime = rs.getTimestamp("MODIFIED_TIME").getTime();
                for (String partitionName : partitionNames) {
                    list.add(new Partition(partitionName, createTime));
                }
            }
            return list.build();
        }
    } catch (SQLException | NullPointerException e) {
        // NPE kept for compatibility: malformed rows (e.g. null NAME) are wrapped too.
        throw new StarRocksConnectorException(e.getMessage(), e);
    }
}
/**
 * MODIFIED_TIME "2023-08-01" has no time component, so Timestamp parsing fails;
 * getPartitions must wrap that failure and surface the JDBC error message.
 */
@Test
public void testMysqlInvalidPartition() {
    try {
        // Mock a partitions result set whose MODIFIED_TIME is not a full timestamp.
        MockResultSet invalidPartition = new MockResultSet("partitions");
        invalidPartition.addColumn("NAME", Arrays.asList("'20230810'"));
        invalidPartition.addColumn("PARTITION_EXPRESSION", Arrays.asList("`d`"));
        invalidPartition.addColumn("MODIFIED_TIME", Arrays.asList("2023-08-01"));
        new Expectations() {
            {
                preparedStatement.executeQuery();
                result = invalidPartition;
                minTimes = 0;
            }
        };
        JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource);
        List<Column> columns = Arrays.asList(new Column("d", Type.VARCHAR));
        JDBCTable jdbcTable = new JDBCTable(100000, "tbl1", columns, Lists.newArrayList(),
                "test", "catalog", properties);
        jdbcMetadata.getPartitions(jdbcTable, Arrays.asList("20230810")).size();
        Assert.fail();
    } catch (Exception e) {
        // The wrapped SQLException message comes from Timestamp.valueOf.
        Assert.assertTrue(e.getMessage().contains("Timestamp format must be yyyy-mm-dd hh:mm:ss"));
    }
}
/**
 * Deprecated flatTransform overload without a Named parameter: validates the
 * supplier, generates a processor name and delegates to the Named overload.
 */
@Override
@Deprecated
public <K1, V1> KStream<K1, V1> flatTransform(final org.apache.kafka.streams.kstream.TransformerSupplier<? super K, ? super V, Iterable<KeyValue<K1, V1>>> transformerSupplier,
                                              final String... stateStoreNames) {
    Objects.requireNonNull(transformerSupplier, "transformerSupplier can't be null");
    return flatTransform(transformerSupplier, Named.as(builder.newProcessorName(TRANSFORM_NAME)), stateStoreNames);
}
/** Passing a null stateStoreNames array must be rejected with a descriptive NPE. */
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullStoreNamesOnFlatTransform() {
    final NullPointerException npe = assertThrows(
            NullPointerException.class,
            () -> testStream.flatTransform(flatTransformerSupplier, (String[]) null));
    assertThat(npe.getMessage(), equalTo("stateStoreNames can't be a null array"));
}
/** Delegates to the lazily resolved file object; resolution may fail with FileSystemException. */
@Override public FileContent getContent() throws FileSystemException { return requireResolvedFileObject().getContent(); }
/** getContent must return exactly what the resolved delegate returns. */
@Test
public void testDelegatesGetContent() throws FileSystemException {
    final FileContent expectedContent = mock( FileContent.class );
    when( resolvedFileObject.getContent() ).thenReturn( expectedContent );
    assertSame( expectedContent, fileObject.getContent() );
}
/**
 * Validates a Neutron port (non-null, non-empty id and network id) and
 * persists it to the network store, logging the creation.
 *
 * @throws NullPointerException     if osPort is null
 * @throws IllegalArgumentException if the id or network id is missing
 */
@Override
public void createPort(Port osPort) {
    checkNotNull(osPort, ERR_NULL_PORT);
    String portId = osPort.getId();
    checkArgument(!Strings.isNullOrEmpty(portId), ERR_NULL_PORT_ID);
    checkArgument(!Strings.isNullOrEmpty(osPort.getNetworkId()), ERR_NULL_PORT_NET_ID);
    osNetworkStore.createPort(osPort);
    log.info(String.format(MSG_PORT, portId, MSG_CREATED));
}
/** A port built without an id must be rejected. */
@Test(expected = IllegalArgumentException.class)
public void testCreatePortWithNullId() {
    target.createPort(NeutronPort.builder()
            .networkId(NETWORK_ID)
            .build());
}
/**
 * Performs an HTTP POST over the given connection: sends headers/body and
 * timeouts via handleInput, then reads and returns the response via handleOutput.
 *
 * @throws IOException           on transport failure
 * @throws UnretryableException  when the response indicates a non-retryable error
 */
public static String post(HttpURLConnection con, Map<String, String> headers, String requestBody, Integer connectTimeoutMs, Integer readTimeoutMs) throws IOException, UnretryableException { handleInput(con, headers, requestBody, connectTimeoutMs, readTimeoutMs); return handleOutput(con); }
/**
 * A 400 response (unretryable) with an OAuth error body must surface as an
 * UnretryableException whose message carries the error and its description.
 */
@Test
public void testErrorResponseUnretryableCode() throws IOException {
    HttpURLConnection mockedCon = createHttpURLConnection("dummy");
    // Force the error path: the input stream fails, the error stream carries the body.
    when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read"));
    when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream(
            "{\"error\":\"some_arg\", \"error_description\":\"some problem with arg\"}"
                    .getBytes(StandardCharsets.UTF_8)));
    when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_BAD_REQUEST);
    UnretryableException ioe = assertThrows(UnretryableException.class,
            () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null));
    assertTrue(ioe.getMessage().contains("{\"some_arg\" - \"some problem with arg\"}"));
}
/**
 * Drains up to {@code maxRecords} records from this completed fetch, parsing
 * and deserializing each one. Deserialization failures are cached in
 * {@code cachedRecordException} so the same record is retried on the next call
 * instead of being skipped; an exception is only thrown to the caller when no
 * records were produced in this call.
 */
<K, V> List<ConsumerRecord<K, V>> fetchRecords(FetchConfig fetchConfig, Deserializers<K, V> deserializers, int maxRecords) {
    // Error when fetching the next record before deserialization.
    if (corruptLastRecord)
        throw new KafkaException("Received exception when fetching the next record from " + partition
                + ". If needed, please seek past the record to " + "continue consumption.", cachedRecordException);
    if (isConsumed)
        return Collections.emptyList();
    List<ConsumerRecord<K, V>> records = new ArrayList<>();
    try {
        for (int i = 0; i < maxRecords; i++) {
            // Only move to next record if there was no exception in the last fetch. Otherwise, we should
            // use the last record to do deserialization again.
            if (cachedRecordException == null) {
                // corruptLastRecord brackets nextFetchedRecord so a crash there is detectable on re-entry.
                corruptLastRecord = true;
                lastRecord = nextFetchedRecord(fetchConfig);
                corruptLastRecord = false;
            }
            if (lastRecord == null)
                break;
            Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
            TimestampType timestampType = currentBatch.timestampType();
            ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch, timestampType, lastRecord);
            records.add(record);
            recordsRead++;
            bytesRead += lastRecord.sizeInBytes();
            nextFetchOffset = lastRecord.offset() + 1;
            // In some cases, the deserialization may have thrown an exception and the retry may succeed,
            // we allow user to move forward in this case.
            cachedRecordException = null;
        }
    } catch (SerializationException se) {
        cachedRecordException = se;
        if (records.isEmpty())
            throw se;
    } catch (KafkaException e) {
        cachedRecordException = e;
        if (records.isEmpty())
            throw new KafkaException("Received exception when fetching the next record from " + partition
                    + ". If needed, please seek past the record to " + "continue consumption.", e);
    }
    return records;
}
/**
 * Records from an aborted transaction must be filtered out under
 * READ_COMMITTED but still be visible under READ_UNCOMMITTED.
 */
@Test
public void testAbortedTransactionRecordsRemoved() {
    int numRecords = 10;
    Records rawRecords = newTranscactionalRecords(ControlRecordType.ABORT, numRecords);
    FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
            .setRecords(rawRecords)
            .setAbortedTransactions(newAbortedTransactions());
    try (final Deserializers<String, String> deserializers = newStringDeserializers()) {
        // READ_COMMITTED: aborted records are dropped.
        FetchConfig fetchConfig = newFetchConfig(IsolationLevel.READ_COMMITTED, true);
        CompletedFetch completedFetch = newCompletedFetch(0, partitionData);
        List<ConsumerRecord<String, String>> records = completedFetch.fetchRecords(fetchConfig, deserializers, 10);
        assertEquals(0, records.size());
        // READ_UNCOMMITTED: all records come through.
        fetchConfig = newFetchConfig(IsolationLevel.READ_UNCOMMITTED, true);
        completedFetch = newCompletedFetch(0, partitionData);
        records = completedFetch.fetchRecords(fetchConfig, deserializers, 10);
        assertEquals(numRecords, records.size());
    }
}
/**
 * Handles a non-2xx origin response: classifies it (503 => throttled, otherwise
 * general failure), records per-attempt stats, then either retries against a
 * different origin (retryable 5xx and under the retry limit) or finalizes the
 * error response to the client.
 */
protected void handleOriginNonSuccessResponse(final HttpResponse originResponse, DiscoveryResult chosenServer) {
    final int respStatus = originResponse.status().code();
    OutboundException obe;
    StatusCategory statusCategory;
    ClientException.ErrorType niwsErrorType;
    if (respStatus == 503) {
        // Origin explicitly throttled us.
        statusCategory = ZuulStatusCategory.FAILURE_ORIGIN_THROTTLED;
        niwsErrorType = ClientException.ErrorType.SERVER_THROTTLED;
        obe = new OutboundException(OutboundErrorType.SERVICE_UNAVAILABLE, requestAttempts);
        if (currentRequestStat != null) {
            currentRequestStat.updateWithHttpStatusCode(respStatus);
            currentRequestStat.serviceUnavailable();
        }
    } else {
        statusCategory = ZuulStatusCategory.FAILURE_ORIGIN;
        niwsErrorType = ClientException.ErrorType.GENERAL;
        obe = new OutboundException(OutboundErrorType.ERROR_STATUS_RESPONSE, requestAttempts);
        if (currentRequestStat != null) {
            currentRequestStat.updateWithHttpStatusCode(respStatus);
            currentRequestStat.generalError();
        }
    }
    obe.setStatusCode(respStatus);
    long duration = 0;
    if (currentRequestStat != null) {
        duration = currentRequestStat.duration();
    }
    if (currentRequestAttempt != null) {
        currentRequestAttempt.complete(respStatus, duration, obe);
    }
    // Flag this error with the ExecutionListener.
    origin.onRequestExceptionWithServer(zuulRequest, chosenServer, attemptNum, new ClientException(niwsErrorType));
    boolean retryable5xxResponse = isRetryable5xxResponse(zuulRequest, originResponse);
    if (retryable5xxResponse) {
        origin.adjustRetryPolicyIfNeeded(zuulRequest);
    }
    if (retryable5xxResponse && isBelowRetryLimit()) {
        logger.debug(
                "Retrying: status={}, attemptNum={}, maxRetries={}, startedSendingResponseToClient={}, hasCompleteBody={}, method={}",
                respStatus,
                attemptNum,
                origin.getMaxRetriesForRequest(context),
                startedSendingResponseToClient,
                zuulRequest.hasCompleteBody(),
                zuulRequest.getMethod());
        // detach from current origin.
        ByteBufUtil.touch(originResponse, "ProxyEndpoint handling non-success retry, request: ", zuulRequest);
        unlinkFromOrigin();
        releasePartialResponse(originResponse);
        // ensure body reader indexes are reset so retry is able to access the body buffer
        // otherwise when the body is read by netty (in writeBufferedBodyContent) the body will appear empty
        zuulRequest.resetBodyReader();
        // retry request with different origin
        passport.add(PassportState.ORIGIN_RETRY_START);
        proxyRequestToOrigin();
    } else {
        SessionContext zuulCtx = context;
        logger.info(
                "Sending error to client: status={}, attemptNum={}, maxRetries={}, startedSendingResponseToClient={}, hasCompleteBody={}, method={}",
                respStatus,
                attemptNum,
                origin.getMaxRetriesForRequest(zuulCtx),
                startedSendingResponseToClient,
                zuulRequest.hasCompleteBody(),
                zuulRequest.getMethod());
        // This is a final response after all retries that will go to the client
        ByteBufUtil.touch(originResponse, "ProxyEndpoint handling non-success response, request: ", zuulRequest);
        zuulResponse = buildZuulHttpResponse(originResponse, statusCategory, obe);
        invokeNext(zuulResponse);
    }
}
/**
 * A 503 is a retryable 5xx: the retry policy must be consulted and a fresh
 * origin connection attempted.
 */
@Test
void retryWhenNoAdjustment() {
    createResponse(HttpResponseStatus.SERVICE_UNAVAILABLE);
    proxyEndpoint.handleOriginNonSuccessResponse(response, createDiscoveryResult());
    verify(nettyOrigin).adjustRetryPolicyIfNeeded(request);
    verify(nettyOrigin).connectToOrigin(any(), any(), anyInt(), any(), any(), any());
}
/**
 * Splits a command-line style string into tokens on whitespace, honoring
 * single- and double-quoted sections: a quoted run "arg 0" yields one token
 * with the quotes stripped and inner whitespace preserved.
 *
 * @param input raw argument string; null yields an empty list
 * @return list of parsed tokens
 * @throws IllegalArgumentException on an unterminated quote
 */
public static List<String> splitToWhiteSpaceSeparatedTokens(String input) {
    if (input == null) {
        return new ArrayList<>();
    }
    // Tokenize by quote char and whitespace, returning the delimiters themselves.
    StringTokenizer tokenizer = new StringTokenizer(input.trim(), QUOTE_CHAR + WHITESPACE, true);
    List<String> tokens = new ArrayList<>();
    // Accumulates the text of the quoted section currently being read;
    // non-empty length doubles as the "inside quotes" flag.
    StringBuilder quotedText = new StringBuilder();
    while (tokenizer.hasMoreTokens()) {
        String token = tokenizer.nextToken();
        if (QUOTE_CHAR.equals(token)) {
            // if we have a quote, add the next tokens to the quoted text
            // until the quoting has finished
            quotedText.append(QUOTE_CHAR);
            String buffer = quotedText.toString();
            // A buffer that both starts and ends with a matching quote is complete.
            if (isSingleQuoted(buffer) || isDoubleQuoted(buffer)) {
                tokens.add(buffer.substring(1, buffer.length() - 1));
                quotedText = new StringBuilder();
            }
        } else if (WHITESPACE.equals(token)) {
            // a white space, if in quote, add the white space, otherwise
            // skip it
            if (quotedText.length() > 0) {
                quotedText.append(WHITESPACE);
            }
        } else {
            if (quotedText.length() > 0) {
                quotedText.append(token);
            } else {
                tokens.add(token);
            }
        }
    }
    // Leftover quoted text means the closing quote never arrived.
    if (quotedText.length() > 0) {
        throw new IllegalArgumentException("Invalid quoting found in args " + quotedText);
    }
    return tokens;
}
/** Whitespace inside a double-quoted argument must be preserved, quotes stripped. */
@Test
public void testWhiteSpaceQuoted() {
    final List<String> tokens = splitToWhiteSpaceSeparatedTokens("\"arg 0\"");
    assertEquals("arg 0", tokens.get(0));
}
/**
 * Lists this directory's children, resolving a security-scoped bookmark first.
 * Falls back to a plain listing when no bookmark resolves or when obtaining
 * the lock is denied; the lock is released after a successful scoped listing.
 *
 * @throws AccessDeniedException from the underlying listing
 */
@Override
public AttributedList<Local> list(final Filter<String> filter) throws AccessDeniedException {
    final NSURL resolved;
    try {
        resolved = this.lock(true);
        if(null == resolved) {
            // No bookmark to resolve; list without the security scope.
            return super.list(filter);
        }
        final AttributedList<Local> list = super.list(resolved.path(), filter);
        this.release(resolved);
        return list;
    }
    catch(LocalAccessDeniedException e) {
        // Best effort: degrade to an unscoped listing when the lock is denied.
        log.warn(String.format("Failure obtaining lock for %s. %s", this, e));
        return super.list(filter);
    }
}
/** Listing a directory that does not exist must fail with LocalAccessDeniedException. */
@Test(expected = LocalAccessDeniedException.class)
public void testListNotFound() throws Exception {
    final FinderLocal missing = new FinderLocal(
            System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    missing.list();
}
/**
 * Builds a Kafka-backed Beam SQL table from the table definition: merges topics
 * and bootstrap servers from the LOCATION string and the "topics" /
 * "bootstrap_servers" properties, then picks the table implementation from the
 * payload format (nested schema => NestedPayloadKafkaTable; flat schema =>
 * CSV by default, otherwise a serializer-backed table).
 */
@Override
public BeamSqlTable buildBeamSqlTable(Table table) {
    Schema schema = table.getSchema();
    ObjectNode properties = table.getProperties();
    Optional<ParsedLocation> parsedLocation = Optional.empty();
    if (!Strings.isNullOrEmpty(table.getLocation())) {
        parsedLocation = Optional.of(parseLocation(checkArgumentNotNull(table.getLocation())));
    }
    // Topic/broker from LOCATION is combined with any listed in the properties.
    List<String> topics = mergeParam(parsedLocation.map(loc -> loc.topic), (ArrayNode) properties.get("topics"));
    List<String> allBootstrapServers =
        mergeParam(
            parsedLocation.map(loc -> loc.brokerLocation),
            (ArrayNode) properties.get("bootstrap_servers"));
    String bootstrapServers = String.join(",", allBootstrapServers);
    Optional<String> payloadFormat =
        properties.has("format")
            ? Optional.of(properties.get("format").asText())
            : Optional.empty();
    if (Schemas.isNestedSchema(schema)) {
        // Nested schema: payload lives in a dedicated field; serializer is optional.
        Optional<PayloadSerializer> serializer =
            payloadFormat.map(
                format ->
                    PayloadSerializers.getSerializer(
                        format,
                        checkArgumentNotNull(schema.getField(PAYLOAD_FIELD).getType().getRowSchema()),
                        TableUtils.convertNode2Map(properties)));
        return new NestedPayloadKafkaTable(schema, bootstrapServers, topics, serializer);
    } else {
        /*
         * CSV is handled separately because multiple rows can be produced from a single message, which
         * adds complexity to payload extraction. It remains here and as the default because it is the
         * historical default, but it will not be extended to support attaching extended attributes to
         * rows.
         */
        if (payloadFormat.orElse("csv").equals("csv")) {
            return new BeamKafkaCSVTable(schema, bootstrapServers, topics);
        }
        // payloadFormat is necessarily present here (the csv default was not taken).
        PayloadSerializer serializer =
            PayloadSerializers.getSerializer(
                payloadFormat.get(), schema, TableUtils.convertNode2Map(properties));
        return new PayloadSerializerKafkaTable(schema, bootstrapServers, topics, serializer);
    }
}
/** Topics from the "topics" property must be appended after the LOCATION topic. */
@Test
public void testBuildWithExtraTopics() {
    final Table table = mockTableWithExtraTopics("hello", ImmutableList.of("topic2", "topic3"));
    final BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);
    assertNotNull(sqlTable);
    assertTrue(sqlTable instanceof BeamKafkaCSVTable);
    final BeamKafkaCSVTable csvTable = (BeamKafkaCSVTable) sqlTable;
    assertEquals(LOCATION_BROKER, csvTable.getBootstrapServers());
    assertEquals(ImmutableList.of(LOCATION_TOPIC, "topic2", "topic3"), csvTable.getTopics());
}
/** Returns the static argument descriptions for this function. */
@Override public List<String> getArgumentDesc() { return desc; }
/**
 * The first argument description must match the localized resource string,
 * proving the configuration file (messages.properties) was read.
 */
@Test
public void testDescription() {
    assertEquals(JMeterUtils.getResString("string_to_file_pathname"),
            function.getArgumentDesc().get(0),
            "Function 'stringtofile' should have successfully reading the configuration file 'messages.properties'");
}
public static String sanitize(String name) { try { String encoded = URLEncoder.encode(name, StandardCharsets.UTF_8.name()); StringBuilder builder = new StringBuilder(); for (int i = 0; i < encoded.length(); i++) { char c = encoded.charAt(i); if (c == '*') { // Metric ObjectName treats * as pattern builder.append("%2A"); } else if (c == '+') { // Space URL-encoded as +, replace with percent encoding builder.append("%20"); } else { builder.append(c); } } return builder.toString(); } catch (UnsupportedEncodingException e) { throw new KafkaException(e); } }
/**
 * Round-trip check: the sanitized principal contains only JMX-safe characters
 * (alphanumerics, '.', '-' and '%' escapes) and desanitize restores the original.
 */
@Test
public void testSanitize() {
    String principal = "CN=Some characters !@#$%&*()_-+=';:,/~";
    String sanitizedPrincipal = Sanitizer.sanitize(principal);
    // '%' is the escape character; mapping it to '_' lets one regex cover the rest.
    assertTrue(sanitizedPrincipal.replace('%', '_').matches("[a-zA-Z0-9\\._\\-]+"));
    assertEquals(principal, Sanitizer.desanitize(sanitizedPrincipal));
}
/**
 * Annotation-processor round: selects the scheduling trigger annotations from
 * this round, groups all annotated methods by their enclosing type and
 * generates one scheduling module per type. Processing errors are reported
 * through the messager; IO failures abort the round.
 */
@Override
public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
    // Keep only the annotations that are known scheduling triggers.
    var triggers = annotations.stream()
        .filter(te -> {
            for (var trigger : KoraSchedulingAnnotationProcessor.triggers) {
                if (te.getQualifiedName().contentEquals(trigger.canonicalName())) {
                    return true;
                }
            }
            return false;
        })
        .toArray(TypeElement[]::new);
    var scheduledMethods = roundEnv.getElementsAnnotatedWithAny(triggers);
    // Group annotated methods by the fully qualified name of their enclosing type.
    var scheduledTypes = scheduledMethods.stream().collect(Collectors.groupingBy(e -> {
        var type = (TypeElement) e.getEnclosingElement();
        return type.getQualifiedName().toString();
    }));
    for (var entry : scheduledTypes.entrySet()) {
        var methods = entry.getValue();
        var type = (TypeElement) entry.getValue().get(0).getEnclosingElement();
        try {
            this.generateModule(type, methods);
        } catch (ProcessingErrorException e) {
            e.printError(this.processingEnv);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        // todo exceptions
    }
    // Returning false leaves the annotations available to other processors.
    return false;
}
/** Smoke test: processing a class annotated with a trigger must complete without error. */
@Test void testScheduledWithTrigger() throws Exception { process(ScheduledWithTrigger.class); }
/**
 * Lists the Iceberg tables in the given namespace, which must resolve to a
 * Snowflake SCHEMA-level identifier.
 *
 * @throws IllegalArgumentException if the namespace is not at SCHEMA level
 */
@Override
public List<TableIdentifier> listTables(Namespace namespace) {
    SnowflakeIdentifier schemaScope = NamespaceHelpers.toSnowflakeIdentifier(namespace);
    Preconditions.checkArgument(
        schemaScope.type() == SnowflakeIdentifier.Type.SCHEMA,
        "listTables must be at SCHEMA level; got %s from namespace %s",
        schemaScope,
        namespace);
    return snowflakeClient.listIcebergTables(schemaScope).stream()
        .map(NamespaceHelpers::toIcebergTableIdentifier)
        .collect(Collectors.toList());
}
/**
 * Listing tables in a schema that does not exist must fail, and the error
 * message must name the missing db.schema pair.
 */
@Test
public void testListTablesWithinNonexistentSchema() {
    String dbName = "DB_2";
    String schemaName = "NONEXISTENT_SCHEMA";
    assertThatExceptionOfType(RuntimeException.class)
        .isThrownBy(() -> catalog.listTables(Namespace.of(dbName, schemaName)))
        .withMessageContaining("does not exist")
        .withMessageContaining("DB_2.NONEXISTENT_SCHEMA");
}
@CanIgnoreReturnValue public GsonBuilder setDateFormat(String pattern) { if (pattern != null) { try { new SimpleDateFormat(pattern); } catch (IllegalArgumentException e) { // Throw exception if it is an invalid date format throw new IllegalArgumentException("The date pattern '" + pattern + "' is not valid", e); } } this.datePattern = pattern; return this; }
@SuppressWarnings("deprecation") // for GsonBuilder.setDateFormat(int) @Test public void testSetDateFormatValidStyle() { GsonBuilder builder = new GsonBuilder(); int[] validStyles = {DateFormat.FULL, DateFormat.LONG, DateFormat.MEDIUM, DateFormat.SHORT}; for (int style : validStyles) { // Should not throw an exception builder.setDateFormat(style); builder.setDateFormat(style, style); } }
/**
 * No-op for this handler: there is nothing to reacquire.
 * Returns null deliberately (its unit test asserts null, not an empty list).
 */
@Override
public List<PrivilegedOperation> reacquireContainer(ContainerId containerId)
    throws ResourceHandlerException {
    return null;
}
/** reacquireContainer is a no-op for this handler and must return null. */
@Test
public void testReacquireContainer() throws Exception {
    final ContainerId containerId = mock(ContainerId.class);
    Assert.assertNull(cGroupsBlkioResourceHandlerImpl.reacquireContainer(containerId));
}
/** Returns the node's name, plus ": value" when a value has been set. */
@Override
public String toString() {
    String nodeName = SelParserTreeConstants.jjtNodeName[id];
    if (value == null) {
        return nodeName;
    }
    return nodeName + ": " + value;
}
/** With a value set, toString must append ": value" after the node name. */
@Test public void testToStringWithValue() { root.jjtSetValue("foo"); assertEquals("Execute: foo", root.toString()); }
/**
 * Reads an int[] from the stream: a length prefix followed by that many ints.
 * A NULL_ARRAY_LENGTH prefix yields null; a non-positive length yields an
 * empty array.
 *
 * @throws EOFException if the stream ends prematurely
 */
@Override
@Nullable
public int[] readIntArray() throws EOFException {
    final int length = readInt();
    if (length == NULL_ARRAY_LENGTH) {
        return null;
    }
    if (length <= 0) {
        return new int[0];
    }
    final int[] result = new int[length];
    for (int i = 0; i < result.length; i++) {
        result[i] = readInt();
    }
    return result;
}
/**
 * Buffer layout (ints): [0 = zero-length marker][1 = length one][1 = element][-1 = null marker].
 * Reading from offsets 12 / 0 / 4 must yield null, an empty array, and {1}.
 */
@Test
public void testReadIntArray() throws Exception {
    byte[] bytesBE = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, -1, -1, -1, -1};
    byte[] bytesLE = {0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, -1, -1, -1, -1};
    in.init((byteOrder == BIG_ENDIAN ? bytesBE : bytesLE), 0);
    in.position(12);
    int[] theNullArray = in.readIntArray();
    in.position(0);
    int[] theZeroLengthArray = in.readIntArray();
    in.position(4);
    int[] bytes = in.readIntArray();
    assertNull(theNullArray);
    assertArrayEquals(new int[0], theZeroLengthArray);
    assertArrayEquals(new int[]{1}, bytes);
}
/**
 * Merges the given query results. If a merger applies, its result is decorated
 * and returned; otherwise the first raw result is decorated, falling back to a
 * transparent pass-through when no decorator applies either.
 *
 * @throws SQLException on merge failure
 */
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext) throws SQLException {
    Optional<MergedResult> merged = executeMerge(queryResults, sqlStatementContext);
    if (merged.isPresent()) {
        return decorate(merged.get(), sqlStatementContext);
    }
    return decorate(queryResults.get(0), sqlStatementContext)
            .orElseGet(() -> new TransparentMergedResult(queryResults.get(0)));
}
/**
 * With only a decorator rule registered (no merger), merge must still apply
 * the decorator to the raw query result.
 */
@Test
void assertMergeWithDecoratorRuleOnly() throws SQLException {
    when(database.getRuleMetaData().getRules()).thenReturn(Collections.singleton(new DecoratorRuleFixture()));
    MergedResult actual = new MergeEngine(mock(RuleMetaData.class), database,
            new ConfigurationProperties(new Properties()), mock(ConnectionContext.class))
            .merge(Collections.singletonList(queryResult), mock(SQLStatementContext.class));
    assertThat(actual.getValue(1, String.class), is("decorated_value"));
}
/**
 * Prepares a seckill for a mock run: drops the cached entry, re-fetches it,
 * marks it IN_PROGRESS, re-caches it and clears the end flag for this task.
 */
@Override
public void handle(SeckillWebMockRequestDTO request) {
    long seckillId = request.getSeckillId();
    redisService.removeSeckill(seckillId);
    // NOTE(review): getSeckill right after removeSeckill suggests the service
    // reloads from a backing store on cache miss - confirm that behavior.
    Seckill seckill = redisService.getSeckill(seckillId);
    seckill.setStatus(SeckillStatusConstant.IN_PROGRESS);
    redisService.putSeckill(seckill);
    redisService.clearSeckillEndFlag(seckillId, request.getTaskId());
}
@Test
@DisplayName("Should handle request and update seckill status")
public void shouldHandleRequestAndUpdateSeckillStatus() {
    // Arrange: a request targeting seckill 123, backed by a stubbed cache hit.
    SeckillWebMockRequestDTO request = new SeckillWebMockRequestDTO();
    request.setSeckillId(123L);
    Seckill cached = new Seckill();
    when(redisService.getSeckill(anyLong())).thenReturn(cached);

    // Act
    redisPreRequestHandler.handle(request);

    // Assert: the handler removed the stale entry, re-stored the seckill and cleared the end flag.
    verify(redisService).removeSeckill(anyLong());
    verify(redisService).putSeckill(cached);
    verify(redisService).clearSeckillEndFlag(anyLong(), eq(request.getTaskId()));
}
/**
 * Replaces each output value meta whose name matches an aggregate value meta with the
 * aggregate's meta, preserving the original position in the output row.
 */
@VisibleForTesting
void updateValueMeta() throws KettleException {
    List<ValueMetaInterface> outputValueMetaList = data.outputRowMeta.getValueMetaList();
    List<ValueMetaInterface> aggMetaValueMetaList = data.aggMeta.getValueMetaList();
    // The remove+add pair keeps the list size constant, so the index-based outer loop
    // stays valid even though outputRowMeta is mutated while iterating.
    for ( int outputIndex = 0; outputIndex < outputValueMetaList.size(); ++outputIndex ) {
        for ( int aggIndex = 0; aggIndex < aggMetaValueMetaList.size(); ++aggIndex ) {
            if ( aggMetaValueMetaList.get( aggIndex ).getName().equals( outputValueMetaList.get( outputIndex ).getName() ) ) {
                data.outputRowMeta.removeValueMeta( outputValueMetaList.get( outputIndex ).getName() );
                data.outputRowMeta.addValueMeta( outputIndex, aggMetaValueMetaList.get( aggIndex ) );
            }
        }
    }
}
@Test
public void updateValueMetaTest() throws KettleException {
    // Output row carries a binary meta with the same name as the aggregate's binary meta.
    ValueMetaString stringMetaFromOutput = new ValueMetaString( "stringMeta" );
    ValueMetaBinary binaryMetaFromOutput = new ValueMetaBinary( "binaryMeta" );
    ValueMetaBinary binaryMetaFromAgg = new ValueMetaBinary( "binaryMeta" );
    ValueMetaInteger integerMetaFromOutput = new ValueMetaInteger( "integerMeta" );
    memGroupByData.outputRowMeta.addValueMeta( stringMetaFromOutput );
    memGroupByData.outputRowMeta.addValueMeta( binaryMetaFromOutput );
    memGroupByData.outputRowMeta.addValueMeta( integerMetaFromOutput );
    memGroupByData.aggMeta.addValueMeta( binaryMetaFromAgg );
    doCallRealMethod().when( memGroupBy ).updateValueMeta();
    memGroupBy.updateValueMeta();
    // The name-matching meta must have been swapped: output's instance out, aggregate's in.
    assertFalse( memGroupByData.outputRowMeta.getValueMetaList().contains( binaryMetaFromOutput ) );
    assertTrue( memGroupByData.outputRowMeta.getValueMetaList().contains( binaryMetaFromAgg ) );
}
@Override
public String toString() {
    // Renders as: CSV Input (<delimiter>) [path, ...] with control characters made visible.
    StringBuilder description = new StringBuilder("CSV Input (");
    description.append(StringUtils.showControlCharacters(String.valueOf(getFieldDelimiter())));
    description.append(") ");
    description.append(Arrays.toString(getFilePaths()));
    return description.toString();
}
@Test
void testPojoTypeWithInvalidFieldMapping() throws Exception {
    File tempFile = File.createTempFile("CsvReaderPojoType", "tmp");
    tempFile.deleteOnExit();
    tempFile.setWritable(true);
    @SuppressWarnings("unchecked")
    PojoTypeInfo<PojoItem> typeInfo = (PojoTypeInfo<PojoItem>) TypeExtractor.createTypeInfo(PojoItem.class);
    // Fewer mapped fields than POJO fields must be rejected.
    try {
        new PojoCsvInputFormat<>( new Path(tempFile.toURI().toString()), typeInfo, new String[] {"field1", "field2"});
        fail("The number of POJO fields cannot be same as that of selected CSV fields");
    } catch (IllegalArgumentException e) {
        // success
    }
    // A null entry in the mapping must be rejected.
    try {
        new PojoCsvInputFormat<>( new Path(tempFile.toURI().toString()), typeInfo, new String[] {"field1", "field2", null, "field4"});
        fail("Fields mapping cannot contain null.");
    } catch (NullPointerException e) {
        // success
    }
    // A field name not present on the POJO must be rejected.
    try {
        new PojoCsvInputFormat<>( new Path(tempFile.toURI().toString()), typeInfo, new String[] {"field1", "field2", "field3", "field5"});
        fail("Invalid field name");
    } catch (IllegalArgumentException e) {
        // success
    }
}
/**
 * Builds a device id from its URI; the cached string form is lower-cased since
 * the scheme/host portions of device ids are case-insensitive.
 */
private DeviceId(URI uri) {
    this.uri = uri;
    // Locale.ROOT keeps the lowercasing locale-independent; the default locale
    // (e.g. Turkish, where 'I' maps to dotless 'ı') would corrupt identifiers.
    this.str = uri.toString().toLowerCase(java.util.Locale.ROOT);
}
@Test
public void basics() {
    // Equal URIs produce equal ids; distinct URIs do not. Covers equals/hashCode contract.
    new EqualsTester()
        .addEqualityGroup(deviceId("of:foo"), deviceId("of:foo"))
        .addEqualityGroup(deviceId("of:bar"))
        .testEquals();
}
@Override
protected List<ParentRunner<?>> getChildren() {
    // Exposes the per-feature runners collected at construction time.
    return children;
}
@Test
void finds_features_based_on_implicit_package() throws InitializationError {
    // Feature files are discovered from the test class's own package when no path is given.
    Cucumber cucumber = new Cucumber(ImplicitFeatureAndGluePath.class);
    assertThat(cucumber.getChildren().size(), is(equalTo(7)));
    assertThat(cucumber.getChildren().get(1).getDescription().getDisplayName(), is(equalTo("Feature A")));
}
/** Delegates the mount operation unchanged to the wrapped file system. */
@Override
public void mount(AlluxioURI alluxioPath, AlluxioURI ufsPath, MountPOptions options) throws IOException, AlluxioException {
    mDelegatedFileSystem.mount(alluxioPath, ufsPath, options);
}
@Test
public void mount() throws Exception {
    // The delegating file system must forward mount() verbatim to the wrapped instance.
    FileSystem fileSystem = new DelegatingFileSystem(mMockFileSystem);
    AlluxioURI alluxioPath = new AlluxioURI("/t");
    AlluxioURI ufsPath = new AlluxioURI("/u");
    MountPOptions mountOptions = MountPOptions .newBuilder() .setReadOnly(false) .setShared(false) .build();
    fileSystem.mount(alluxioPath, ufsPath, mountOptions);
    Mockito.verify(mMockFileSystem, atLeastOnce()) .mount(eq(alluxioPath), eq(ufsPath), eq(mountOptions));
}
/**
 * Wraps the intercepted call with a data-permission context: pushes the method's
 * {@code @DataPermission} annotation (if any) before proceeding and always pops it after.
 */
@Override
public Object invoke(MethodInvocation methodInvocation) throws Throwable {
    // Push: record the annotation for the duration of the call.
    DataPermission dataPermission = this.findAnnotation(methodInvocation);
    if (dataPermission != null) {
        DataPermissionContextHolder.add(dataPermission);
    }
    try {
        // Run the intercepted method.
        return methodInvocation.proceed();
    } finally {
        // Pop: always unwind the context, mirroring the push above.
        if (dataPermission != null) {
            DataPermissionContextHolder.remove();
        }
    }
}
@Test // 无 @DataPermission 注解 public void testInvoke_none() throws Throwable { // 参数 mockMethodInvocation(TestNone.class); // 调用 Object result = interceptor.invoke(methodInvocation); // 断言 assertEquals("none", result); assertEquals(1, interceptor.getDataPermissionCache().size()); assertTrue(CollUtil.getFirst(interceptor.getDataPermissionCache().values()).enable()); }
/**
 * Creates a verification code for the request's mobile/scene and sends it via the
 * scene's SMS template. Fails fast when the scene has no configuration.
 */
@Override
public void sendSmsCode(SmsCodeSendReqDTO reqDTO) {
    SmsSceneEnum sceneEnum = SmsSceneEnum.getCodeByScene(reqDTO.getScene());
    Assert.notNull(sceneEnum, "验证码场景({}) 查找不到配置", reqDTO.getScene());
    // Create the verification code.
    String code = createSmsCode(reqDTO.getMobile(), reqDTO.getScene(), reqDTO.getCreateIp());
    // Send it using the scene's template, passing the code as the template variable.
    smsSendService.sendSingleSms(reqDTO.getMobile(), null, null, sceneEnum.getTemplateCode(), MapUtil.of("code", code));
}
@Test
public void sendSmsCode_success() {
    // Prepare the request.
    SmsCodeSendReqDTO reqDTO = randomPojo(SmsCodeSendReqDTO.class, o -> {
        o.setMobile("15601691300");
        o.setScene(SmsSceneEnum.MEMBER_LOGIN.getScene());
    });
    // Mock the dialect used by the in-memory persistence layer.
    SqlConstants.init(DbType.MYSQL);
    // Invoke.
    smsCodeService.sendSmsCode(reqDTO);
    // Assert the persisted verification-code row.
    SmsCodeDO smsCodeDO = smsCodeMapper.selectOne(null);
    assertPojoEquals(reqDTO, smsCodeDO);
    assertEquals("9999", smsCodeDO.getCode());
    assertEquals(1, smsCodeDO.getTodayIndex());
    assertFalse(smsCodeDO.getUsed());
    // Assert the downstream SMS send call.
    verify(smsSendService).sendSingleSms(eq(reqDTO.getMobile()), isNull(), isNull(), eq("user-sms-login"), eq(MapUtil.of("code", "9999")));
}
/**
 * Asynchronously resolves the first entity related to {@code originator} according to
 * {@code relationsQuery}. Returns a future completing with the related entity id, with
 * {@code null} when no relation matches, or failing for an unknown search direction.
 */
public static ListenableFuture<EntityId> findEntityAsync(
        TbContext ctx, EntityId originator, RelationsQuery relationsQuery
) {
    var relationService = ctx.getRelationService();
    var query = buildQuery(originator, relationsQuery);
    var relationListFuture = relationService.findByQuery(ctx.getTenantId(), query);
    var direction = relationsQuery.getDirection();
    // Both directions share the same "first match or null" logic; only the side of the
    // relation that is extracted differs, so the former duplicated branches are merged.
    if (direction != EntitySearchDirection.FROM && direction != EntitySearchDirection.TO) {
        return Futures.immediateFailedFuture(new IllegalStateException("Unknown direction"));
    }
    return Futures.transformAsync(relationListFuture, relationList ->
                    CollectionUtils.isNotEmpty(relationList)
                            ? Futures.immediateFuture(direction == EntitySearchDirection.FROM
                                    ? relationList.get(0).getTo()
                                    : relationList.get(0).getFrom())
                            : Futures.immediateFuture(null),
            ctx.getDbCallbackExecutor());
}
@Test
public void givenSeveralEntitiesFound_whenFindEntityAsync_ShouldKeepOneAndDiscardOthers() throws Exception {
    // GIVEN: a relations query from the originator asset expected to match three devices.
    var expectedEntityRelationsQuery = new EntityRelationsQuery();
    var parameters = new RelationsSearchParameters(
            ASSET_ORIGINATOR_ID,
            relationsQuery.getDirection(),
            relationsQuery.getMaxLevel(),
            relationsQuery.isFetchLastLevelOnly()
    );
    expectedEntityRelationsQuery.setParameters(parameters);
    expectedEntityRelationsQuery.setFilters(relationsQuery.getFilters());

    var device1 = new Device(new DeviceId(UUID.randomUUID()));
    device1.setName("Device 1");
    var device2 = new Device(new DeviceId(UUID.randomUUID()));
    // Fixed: the name was previously set on device1 twice, leaving device2 unnamed.
    device2.setName("Device 2");
    var device3 = new Device(new DeviceId(UUID.randomUUID()));
    device3.setName("Device 3");

    var entityRelationDevice1 = new EntityRelation();
    entityRelationDevice1.setFrom(ASSET_ORIGINATOR_ID);
    entityRelationDevice1.setTo(device1.getId());
    entityRelationDevice1.setType(EntityRelation.CONTAINS_TYPE);
    var entityRelationDevice2 = new EntityRelation();
    entityRelationDevice2.setFrom(ASSET_ORIGINATOR_ID);
    entityRelationDevice2.setTo(device2.getId());
    entityRelationDevice2.setType(EntityRelation.CONTAINS_TYPE);
    var entityRelationDevice3 = new EntityRelation();
    entityRelationDevice3.setFrom(ASSET_ORIGINATOR_ID);
    entityRelationDevice3.setTo(device3.getId());
    entityRelationDevice3.setType(EntityRelation.CONTAINS_TYPE);
    var expectedEntityRelationsList = List.of(entityRelationDevice1, entityRelationDevice2, entityRelationDevice3);

    when(ctxMock.getTenantId()).thenReturn(TENANT_ID);
    when(ctxMock.getRelationService()).thenReturn(relationServiceMock);
    when(relationServiceMock.findByQuery(eq(TENANT_ID), eq(expectedEntityRelationsQuery)))
            .thenReturn(Futures.immediateFuture(expectedEntityRelationsList));
    when(ctxMock.getDbCallbackExecutor()).thenReturn(DB_EXECUTOR);

    // WHEN
    var deviceIdFuture = EntitiesRelatedEntityIdAsyncLoader.findEntityAsync(ctxMock, ASSET_ORIGINATOR_ID, relationsQuery);

    // THEN: only the first related entity is kept; the rest are discarded.
    assertNotNull(deviceIdFuture);
    var actualDeviceId = deviceIdFuture.get();
    assertNotNull(actualDeviceId);
    assertEquals(device1.getId(), actualDeviceId);
}
@Override
public Num getValue(int index) {
    // Plain accessor: the indicator mirrors each bar's high price.
    return getBarSeries().getBar(index).getHighPrice();
}
@Test
public void indicatorShouldRetrieveBarHighPrice() {
    // The indicator is a plain view over the series: value(i) == bar(i).highPrice.
    int barCount = 10;
    for (int barIndex = 0; barIndex < barCount; barIndex++) {
        assertEquals(highPriceIndicator.getValue(barIndex), barSeries.getBar(barIndex).getHighPrice());
    }
}
@ApiOperation(value = "Create Or Update Widget Type (saveWidgetType)",
        notes = "Create or update the Widget Type. " + WIDGET_TYPE_DESCRIPTION + " " +
                "When creating the Widget Type, platform generates Widget Type Id as " + UUID_WIKI_LINK +
                "The newly created Widget Type Id will be present in the response. " +
                "Specify existing Widget Type id to update the Widget Type. " +
                "Referencing non-existing Widget Type Id will cause 'Not Found' error." +
                "\n\nWidget Type fqn is unique in the scope of System or Tenant. " +
                "Special Tenant Id '13814000-1dd2-11b2-8080-808080808080' is automatically used if the create request is sent by user with 'SYS_ADMIN' authority." +
                // Fixed typo in the published API docs: "rom" -> "from".
                "Remove 'id', 'tenantId' from the request body example (below) to create new Widget Type entity." +
                SYSTEM_OR_TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('SYS_ADMIN', 'TENANT_ADMIN')")
@RequestMapping(value = "/widgetType", method = RequestMethod.POST)
@ResponseBody
public WidgetTypeDetails saveWidgetType(
        @Parameter(description = "A JSON value representing the Widget Type Details.", required = true)
        @RequestBody WidgetTypeDetails widgetTypeDetails,
        @Parameter(description = UPDATE_EXISTING_BY_FQN_PARAM_DESCRIPTION)
        @RequestParam(required = false) Boolean updateExistingByFqn) throws Exception {
    var currentUser = getCurrentUser();
    // System admins save widget types under the system tenant; everyone else under their own.
    if (Authority.SYS_ADMIN.equals(currentUser.getAuthority())) {
        widgetTypeDetails.setTenantId(TenantId.SYS_TENANT_ID);
    } else {
        widgetTypeDetails.setTenantId(currentUser.getTenantId());
    }
    checkEntity(widgetTypeDetails.getId(), widgetTypeDetails, Resource.WIDGET_TYPE);
    return tbWidgetTypeService.save(widgetTypeDetails, updateExistingByFqn != null && updateExistingByFqn, currentUser);
}
@Test
public void testSaveWidgetType() throws Exception {
    // Create a new widget type and verify all generated fields are populated.
    WidgetTypeDetails widgetType = new WidgetTypeDetails();
    widgetType.setName("Widget Type");
    widgetType.setDescriptor(JacksonUtil.fromString("{ \"someKey\": \"someValue\" }", JsonNode.class));
    WidgetTypeDetails savedWidgetType = doPost("/api/widgetType", widgetType, WidgetTypeDetails.class);
    Assert.assertNotNull(savedWidgetType);
    Assert.assertNotNull(savedWidgetType.getId());
    Assert.assertNotNull(savedWidgetType.getFqn());
    Assert.assertTrue(savedWidgetType.getCreatedTime() > 0);
    Assert.assertEquals(savedTenant.getId(), savedWidgetType.getTenantId());
    Assert.assertEquals(widgetType.getName(), savedWidgetType.getName());
    Assert.assertEquals(widgetType.getDescriptor(), savedWidgetType.getDescriptor());
    // Update by id and confirm the rename is visible on re-fetch.
    savedWidgetType.setName("New Widget Type");
    doPost("/api/widgetType", savedWidgetType, WidgetType.class);
    WidgetTypeDetails foundWidgetType = doGet("/api/widgetType/" + savedWidgetType.getId().getId().toString(), WidgetTypeDetails.class);
    Assert.assertEquals(foundWidgetType.getName(), savedWidgetType.getName());
}
/**
 * Purges album entries and returns the number removed. Semantics by argument combination:
 * both ids → remove the single matching entry; no ids → remove everything;
 * exactly one id → remove all entries matching that id.
 */
public static int purge(AlbumEntryDatabase db, Long albumId, Long photoId) {
    // purge 1 entry
    if (albumId != null && photoId != null) {
        CompoundKey key = new CompoundKey().append("photoId", photoId).append("albumId", albumId);
        final boolean isRemoved = (db.getData().remove(key) != null);
        return isRemoved ? 1 : 0;
    }
    // purge all
    if (albumId == null && photoId == null) {
        final int numPurged = db.getData().size();
        db.getData().clear();
        return numPurged;
    }
    // purge all matching one of key id, photo id
    Iterator<CompoundKey> it = db.getData().keySet().iterator();
    String partName;
    long compareId;
    if (albumId != null) {
        partName = "albumId";
        compareId = albumId;
    } else if (photoId != null) {
        partName = "photoId";
        compareId = photoId;
    } else throw new AssertionError(); // unreachable: the all-null case already returned above
    int numPurged = 0;
    while (it.hasNext()) {
        CompoundKey key = it.next();
        // NOTE(review): compareId is autoboxed to Long here; assumes getPart returns a Long — confirm.
        if (key.getPart(partName).equals(compareId)) {
            it.remove();
            numPurged++;
        }
    }
    return numPurged;
}
@Test
public void testResourcePurge() {
    // Album 2 contains 2 photos; purging by album id alone must delete both.
    Assert.assertEquals(_entryRes.purge(Long.valueOf(2), null), 2);
}
public Trade operate(int index) {
    // Convenience overload: NaN amount/price placeholders are resolved by the full overload.
    return operate(index, NaN, NaN);
}
@Test
public void testEqualsForExitOrders() {
    Position trLeft = newPosition;
    Position trRightEquals = new Position();
    Position trRightNotEquals = new Position();
    // Entry trades: all three open with a BUY; the "not equals" one also exits at a different index.
    assertEquals(TradeType.BUY, trLeft.operate(1).getType());
    assertEquals(TradeType.BUY, trRightEquals.operate(1).getType());
    assertEquals(TradeType.BUY, trRightNotEquals.operate(1).getType());
    assertEquals(TradeType.SELL, trRightNotEquals.operate(3).getType());
    assertNotEquals(trLeft, trRightNotEquals);
    // Exit trades: matching exit indices make the first two positions equal.
    assertEquals(TradeType.SELL, trLeft.operate(2).getType());
    assertEquals(TradeType.SELL, trRightEquals.operate(2).getType());
    assertEquals(trLeft, trRightEquals);
    assertNotEquals(trLeft, trRightNotEquals);
}
/**
 * Returns a copy of {@code input} with every non-overlapping occurrence of
 * {@code pattern} replaced by {@code sub}, scanning left to right.
 *
 * @param input   the string to transform
 * @param pattern the substring to search for
 * @param sub     the replacement text
 * @return the transformed string; {@code input} unchanged when {@code pattern} is empty
 */
public static String substitute(final String input, final String pattern, final String sub) {
    // An empty pattern matches at every position and previously caused an infinite
    // loop; treat it as "nothing to replace" instead.
    if (pattern.isEmpty()) {
        return input;
    }
    final StringBuilder ret = new StringBuilder(input.length());
    int start = 0;
    int index;
    while ((index = input.indexOf(pattern, start)) != -1) {
        // append(CharSequence, int, int) avoids the intermediate substring allocations.
        ret.append(input, start, index);
        ret.append(sub);
        start = index + pattern.length();
    }
    ret.append(input, start, input.length());
    return ret.toString();
}
@Test
public void testSub4() throws Exception {
    // Runs of slashes collapse one pair at a time in a single left-to-right scan.
    assertEquals("/a//b//c", StringUtilities.substitute("//a///b////c", "//", "/"));
}
// Fixed typo in the published API summary: "recieve" -> "receive".
@Operation(summary = "Do basic polymorphic info validation and receive the basic info for the rest of the process.")
@PostMapping(value = { Constants.URL_OLD_RDW_POLYMORPHICINFO, Constants.URL_RDW_POLYMORPHICINFO }, consumes = "application/json", produces = "application/json")
public PolyInfoResponse validatePolymorphInfoRestService(@Valid @RequestBody PolyInfoRequest request,
        @RequestHeader(value = "X-FORWARDED-FOR") String clientIp) {
    // Thin controller: delegates validation to the RDW service, forwarding the caller's IP.
    return rdwService.validatePolymorphInfoRestService(request, clientIp);
}
@Test
public void prepareEacRequestRestServiceTest() {
    // The controller must return the service's response unchanged.
    PolyInfoResponse expectedResponse = new PolyInfoResponse();
    when(rdwServiceMock.validatePolymorphInfoRestService(any(PolyInfoRequest.class), anyString())).thenReturn(expectedResponse);
    PolyInfoResponse actualResponse = rdwController.validatePolymorphInfoRestService(new PolyInfoRequest(), "");
    assertEquals(expectedResponse, actualResponse);
}
public static ParamType getVarArgsSchemaFromType(final Type type) {
    // Same resolution as getSchemaFromType, but with the varargs-specific Java->arg-type mapping.
    return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
@Test
public void shouldGetDecimalSchemaForBigDecimalClassVariadic() {
    // BigDecimal varargs must resolve to the DECIMAL param type.
    assertThat(
        UdfUtil.getVarArgsSchemaFromType(BigDecimal.class),
        is(ParamTypes.DECIMAL)
    );
}
public boolean matchIncludeTable(@NotNull String tableName) {
    // True when the table name matches any configured include pattern (exact or regex).
    return matchTable(tableName, this.getInclude());
}
@Test
void matchIncludeTableTest() {
    // Includes mix literal names ("system", "user_1") with a regex pattern ("test[a|b]").
    StrategyConfig.Builder strategyConfigBuilder = GeneratorBuilder.strategyConfigBuilder();
    strategyConfigBuilder.addInclude("system", "user_1", "test[a|b]");
    StrategyConfig strategyConfig = strategyConfigBuilder.build();
    Assertions.assertTrue(strategyConfig.matchIncludeTable("system"));
    Assertions.assertFalse(strategyConfig.matchIncludeTable("test_exclude"));
    // Regex matches only the character-class alternatives.
    Assertions.assertTrue(strategyConfig.matchIncludeTable("testa"));
    Assertions.assertTrue(strategyConfig.matchIncludeTable("testb"));
    Assertions.assertFalse(strategyConfig.matchIncludeTable("testc"));
}
public static Ip4Prefix valueOf(int address, int prefixLength) {
    // Static factory: wraps the raw 32-bit address; prefix length validation happens downstream.
    return new Ip4Prefix(Ip4Address.valueOf(address), prefixLength);
}
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfIntegerTooLongPrefixLengthIPv4() {
    // 33 exceeds the IPv4 maximum prefix length of 32 and must be rejected.
    // (The previously declared unused local has been removed.)
    Ip4Prefix.valueOf(0x01020304, 33);
}
/**
 * Disables all availability slots of the resource within the time slot on behalf of the
 * requester. Returns false when no slots exist, the disable is rejected, or the optimistic
 * version check fails; publishes a ResourceTakenOver event on success.
 */
@Transactional
public boolean disable(ResourceId resourceId, TimeSlot timeSlot, Owner requester) {
    ResourceGroupedAvailability toDisable = findGrouped(resourceId, timeSlot);
    if (toDisable.hasNoSlots()) {
        return false;
    }
    // Capture current owners before the disable mutates ownership, for the event payload.
    Set<Owner> previousOwners = toDisable.owners();
    boolean result = toDisable.disable(requester);
    if (result) {
        // saveCheckingVersion guards against concurrent modification (optimistic locking).
        result = availabilityRepository.saveCheckingVersion(toDisable);
        if (result) {
            eventsPublisher.publish(new ResourceTakenOver(resourceId, previousOwners, timeSlot, Instant.now(clock)));
        }
    }
    return result;
}
@Test
void cantDisableWhenNoSlotsCreated() {
    // given: a resource for which no availability slots were ever created
    ResourceId resourceId = ResourceId.newOne();
    TimeSlot oneDay = TimeSlot.createDailyTimeSlotAtUTC(2021, 1, 1);
    Owner owner = Owner.newOne();
    // when
    boolean result = availabilityFacade.disable(resourceId, oneDay, owner);
    // then: disabling nothing must report failure
    assertFalse(result);
}
@Override
public void incrCounter(String key, long amount) {
    // Fails fast with IllegalStateException when state is not enabled for this function.
    ensureStateEnabled();
    defaultStateStore.incrCounter(key, amount);
}
@Test(expectedExceptions = IllegalStateException.class)
public void testIncrCounterStateDisabled() {
    // With state disabled, incrementing a counter must throw IllegalStateException.
    context.incrCounter("test-key", 10);
}
@Override
public void createHost(K8sHost host) {
    checkNotNull(host, ERR_NULL_HOST);
    // Store rejects duplicates; the audit log line records the host IP and operation.
    hostStore.createHost(host);
    log.info(String.format(MSG_HOST, host.hostIp().toString(), MSG_CREATED));
}
@Test(expected = IllegalArgumentException.class)
public void testCreateDuplicateHost() {
    // Registering the same host twice must be rejected by the store.
    target.createHost(HOST_1);
    target.createHost(HOST_1);
}
public void start(BrokerService service) {
    // Start every registered handler against the same broker service instance.
    handlers.values().forEach(handler -> handler.start(service));
}
@Test
public void testStart() {
    // Each registered handler must be started exactly once with the same service instance.
    BrokerService service = mock(BrokerService.class);
    handlers.start(service);
    verify(handler1, times(1)).start(same(service));
    verify(handler2, times(1)).start(same(service));
}
@PublicAPI(usage = ACCESS)
public static Transformer matching(String packageIdentifier) {
    // Builds a slice transformer from the package identifier, deriving its description
    // from the identifier's own rendering (e.g. "slices matching '(*).(*)..'").
    PackageMatchingSliceIdentifier sliceIdentifier = new PackageMatchingSliceIdentifier(packageIdentifier);
    String description = "slices matching " + sliceIdentifier.getDescription();
    return new Transformer(sliceIdentifier, description);
}
@Test
public void name_parts_are_resolved_correctly() {
    // java.lang.Object slices into name parts "java" (group 1) and "lang" (group 2).
    JavaClasses classes = importClassesWithContext(Object.class);
    DescribedIterable<Slice> slices = Slices.matching("(*).(*)..").transform(classes);
    assertThat(getOnlyElement(slices).getNamePart(1)).isEqualTo("java");
    assertThat(getOnlyElement(slices).getNamePart(2)).isEqualTo("lang");
}
/**
 * Converts a byte array to its upper-case hexadecimal representation.
 * Each byte becomes exactly two hex characters (zero-padded).
 *
 * @param bytes the bytes to encode; {@code null} yields "" (legacy behavior preserved)
 * @return the upper-case hex string
 */
public static String byte2hex(byte[] bytes) {
    // Hoist the null check out of the loop (the original re-tested it every iteration).
    if (bytes == null) {
        return "";
    }
    final char[] hexDigits = "0123456789ABCDEF".toCharArray();
    // Presize: exactly two characters per byte; no trailing toUpperCase pass needed.
    StringBuilder hs = new StringBuilder(bytes.length * 2);
    for (byte b : bytes) {
        int v = b & 0xFF;
        hs.append(hexDigits[v >>> 4]).append(hexDigits[v & 0x0F]);
    }
    return hs.toString();
}
@Test
public void byte2hex() {
    // Odd-length hex input must be rejected by hex2byte.
    String s = "5";
    try {
        CodecUtils.hex2byte(s);
        Assert.fail();
    } catch (Exception e) {
        Assert.assertTrue(e instanceof IllegalArgumentException);
    }
    // Round trip: hex2byte followed by byte2hex reproduces the input (case-insensitively).
    s = "567400075b6f626a65";
    byte[] bs = CodecUtils.hex2byte(s);
    Assert.assertTrue(s.equalsIgnoreCase(CodecUtils.byte2hex(bs)));
}
/**
 * Compiles the given sources with the JDK's built-in javac, writing generated classes
 * to the store and converting any diagnostics into compilation problems.
 */
@Override
public CompilationResult compile( String[] pResourcePaths, ResourceReader pReader, ResourceStore pStore, ClassLoader pClassLoader, JavaCompilerSettings pSettings) {
    DiagnosticCollector<JavaFileObject> diagnostics = new DiagnosticCollector<>();
    JavaCompiler compiler = getJavaCompiler();
    try (StandardJavaFileManager jFileManager = compiler.getStandardFileManager(diagnostics, null, Charset.forName(pSettings.getSourceEncoding()))) {
        try {
            jFileManager.setLocation( StandardLocation.CLASS_PATH, pSettings.getClasspathLocations() );
            jFileManager.setLocation( StandardLocation.CLASS_OUTPUT, Collections.singletonList(new File("target/classes")) );
        } catch (IOException e) {
            // ignore if cannot set the classpath
        }
        try (MemoryFileManager fileManager = new MemoryFileManager( jFileManager, pClassLoader )) {
            // Wrap each requested source path in an in-memory compilation unit backed by the reader.
            final List<JavaFileObject> units = new ArrayList<>();
            for (final String sourcePath : pResourcePaths) {
                units.add( new CompilationUnit( PortablePath.of(sourcePath), pReader ) );
            }
            Iterable<String> options = new NativeJavaCompilerSettings( pSettings ).toOptionsList();
            if ( compiler.getTask( null, fileManager, diagnostics, options, null, units ).call() ) {
                // Success: persist every generated class file into the resource store.
                for (CompilationOutput compilationOutput : fileManager.getOutputs()) {
                    pStore.write( compilationOutput.getBinaryName().replace( '.', '/' ) + ".class", compilationOutput.toByteArray() );
                }
                return new CompilationResult( new CompilationProblem[0] );
            }
        }
        // Failure: convert the collected javac diagnostics into compilation problems.
        List<Diagnostic<? extends JavaFileObject>> problems = diagnostics.getDiagnostics();
        CompilationProblem[] result = new CompilationProblem[problems.size()];
        for (int i = 0; i < problems.size(); i++) {
            result[i] = new NativeCompilationProblem( ( Diagnostic<JavaFileObject> ) problems.get( i ) );
        }
        return new CompilationResult( result );
    } catch (IOException e) {
        throw new RuntimeException( e );
    }
}
@Test(expected = KieMemoryCompilerException.class)
public void simulateJre() {
    // A JRE (no javac available) must surface as KieMemoryCompilerException, not an NPE.
    NativeJavaCompiler compiler = new NativeJavaCompiler(new NullJavaCompilerFinder());
    compiler.compile(null, null, null, null, null);
}
@Override
public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, boolean endStream, ChannelPromise promise) {
    // No-priority variant: delegates with hasPriority=false, stream dependency 0,
    // weight 0 and exclusive=false.
    return writeHeaders0(ctx, streamId, headers, false, 0, (short) 0, false, padding, endStream, promise);
}
@Test
public void headersWithPriority() {
    // The encoder must pass priority parameters (dependency, weight, exclusive) straight through.
    writeAllFlowControlledFrames();
    final int streamId = 6;
    ChannelPromise promise = newPromise();
    encoder.writeHeaders(ctx, streamId, EmptyHttp2Headers.INSTANCE, 10, DEFAULT_PRIORITY_WEIGHT, true, 1, false, promise);
    verify(writer).writeHeaders(eq(ctx), eq(streamId), eq(EmptyHttp2Headers.INSTANCE), eq(10), eq(DEFAULT_PRIORITY_WEIGHT), eq(true), eq(1), eq(false), eq(promise));
}
/**
 * Returns the icon for the given application at the requested size: cached copy if present,
 * otherwise resolved and cached from the app bundle, else a "not found" placeholder.
 */
@Override
public NSImage applicationIcon(final Application app, final Integer size) {
    NSImage icon = this.load(app.getIdentifier(), size);
    if(null == icon) {
        final String path = workspace.absolutePathForAppBundleWithIdentifier(app.getIdentifier());
        // Null if the bundle cannot be found
        if(StringUtils.isNotBlank(path)) {
            return this.cache(app.getIdentifier(), this.convert(app.getIdentifier(), workspace.iconForFile(path), size), size);
        }
    }
    // Still unresolved (no cache hit and no bundle path): fall back to the placeholder icon.
    if(null == icon) {
        return this.iconNamed("notfound.tiff", size);
    }
    return icon;
}
@Test
public void testIconForApplication() {
    // TextEdit ships with macOS, so resolving its bundle icon must never return null.
    final NSImageIconCache cache = new NSImageIconCache();
    assertNotNull(cache.applicationIcon(new Application("com.apple.TextEdit"), 32));
}
@ApiOperation(value = "Get a deployment", tags = { "Deployment" })
@ApiResponses(value = { @ApiResponse(code = 200, message = "Indicates the deployment was found and returned."), @ApiResponse(code = 404, message = "Indicates the requested deployment was not found.") })
@GetMapping(value = "/repository/deployments/{deploymentId}", produces = "application/json")
public DeploymentResponse getDeployment(@ApiParam(name = "deploymentId", value ="The id of the deployment to get.") @PathVariable String deploymentId) {
    Deployment deployment = repositoryService.createDeploymentQuery().deploymentId(deploymentId).singleResult();
    if (deployment == null) {
        throw new FlowableObjectNotFoundException("Could not find a deployment with id '" + deploymentId + "'.", Deployment.class);
    }
    // Give an installed interceptor a chance to veto access before serializing the response.
    if (restApiInterceptor != null) {
        restApiInterceptor.accessDeploymentById(deployment);
    }
    return restResponseFactory.createDeploymentResponse(deployment);
}
@Test
@org.flowable.engine.test.Deployment(resources = { "org/flowable/rest/service/api/repository/oneTaskProcess.bpmn20.xml" })
public void testGetDeployment() throws Exception {
    // GET the one deployed resource by id and check the serialized representation.
    Deployment existingDeployment = repositoryService.createDeploymentQuery().singleResult();
    HttpGet httpGet = new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_DEPLOYMENT, existingDeployment.getId()));
    CloseableHttpResponse response = executeRequest(httpGet, HttpStatus.SC_OK);
    JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
    closeResponse(response);
    assertThatJson(responseNode)
        .when(Option.IGNORING_EXTRA_FIELDS)
        .isEqualTo("{"
            + "id: '" + existingDeployment.getId() + "',"
            + "name: '" + existingDeployment.getName() + "',"
            + "url: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_DEPLOYMENT, existingDeployment.getId()) + "',"
            + "category: " + existingDeployment.getCategory() + ","
            + "deploymentTime: '${json-unit.any-string}',"
            + "tenantId: ''"
            + "}");
}
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    // Signatures are expensive to parse reflectively, so cache one per DoFn class.
    return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
@Test
public void testFinishBundleWithAllParameters() throws Exception {
    // A @FinishBundle method may declare context, finalizer and options; the parsed
    // signature must expose all three extra parameters in declaration order.
    DoFnSignature sig =
        DoFnSignatures.getSignature(
            new DoFn<String, String>() {
                @ProcessElement
                public void processElement() {}

                @FinishBundle
                public void finishBundle(
                    FinishBundleContext context,
                    BundleFinalizer bundleFinalizer,
                    PipelineOptions pipelineOptions) {}
            }.getClass());
    assertThat(sig.finishBundle().extraParameters().size(), equalTo(3));
    assertThat(
        sig.finishBundle().extraParameters().get(0),
        instanceOf(FinishBundleContextParameter.class));
    assertThat(
        sig.finishBundle().extraParameters().get(1),
        instanceOf(BundleFinalizerParameter.class));
    assertThat(
        sig.finishBundle().extraParameters().get(2),
        instanceOf(PipelineOptionsParameter.class));
}
private static File targetBetaFile(String dataId, String group, String tenant) { // fix https://github.com/alibaba/nacos/issues/10067 dataId = PathEncoderManager.getInstance().encode(dataId); group = PathEncoderManager.getInstance().encode(group); tenant = PathEncoderManager.getInstance().encode(tenant); File file = null; if (StringUtils.isBlank(tenant)) { file = new File(EnvUtil.getNacosHome(), BETA_DIR); } else { file = new File(EnvUtil.getNacosHome(), TENANT_BETA_DIR); file = new File(file, tenant); } file = new File(file, group); file = new File(file, dataId); return file; }
@Test
void testTargetBetaFile() throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
    // Reflectively invoke the private static targetBetaFile with characters illegal on Windows.
    Method method = ConfigRawDiskService.class.getDeclaredMethod("targetBetaFile", String.class, String.class, String.class);
    method.setAccessible(true);
    File result = (File) method.invoke(null, "aaaa?dsaknkf", "aaaa*dsaknkf", "aaaa:dsaknkf");
    // Decompose the resulting path.
    Path path = Paths.get(result.getPath());
    Path parent = path.getParent();
    Path grandParent = parent.getParent();
    // Grab the last three path segments (tenant/group/dataId).
    String lastSegment = path.getFileName().toString();
    String secondLastSegment = parent.getFileName().toString();
    String thirdLastSegment = grandParent.getFileName().toString();
    // On Windows the illegal characters ? * : are percent-encoded; elsewhere they pass through.
    assertEquals(isWindows() ? "aaaa%A3%dsaknkf" : thirdLastSegment, thirdLastSegment);
    assertEquals(isWindows() ? "aaaa%A4%dsaknkf" : secondLastSegment, secondLastSegment);
    assertEquals(isWindows() ? "aaaa%A5%dsaknkf" : lastSegment, lastSegment);
}
/**
 * Wraps a broker response in an HTTP 200 Response, surfacing the first query exception's
 * error code (or -1 for success) via the X-Pinot-Error-Code header.
 */
@VisibleForTesting
static Response getPinotQueryResponse(BrokerResponse brokerResponse) throws Exception {
    int queryErrorCodeHeaderValue = -1; // default value of the header.
    List<QueryProcessingException> exceptions = brokerResponse.getExceptions();
    if (!exceptions.isEmpty()) {
        // set the header value as first exception error code value.
        queryErrorCodeHeaderValue = exceptions.get(0).getErrorCode();
    }
    // returning the Response with OK status and header value.
    return Response.ok()
        .header(PINOT_QUERY_ERROR_CODE_HEADER, queryErrorCodeHeaderValue)
        .entity((StreamingOutput) brokerResponse::toOutputStream).type(MediaType.APPLICATION_JSON)
        .build();
}
@Test
public void testGetPinotQueryResponse() throws Exception {
    // For a successful query result the 'X-Pinot-Error-Code' header should be -1.
    BrokerResponse emptyResultBrokerResponse = BrokerResponseNative.EMPTY_RESULT;
    Response successfulResponse = PinotClientRequest.getPinotQueryResponse(emptyResultBrokerResponse);
    Assert.assertEquals(successfulResponse.getStatus(), Response.Status.OK.getStatusCode());
    Assert.assertTrue(successfulResponse.getHeaders().containsKey(PINOT_QUERY_ERROR_CODE_HEADER));
    Assert.assertEquals(successfulResponse.getHeaders().get(PINOT_QUERY_ERROR_CODE_HEADER).size(), 1);
    Assert.assertEquals(successfulResponse.getHeaders().get(PINOT_QUERY_ERROR_CODE_HEADER).get(0), -1);
    // For a failed query result the header should carry the exception's error code
    // (note the HTTP status itself stays 200 OK).
    BrokerResponse tableDoesNotExistBrokerResponse = BrokerResponseNative.TABLE_DOES_NOT_EXIST;
    Response tableDoesNotExistResponse = PinotClientRequest.getPinotQueryResponse(tableDoesNotExistBrokerResponse);
    Assert.assertEquals(tableDoesNotExistResponse.getStatus(), Response.Status.OK.getStatusCode());
    Assert.assertTrue(tableDoesNotExistResponse.getHeaders().containsKey(PINOT_QUERY_ERROR_CODE_HEADER));
    Assert.assertEquals(tableDoesNotExistResponse.getHeaders().get(PINOT_QUERY_ERROR_CODE_HEADER).size(), 1);
    Assert.assertEquals(tableDoesNotExistResponse.getHeaders().get(PINOT_QUERY_ERROR_CODE_HEADER).get(0), TABLE_DOES_NOT_EXIST_ERROR_CODE);
}
/**
 * Builds the Kafka Streams topology node for a {@link StreamSelect} step.
 *
 * <p>When the step selects only a subset of the source key columns, the key
 * must be rebuilt as well as the value, so a full {@code transform} is used;
 * otherwise a cheaper {@code transformValues} suffices.
 */
public static <K> KStreamHolder<K> build(
    final KStreamHolder<K> stream,
    final StreamSelect<K> step,
    final RuntimeBuildContext buildContext
) {
  final QueryContext queryContext = step.getProperties().getQueryContext();

  final LogicalSchema sourceSchema = stream.getSchema();
  final Optional<ImmutableList<ColumnName>> selectedKeys = step.getSelectedKeys();

  // Resolve the select expressions and key columns against the source schema.
  final Selection<K> selection = Selection.of(
      sourceSchema,
      step.getKeyColumnNames(),
      selectedKeys,
      step.getSelectExpressions(),
      buildContext.getKsqlConfig(),
      buildContext.getFunctionRegistry()
  );

  // Map each selected key name to its index within the source key schema.
  final ImmutableList.Builder<Integer> keyIndexBuilder = ImmutableList.builder();
  if (selectedKeys.isPresent()) {
    final ImmutableList<ColumnName> keyNames = sourceSchema.key().stream()
        .map(Column::name)
        .collect(ImmutableList.toImmutableList());

    for (final ColumnName keyName : selectedKeys.get()) {
      keyIndexBuilder.add(keyNames.indexOf(keyName));
    }
  }

  final ImmutableList<Integer> keyIndices = keyIndexBuilder.build();

  final SelectValueMapper<K> selectMapper = selection.getMapper();
  final ProcessingLogger logger = buildContext.getProcessingLogger(queryContext);
  final Named selectName = Named.as(StreamsUtil.buildOpName(queryContext));

  if (selectedKeys.isPresent() && !selectedKeys.get().containsAll(
      sourceSchema.key().stream().map(Column::name).collect(ImmutableList.toImmutableList())
  )) {
    // Only a subset of key columns survives the projection: rebuild the key.
    return stream.withStream(
        stream.getStream().transform(
            () -> new KsTransformer<>(
                (readOnlyKey, value, ctx) -> {
                  if (keyIndices.isEmpty()) {
                    // No key columns selected at all — emit a null key.
                    return null;
                  }

                  if (readOnlyKey instanceof GenericKey) {
                    final GenericKey keys = (GenericKey) readOnlyKey;
                    final Builder resultKeys = GenericKey.builder(keyIndices.size());

                    for (final int keyIndex : keyIndices) {
                      resultKeys.append(keys.get(keyIndex));
                    }

                    return (K) resultKeys.build();
                  } else {
                    // NOTE(review): non-GenericKey keys are rejected here —
                    // presumably windowed keys; confirm against callers.
                    throw new UnsupportedOperationException();
                  }
                },
                selectMapper.getTransformer(logger)
            ),
            selectName
        ),
        selection.getSchema()
    );
  } else {
    // Key is unchanged — transform values only.
    return stream.withStream(
        stream.getStream().transformValues(
            () -> new KsValueTransformer<>(selectMapper.getTransformer(logger)),
            selectName
        ),
        selection.getSchema()
    );
  }
}
// Verifies the select node is added to the topology under the expected name.
@Test
public void shouldBuildKsNodeWithRightName() {
  // When:
  step.build(planBuilder, planInfo);

  // Then:
  verify(sourceKStream).transformValues(
      any(ValueTransformerWithKeySupplier.class),
      nameCaptor.capture()
  );
  assertThat(NamedTestAccessor.getName(nameCaptor.getValue()), is(SELECT_STEP_NAME));
}
/**
 * Derives raw key bytes from the given password and salt using the configured
 * PBE algorithm, iteration count and key length.
 *
 * @param password the password characters to derive from
 * @param salt     the salt bytes
 * @return the encoded bytes of the derived secret key
 */
public byte[] encrypt(char[] password, byte[] salt) {
    final PBEKeySpec keySpec = new PBEKeySpec(password, salt, iterationCount, keyLength);
    return KeyUtil.generateKey(algorithm, keySpec).getEncoded();
}
// Verifies PBKDF2 derivation produces a 128-character (hex) digest string.
@Test
public void encryptTest(){
    final String s = SecureUtil.pbkdf2("123456".toCharArray(), RandomUtil.randomBytes(16));
    assertEquals(128, s.length());
}
/**
 * Converts a metrics2 record/metric name pair into a Prometheus-style name:
 * camel-case boundaries become underscores, the result is lower-cased, and
 * remaining delimiter characters are normalised to '_'.
 */
public String prometheusName(String recordName, String metricName) {
    final String camelCase =
        StringUtils.capitalize(recordName) + StringUtils.capitalize(metricName);
    final String snakeCase =
        String.join("_", SPLIT_PATTERN.split(camelCase)).toLowerCase();
    return DELIMITERS.matcher(snakeCase).replaceAll("_");
}
// Verifies package-qualified record names are fully snake-cased, with the
// dots converted to underscores.
@Test
public void testNamingPeriods() {
  PrometheusMetricsSink sink = new PrometheusMetricsSink();
  String recordName = "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl";
  String metricName = "DfsUsed";
  Assert.assertEquals(
      "org_apache_hadoop_hdfs_server_datanode_fsdataset_impl_fs_dataset_impl_dfs_used",
      sink.prometheusName(recordName, metricName));
}
/**
 * Returns the JAAS {@link Subject} associated with the given SASL mechanism,
 * or {@code null} if no subject has been created for that mechanism.
 */
protected Subject subject(String saslMechanism) {
    return subjects.get(saslMechanism);
}
// Verifies that with native GSSAPI enabled, the Kerberos credential is
// created only once and the same Subject is shared across channel builders.
@Test
public void testNativeGssapiCredentials() throws Exception {
    System.setProperty(SaslChannelBuilder.GSS_NATIVE_PROP, "true");
    TestJaasConfig jaasConfig = new TestJaasConfig();
    jaasConfig.addEntry("jaasContext", TestGssapiLoginModule.class.getName(), new HashMap<>());
    JaasContext jaasContext = new JaasContext("jaasContext", JaasContext.Type.SERVER, jaasConfig, null);
    Map<String, JaasContext> jaasContexts = Collections.singletonMap("GSSAPI", jaasContext);
    GSSManager gssManager = Mockito.mock(GSSManager.class);
    GSSName gssName = Mockito.mock(GSSName.class);
    Mockito.when(gssManager.createName(Mockito.anyString(), Mockito.any()))
        .thenAnswer(unused -> gssName);
    // Kerberos V5 mechanism OID.
    Oid oid = new Oid("1.2.840.113554.1.2.2");
    Mockito.when(gssManager.createCredential(gssName, GSSContext.INDEFINITE_LIFETIME, oid, GSSCredential.ACCEPT_ONLY))
        .thenAnswer(unused -> Mockito.mock(GSSCredential.class));

    SaslChannelBuilder channelBuilder1 = createGssapiChannelBuilder(jaasContexts, gssManager);
    assertEquals(1, channelBuilder1.subject("GSSAPI").getPrincipals().size());
    assertEquals(1, channelBuilder1.subject("GSSAPI").getPrivateCredentials().size());

    SaslChannelBuilder channelBuilder2 = createGssapiChannelBuilder(jaasContexts, gssManager);
    assertEquals(1, channelBuilder2.subject("GSSAPI").getPrincipals().size());
    assertEquals(1, channelBuilder2.subject("GSSAPI").getPrivateCredentials().size());
    // Both builders must share the same cached Subject...
    assertSame(channelBuilder1.subject("GSSAPI"), channelBuilder2.subject("GSSAPI"));
    // ...so the credential is only created once.
    Mockito.verify(gssManager, Mockito.times(1))
        .createCredential(gssName, GSSContext.INDEFINITE_LIFETIME, oid, GSSCredential.ACCEPT_ONLY);
}
/**
 * Renders the given event through the converter chain, or returns the empty
 * string when this layout has not been started.
 */
public String doLayout(ILoggingEvent event) {
    return isStarted() ? writeLoopOnConverters(event) : CoreConstants.EMPTY_STRING;
}
// Verifies %replace can substitute a literal newline with newline+tab, and
// that variable substitution leaves the pattern untouched.
@Test
public void replaceNewline() {
    String pattern = "%replace(A\nB){'\n', '\n\t'}";
    String substPattern = OptionHelper.substVars(pattern, null, lc);
    assertEquals(pattern, substPattern);
    pl.setPattern(substPattern);
    pl.start();
    StatusPrinter.print(lc);
    String val = pl.doLayout(makeLoggingEvent("", null));
    assertEquals("A\n\tB", val);
}
/**
 * Performs the social-platform OAuth login for the given authorization code
 * and state, returning the authenticated social user.
 *
 * @throws ServiceException with SOCIAL_USER_AUTH_FAILURE if the platform
 *         rejects the login
 */
@Override
public AuthUser getAuthUser(Integer socialType, Integer userType, String code, String state) {
    // Build the request
    AuthRequest authRequest = buildAuthRequest(socialType, userType);
    AuthCallback authCallback = AuthCallback.builder().code(code).state(state).build();
    // Execute the request
    AuthResponse<?> authResponse = authRequest.login(authCallback);
    log.info("[getAuthUser][请求社交平台 type({}) request({}) response({})]", socialType,
            toJsonString(authCallback), toJsonString(authResponse));
    if (!authResponse.ok()) {
        throw exception(SOCIAL_USER_AUTH_FAILURE, authResponse.getMsg());
    }
    return (AuthUser) authResponse.getData();
}
// Verifies a successful social login returns the AuthUser from the platform.
@Test
public void testAuthSocialUser_success() {
    // Prepare parameters
    Integer socialType = SocialTypeEnum.WECHAT_MP.getType();
    Integer userType = randomPojo(UserTypeEnum.class).getValue();
    String code = randomString();
    String state = randomString();
    // Mock method (AuthRequest)
    AuthRequest authRequest = mock(AuthRequest.class);
    when(authRequestFactory.get(eq("WECHAT_MP"))).thenReturn(authRequest);
    // Mock method (AuthResponse)
    AuthUser authUser = randomPojo(AuthUser.class);
    AuthResponse<?> authResponse = new AuthResponse<>(2000, null, authUser);
    when(authRequest.login(argThat(authCallback -> {
        assertEquals(code, authCallback.getCode());
        assertEquals(state, authCallback.getState());
        return true;
    }))).thenReturn(authResponse);

    // Call
    AuthUser result = socialClientService.getAuthUser(socialType, userType, code, state);
    // Assert
    assertSame(authUser, result);
}
@Override public OrganizedImports organizeImports(List<Import> imports) { OrganizedImports organized = new OrganizedImports(); // Group into static and non-static. Map<Boolean, List<Import>> partionedByStatic = imports.stream().collect(Collectors.partitioningBy(Import::isStatic)); for (Boolean key : order.groupOrder()) { organizePartition(organized, partionedByStatic.get(key)); } return organized; }
// Verifies the Android ordering places all static imports after the
// non-static groups, each group separated by a blank line.
@Test
public void staticLastOrdering() {
  AndroidImportOrganizer organizer = new AndroidImportOrganizer(StaticOrder.STATIC_LAST);
  ImportOrganizer.OrganizedImports organized = organizer.organizeImports(IMPORTS);
  assertThat(organized.asImportBlock())
      .isEqualTo(
          "import android.foo;\n"
              + "\n"
              + "import com.android.blah;\n"
              + "\n"
              + "import net.wilma;\n"
              + "\n"
              + "import unknown.barney;\n"
              + "import unknown.fred;\n"
              + "\n"
              + "import java.ping;\n"
              + "\n"
              + "import javax.pong;\n"
              + "\n"
              + "import static android.foo.bar;\n"
              + "\n"
              + "import static com.android.blah.blah;\n"
              + "\n"
              + "import static net.wilma.flintstone;\n"
              + "\n"
              + "import static unknown.fred.flintstone;\n"
              + "\n"
              + "import static java.ping.pong;\n"
              + "\n"
              + "import static javax.pong.ping;\n");
}
/**
 * Runs the given container query against the database and streams back the
 * matching container properties.
 *
 * @param query               the container query; must be non-null and non-empty
 * @param queryRequestOptions optional Cosmos query options (may be null)
 * @return a flux of the matching containers' properties
 */
public Flux<CosmosContainerProperties> queryContainers(
        final String query, final CosmosQueryRequestOptions queryRequestOptions) {
    // Fail fast on a null/empty query before touching the database.
    CosmosDbUtils.validateIfParameterIsNotEmpty(query, PARAM_QUERY);

    return database
            .flatMapMany(database -> CosmosDbUtils
                    .convertCosmosPagedFluxToFluxResults(database.queryContainers(query, queryRequestOptions)));
}
// Verifies that null and empty queries are rejected with
// IllegalArgumentException before any database call is made.
@Test
void testQueryContainers() {
    final CosmosAsyncDatabase database = mock(CosmosAsyncDatabase.class);
    final CosmosDbDatabaseOperations databaseOperations = new CosmosDbDatabaseOperations(Mono.just(database));

    CosmosDbTestUtils.assertIllegalArgumentException(() -> databaseOperations.queryContainers(null, null));
    CosmosDbTestUtils.assertIllegalArgumentException(() -> databaseOperations.queryContainers("", null));
}
/**
 * Sets the current farming contract produce, persists it, and refreshes the
 * derived contract state.
 *
 * @param contract the new contract produce, or {@code null} to clear it
 */
public void setContract(@Nullable Produce contract) {
    this.contract = contract;
    setStoredContract(contract);
    handleContractState();
}
// Verifies that a dead crop matching the contract keeps the contract
// IN_PROGRESS and surfaces the DEAD crop state.
@Test
public void guamContractGuamDead() {
    // Get the bush patch
    final FarmingPatch patch = farmingGuildPatches.get(Varbits.FARMING_4775);
    assertNotNull(patch);

    when(farmingTracker.predictPatch(patch))
        .thenReturn(new PatchPrediction(Produce.GUAM, CropState.DEAD, 0, 2, 3));

    farmingContractManager.setContract(Produce.GUAM);

    assertEquals(SummaryState.IN_PROGRESS, farmingContractManager.getSummary());
    assertEquals(CropState.DEAD, farmingContractManager.getContractCropState());
}
/**
 * Deep-clones the given object, choosing the cheapest safe strategy:
 * <ul>
 *   <li>Strings are immutable and returned as-is.</li>
 *   <li>Collections/Maps whose first non-null element/entry is not
 *       {@link Serializable} are round-tripped through JSON with an explicit
 *       parametric type so element types survive.</li>
 *   <li>{@link JsonNode}s use their native {@code deepCopy()}.</li>
 *   <li>Otherwise, Serializable objects use Java serialization, falling back
 *       to JSON if the object graph is not fully serializable.</li>
 * </ul>
 * NOTE(review): only the first non-null element/entry is inspected — assumes
 * homogeneous collections; confirm with callers.
 */
@Override
public <T> T clone(T object) {
    if (object instanceof String) {
        return object;
    } else if (object instanceof Collection) {
        Object firstElement = findFirstNonNullElement((Collection) object);
        if (firstElement != null && !(firstElement instanceof Serializable)) {
            // Preserve the element type across the JSON round-trip.
            JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
            return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
        }
    } else if (object instanceof Map) {
        Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
        if (firstEntry != null) {
            Object key = firstEntry.getKey();
            Object value = firstEntry.getValue();
            if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
                // Preserve the key and value types across the JSON round-trip.
                JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
                return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
            }
        }
    } else if (object instanceof JsonNode) {
        return (T) ((JsonNode) object).deepCopy();
    }

    if (object instanceof Serializable) {
        try {
            return (T) SerializationHelper.clone((Serializable) object);
        } catch (SerializationException e) {
            //it is possible that object itself implements java.io.Serializable, but underlying structure does not
            //in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
        }
    }

    return jsonClone(object);
}
// Verifies the JSON fallback path clones a map whose keys are not
// Serializable, producing an equal but distinct instance.
@Test
public void should_clone_map_of_non_serializable_key() {
    Map<NonSerializableObject, String> original = new HashMap<>();
    original.put(new NonSerializableObject("key"), "value");

    Object cloned = serializer.clone(original);

    assertEquals(original, cloned);
    assertNotSame(original, cloned);
}
/**
 * Validates that the given value is an IPv4 address.
 *
 * @param value    the value to check
 * @param errorMsg message of the {@link ValidateException} thrown on failure
 * @param <T>      the char sequence type
 * @return the validated value, unchanged
 * @throws ValidateException if the value is not a valid IPv4 address
 */
public static <T extends CharSequence> T validateIpv4(T value, String errorMsg) throws ValidateException {
    // Direct negation instead of the non-idiomatic "false == ..." comparison.
    if (!isIpv4(value)) {
        throw new ValidateException(errorMsg);
    }
    return value;
}
// Verifies common and boundary IPv4 addresses pass validation without
// throwing.
@Test
public void validateIpv4Test() {
    Validator.validateIpv4("192.168.1.1", "Error ip");
    Validator.validateIpv4("8.8.8.8", "Error ip");
    Validator.validateIpv4("0.0.0.0", "Error ip");
    Validator.validateIpv4("255.255.255.255", "Error ip");
    Validator.validateIpv4("127.0.0.0", "Error ip");
}
/**
 * Executes an INSERT VALUES statement by building a producer record from the
 * literal values and sending it to the source's Kafka topic.
 *
 * <p>Authorization failures are normalised: topic- and cluster-level
 * authorization exceptions (including those wrapped in a KafkaException by
 * idempotent producers) are rethrown as {@link KsqlException}s with a
 * descriptive ACL-oriented root cause.
 */
@SuppressWarnings("unused") // Part of required API.
public void execute(
    final ConfiguredStatement<InsertValues> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
  final InsertValues insertValues = statement.getStatement();
  final MetaStore metaStore = executionContext.getMetaStore();
  final KsqlConfig config = statement.getSessionConfig().getConfig(true);

  final DataSource dataSource = getDataSource(config, metaStore, insertValues);

  validateInsert(insertValues.getColumns(), dataSource);

  final ProducerRecord<byte[], byte[]> record =
      buildRecord(statement, metaStore, dataSource, serviceContext);

  try {
    producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
  } catch (final TopicAuthorizationException e) {
    // TopicAuthorizationException does not give much detailed information about why it failed,
    // except which topics are denied. Here we just add the ACL to make the error message
    // consistent with other authorization error messages.
    final Exception rootCause = new KsqlTopicAuthorizationException(
        AclOperation.WRITE,
        e.unauthorizedTopics()
    );

    throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
  } catch (final ClusterAuthorizationException e) {
    // ClusterAuthorizationException is thrown when using idempotent producers
    // and either a topic write permission or a cluster-level idempotent write
    // permission (only applicable for broker versions no later than 2.8) is
    // missing. In this case, we include additional context to help the user
    // distinguish this type of failure from other permissions exceptions
    // such as the ones thrown above when TopicAuthorizationException is caught.
    throw new KsqlException(
        createInsertFailedExceptionMessage(insertValues),
        createClusterAuthorizationExceptionRootCause(dataSource)
    );
  } catch (final KafkaException e) {
    if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
      // The error message thrown when an idempotent producer is missing permissions
      // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
      // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
      // ksqlDB handles these two the same way, accordingly.
      // See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
      throw new KsqlException(
          createInsertFailedExceptionMessage(insertValues),
          createClusterAuthorizationExceptionRootCause(dataSource)
      );
    } else {
      throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
    }
  } catch (final Exception e) {
    throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
  }
}
// Verifies that a KafkaException wrapping a non-authorization cause is
// rethrown as a KsqlException preserving the original message.
@Test
public void shouldThrowOnOtherExceptionWrappedInKafkaException() {
  // Given:
  final ConfiguredStatement<InsertValues> statement = givenInsertValues(
      allAndPseudoColumnNames(SCHEMA),
      ImmutableList.of(
          new LongLiteral(1L),
          new StringLiteral("str"),
          new StringLiteral("str"),
          new LongLiteral(2L))
  );

  doThrow(new KafkaException(
      "Cannot execute transactional method because we are in an error state",
      new RuntimeException("boom"))
  ).when(producer).send(any());

  // When:
  final Exception e = assertThrows(
      KsqlException.class,
      () -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
  );

  // Then:
  assertThat(e.getCause(), (hasMessage(
      containsString("Cannot execute transactional method because we are in an error state"))));
}
/**
 * FEEL insert before(list, position, newItem): returns a new list with
 * newItem inserted before the element at the given 1-based position.
 * Negative positions count from the end of the list.
 *
 * @return the new list, or an InvalidParametersEvent error for a null list,
 *         null/zero position, or a position outside the list bounds
 */
public FEELFnResult<List> invoke(@ParameterName( "list" ) List list, @ParameterName( "position" ) BigDecimal position, @ParameterName( "newItem" ) Object newItem) {
    if ( list == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
    }
    if ( position == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "cannot be null"));
    }
    if ( position.intValue() == 0 ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "cannot be zero (parameter 'position' is 1-based)"));
    }
    if ( position.abs().intValue() > list.size() ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "inconsistent with 'list' size"));
    }
    // spec requires us to return a new list
    final List<Object> result = new ArrayList<>( list );
    if( position.intValue() > 0 ) {
        // Positive positions are 1-based from the front.
        result.add( position.intValue() - 1, newItem );
    } else {
        // Negative positions count back from the end of the list.
        result.add( list.size() + position.intValue(), newItem );
    }
    return FEELFnResult.ofResult( result );
}
// Verifies that the 1-based position parameter rejects zero.
@Test
void invokePositionZero() {
    FunctionTestUtil.assertResultError(insertBeforeFunction.invoke(Collections.emptyList(), BigDecimal.ZERO, new Object()),
            InvalidParametersEvent.class);
}
/**
 * Extracts any zipped plugin archives first, then delegates to the parent
 * implementation to collect the plugin directory paths.
 */
@Override
public List<Path> getPluginPaths() {
    extractZipFiles();
    return super.getPluginPaths();
}
// Verifies the repository aggregates plugin paths across multiple roots.
@Test
public void testGetPluginArchives() {
    PluginRepository repository = new DefaultPluginRepository(pluginsPath1, pluginsPath2);

    List<Path> pluginPaths = repository.getPluginPaths();

    assertEquals(3, pluginPaths.size());
    assertPathExists(pluginPaths, pluginsPath1.resolve("plugin-1"));
    assertPathExists(pluginPaths, pluginsPath2.resolve("plugin-2"));
    assertPathExists(pluginPaths, pluginsPath2.resolve("plugin-3"));
}
/**
 * Attaches the given config handler's model object to this component.
 *
 * @param config the config handler to attach
 * @return this handler, for method chaining
 */
public OpenConfigComponentHandler addConfig(OpenConfigConfigOfComponentHandler config) {
    modelObject.config(config.getModelObject());
    return this;
}
// Verifies addConfig wires the config into the component's model object.
@Test
public void testAddConfig() {
    // test Handler
    OpenConfigComponentHandler component = new OpenConfigComponentHandler("name", parent);

    // call addConfig
    OpenConfigConfigOfComponentHandler config = new OpenConfigConfigOfComponentHandler(component);

    // expected ModelObject
    DefaultComponent modelObject = new DefaultComponent();
    modelObject.name("name");
    DefaultConfig con = new DefaultConfig();
    modelObject.config(con);

    assertEquals("[NG]addConfig:ModelObject(Config added) is not an expected one.\n",
            modelObject, component.getModelObject());
}
/**
 * Checks the health of all Dubbo protocol servers.
 *
 * @return UNKNOWN when no servers exist; ERROR (with only the failing
 *         address as the message) if any server is not bound; otherwise OK
 *         with a comma-separated list of "address(clients:n)" entries
 */
@Override
public Status check() {
    List<ProtocolServer> servers = DubboProtocol.getDubboProtocol().getServers();
    if (servers == null || servers.isEmpty()) {
        return new Status(Status.Level.UNKNOWN);
    }
    Status.Level level = Status.Level.OK;
    StringBuilder buf = new StringBuilder();
    for (ProtocolServer protocolServer : servers) {
        RemotingServer server = protocolServer.getRemotingServer();
        if (!server.isBound()) {
            // On failure, discard accumulated detail and report only the
            // unbound server's address.
            level = Status.Level.ERROR;
            buf.setLength(0);
            buf.append(server.getLocalAddress());
            break;
        }
        if (buf.length() > 0) {
            buf.append(',');
        }
        buf.append(server.getLocalAddress());
        buf.append("(clients:");
        buf.append(server.getChannels().size());
        buf.append(')');
    }
    return new Status(level, buf.toString());
}
// Verifies UNKNOWN with no servers, OK with a bound server (address and
// client count in the message), and ERROR when the server is unbound.
@Test
void test() {
    ServerStatusChecker serverStatusChecker = new ServerStatusChecker();
    Status status = serverStatusChecker.check();
    Assertions.assertEquals(status.getLevel(), Status.Level.UNKNOWN);

    DubboProtocol dubboProtocol = Mockito.mock(DubboProtocol.class);
    ProtocolServer protocolServer = Mockito.mock(ProtocolServer.class);
    RemotingServer remotingServer = Mockito.mock(RemotingServer.class);
    List<ProtocolServer> servers = Arrays.asList(protocolServer);

    Mockito.when(dubboProtocol.getServers()).thenReturn(servers);
    Mockito.when(protocolServer.getRemotingServer()).thenReturn(remotingServer);
    Mockito.when(remotingServer.isBound()).thenReturn(true);
    Mockito.when(remotingServer.getLocalAddress())
            .thenReturn(InetSocketAddress.createUnresolved("127.0.0.1", 9999));
    Mockito.when(remotingServer.getChannels()).thenReturn(Arrays.asList(new MockChannel()));

    try (MockedStatic<DubboProtocol> mockDubboProtocol = Mockito.mockStatic(DubboProtocol.class)) {
        mockDubboProtocol.when(() -> DubboProtocol.getDubboProtocol()).thenReturn(dubboProtocol);

        status = serverStatusChecker.check();
        Assertions.assertEquals(status.getLevel(), Status.Level.OK);
        // In JDK 17 : 127.0.0.1/<unresolved>:9999(clients:1)
        Assertions.assertTrue(status.getMessage().contains("127.0.0.1"));
        Assertions.assertTrue(status.getMessage().contains("9999(clients:1)"));

        Mockito.when(remotingServer.isBound()).thenReturn(false);
        status = serverStatusChecker.check();
        Assertions.assertEquals(status.getLevel(), Status.Level.ERROR);
        // In JDK 17 : 127.0.0.1/<unresolved>:9999
        Assertions.assertTrue(status.getMessage().contains("127.0.0.1"));
        Assertions.assertTrue(status.getMessage().contains("9999"));
    }
}
/**
 * Returns a fresh cursor positioned before the first assigned slot of this
 * hash-slot array.
 */
@Override
public HashSlotCursor8byteKey cursor() {
    return new Cursor();
}
// Verifies a cursor over a single inserted key advances exactly once.
@Test
public void testCursor_advance() {
    insert(random.nextLong());
    HashSlotCursor8byteKey cursor = hsa.cursor();
    assertTrue(cursor.advance());
    assertFalse(cursor.advance());
}
/**
 * Returns the supported number formats, lazily loaded from the message
 * bundle. The first entry is always {@code DEFAULT_NUMBER_FORMAT}.
 *
 * @return the cached array of number format strings
 */
public static String[] getNumberFormats() {
  if ( numberFormats == null ) {
    int numberFormatsCount = toInt( BaseMessages.getString( PKG, "Const.NumberFormat.Count" ), 0 );
    // Build into a local array and publish it with a single assignment so a
    // concurrent caller can never observe a partially populated cache.
    String[] formats = new String[numberFormatsCount + 1];
    formats[0] = DEFAULT_NUMBER_FORMAT;
    for ( int i = 1; i <= numberFormatsCount; i++ ) {
      // String concatenation converts the index; Integer.toString is redundant.
      formats[i] = BaseMessages.getString( PKG, "Const.NumberFormat" + i );
    }
    numberFormats = formats;
  }
  return numberFormats;
}
// Verifies the format list is non-empty and contains no null/empty entries.
@Test
public void testGetNumberFormats() {
    final String[] formats = Const.getNumberFormats();
    assertTrue( formats.length > 0 );
    for ( String format : formats ) {
        assertTrue( format != null && !format.isEmpty() );
    }
}
/**
 * Transitions this stage execution to {@code ABORTED}, unless it has already
 * reached a terminal state.
 *
 * @return {@code true} if the transition was applied
 */
public boolean transitionToAborted() {
    return state.setIf(ABORTED, currentState -> !currentState.isDone());
}
// Verifies a fresh state machine can transition to the terminal ABORTED state.
@Test
public void testAborted() {
    StageExecutionStateMachine stateMachine = createStageStateMachine();

    assertTrue(stateMachine.transitionToAborted());
    assertFinalState(stateMachine, StageExecutionState.ABORTED);
}
/**
 * Appends the handler to the end of the pipeline under the given name.
 *
 * @return this pool handler, for method chaining
 */
@Override
public ClientPoolHandler addLast(String name, ChannelHandler handler) {
    super.addLast(name, handler);
    return this;
}
// Verifies adding a handler makes the previously empty pipeline non-empty.
@Test
public void addLast() {
    ClientPoolHandler handler = new ClientPoolHandler();
    Assert.assertTrue(handler.isEmpty());
    handler.addLast(null, new TestHandler());
    Assert.assertFalse(handler.isEmpty());
}
/**
 * Creates a type mapping for the given main type, which must belong to this
 * index.
 *
 * @throws IllegalArgumentException if the main type targets another index
 */
protected TypeMapping createTypeMapping(IndexMainType mainType) {
    checkArgument(mainType.getIndex().equals(index), "Main type must belong to index %s", index);
    return new TypeMapping(this);
}
// Verifies a relation-type mapping is rejected with ISE when the index is
// not configured to accept relations.
@Test
public void createTypeMapping_with_IndexRelationType_fails_with_ISE_if_index_does_not_allow_relations() {
    IndexType.IndexRelationType indexRelationType =
        IndexType.relation(IndexType.main(Index.withRelations(someIndexName), "bar"), "bar");
    Index index = Index.simple(someIndexName);
    IndexMainType mainType = IndexType.main(index, "foo");
    NewIndex underTest = new NewIndex(index, defaultSettingsConfiguration) {
        @Override
        public IndexMainType getMainType() {
            return mainType;
        }

        @Override
        public BuiltIndex build() {
            throw new UnsupportedOperationException("build not implemented");
        }
    };

    assertThatThrownBy(() -> underTest.createTypeMapping(indexRelationType))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Index is not configured to accept relations. Update IndexDefinition.Descriptor instance for this index");
}
/**
 * Releases this client's metrics footprint: decrements the published-IP
 * counters for every publisher (batch publishers use the dedicated batch
 * counter) and removes all of this client's subscribers from the global
 * subscriber count.
 */
@Override
public void release() {
    for (InstancePublishInfo publishInfo : publishers.values()) {
        if (publishInfo instanceof BatchInstancePublishInfo) {
            MetricsMonitor.decrementIpCountWithBatchRegister(publishInfo);
        } else {
            MetricsMonitor.getIpCountMonitor().decrementAndGet();
        }
    }
    MetricsMonitor.getSubscriberCount().addAndGet(-subscribers.size());
}
// Verifies release() returns both the IP count and subscriber count to zero.
@Test
void release() {
    abstractClient.addServiceInstance(service, instancePublishInfo);
    assertEquals(1, MetricsMonitor.getIpCountMonitor().get());
    abstractClient.addServiceSubscriber(service, subscriber);
    assertEquals(1, MetricsMonitor.getSubscriberCount().get());

    abstractClient.release();
    assertEquals(0, MetricsMonitor.getSubscriberCount().get());
    assertEquals(0, MetricsMonitor.getIpCountMonitor().get());
}
/**
 * Executes a PostgreSQL SHOW statement. "SHOW ALL" delegates to
 * {@code executeShowAll}; otherwise a single-column result is produced for
 * the named variable, using a blank-valued generator for unknown names.
 */
@Override
public void execute(final ConnectionSession connectionSession) {
    String name = showStatement.getName().orElse("").toLowerCase(Locale.ROOT);
    if ("ALL".equalsIgnoreCase(name)) {
        executeShowAll(connectionSession);
        return;
    }
    // Single VARCHAR column labelled with the variable name.
    queryResultMetaData = new RawQueryResultMetaData(Collections.singletonList(new RawQueryResultColumnMetaData("", "", name, Types.VARCHAR, "VARCHAR", -1, 0)));
    // Unknown variables fall back to a generator producing empty strings.
    VariableRowDataGenerator variableRowDataGenerator = VARIABLE_ROW_DATA_GENERATORS.getOrDefault(name, unused -> new String[]{"", "", ""});
    // Index 1 of the generated triple holds the variable's value.
    mergedResult = new LocalDataMergedResult(Collections.singletonList(new LocalDataQueryResultRow(variableRowDataGenerator.getVariable(connectionSession)[1])));
}
// Verifies SHOW for a single variable yields one column labelled with the
// variable name and one row carrying its value.
@Test
void assertExecuteShowOne() throws SQLException {
    ConnectionSession connectionSession = mock(ConnectionSession.class);
    PostgreSQLShowVariableExecutor executor = new PostgreSQLShowVariableExecutor(new PostgreSQLShowStatement("client_encoding"));
    executor.execute(connectionSession);
    QueryResultMetaData actualMetaData = executor.getQueryResultMetaData();
    assertThat(actualMetaData.getColumnCount(), is(1));
    assertThat(actualMetaData.getColumnLabel(1), is("client_encoding"));
    MergedResult actualResult = executor.getMergedResult();
    assertTrue(actualResult.next());
    assertThat(actualResult.getValue(1, String.class), is("UTF8"));
    assertFalse(actualResult.next());
}
/**
 * Decides whether a failed request should be retried: the exception must
 * match the configured predicate and the retry budget must not be exhausted.
 * On retry, notifies the retry consumer and sleeps for the computed delay
 * before returning.
 *
 * @return {@code true} if the request should be retried
 */
@Override
public boolean retryRequest(IOException exception, int executionCount, HttpContext ctx) {
    log.fine(() -> String.format("retryRequest(exception='%s', executionCount='%d', ctx='%s'",
            exception.getClass().getName(), executionCount, ctx));
    HttpClientContext clientCtx = HttpClientContext.adapt(ctx);
    if (!predicate.test(exception, clientCtx)) {
        // Exception type is not eligible for retry.
        log.fine(() -> String.format("Not retrying for '%s'", ctx));
        return false;
    }
    if (executionCount > maxRetries) {
        // Retry budget exhausted — notify the failure consumer and give up.
        log.fine(() -> String.format("Max retries exceeded for '%s'", ctx));
        retryFailedConsumer.onRetryFailed(exception, executionCount, clientCtx);
        return false;
    }
    Duration delay = delaySupplier.getDelay(executionCount);
    log.fine(() -> String.format("Retrying after %s for '%s'", delay, ctx));
    retryConsumer.onRetry(exception, delay, executionCount, clientCtx);
    // Block the calling thread for the back-off delay before the retry.
    sleeper.sleep(delay);
    return true;
}
// Verifies an exception type outside the configured allow-list is never
// retried.
@Test
void does_not_retry_for_non_listed_exception() {
    DelayedConnectionLevelRetryHandler handler = DelayedConnectionLevelRetryHandler.Builder
            .withFixedDelay(Duration.ofSeconds(2), 2)
            .retryForExceptions(List.of(SSLException.class, ConnectException.class))
            .withSleeper(mock(Sleeper.class))
            .build();
    IOException ioException = new IOException();
    HttpClientContext ctx = new HttpClientContext();
    assertFalse(handler.retryRequest(ioException, 1, ctx));
}
/**
 * Starts an RDA document-scan session for a PASSPORT or ID_CARD described by
 * the given MRZ data. Logs audit events, records the session details on the
 * app session, and returns the RDA session URL/id, or a NOK response when
 * the document type is unsupported or the RDA session could not be started.
 */
@Override
public AppResponse process(Flow flow, MrzDocumentRequest params) {
    // Only passports and ID cards can be scanned.
    if(!(params.getDocumentType().equals("PASSPORT") || params.getDocumentType().equals("ID_CARD"))){
        return new NokResponse();
    }

    Map<String, String> travelDocument = Map.of(
        "documentNumber", params.getDocumentNumber(),
        "dateOfBirth", params.getDateOfBirth(),
        "dateOfExpiry", params.getDateOfExpiry());

    digidClient.remoteLog("867", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true));
    appSession.setRdaSessionStatus("DOCUMENTS_RECEIVED");

    Map<String, String> rdaSession = rdaClient.startSession(
        returnUrl.concat("/iapi/rda/confirm"),
        appSession.getId(),
        params.getIpAddress(),
        List.of(travelDocument),
        List.of());

    if(rdaSession.isEmpty()){
        // RDA session could not be started — log and report failure.
        digidClient.remoteLog("873", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true));
        return new NokResponse();
    }

    // Persist the RDA session handle and document info on the app session.
    appSession.setConfirmSecret(rdaSession.get("confirmSecret"));
    appSession.setUrl(rdaSession.get("url"));
    appSession.setRdaSessionId(rdaSession.get("sessionId"));
    appSession.setRdaSessionTimeoutInSeconds(rdaSession.get("expiration"));
    appSession.setRdaSessionStatus("SCANNING_FOREIGN");
    appSession.setRdaDocumentType(params.getDocumentType());
    appSession.setRdaDocumentNumber(params.getDocumentNumber());

    digidClient.remoteLog("868", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true));

    return new RdaResponse(appSession.getUrl(), appSession.getRdaSessionId());
}
// Verifies an unsupported document type yields a NOK response.
@Test
public void processNOK() {
    //given
    MrzDocumentRequest mrzDocumentRequest = new MrzDocumentRequest();
    mrzDocumentRequest.setDocumentType("A");

    //when
    AppResponse appResponse = mrzDocumentInitialized.process(mockedFlow, mrzDocumentRequest);

    //then
    assertTrue(appResponse instanceof NokResponse);
}
/**
 * Serializes a Connect value that is already raw bytes. Accepts {@code null},
 * {@code byte[]} or {@link ByteBuffer} values; any schema present must be of
 * type {@code BYTES}.
 *
 * @throws DataException if the schema or value type is incompatible
 */
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    if (schema != null && schema.type() != Schema.Type.BYTES) {
        throw new DataException("Invalid schema type for ByteArrayConverter: " + schema.type().toString());
    }
    if (value != null && !(value instanceof byte[]) && !(value instanceof ByteBuffer)) {
        throw new DataException("ByteArrayConverter is not compatible with objects of type " + value.getClass());
    }
    if (value instanceof ByteBuffer) {
        return getBytesFromByteBuffer((ByteBuffer) value);
    }
    return (byte[]) value;
}
// Verifies a non-bytes value is rejected with a DataException.
@Test
public void testFromConnectInvalidValue() {
    assertThrows(DataException.class,
        () -> converter.fromConnectData(TOPIC, Schema.BYTES_SCHEMA, 12));
}
/**
 * Returns this plugin's registered name: the basic-auth plugin name from
 * {@link PluginEnum}.
 */
@Override
public String named() {
    return PluginEnum.BASIC_AUTH.getName();
}
// Verifies the plugin reports the basic-auth name from PluginEnum.
@Test
public void testNamed() {
    final String result = basicAuthPlugin.named();
    Assertions.assertEquals(PluginEnum.BASIC_AUTH.getName(), result);
}
/**
 * Builds the common partition type of a table by projecting across all of
 * its partition specs (current and historical), retaining every field id.
 */
public static StructType partitionType(Table table) {
    Collection<PartitionSpec> specs = table.specs().values();
    return buildPartitionProjectionType("table partition", specs, allFieldIds(specs));
}
// Verifies renamed and added partition fields in a case-insensitive V1 spec
// surface under their latest names in the combined partition type.
@Test
public void testPartitionTypeWithRenamesInV1TableCaseInsensitive() {
    PartitionSpec initialSpec =
        PartitionSpec.builderFor(SCHEMA).caseSensitive(false).identity("DATA", "p1").build();
    TestTables.TestTable table =
        TestTables.create(tableDir, "test", SCHEMA, initialSpec, V1_FORMAT_VERSION);
    table.updateSpec().addField("category").commit();

    table.updateSpec().renameField("p1", "p2").commit();

    StructType expectedType =
        StructType.of(
            NestedField.optional(1000, "p2", Types.StringType.get()),
            NestedField.optional(1001, "category", Types.StringType.get()));
    StructType actualType = Partitioning.partitionType(table);
    assertThat(actualType).isEqualTo(expectedType);
}
/**
 * Registers the {@link SupportedSearchVersionFilter} for any resource whose
 * method or class carries a {@code SupportedSearchVersion} or
 * {@code SupportedSearchVersions} annotation.
 */
@Override
public void configure(ResourceInfo resourceInfo, FeatureContext context) {
    final Method resourceMethod = resourceInfo.getResourceMethod();
    final Class resourceClass = resourceInfo.getResourceClass();
    if (declaresSupportedVersions(resourceMethod) || declaresSupportedVersions(resourceClass)) {
        context.register(SupportedSearchVersionFilter.class);
    }
}

/** Returns true if the element is non-null and carries a version annotation. */
private static boolean declaresSupportedVersions(java.lang.reflect.AnnotatedElement element) {
    return element != null
            && (element.isAnnotationPresent(SupportedSearchVersion.class)
                || element.isAnnotationPresent(SupportedSearchVersions.class));
}
// Verifies a class-level annotation alone is enough to register the filter.
@Test
public void configureRegistersResponseFilterIfAnnotationIsPresentOnClass() throws Exception {
    final Class clazz = TestResourceWithClassAnnotation.class;
    when(resourceInfo.getResourceClass()).thenReturn(clazz);

    supportedSearchVersionDynamicFeature.configure(resourceInfo, featureContext);

    verify(featureContext, only()).register(SupportedSearchVersionFilter.class);
}
/**
 * Repairs a persistent-queue directory in place: removes temporary and fully
 * acknowledged checkpoints, then reconciles page files against checkpoint
 * files (missing pages, zero-size pages, missing checkpoints).
 *
 * @param path the queue directory
 * @throws IllegalArgumentException if the path is not a directory
 * @throws IOException on any filesystem failure
 */
public static void repair(final Path path) throws IOException {
    if (!path.toFile().isDirectory()) {
        throw new IllegalArgumentException(
            String.format("Given PQ path %s is not a directory.", path)
        );
    }
    LOGGER.info("Start repairing queue dir: {}", path.toString());
    deleteTempCheckpoint(path);
    // Index the page files by their numeric suffix ("page.<N>").
    final Map<Integer, Path> pageFiles = new HashMap<>();
    try (final DirectoryStream<Path> pfs = Files.newDirectoryStream(path, "page.*")) {
        pfs.forEach(p -> pageFiles.put(
            Integer.parseInt(p.getFileName().toString().substring("page.".length())), p)
        );
    }
    // Index the checkpoint files by suffix, skipping the head checkpoint.
    final Map<Integer, Path> checkpointFiles = new HashMap<>();
    try (final DirectoryStream<Path> cpfs = Files.newDirectoryStream(path, "checkpoint.*")) {
        cpfs.forEach(
            c -> {
                final String cpFilename = c.getFileName().toString();
                if (!"checkpoint.head".equals(cpFilename)) {
                    checkpointFiles.put(
                        Integer.parseInt(cpFilename.substring("checkpoint.".length())), c
                    );
                }
            }
        );
    }
    // Reconcile pages and checkpoints in dependency order.
    deleteFullyAcked(path, pageFiles, checkpointFiles);
    fixMissingPages(pageFiles, checkpointFiles);
    fixZeroSizePages(pageFiles, checkpointFiles);
    fixMissingCheckpoints(pageFiles, checkpointFiles);
    LOGGER.info("Repair is done");
}
// Verifies repair() recreates a deleted checkpoint and leaves a valid queue.
@Test
public void testRecreateMissingCheckPoint() throws Exception {
    Files.delete(dataPath.resolve("checkpoint.1"));
    PqRepair.repair(dataPath);
    verifyQueue();
}
/** Returns the function that converts a proto message of type {@code T} into a Beam {@code Row}. */
public SerializableFunction<T, Row> getToRowFunction() {
    return toRowFunction;
}
// Converts a RepeatPrimitive proto whose repeated fields were left unset; the expected row
// is given by the NULL_REPEATED_ROW fixture (paired with NULL_REPEATED_PROTO).
@Test
public void testNullRepeatedProtoToRow() throws InvalidProtocolBufferException {
    ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(RepeatPrimitive.getDescriptor());
    SerializableFunction<DynamicMessage, Row> toRow = schemaProvider.getToRowFunction();
    assertEquals(NULL_REPEATED_ROW, toRow.apply(toDynamic(NULL_REPEATED_PROTO)));
}
/**
 * Builds an array of {@link KeyManager}s from a PEM private key and its certificate chain.
 *
 * The key and certificates are loaded into an in-memory JKS keystore (no backing file) under
 * the alias {@code "key"}, and a {@link KeyManagerFactory} using the JVM's default algorithm
 * is initialised from it.
 *
 * Fix: pass a zero-length array to {@code Collection.toArray} instead of a presized one —
 * the JDK allocates the correctly-sized result itself, which is the recommended idiom.
 *
 * @param tlsKeyFile     file containing the private key (format handled by loadPrivateKey)
 * @param tlsCertFile    file containing the certificate chain
 * @param tlsKeyPassword password protecting the key; may be null (treated as empty)
 * @throws IOException              if a file cannot be read
 * @throws GeneralSecurityException if key material is invalid or the keystore cannot be built
 */
public static KeyManager[] initKeyStore(File tlsKeyFile, File tlsCertFile, String tlsKeyPassword)
        throws IOException, GeneralSecurityException {
    final KeyStore ks = KeyStore.getInstance("JKS");
    // Initialise an empty keystore (null stream/password = fresh, in-memory store).
    ks.load(null, null);
    final Collection<? extends Certificate> certChain = loadCertificates(tlsCertFile.toPath());
    final PrivateKey privateKey = loadPrivateKey(tlsKeyFile, tlsKeyPassword);
    // A null password becomes an empty char[], so the same password works for entry and factory.
    final char[] password = Strings.nullToEmpty(tlsKeyPassword).toCharArray();
    ks.setKeyEntry("key", privateKey, password, certChain.toArray(new Certificate[0]));
    if (LOG.isDebugEnabled()) {
        LOG.debug("Private key file: {}", tlsKeyFile);
        LOG.debug("Certificate file: {}", tlsCertFile);
        LOG.debug("Aliases: {}", join(ks.aliases()));
    }
    final KeyManagerFactory kmf =
            KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
    kmf.init(ks, password);
    return kmf.getKeyManagers();
}
// Parameterized test: builds KeyManagers from (key file, certificate for the key's algorithm),
// then drives them through SSLContext initialisation and SslHandler creation. For invalid
// parameter combinations, exceptionClass/exceptionMessage are non-null and the ExpectedException
// rule intercepts the failure before the happy-path assertions run.
@Test
public void testCreateNettySslHandler() throws Exception {
    if (exceptionClass != null) {
        // Failure case for this parameter set: expect the typed exception with the given message.
        expectedException.expect(exceptionClass);
        expectedException.expectMessage(exceptionMessage);
    }
    final File keyFile = resourceToFile(keyFileName);
    // Certificate fixture chosen to match the key's algorithm (RSA/EC/...).
    final File certFile = resourceToFile(CERTIFICATES.get(keyAlgorithm));
    final KeyManager[] keyManagers = KeyUtil.initKeyStore(keyFile, certFile, keyPassword);
    final SSLContext sslContext = SSLContext.getInstance("TLS");
    sslContext.init(keyManagers, new TrustManager[0], new SecureRandom());
    assertThat(sslContext.getProtocol()).isEqualTo("TLS");
    // A usable engine implies the key material was accepted by the context.
    final SSLEngine sslEngine = sslContext.createSSLEngine();
    assertThat(sslEngine.getEnabledCipherSuites()).isNotEmpty();
    assertThat(sslEngine.getEnabledProtocols()).isNotEmpty();
    final SslHandler sslHandler = new SslHandler(sslEngine);
    assertThat(sslHandler).isNotNull();
}
/**
 * Analyzes the given statement, delegating to the two-argument overload with the boolean
 * flag disabled (the flag's semantics are defined by that overload).
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
// WITH-clause (table property) validation for CREATE TABLE:
//  - literal property values are accepted;
//  - a value that is an unresolvable identifier (p1 = y) fails with MISSING_ATTRIBUTE;
//  - duplicate property names fail with DUPLICATE_PROPERTY, including when one duplicate
//    is written as a quoted identifier ("p1").
@Test
public void testCreateTable() {
    analyze("CREATE TABLE test (id bigint)");
    analyze("CREATE TABLE test (id bigint) WITH (p1 = 'p1')");
    assertFails(MISSING_ATTRIBUTE, ".*'y' cannot be resolved",
        "CREATE TABLE test (x bigint) WITH (p1 = y)");
    assertFails(DUPLICATE_PROPERTY, ".* Duplicate property: p1",
        "CREATE TABLE test (id bigint) WITH (p1 = 'p1', p2 = 'p2', p1 = 'p3')");
    assertFails(DUPLICATE_PROPERTY, ".* Duplicate property: p1",
        "CREATE TABLE test (id bigint) WITH (p1 = 'p1', \"p1\" = 'p2')");
}
/**
 * Extracts the bare table name from the SELECT's FROM clause by printing only the table
 * expression, which drops any alias (e.g. "t t1" prints as "t").
 */
@Override
public String getTableName() {
    SQLTableSource from = getSelect().getFrom();
    final StringBuilder tableName = new StringBuilder();
    // Visitor override: print just the table expression and stop descending.
    SQLServerOutputVisitor tableNamePrinter = new SQLServerOutputVisitor(tableName) {
        @Override
        public boolean visit(SQLExprTableSource tableSource) {
            printTableSourceExpr(tableSource.getExpr());
            return false;
        }
    };
    tableNamePrinter.visit((SQLExprTableSource) from);
    return tableName.toString();
}
@Test public void testGetTableName() { String sql = "SELECT * FROM t WITH (UPDLOCK)"; SQLStatement ast = getSQLStatement(sql); SqlServerSelectForUpdateRecognizer recognizer = new SqlServerSelectForUpdateRecognizer(sql, ast); Assertions.assertEquals("t", recognizer.getTableName()); //test for alias sql = "SELECT * FROM t t1 WITH (UPDLOCK)"; ast = getSQLStatement(sql); recognizer = new SqlServerSelectForUpdateRecognizer(sql, ast); Assertions.assertEquals("t", recognizer.getTableName()); }
/**
 * (Re)configures neighbour handling for this application: drops every registration made
 * under {@code appId}, then registers the handler on each interface currently known to
 * the interface service.
 */
protected void configNeighbourHandler() {
    neighbourService.unregisterNeighbourHandlers(appId);
    interfaceService.getInterfaces().forEach(
            networkInterface -> neighbourService.registerNeighbourHandler(
                    networkInterface, neighbourHandler, appId));
}
// One registration is expected per interface known to the test's interfaceService.
// NOTE(review): the expected count (9) presumably matches the number of interfaces in the
// test fixture — confirm against the setUp() configuration if the fixture changes.
@Test
public void testConfigNeighbourHandler() {
    vplsNeighbourHandler.configNeighbourHandler();
    assertEquals(9, vplsNeighbourHandler.neighbourService.getHandlerRegistrations().size());
}
/**
 * @deprecated delegates to the five-argument overload, stamping the scan with the current
 *     wall-clock time and a fresh default {@code AltBeacon} instance; prefer the overload
 *     that takes an explicit timestamp.
 */
@Deprecated
@Override
public Beacon fromScanData(byte[] scanData, int rssi, BluetoothDevice device) {
    return fromScanData(scanData, rssi, device, System.currentTimeMillis(), new AltBeacon());
}
// Verifies the parser matches a beacon advertisement carrying a non-default beacon type
// code (0xaabb) once setMatchingBeaconTypeCode is configured accordingly.
// Fix: long literal suffix changed from lowercase 'l' to 'L' — lowercase 'l' is easily
// misread as the digit 1 (same value, no behavior change).
@Test
public void testDetectsAlternateBeconType() {
    org.robolectric.shadows.ShadowLog.stream = System.err;
    // Raw BLE advertisement bytes with manufacturer data using type code 0xaabb.
    byte[] bytes = hexStringToByteArray("02011a1bff1801aabb2f234454cf6d4a0fadf2f4911ba9ffa600010002c50900");
    AltBeaconParser parser = new AltBeaconParser();
    parser.setMatchingBeaconTypeCode(0xaabbL);
    Beacon beacon = parser.fromScanData(bytes, -55, null, 123456L);
    assertNotNull("Beacon should be not null if parsed successfully", beacon);
}
/**
 * Handles a NodeManager (NM) registration request.
 *
 * Rejection paths — each answers with NodeAction.SHUTDOWN and a diagnostic message:
 *   1. NM software version below the configured minimum;
 *   2. unresolvable NM hostname (only when checkIpHostnameInRegistration is enabled);
 *   3. host disallowed by the nodes include/exclude lists (unless it is decommissioning);
 *   4. node capability below the minimum allocation (memory or vcores).
 * Otherwise the node is inserted as a new RMNode (or handled as a reconnect), secret-manager
 * keys are handed out, liveliness monitoring is (re)started, and any node labels/attributes
 * reported by the NM are applied according to the RM's label-configuration mode. A successful
 * registration answers NodeAction.NORMAL with the RM identifier and version.
 */
@SuppressWarnings("unchecked")
@Override
public RegisterNodeManagerResponse registerNodeManager(
    RegisterNodeManagerRequest request) throws YarnException, IOException {
  NodeId nodeId = request.getNodeId();
  String host = nodeId.getHost();
  int cmPort = nodeId.getPort();
  int httpPort = request.getHttpPort();
  Resource capability = request.getResource();
  String nodeManagerVersion = request.getNMVersion();
  Resource physicalResource = request.getPhysicalResource();
  NodeStatus nodeStatus = request.getNodeStatus();
  RegisterNodeManagerResponse response = recordFactory
      .newRecordInstance(RegisterNodeManagerResponse.class);
  // (1) Enforce the minimum NM version; "NONE" disables the check, "EqualToRM" pins it
  // to the RM's own version.
  if (!minimumNodeManagerVersion.equals("NONE")) {
    if (minimumNodeManagerVersion.equals("EqualToRM")) {
      minimumNodeManagerVersion = YarnVersionInfo.getVersion();
    }
    if ((nodeManagerVersion == null)
        || (VersionUtil.compareVersions(nodeManagerVersion,minimumNodeManagerVersion)) < 0) {
      String message = "Disallowed NodeManager Version " + nodeManagerVersion
          + ", is less than the minimum version " + minimumNodeManagerVersion
          + " sending SHUTDOWN signal to " + "NodeManager.";
      LOG.info(message);
      response.setDiagnosticsMessage(message);
      response.setNodeAction(NodeAction.SHUTDOWN);
      return response;
    }
  }
  // (2) Optionally verify the NM's hostname resolves.
  if (checkIpHostnameInRegistration) {
    InetSocketAddress nmAddress = NetUtils.createSocketAddrForHost(host, cmPort);
    InetAddress inetAddress = Server.getRemoteIp();
    if (inetAddress != null && nmAddress.isUnresolved()) {
      // Reject registration of unresolved nm to prevent resourcemanager
      // getting stuck at allocations.
      final String message = "hostname cannot be resolved (ip="
          + inetAddress.getHostAddress() + ", hostname=" + host + ")";
      LOG.warn("Unresolved nodemanager registration: " + message);
      response.setDiagnosticsMessage(message);
      response.setNodeAction(NodeAction.SHUTDOWN);
      return response;
    }
  }
  // Check if this node is a 'valid' node
  if (!this.nodesListManager.isValidNode(host)
      && !isNodeInDecommissioning(nodeId)) {
    String message = "Disallowed NodeManager from  " + host
        + ", Sending SHUTDOWN signal to the NodeManager.";
    LOG.info(message);
    response.setDiagnosticsMessage(message);
    response.setNodeAction(NodeAction.SHUTDOWN);
    return response;
  }
  // check if node's capacity is load from dynamic-resources.xml
  String nid = nodeId.toString();
  Resource dynamicLoadCapability = loadNodeResourceFromDRConfiguration(nid);
  if (dynamicLoadCapability != null) {
    LOG.debug("Resource for node: {} is adjusted from: {} to: {} due to"
        + " settings in dynamic-resources.xml.", nid, capability,
        dynamicLoadCapability);
    capability = dynamicLoadCapability;
    // sync back with new resource.
    response.setResource(capability);
  }
  // Check if this node has minimum allocations
  if (capability.getMemorySize() < minAllocMb
      || capability.getVirtualCores() < minAllocVcores) {
    String message = "NodeManager from  " + host
        + " doesn't satisfy minimum allocations, Sending SHUTDOWN"
        + " signal to the NodeManager. Node capabilities are " + capability
        + "; minimums are " + minAllocMb + "mb and " + minAllocVcores
        + " vcores";
    LOG.info(message);
    response.setDiagnosticsMessage(message);
    response.setNodeAction(NodeAction.SHUTDOWN);
    return response;
  }
  // Hand out current secret-manager keys so the NM can validate tokens.
  response.setContainerTokenMasterKey(containerTokenSecretManager
      .getCurrentKey());
  response.setNMTokenMasterKey(nmTokenSecretManager
      .getCurrentKey());
  RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort,
      resolve(host), capability, nodeManagerVersion, physicalResource);
  // putIfAbsent distinguishes a brand-new node (oldNode == null) from a reconnect.
  RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode);
  if (oldNode == null) {
    // First-time registration: start the node, forwarding any container statuses,
    // running applications, and cached log-aggregation reports the NM carried over.
    RMNodeStartedEvent startEvent = new RMNodeStartedEvent(nodeId,
        request.getNMContainerStatuses(),
        request.getRunningApplications(), nodeStatus);
    if (request.getLogAggregationReportsForApps() != null
        && !request.getLogAggregationReportsForApps().isEmpty()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Found the number of previous cached log aggregation "
            + "status from nodemanager:" + nodeId + " is :"
            + request.getLogAggregationReportsForApps().size());
      }
      startEvent.setLogAggregationReportsForApps(request
          .getLogAggregationReportsForApps());
    }
    this.rmContext.getDispatcher().getEventHandler().handle(
        startEvent);
  } else {
    LOG.info("Reconnect from the node at: " + host);
    this.nmLivelinessMonitor.unregister(nodeId);
    if (CollectionUtils.isEmpty(request.getRunningApplications())
        && rmNode.getState() != NodeState.DECOMMISSIONING
        && rmNode.getHttpPort() != oldNode.getHttpPort()) {
      // Reconnected node differs, so replace old node and start new node
      switch (rmNode.getState()) {
      case RUNNING:
        ClusterMetrics.getMetrics().decrNumActiveNodes();
        break;
      case UNHEALTHY:
        ClusterMetrics.getMetrics().decrNumUnhealthyNMs();
        break;
      default:
        LOG.debug("Unexpected Rmnode state");
      }
      this.rmContext.getDispatcher().getEventHandler()
          .handle(new NodeRemovedSchedulerEvent(rmNode));
      this.rmContext.getRMNodes().put(nodeId, rmNode);
      this.rmContext.getDispatcher().getEventHandler()
          .handle(new RMNodeStartedEvent(nodeId, null, null, nodeStatus));
    } else {
      // Reset heartbeat ID since node just restarted.
      oldNode.resetLastNodeHeartBeatResponse();
      this.rmContext.getDispatcher().getEventHandler()
          .handle(new RMNodeReconnectEvent(nodeId, rmNode,
              request.getRunningApplications(),
              request.getNMContainerStatuses()));
    }
  }
  // On every node manager register we will be clearing NMToken keys if
  // present for any running application.
  this.nmTokenSecretManager.removeNodeKey(nodeId);
  this.nmLivelinessMonitor.register(nodeId);
  // Handle received container status, this should be processed after new
  // RMNode inserted
  if (!rmContext.isWorkPreservingRecoveryEnabled()) {
    if (!request.getNMContainerStatuses().isEmpty()) {
      LOG.info("received container statuses on node manager register :"
          + request.getNMContainerStatuses());
      for (NMContainerStatus status : request.getNMContainerStatuses()) {
        handleNMContainerStatus(status, nodeId);
      }
    }
  }
  // Update node's labels to RM's NodeLabelManager.
  Set<String> nodeLabels = NodeLabelsUtils.convertToStringSet(
      request.getNodeLabels());
  if (isDistributedNodeLabelsConf && nodeLabels != null) {
    try {
      updateNodeLabelsFromNMReport(nodeLabels, nodeId);
      response.setAreNodeLabelsAcceptedByRM(true);
    } catch (IOException ex) {
      // Ensure the exception is captured in the response
      response.setDiagnosticsMessage(ex.getMessage());
      response.setAreNodeLabelsAcceptedByRM(false);
    }
  } else if (isDelegatedCentralizedNodeLabelsConf) {
    this.rmContext.getRMDelegatedNodeLabelsUpdater().updateNodeLabels(nodeId);
  }
  // Update node's attributes to RM's NodeAttributesManager.
  if (request.getNodeAttributes() != null) {
    try {
      // update node attributes if necessary then update heartbeat response
      updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
      response.setAreNodeAttributesAcceptedByRM(true);
    } catch (IOException ex) {
      //ensure the error message is captured and sent across in response
      String errorMsg =
          response.getDiagnosticsMessage() == null ? ex.getMessage() :
              response.getDiagnosticsMessage() + "\n" + ex.getMessage();
      response.setDiagnosticsMessage(errorMsg);
      response.setAreNodeAttributesAcceptedByRM(false);
    }
  }
  // Build the final acceptance log line and answer NORMAL.
  StringBuilder message = new StringBuilder();
  message.append("NodeManager from node ").append(host).append("(cmPort: ")
      .append(cmPort).append(" httpPort: ");
  message.append(httpPort).append(") ")
      .append("registered with capability: ").append(capability);
  message.append(", assigned nodeId ").append(nodeId);
  if (response.getAreNodeLabelsAcceptedByRM()) {
    message.append(", node labels { ").append(
        StringUtils.join(",", nodeLabels) + " } ");
  }
  if (response.getAreNodeAttributesAcceptedByRM()) {
    message.append(", node attributes { ")
        .append(request.getNodeAttributes() + " } ");
  }
  LOG.info(message.toString());
  response.setNodeAction(NodeAction.NORMAL);
  response.setRMIdentifier(ResourceManager.getClusterTimeStamp());
  response.setRMVersion(YarnVersionInfo.getVersion());
  return response;
}
// An NM that reports node labels while the RM uses CENTRALIZED label configuration must
// still register successfully (NORMAL), but its reported labels must be rejected: nothing
// stored in the label manager and AreNodeLabelsAcceptedByRM == false.
@Test
public void testNodeRegistrationWithCentralLabelConfig() throws Exception {
    // Whitelist the node so registration is not refused by the include list.
    writeToHostsFile("host2");
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile.getAbsolutePath());
    // The default configuration type is the centralized one.
    conf.set(YarnConfiguration.NODELABEL_CONFIGURATION_TYPE,
        YarnConfiguration.DEFAULT_NODELABEL_CONFIGURATION_TYPE);
    final RMNodeLabelsManager nodeLabelsMgr = new NullRMNodeLabelsManager();
    rm = new MockRM(conf) {
        @Override
        protected RMNodeLabelsManager createNodeLabelManager() {
            return nodeLabelsMgr;
        }
    };
    rm.start();
    try {
        nodeLabelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("A", "B", "C"));
    } catch (IOException e) {
        Assert.fail("Caught Exception while initializing");
        e.printStackTrace();
    }
    ResourceTrackerService resourceTrackerService = rm.getResourceTrackerService();
    RegisterNodeManagerRequest req = Records.newRecord(RegisterNodeManagerRequest.class);
    NodeId nodeId = NodeId.newInstance("host2", 1234);
    Resource capability = Resources.createResource(1024);
    req.setResource(capability);
    req.setNodeId(nodeId);
    req.setHttpPort(1234);
    req.setNMVersion(YarnVersionInfo.getVersion());
    // NM reports label "A" — should be ignored under central configuration.
    req.setNodeLabels(toNodeLabelSet("A"));
    RegisterNodeManagerResponse response = resourceTrackerService.registerNodeManager(req);
    // registered to RM with central label config
    Assert.assertEquals(NodeAction.NORMAL, response.getNodeAction());
    Assert.assertNull(nodeLabelsMgr.getNodeLabels().get(nodeId));
    Assert.assertFalse(
        "Node Labels should not accepted by RM If its configured with "
            + "Central configuration",
        response.getAreNodeLabelsAcceptedByRM());
    if (rm != null) {
        rm.stop();
    }
}
/**
 * Resets this coordinator to a checkpoint by REPLACING the internal coordinator rather than
 * resetting it in place: the current {@code DeferrableCoordinator} is swapped for a fresh
 * one immediately (so all subsequent calls queue against the new instance), the old one is
 * closed asynchronously, and only after that close completes is the new internal coordinator
 * created, restored from {@code checkpointData}, and fed any deferred calls.
 *
 * @param checkpointId   id of the checkpoint being restored
 * @param checkpointData serialized coordinator state; may be null
 */
@Override
public void resetToCheckpoint(final long checkpointId, @Nullable final byte[] checkpointData) {
    // First bump up the coordinator epoch to fence out the active coordinator.
    LOG.info("Resetting coordinator to checkpoint.");
    // Replace the coordinator variable with a new DeferrableCoordinator instance.
    // At this point the internal coordinator of the new coordinator has not been created.
    // After this point all the subsequent calls will be made to the new coordinator.
    final DeferrableCoordinator oldCoordinator = coordinator;
    final DeferrableCoordinator newCoordinator =
            new DeferrableCoordinator(context.getOperatorId());
    coordinator = newCoordinator;
    // Close the old coordinator asynchronously in a separate closing thread.
    // The future will be completed when the old coordinator closes.
    CompletableFuture<Void> closingFuture = oldCoordinator.closeAsync(closingTimeoutMs);
    // Create and possibly start the coordinator and apply all meanwhile deferred calls
    // capture the status whether the coordinator was started when this method was called
    final boolean wasStarted = this.started;
    closingFuture.whenComplete(
            (ignored, e) -> {
                if (e != null) {
                    // Close failure is logged but does not abort the reset.
                    LOG.warn(
                            String.format(
                                    "Received exception when closing "
                                            + "operator coordinator for %s.",
                                    oldCoordinator.operatorId),
                            e);
                }
                if (!closed) {
                    // The previous coordinator has closed. Create a new one.
                    newCoordinator.createNewInternalCoordinator(context, provider);
                    newCoordinator.resetAndStart(checkpointId, checkpointData, wasStarted);
                    newCoordinator.processPendingCalls();
                }
            });
}
// resetToCheckpoint must REPLACE the internal coordinator: the old context is quiesced, the
// old coordinator never sees the restored state, and a distinct new coordinator receives it.
@Test
void testResetToCheckpoint() throws Exception {
    TestingCoordinatorProvider provider = new TestingCoordinatorProvider(null);
    MockOperatorCoordinatorContext context =
        new MockOperatorCoordinatorContext(OPERATOR_ID, NUM_SUBTASKS);
    RecreateOnResetOperatorCoordinator coordinator = createCoordinator(provider, context);
    // Capture the pre-reset context and internal coordinator to prove they were swapped out.
    RecreateOnResetOperatorCoordinator.QuiesceableContext contextBeforeReset =
        coordinator.getQuiesceableContext();
    TestingOperatorCoordinator internalCoordinatorBeforeReset = getInternalCoordinator(coordinator);
    byte[] stateToRestore = new byte[0];
    coordinator.resetToCheckpoint(1L, stateToRestore);
    // Use the checkpoint to ensure all the previous method invocation has succeeded.
    coordinator.waitForAllAsyncCallsFinish();
    assertThat(contextBeforeReset.isQuiesced()).isTrue();
    // The OLD coordinator must not have been restored.
    assertThat(internalCoordinatorBeforeReset.getLastRestoredCheckpointState()).isNull();
    // The NEW coordinator must be a different instance restored with exactly that state.
    TestingOperatorCoordinator internalCoordinatorAfterReset = getInternalCoordinator(coordinator);
    assertThat(internalCoordinatorAfterReset.getLastRestoredCheckpointState())
        .isEqualTo(stateToRestore);
    assertThat(internalCoordinatorBeforeReset).isNotEqualTo(internalCoordinatorAfterReset);
}
/**
 * Checks that the subject contains exactly the given elements (in any order unless
 * {@code inOrder()} is chained). A null varargs array is treated as a single null element.
 * When exactly one argument is passed and it is itself an Iterable, the flag passed on to
 * {@code containsExactlyElementsIn} triggers the "did you mean elements-in?" hint.
 */
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
    if (varargs == null) {
        // containsExactly((Object[]) null): interpret as a single expected null element.
        return containsExactlyElementsIn(newArrayList((@Nullable Object) null), false);
    }
    boolean singleIterableArg = varargs.length == 1 && varargs[0] instanceof Iterable;
    return containsExactlyElementsIn(asList(varargs), singleIterableArg);
}
// A "one-shot" iterable hands back the SAME iterator on every iterator() call, so the
// subject can only be traversed once; containsExactly(...).inOrder() must still work.
@Test
public void iterableContainsExactlyInOrderWithOneShotIterable() {
    Iterator<Object> oneShot = asList((Object) 1, null, 3).iterator();
    Iterable<Object> iterable = () -> oneShot;
    assertThat(iterable).containsExactly(1, null, 3).inOrder();
}
/**
 * Picks a backend from the given map and returns its RPC address, recording the chosen
 * backend's id in {@code backendIdRef}. Returns null (and leaves the ref untouched) when
 * no backend is available.
 */
@Nullable
public static TNetworkAddress getBackendHost(ImmutableMap<Long, ComputeNode> backendMap,
                                             Reference<Long> backendIdRef) {
    final ComputeNode chosen = getBackend(backendMap);
    if (chosen == null) {
        return null;
    }
    backendIdRef.setRef(chosen.getId());
    return new TNetworkAddress(chosen.getHost(), chosen.getBePort());
}
// Six backends are built but only backend 0 ("address0") is alive; four threads each pick a
// backend 50 times concurrently. Every pick must land on the single alive backend, which
// exercises the thread safety of SimpleScheduler.getBackendHost.
@Test
public void testChooseBackendConcurrently() throws InterruptedException {
    ImmutableMap.Builder<Long, ComputeNode> builder = ImmutableMap.builder();
    for (int i = 0; i < 6; i++) {
        Backend backend = new Backend(i, "address" + i, 0);
        // Only the first backend is marked alive.
        backend.setAlive(i == 0);
        builder.put(backend.getId(), backend);
    }
    ImmutableMap<Long, ComputeNode> backends = builder.build();
    List<Thread> threads = new ArrayList<>();
    for (int i = 0; i < 4; i++) {
        Thread t = new Thread(() -> {
            for (int i1 = 0; i1 < 50; i1++) {
                Reference<Long> idRef = new Reference<>();
                TNetworkAddress address = SimpleScheduler.getBackendHost(backends, idRef);
                Assert.assertNotNull(address);
                Assert.assertEquals("address0", address.hostname);
            }
        });
        threads.add(t);
    }
    for (Thread t : threads) {
        t.start();
    }
    // Join all workers so assertion failures inside the threads surface before test end.
    for (Thread t : threads) {
        t.join();
    }
}
/**
 * Validates this pluggable task: records an error when no plugin is registered for the
 * configured plugin id, validates the configuration tree, and reports validity only when
 * neither this task nor its configuration carries errors.
 */
public boolean isValid() {
    final String pluginId = pluginConfiguration.getId();
    if (PluggableTaskConfigStore.store().preferenceFor(pluginId) == null) {
        addError(TYPE, String.format("Could not find plugin for given pluggable id:[%s].", pluginId));
    }
    configuration.validateTree();
    return errors.isEmpty() && !configuration.hasErrors();
}
// A task configured with an unknown plugin id must record the "could not find plugin"
// error under the "pluggable_task" key after validation.
@Test
public void isValidShouldVerifyIfPluginIdIsValid() {
    final PluggableTask taskWithUnknownPlugin = new PluggableTask(
            new PluginConfiguration("does_not_exist", "1.1"), new Configuration());
    taskWithUnknownPlugin.isValid();
    assertThat(taskWithUnknownPlugin.errors().get("pluggable_task").get(0),
            is("Could not find plugin for given pluggable id:[does_not_exist]."));
}
/**
 * Runs one maintenance pass: expires reserved load balancers first, then deprovisions
 * removable ones and prunes real servers. The returned value averages the results of the
 * two sub-passes — presumably each returns a success metric, so the average reflects
 * overall pass success (TODO confirm the sub-call return semantics at their definitions).
 */
@Override
protected double maintain() {
    expireReserved();
    return (deprovisionRemovable() + pruneReals()) / 2;
}
// Drives a load balancer through the expirer's lifecycle: reserved (never expired while the
// app is preparing) -> inactive (after the app never activates and the reserved timeout
// passes) -> removed (after the inactive timeout passes).
@Test
public void expire_reserved() {
    LoadBalancerExpirer expirer = new LoadBalancerExpirer(tester.nodeRepository(),
        Duration.ofDays(1),
        tester.loadBalancerService(),
        new TestMetric());
    Supplier<Map<LoadBalancerId, LoadBalancer>> loadBalancers =
        () -> tester.nodeRepository().database().readLoadBalancers((ignored) -> true);
    // Prepare application
    ClusterSpec.Id cluster = ClusterSpec.Id.from("qrs");
    ApplicationId app = ProvisioningTester.applicationId();
    LoadBalancerId lb = new LoadBalancerId(app, cluster);
    deployApplication(app, false, cluster);
    // Provisions load balancer in reserved
    assertSame(LoadBalancer.State.reserved, loadBalancers.get().get(lb).state());
    // Expirer does nothing
    expirer.maintain();
    assertSame(LoadBalancer.State.reserved, loadBalancers.get().get(lb).state());
    // Application never activates and nodes are dirtied and readied. Expirer moves load balancer to inactive after timeout
    removeNodesOf(app, cluster);
    tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
    expirer.maintain();
    assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb).state());
    // Expirer does nothing as inactive expiration time has not yet passed
    expirer.maintain();
    assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb).state());
    // Expirer removes inactive load balancer
    tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
    expirer.maintain();
    assertFalse("Inactive load balancer removed", loadBalancers.get().containsKey(lb));
}