focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Parses global-variable declarations out of spreadsheet cells.
 * Each cell may hold several comma-separated declarations of the form
 * {@code "ClassName identifier"}; each becomes one {@link Global}.
 *
 * @param variableCells cells to parse; {@code null} yields an empty list
 * @return one {@link Global} per declaration, in encounter order
 * @throws DecisionTableParseException if a declaration lacks an identifier
 */
public static List<Global> getVariableList( final List<String> variableCells ) {
    final List<Global> result = new ArrayList<>();
    if ( variableCells == null ) {
        return result;
    }
    for ( final String cell : variableCells ) {
        // Split the cell into individual declarations first, then each
        // declaration into its "ClassName identifier" parts.
        final StringTokenizer declarations = new StringTokenizer( cell, "," );
        while ( declarations.hasMoreTokens() ) {
            final String declaration = declarations.nextToken();
            final Global global = new Global();
            final StringTokenizer parts = new StringTokenizer( declaration, " " );
            global.setClassName( parts.nextToken() );
            if ( !parts.hasMoreTokens() ) {
                throw new DecisionTableParseException( "The format for global variables is incorrect. "
                    + "It should be: [Class name, Class otherName]. But it was: [" + cell + "]" );
            }
            global.setIdentifier( parts.nextToken() );
            result.add( global );
        }
    }
    return result;
}
// Verifies that getVariableList() splits one cell on commas into three Global
// entries and extracts each declaration's class name (comma spacing varies on
// purpose: ", " and "," are both accepted).
@Test public void testListVariables() { List<Global> varList = getVariableList(List.of("Var1 var1, Var2 var2,Var3 var3")); assertThat(varList).isNotNull().hasSize(3).extracting(x -> x.getClassName()).containsExactly("Var1", "Var2", "Var3"); }
// Processes one MergeDataTask: pages through all aggregated datum rows for the
// (dataId, group, tenant) key and either merges them into a single config
// (insertOrUpdate + persistence trace) or, when no datum remains, deletes the
// config (tag-aware) and traces the removal. Always publishes a
// ConfigDataChangeEvent on success; on any exception the task is re-queued for
// another merge attempt. Always returns true so the task queue drops the task.
// NOTE(review): the "[merge-delete] ..." log literal appears to contain a raw
// line break here — likely an extraction/view artifact of this dump, left as-is.
@Override public boolean process(NacosTask task) { MergeDataTask mergeTask = (MergeDataTask) task; final String dataId = mergeTask.dataId; final String group = mergeTask.groupId; final String tenant = mergeTask.tenant; final String tag = mergeTask.tag; final String clientIp = mergeTask.getClientIp(); try { List<ConfigInfoAggr> datumList = new ArrayList<>(); int rowCount = configInfoAggrPersistService.aggrConfigInfoCount(dataId, group, tenant); int pageCount = (int) Math.ceil(rowCount * 1.0 / PAGE_SIZE); for (int pageNo = 1; pageNo <= pageCount; pageNo++) { Page<ConfigInfoAggr> page = configInfoAggrPersistService.findConfigInfoAggrByPage(dataId, group, tenant, pageNo, PAGE_SIZE); if (page != null) { datumList.addAll(page.getPageItems()); LOGGER.info("[merge-query] {}, {}, size/total={}/{}", dataId, group, datumList.size(), rowCount); } } final Timestamp time = TimeUtils.getCurrentTime(); if (datumList.size() > 0) { // merge ConfigInfo cf = merge(dataId, group, tenant, datumList); configInfoPersistService.insertOrUpdate(null, null, cf, null); LOGGER.info("[merge-ok] {}, {}, size={}, length={}, md5={}, content={}", dataId, group, datumList.size(), cf.getContent().length(), cf.getMd5(), ContentUtils.truncateContent(cf.getContent())); ConfigTraceService.logPersistenceEvent(dataId, group, tenant, null, time.getTime(), InetUtils.getSelfIP(), ConfigTraceService.PERSISTENCE_EVENT, ConfigTraceService.PERSISTENCE_TYPE_MERGE, cf.getContent()); } else { String eventType; // remove if (StringUtils.isBlank(tag)) { eventType = ConfigTraceService.PERSISTENCE_EVENT; configInfoPersistService.removeConfigInfo(dataId, group, tenant, clientIp, null); } else { eventType = ConfigTraceService.PERSISTENCE_EVENT_TAG + "-" + tag; configInfoTagPersistService.removeConfigInfoTag(dataId, group, tenant, tag, clientIp, null); } LOGGER.warn( "[merge-delete] delete config info because no datum. 
dataId=" + dataId + ", groupId=" + group); ConfigTraceService.logPersistenceEvent(dataId, group, tenant, null, time.getTime(), InetUtils.getSelfIP(), eventType, ConfigTraceService.PERSISTENCE_TYPE_REMOVE, null); } NotifyCenter.publishEvent(new ConfigDataChangeEvent(false, dataId, group, tenant, tag, time.getTime())); } catch (Exception e) { mergeService.addMergeTask(dataId, group, tenant, mergeTask.getClientIp()); LOGGER.info("[merge-error] " + dataId + ", " + group + ", " + e.toString(), e); } return true; }
// With zero aggregated rows and a non-blank tag, process() must take the
// tag-removal branch (removeConfigInfoTag) and still publish a
// ConfigDataChangeEvent carrying dataId/group/tenant/tag. The subscriber +
// AtomicReference capture the async event; the 1s sleep waits for delivery.
@Test void testTagMergerNotExistAggrConfig() throws InterruptedException { String dataId = "dataId12345"; String group = "group123"; String tenant = "tenant1234"; String tag = "23456789"; when(configInfoAggrPersistService.aggrConfigInfoCount(eq(dataId), eq(group), eq(tenant))).thenReturn(0); AtomicReference<ConfigDataChangeEvent> reference = new AtomicReference<>(); NotifyCenter.registerSubscriber(new Subscriber() { @Override public void onEvent(Event event) { ConfigDataChangeEvent event1 = (ConfigDataChangeEvent) event; if (event1.dataId.equals(dataId) && event1.group.equals(group) && tenant.equals(event1.tenant) && tag.equals(event1.tag)) { reference.set((ConfigDataChangeEvent) event); } } @Override public Class<? extends Event> subscribeType() { return ConfigDataChangeEvent.class; } }); MergeDataTask mergeDataTask = new MergeDataTask(dataId, group, tenant, tag, "127.0.0.1"); Mockito.doNothing().when(configInfoTagPersistService) .removeConfigInfoTag(eq(dataId), eq(group), eq(tenant), eq(tag), eq("127.0.0.1"), eq(null)); mergeTaskProcessor.process(mergeDataTask); Mockito.verify(configInfoTagPersistService, times(1)) .removeConfigInfoTag(eq(dataId), eq(group), eq(tenant), eq(tag), eq("127.0.0.1"), eq(null)); Thread.sleep(1000L); assertTrue(reference.get() != null); }
/**
 * Writes the mapping inside its own begin/commit transaction and returns the
 * value previously associated with {@code key} (or {@code null} if none).
 */
@Override
public V put(K key, V value) {
    begin();
    final V previous = transactionalMap.put(key, value);
    commit();
    return previous;
}
// put() must return the value it replaced ("oldValue") and leave the new
// value visible in the underlying map.
@Test public void testPut() { map.put(42, "oldValue"); String oldValue = adapter.put(42, "newValue"); assertEquals("oldValue", oldValue); assertEquals("newValue", map.get(42)); }
// Runs the retryable with exponential backoff (retryBackoffMs * 2^(attempt-1))
// until it succeeds or the retryBackoffMaxMs deadline elapses.
// UnretryableException aborts immediately (wrapped in ExecutionException);
// ExecutionException is retried after sleeping, with the sleep clamped to the
// remaining time budget. Any other exception type propagates unwrapped.
// The trailing null-check guards an "exhausted without error" state that
// should be unreachable, but yields a diagnostic ExecutionException if hit.
public R execute(Retryable<R> retryable) throws ExecutionException { long endMs = time.milliseconds() + retryBackoffMaxMs; int currAttempt = 0; ExecutionException error = null; while (time.milliseconds() <= endMs) { currAttempt++; try { return retryable.call(); } catch (UnretryableException e) { // We've deemed this error to not be worth retrying, so collect the error and // fail immediately. if (error == null) error = new ExecutionException(e); break; } catch (ExecutionException e) { log.warn("Error during retry attempt {}", currAttempt, e); if (error == null) error = e; long waitMs = retryBackoffMs * (long) Math.pow(2, currAttempt - 1); long diff = endMs - time.milliseconds(); waitMs = Math.min(waitMs, diff); if (waitMs <= 0) break; String message = String.format("Attempt %d to make call resulted in an error; sleeping %d ms before retrying", currAttempt, waitMs); log.warn(message, e); time.sleep(waitMs); } } if (error == null) // Really shouldn't ever get to here, but... error = new ExecutionException(new IllegalStateException("Exhausted all retry attempts but no attempt returned value or encountered exception")); throw error; }
// A RuntimeException (here NPE) is neither UnretryableException nor
// ExecutionException, so execute() must let it propagate unwrapped on the
// first attempt — confirmed by MockTime never advancing (no sleep happened).
@Test public void testRuntimeExceptionFailureOnFirstAttempt() { Exception[] attempts = new Exception[] { new NullPointerException("pretend JSON node /userId in response is null"), null }; long retryWaitMs = 1000; long maxWaitMs = 10000; Retryable<String> call = createRetryable(attempts); Time time = new MockTime(0, 0, 0); assertEquals(0L, time.milliseconds()); Retry<String> r = new Retry<>(time, retryWaitMs, maxWaitMs); assertThrows(RuntimeException.class, () -> r.execute(call)); assertEquals(0, time.milliseconds()); }
// Rejects a null host (NullPointerException via checkNotNull), then delegates
// the update to the host store and logs the host IP at INFO level.
@Override public void updateHost(K8sHost host) { checkNotNull(host, ERR_NULL_HOST); hostStore.updateHost(host); log.info(String.format(MSG_HOST, host.hostIp().toString(), MSG_UPDATED)); }
// Pins the null-argument contract: updateHost(null) throws NullPointerException.
@Test(expected = NullPointerException.class) public void testUpdateNullHost() { target.updateHost(null); }
/**
 * Filters the given logic table names down to those governed by a sharding
 * rule, preserving the input iteration order.
 *
 * @param logicTableNames candidate logic table names
 * @return the subset recognized by {@code isShardingTable}
 */
public Collection<String> getShardingRuleTableNames(final Collection<String> logicTableNames) {
    return logicTableNames.stream().filter(each -> isShardingTable(each)).collect(Collectors.toList());
}
// The mixed-case name "Logic_Table" must survive filtering unchanged.
// NOTE(review): this presumes isShardingTable matches the configured table
// case-insensitively — confirm against createMaximumShardingRule()'s config.
@Test void assertGetShardingRuleTableNames() { ShardingRule actual = createMaximumShardingRule(); Collection<String> shardingRuleTableNames = actual.getShardingRuleTableNames(Collections.singleton("Logic_Table")); assertTrue(shardingRuleTableNames.contains("Logic_Table")); }
// Convenience overload: stop with the default behavior, i.e. stop(true).
@Override public void stop() { stop(true); }
// The no-arg stop() must forward exactly one stop(QUERY_ID, true) call to the
// shared runtime and nothing else (verified via InOrder + verifyNoMoreInteractions).
@Test @SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT") public void shouldCloseKafkaStreamsOnStop() { // When: query.stop(); // Then: final InOrder inOrder = inOrder(sharedKafkaStreamsRuntimeImpl); inOrder.verify(sharedKafkaStreamsRuntimeImpl).stop(QUERY_ID, true); inOrder.verifyNoMoreInteractions(); }
// HTTP GET convenience wrapper: delegates to call() with the GET method verb.
public Response get(URL url, Request request) throws IOException { return call(HttpMethods.GET, url, request); }
// A default-configured client must not set connect/read timeouts on the
// underlying request (both setters verified never called). The response is
// opened in try-with-resources only to ensure it is closed.
@Test public void testHttpTimeout_doNotSetByDefault() throws IOException { try (Response ignored = newHttpClient(false, false).get(fakeUrl.toURL(), fakeRequest(null))) { // intentionally empty } Mockito.verify(mockHttpRequest, Mockito.never()).setConnectTimeout(Mockito.anyInt()); Mockito.verify(mockHttpRequest, Mockito.never()).setReadTimeout(Mockito.anyInt()); }
public static Optional<PfxOptions> getPfxKeyStoreOptions(final Map<String, String> props) { // PFX key stores do not have a Private key password final String location = getKeyStoreLocation(props); final String password = getKeyStorePassword(props); if (!Strings.isNullOrEmpty(location)) { return Optional.of(buildPfxOptions(location, password)); } return Optional.empty(); }
// With only a location configured, options are still produced and the
// password defaults to the empty string (not null).
@Test public void shouldBuildKeyStorePfxOptionsWithPathOnly() { // When final Optional<PfxOptions> pfxOptions = VertxSslOptionsFactory.getPfxKeyStoreOptions( ImmutableMap.of( SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "path" ) ); // Then assertThat(pfxOptions.get().getPath(), is("path")); assertThat(pfxOptions.get().getPassword(), is("")); }
// Resolves the repository element to its file id, then delegates to the
// (fileId, versionId) overload which performs the actual deletion.
@Override public void deleteVersion( RepositoryElementInterface element, String versionId ) throws KettleException { String fileId = element.getObjectId().getId(); deleteVersion( fileId, versionId ); }
// Deleting version "103" of element1 (file id "1") must hit exactly that
// file/version pair on the repository and must not touch file "2".
@Test public void deleteVersionTest() throws KettleException { IUnifiedRepository mockRepo = mock( IUnifiedRepository.class ); final HashMap<String, List<VersionSummary>> versionListMap = processVersionMap( mockRepo ); UnifiedRepositoryPurgeService purgeService = new UnifiedRepositoryPurgeService( mockRepo ); String fileId = "1"; String versionId = "103"; purgeService.deleteVersion( element1, versionId ); verify( mockRepo, times( 1 ) ).deleteFileAtVersion( fileId, versionId ); verify( mockRepo, never() ).deleteFileAtVersion( eq( "2" ), anyString() ); }
// Lists committed offsets for the requested consumer groups: builds a
// coordinator-keyed admin-API future over the group ids, drives it with a
// ListConsumerGroupOffsetsHandler (honoring options.requireStable()), and
// wraps the aggregate future in the public result type.
@Override public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs, ListConsumerGroupOffsetsOptions options) { SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, OffsetAndMetadata>> future = ListConsumerGroupOffsetsHandler.newFuture(groupSpecs.keySet()); ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(groupSpecs, options.requireStable(), logContext); invokeDriver(handler, future, options.timeoutMs); return new ListConsumerGroupOffsetsResult(future.all()); }
// Simulates a broker capped at OffsetFetch v7 (no batching): the first
// batched OffsetFetch is rejected, so the admin client must fall back to one
// FindCoordinator + OffsetFetch round-trip per group and still complete the
// result for every group spec.
@Test public void testBatchedListConsumerGroupOffsetsWithNoOffsetFetchBatching() throws Exception { Cluster cluster = mockCluster(1, 0); Time time = new MockTime(); Map<String, ListConsumerGroupOffsetsSpec> groupSpecs = batchedListConsumerGroupOffsetsSpec(); ApiVersion offsetFetchV7 = new ApiVersion() .setApiKey(ApiKeys.OFFSET_FETCH.id) .setMinVersion((short) 0) .setMaxVersion((short) 7); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0")) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singleton(offsetFetchV7))); env.kafkaClient().prepareResponse(prepareBatchedFindCoordinatorResponse(Errors.NONE, env.cluster().controller(), groupSpecs.keySet())); // Prepare a response to force client to attempt batched request creation that throws // NoBatchedOffsetFetchRequestException. This triggers creation of non-batched requests. env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE, Collections.emptyMap())); ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(groupSpecs); // The request handler attempts both FindCoordinator and OffsetFetch requests. This seems // ok since we expect this scenario only during upgrades from versions < 3.0.0 where // some upgraded brokers could handle batched FindCoordinator while non-upgraded coordinators // rejected batched OffsetFetch requests. sendFindCoordinatorResponse(env.kafkaClient(), env.cluster().controller()); sendFindCoordinatorResponse(env.kafkaClient(), env.cluster().controller()); sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); verifyListOffsetsForMultipleGroups(groupSpecs, result); } }
// Convenience overload: converts the partial-XML string to a UTF-8 input
// stream and delegates to the stream-based parser.
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception { return fromXmlPartial(toInputStream(partial, UTF_8), o); }
// Parsing a partial <hg> config with a <filter><ignore> child must yield a
// material whose filter equals one built programmatically with the same
// ignore pattern.
@Test void shouldLoadIgnoresFromHgPartial() throws Exception { String buildXmlPartial = """ <hg url="file:///tmp/testSvnRepo/project1/trunk" > <filter> <ignore pattern="x"/> </filter> </hg>"""; MaterialConfig hgMaterial = xmlLoader.fromXmlPartial(buildXmlPartial, HgMaterialConfig.class); Filter parsedFilter = hgMaterial.filter(); Filter expectedFilter = new Filter(); expectedFilter.add(new IgnoredFiles("x")); assertThat(parsedFilter).isEqualTo(expectedFilter); }
// Point lookup against the materialized table via Kafka Streams IQv2 KeyQuery,
// scoped to a single partition and (optionally) bounded to a consistency
// Position. A failed partition result raises via failedQueryException (some
// failures may be retriable upstream); a null result yields an empty iterator
// with the query's position; otherwise the single row is wrapped with its
// timestamp. NotUpToBoundException / MaterializationException pass through
// untouched; anything else is wrapped in a MaterializationException.
@Override public KsMaterializedQueryResult<Row> get( final GenericKey key, final int partition, final Optional<Position> position ) { try { final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key); StateQueryRequest<ValueAndTimestamp<GenericRow>> request = inStore(stateStore.getStateStoreName()) .withQuery(query) .withPartitions(ImmutableSet.of(partition)); if (position.isPresent()) { request = request.withPositionBound(PositionBound.at(position.get())); } final StateQueryResult<ValueAndTimestamp<GenericRow>> result = stateStore.getKafkaStreams().query(request); final QueryResult<ValueAndTimestamp<GenericRow>> queryResult = result.getPartitionResults().get(partition); // Some of these failures are retriable, and in the future, we may want to retry // locally before throwing. if (queryResult.isFailure()) { throw failedQueryException(queryResult); } else if (queryResult.getResult() == null) { return KsMaterializedQueryResult.rowIteratorWithPosition( Collections.emptyIterator(), queryResult.getPosition()); } else { final ValueAndTimestamp<GenericRow> row = queryResult.getResult(); return KsMaterializedQueryResult.rowIteratorWithPosition( ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp())) .iterator(), queryResult.getPosition()); } } catch (final NotUpToBoundException | MaterializationException e) { throw e; } catch (final Exception e) { throw new MaterializationException("Failed to get value from materialized table", e); } }
// When the state-store query reports a failure, get() must surface a
// MaterializationException whose message carries the failure detail.
@Test public void shouldThrowIfKeyQueryResultIsError() { // Given: when(kafkaStreams.query(any())).thenReturn(getErrorResult()); // When: final Exception e = assertThrows( MaterializationException.class, () -> table.get(A_KEY, PARTITION) ); // Then: assertThat(e.getMessage(), containsString("Error!")); assertThat(e, (instanceOf(MaterializationException.class))); }
// Produces one output row per input file: opens the next file (null = done),
// fills the configured fields (file content — with optional encoding-aware
// trimming and binary/string handling — or file size), converts each value to
// its target meta type, repeats previous values for null repeated fields,
// then appends the optional bookkeeping columns (filename, row number, short
// name, extension, path, hidden flag, modification date, URI, root URI) in a
// fixed order. The previous row is cloned for the repeat-field feature.
// NOTE(review): `new Long(...)` / `new Boolean(...)` are deprecated boxing
// constructors — Long.valueOf/Boolean.valueOf would be the modern form.
Object[] getOneRow() throws KettleException { if ( !openNextFile() ) { return null; } // Build an empty row based on the meta-data Object[] outputRowData = buildEmptyRow(); try { // Create new row or clone if ( meta.getIsInFields() ) { outputRowData = copyOrCloneArrayFromLoadFile( outputRowData, data.readrow ); } // Read fields... for ( int i = 0; i < data.nrInputFields; i++ ) { // Get field LoadFileInputField loadFileInputField = meta.getInputFields()[i]; Object o = null; int indexField = data.totalpreviousfields + i; ValueMetaInterface targetValueMeta = data.outputRowMeta.getValueMeta( indexField ); ValueMetaInterface sourceValueMeta = data.convertRowMeta.getValueMeta( indexField ); switch ( loadFileInputField.getElementType() ) { case LoadFileInputField.ELEMENT_TYPE_FILECONTENT: // DO Trimming! switch ( loadFileInputField.getTrimType() ) { case LoadFileInputField.TYPE_TRIM_LEFT: if ( meta.getEncoding() != null ) { data.filecontent = Const.ltrim( new String( data.filecontent, meta.getEncoding() ) ).getBytes(); } else { data.filecontent = Const.ltrim( new String( data.filecontent ) ).getBytes(); } break; case LoadFileInputField.TYPE_TRIM_RIGHT: if ( meta.getEncoding() != null ) { data.filecontent = Const.rtrim( new String( data.filecontent, meta.getEncoding() ) ).getBytes(); } else { data.filecontent = Const.rtrim( new String( data.filecontent ) ).getBytes(); } break; case LoadFileInputField.TYPE_TRIM_BOTH: if ( meta.getEncoding() != null ) { data.filecontent = Const.trim( new String( data.filecontent, meta.getEncoding() ) ).getBytes(); } else { data.filecontent = Const.trim( new String( data.filecontent ) ).getBytes(); } break; default: break; } if ( targetValueMeta.getType() != ValueMetaInterface.TYPE_BINARY ) { // handle as a String if ( meta.getEncoding() != null ) { o = new String( data.filecontent, meta.getEncoding() ); } else { o = new String( data.filecontent ); } } else { // save as byte[] without any conversion o = data.filecontent; } break; case 
LoadFileInputField.ELEMENT_TYPE_FILESIZE: o = String.valueOf( data.fileSize ); break; default: break; } if ( targetValueMeta.getType() == ValueMetaInterface.TYPE_BINARY ) { // save as byte[] without any conversion outputRowData[indexField] = o; } else { // convert string (processing type) to the target type outputRowData[indexField] = targetValueMeta.convertData( sourceValueMeta, o ); } // Do we need to repeat this field if it is null? if ( loadFileInputField.isRepeated() ) { if ( data.previousRow != null && o == null ) { outputRowData[indexField] = data.previousRow[indexField]; } } } // End of loop over fields... int rowIndex = data.totalpreviousfields + data.nrInputFields; // See if we need to add the filename to the row... if ( meta.includeFilename() && meta.getFilenameField() != null && meta.getFilenameField().length() > 0 ) { outputRowData[rowIndex++] = data.filename; } // See if we need to add the row number to the row... if ( meta.includeRowNumber() && meta.getRowNumberField() != null && meta.getRowNumberField().length() > 0 ) { outputRowData[rowIndex++] = new Long( data.rownr ); } // Possibly add short filename... 
if ( meta.getShortFileNameField() != null && meta.getShortFileNameField().length() > 0 ) { outputRowData[rowIndex++] = data.shortFilename; } // Add Extension if ( meta.getExtensionField() != null && meta.getExtensionField().length() > 0 ) { outputRowData[rowIndex++] = data.extension; } // add path if ( meta.getPathField() != null && meta.getPathField().length() > 0 ) { outputRowData[rowIndex++] = data.path; } // add Hidden if ( meta.isHiddenField() != null && meta.isHiddenField().length() > 0 ) { outputRowData[rowIndex++] = new Boolean( data.hidden ); } // Add modification date if ( meta.getLastModificationDateField() != null && meta.getLastModificationDateField().length() > 0 ) { outputRowData[rowIndex++] = data.lastModificationDateTime; } // Add Uri if ( meta.getUriField() != null && meta.getUriField().length() > 0 ) { outputRowData[rowIndex++] = data.uriName; } // Add RootUri if ( meta.getRootUriField() != null && meta.getRootUriField().length() > 0 ) { outputRowData[rowIndex++] = data.rootUriName; } RowMetaInterface irow = getInputRowMeta(); data.previousRow = irow == null ? outputRowData : irow.cloneRow( outputRowData ); // copy it to make // surely the next step doesn't change it in between... incrementLinesInput(); data.rownr++; } catch ( Exception e ) { throw new KettleException( "Error during processing a row", e ); } return outputRowData; }
// A binary input field must keep the raw bytes: filecontent after getOneRow()
// must equal the PNG's bytes with no charset conversion applied.
// NOTE(review): with Mockito, the second doReturn(...) for the same
// getValueMeta(anyInt()) stub overrides the first, so every call yields
// ValueMetaBinary — the ValueMetaString stub above is effectively dead.
@Test public void testByteArray() throws Exception { RowMetaInterface mockedRowMetaInterface = mock( RowMetaInterface.class ); stepLoadFileInput.data.outputRowMeta = mockedRowMetaInterface; stepLoadFileInput.data.convertRowMeta = mockedRowMetaInterface; Mockito.doReturn( new ValueMetaString() ).when( mockedRowMetaInterface ).getValueMeta( anyInt() ); // byte array Mockito.doReturn( new ValueMetaBinary() ).when( mockedRowMetaInterface ).getValueMeta( anyInt() ); ( (LoadFileInputMeta) runtimeSMI ).setEncoding( "UTF-8" ); stepInputFiles.addFile( getFile( "pentaho_splash.png" ) ); inputField = new LoadFileInputField(); inputField.setType( ValueMetaInterface.TYPE_BINARY ); ( (LoadFileInputMeta) runtimeSMI ).setInputFields( new LoadFileInputField[]{ inputField } ); assertNotNull( stepLoadFileInput.getOneRow() ); assertArrayEquals( IOUtils.toByteArray( getFile( "pentaho_splash.png" ).getContent().getInputStream() ), stepLoadFileInput.data.filecontent ); }
// Convenience overload: retries with the system wall clock (Time.SYSTEM);
// the full overload accepts an injectable Time for testing.
public static <T> T retryUntilTimeout(Callable<T> callable, Supplier<String> description, Duration timeoutDuration, long retryBackoffMs) throws Exception { return retryUntilTimeout(callable, description, timeoutDuration, retryBackoffMs, Time.SYSTEM); }
// A null or negative timeout must still allow exactly one attempt each:
// both calls succeed and the callable runs exactly twice in total.
@Test public void testInvalidTimeDuration() throws Exception { Mockito.when(mockCallable.call()).thenReturn("success"); assertEquals("success", RetryUtil.retryUntilTimeout(mockCallable, testMsg, null, 10, mockTime)); assertEquals("success", RetryUtil.retryUntilTimeout(mockCallable, testMsg, Duration.ofMillis(-1), 10, mockTime)); Mockito.verify(mockCallable, Mockito.times(2)).call(); }
/**
 * Opens {@code file} for reading, translating any checked {@link IOException}
 * from the underlying helper into an unchecked {@link IllegalStateException}
 * (message "Can not open file " + file) with the original cause attached.
 */
public FileInputStream openInputStream(File file) {
    try {
        return openInputStreamOrThrowIOE(file);
    } catch (IOException cause) {
        throw new IllegalStateException("Can not open file " + file, cause);
    }
}
// Passing a directory must surface as IllegalStateException with the wrapper
// message, while the root cause keeps the underlying "is a directory" detail.
@Test public void openInputStream_throws_ISE_if_file_is_a_directory() throws Exception { File dir = temp.newFolder(); assertThatThrownBy(() -> underTest.openInputStream(dir)) .isInstanceOf(IllegalStateException.class) .hasMessage("Can not open file " + dir) .hasRootCauseMessage("File " + dir + " exists but is a directory"); }
// Matcher for WriteFiles transforms whose sharding is left to the runner:
// matches only when the transform's URN is WRITE_FILES_TRANSFORM_URN and the
// translated payload reports runner-determined sharding. A payload parse
// failure (IOException) is rethrown as RuntimeException with context.
public static PTransformMatcher writeWithRunnerDeterminedSharding() { return application -> { if (PTransformTranslation.WRITE_FILES_TRANSFORM_URN.equals( PTransformTranslation.urnForTransformOrNull(application.getTransform()))) { try { return WriteFilesTranslation.isRunnerDeterminedSharding((AppliedPTransform) application); } catch (IOException exc) { throw new RuntimeException( String.format( "Transform with URN %s failed to parse: %s", PTransformTranslation.WRITE_FILES_TRANSFORM_URN, application.getTransform()), exc); } } return false; }; }
// The matcher must accept a plain WriteFiles (runner-determined sharding) and
// reject the same write once sharding is fixed — either statically
// (withNumShards) or via a custom sharding side input (withSharding).
@Test public void writeWithRunnerDeterminedSharding() { ResourceId outputDirectory = LocalResources.fromString("/foo/bar", true /* isDirectory */); FilenamePolicy policy = DefaultFilenamePolicy.fromStandardParameters( StaticValueProvider.of(outputDirectory), DefaultFilenamePolicy.DEFAULT_UNWINDOWED_SHARD_TEMPLATE, "", false); WriteFiles<Integer, Void, Integer> write = WriteFiles.to( new FileBasedSink<Integer, Void, Integer>( StaticValueProvider.of(outputDirectory), DynamicFileDestinations.constant(policy)) { @Override public WriteOperation<Void, Integer> createWriteOperation() { return null; } }); assertThat( PTransformMatchers.writeWithRunnerDeterminedSharding().matches(appliedWrite(write)), is(true)); WriteFiles<Integer, Void, Integer> withStaticSharding = write.withNumShards(3); assertThat( PTransformMatchers.writeWithRunnerDeterminedSharding() .matches(appliedWrite(withStaticSharding)), is(false)); WriteFiles<Integer, Void, Integer> withCustomSharding = write.withSharding(Sum.integersGlobally().asSingletonView()); assertThat( PTransformMatchers.writeWithRunnerDeterminedSharding() .matches(appliedWrite(withCustomSharding)), is(false)); }
// Extracts the access_token value from an OAuth token-endpoint JSON response,
// trimming the response body to MAX_RESPONSE_BODY_LENGTH chars in the error
// message when the token is absent.
// NOTE(review): Jackson's JsonNode.at() never returns null — it returns a
// MissingNode for an absent pointer — so this null guard and its IOException
// are unreachable for parseable JSON. A missing token instead reaches
// sanitizeString with a null textValue (the sibling test expects
// IllegalArgumentException, which is consistent with that path). Fixing this
// to check isMissingNode() would change the thrown exception type and break
// that test — flagging rather than changing.
static String parseAccessToken(String responseBody) throws IOException { ObjectMapper mapper = new ObjectMapper(); JsonNode rootNode = mapper.readTree(responseBody); JsonNode accessTokenNode = rootNode.at("/access_token"); if (accessTokenNode == null) { // Only grab the first N characters so that if the response body is huge, we don't // blow up. String snippet = responseBody; if (snippet.length() > MAX_RESPONSE_BODY_LENGTH) { int actualLength = responseBody.length(); String s = responseBody.substring(0, MAX_RESPONSE_BODY_LENGTH); snippet = String.format("%s (trimmed to first %d characters out of %d total)", s, MAX_RESPONSE_BODY_LENGTH, actualLength); } throw new IOException(String.format("The token endpoint response did not contain an access_token value. Response: (%s)", snippet)); } return sanitizeString("the token endpoint response's access_token JSON attribute", accessTokenNode.textValue()); }
// A response without access_token must be rejected. The expected exception is
// IllegalArgumentException (from the sanitize step on the absent value), not
// the IOException declared for that case in the parser.
@Test public void testParseAccessTokenMissingAccessToken() { ObjectMapper mapper = new ObjectMapper(); ObjectNode node = mapper.createObjectNode(); node.put("sub", "jdoe"); assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.parseAccessToken(mapper.writeValueAsString(node))); }
// Plain accessor into the precomputed values list; an out-of-range index
// propagates the list's IndexOutOfBoundsException unchanged.
@Override public Num getValue(int index) { return values.get(index); }
// Exercises cash-flow computation through a long close + short open + short
// close sequence on a doubling price series, pinning the per-bar values
// (including the negative values while short).
@Test public void cashFlowShortSell() { BarSeries sampleBarSeries = new MockBarSeries(numFunction, 1, 2, 4, 8, 16, 32); TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, sampleBarSeries), Trade.sellAt(2, sampleBarSeries), Trade.sellAt(2, sampleBarSeries), Trade.buyAt(4, sampleBarSeries), Trade.buyAt(4, sampleBarSeries), Trade.sellAt(5, sampleBarSeries)); CashFlow cashFlow = new CashFlow(sampleBarSeries, tradingRecord); assertNumEquals(1, cashFlow.getValue(0)); assertNumEquals(2, cashFlow.getValue(1)); assertNumEquals(4, cashFlow.getValue(2)); assertNumEquals(0, cashFlow.getValue(3)); assertNumEquals(-8, cashFlow.getValue(4)); assertNumEquals(-8, cashFlow.getValue(5)); }
// Rewrites a phrase-style BooleanQuery whose clauses are wildcard/prefix/term
// queries into an ordered SpanNearQuery (slop 0) so multi-term subqueries can
// participate in phrase matching. The original query is returned unchanged
// when: it is not a BooleanQuery, any clause has an unsupported query type,
// or no wildcard/prefix clause is present (plain term phrases need no rewrite).
public static Query convertToMultiTermSpanQuery(Query query) { if (!(query instanceof BooleanQuery)) { return query; } LOGGER.debug("Perform rewriting for the phrase query {}.", query); ArrayList<SpanQuery> spanQueryLst = new ArrayList<>(); boolean prefixOrSuffixQueryFound = false; for (BooleanClause clause : ((BooleanQuery) query).clauses()) { Query q = clause.getQuery(); if (q instanceof WildcardQuery || q instanceof PrefixQuery) { prefixOrSuffixQueryFound = true; spanQueryLst.add(new SpanMultiTermQueryWrapper<>((AutomatonQuery) q)); } else if (q instanceof TermQuery) { spanQueryLst.add(new SpanTermQuery(((TermQuery) q).getTerm())); } else { LOGGER.info("query can not be handled currently {} ", q); return query; } } if (!prefixOrSuffixQueryFound) { return query; } SpanNearQuery spanNearQuery = new SpanNearQuery(spanQueryLst.toArray(new SpanQuery[0]), 0, true); LOGGER.debug("The phrase query {} is re-written as {}", query, spanNearQuery); return spanNearQuery; }
// Covers the span-rewrite matrix: wildcard+prefix (2 clauses), with a middle
// term (3 clauses), three multi-term clauses, a single wildcard clause, and
// the pass-through case of a term-only boolean query.
// NOTE(review): the inline "Test 3" comment describes "*pache real* pino*",
// but the clauses actually built reuse the same "pino" PrefixQuery twice —
// presumably the comment is stale; verify intent.
@Test public void testBooleanQueryRewrittenToSpanQuery() { // Test 1: The input is a boolean query with 2 clauses: "*pache pino*" BooleanQuery.Builder builder = new BooleanQuery.Builder(); WildcardQuery wildcardQuery = new WildcardQuery(new Term("field", "*apche")); PrefixQuery prefixQuery = new PrefixQuery(new Term("field", "pino")); builder.add(new BooleanClause(wildcardQuery, BooleanClause.Occur.SHOULD)) .add(new BooleanClause(prefixQuery, BooleanClause.Occur.SHOULD)); SpanQuery[] spanQueries1 = {new SpanMultiTermQueryWrapper<>(wildcardQuery), new SpanMultiTermQueryWrapper<>(prefixQuery)}; SpanQuery expectedQuery = new SpanNearQuery(spanQueries1, 0, true); Assert.assertEquals(expectedQuery, LuceneTextIndexUtils.convertToMultiTermSpanQuery(builder.build())); // Test 2: The input is a boolean query with 3 clauses: "*pache real pino*" builder = new BooleanQuery.Builder(); Term term = new Term("field", "real"); builder.add(new BooleanClause(wildcardQuery, BooleanClause.Occur.SHOULD)) .add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.SHOULD)) .add(new BooleanClause(prefixQuery, BooleanClause.Occur.SHOULD)); SpanQuery[] spanQueries2 = {new SpanMultiTermQueryWrapper<>(wildcardQuery), new SpanTermQuery(term), new SpanMultiTermQueryWrapper<>( prefixQuery)}; expectedQuery = new SpanNearQuery(spanQueries2, 0, true); Assert.assertEquals(expectedQuery, LuceneTextIndexUtils.convertToMultiTermSpanQuery(builder.build())); // Test 3: The input is a boolean query with 3 clauses: "*pache real* pino*" builder = new BooleanQuery.Builder(); builder.add(new BooleanClause(wildcardQuery, BooleanClause.Occur.SHOULD)) .add(new BooleanClause(prefixQuery, BooleanClause.Occur.SHOULD)) .add(new BooleanClause(prefixQuery, BooleanClause.Occur.SHOULD)); SpanQuery[] spanQueries3 = {new SpanMultiTermQueryWrapper<>(wildcardQuery), new SpanMultiTermQueryWrapper<>( prefixQuery), new SpanMultiTermQueryWrapper<>(prefixQuery)}; expectedQuery = new SpanNearQuery(spanQueries3, 0, true); 
Assert.assertEquals(expectedQuery, LuceneTextIndexUtils.convertToMultiTermSpanQuery(builder.build())); // Test 4: The input is a boolean query with 1 clause: "*pino*". WildcardQuery wildcardQuery1 = new WildcardQuery(new Term("field", "*pino*")); builder = new BooleanQuery.Builder(); builder.add(new BooleanClause(wildcardQuery1, BooleanClause.Occur.SHOULD)); SpanQuery[] spanQueries4 = {new SpanMultiTermQueryWrapper<>(wildcardQuery1)}; expectedQuery = new SpanNearQuery(spanQueries4, 0, true); Assert.assertEquals(expectedQuery, LuceneTextIndexUtils.convertToMultiTermSpanQuery(builder.build())); // Test 5: Boolean queries without any wildcard/prefix subqueries are left unchanged. builder = new BooleanQuery.Builder(); builder.add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.SHOULD)) .add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.SHOULD)); BooleanQuery q = builder.build(); Assert.assertEquals(q, LuceneTextIndexUtils.convertToMultiTermSpanQuery(q)); }
/**
 * Extracts the JSON document in {@code value} into one {@code Result} per
 * flattened key/value pair (positions are -1/-1 since JSON extraction has no
 * character offsets). An {@link IOException} from JSON parsing is rethrown
 * as an {@code ExtractorException}.
 */
@Override
protected Result[] run(String value) {
    final Map<String, Object> fields;
    try {
        fields = extractJson(value);
    } catch (IOException e) {
        throw new ExtractorException(e);
    }
    final Result[] results = new Result[fields.size()];
    int i = 0;
    for (final Map.Entry<String, Object> field : fields.entrySet()) {
        results[i++] = new Result(field.getValue(), field.getKey(), -1, -1);
    }
    return results;
}
// Nested JSON objects must be flattened into underscore-joined keys
// (object_text, object_nested_text, ...) with original value types preserved.
@Test public void testRunWithObject() throws Exception { final String value = "{\"object\": {\"text\": \"foobar\", \"number\": 1234.5678, \"bool\": true, \"nested\": {\"text\": \"foobar\"}}}"; final Extractor.Result[] results = jsonExtractor.run(value); assertThat(results).contains( new Extractor.Result("foobar", "object_text", -1, -1), new Extractor.Result(1234.5678, "object_number", -1, -1), new Extractor.Result(true, "object_bool", -1, -1), new Extractor.Result("foobar", "object_nested_text", -1, -1) ); }
/**
 * Evaluates every output field of the processing context and, for each
 * non-null value, records it both as a result variable on {@code toUpdate}
 * and as a name/value pair back on {@code processingDTO} (so later fields can
 * reference earlier results). Null evaluations are skipped entirely.
 */
static void populateOutputFields(final PMML4Result toUpdate, final ProcessingDTO processingDTO) {
    logger.debug("populateOutputFields {} {}", toUpdate, processingDTO);
    processingDTO.getOutputFields().forEach(outputField -> {
        final Object value = outputField.evaluate(processingDTO);
        if (value == null) {
            return; // nothing to record for this field
        }
        final String name = outputField.getName();
        toUpdate.addResultVariable(name, value);
        processingDTO.addKiePMMLNameValue(new KiePMMLNameValue(name, value));
    });
}
// A TRANSFORMED_VALUE output field backed by a constant expression must land
// in the result variables under its output name with the constant's value.
@Test void populateTransformedOutputFieldWithConstant() { KiePMMLConstant kiePMMLConstant = new KiePMMLConstant("NAME", Collections.emptyList(), "String", null); KiePMMLOutputField outputField = KiePMMLOutputField.builder(OUTPUT_NAME, Collections.emptyList()) .withResultFeature(RESULT_FEATURE.TRANSFORMED_VALUE) .withKiePMMLExpression(kiePMMLConstant) .build(); KiePMMLTestingModel kiePMMLModel = testingModelBuilder(outputField).build(); ProcessingDTO processingDTO = buildProcessingDTOWithEmptyNameValues(kiePMMLModel); PMML4Result toUpdate = new PMML4Result(); PostProcess.populateOutputFields(toUpdate, processingDTO); assertThat(toUpdate.getResultVariables()).isNotEmpty(); assertThat(toUpdate.getResultVariables()).containsKey(OUTPUT_NAME); assertThat(toUpdate.getResultVariables().get(OUTPUT_NAME)).isEqualTo(kiePMMLConstant.getValue()); }
/**
 * Reports the JDBC URL prefixes this database type claims: the classic MySQL
 * driver prefix and the X DevAPI ("mysqlx") prefix.
 */
@Override
public Collection<String> getJdbcUrlPrefixes() {
    return Arrays.asList(new String[] {"jdbc:mysql:", "jdbc:mysqlx:"});
}
// Loads the MySQL DatabaseType through the typed SPI and pins both the prefix
// values and their order.
@Test void assertGetJdbcUrlPrefixes() { assertThat(TypedSPILoader.getService(DatabaseType.class, "MySQL").getJdbcUrlPrefixes(), is(Arrays.asList("jdbc:mysql:", "jdbc:mysqlx:"))); }
// Encodes an Intent to JSON: type/id/key/appId (app name URL-path-escaped),
// an optional resourceGroup, a resources array (Link resources delegate to
// the Link codec, anything else falls back to toString()), and the current
// intent state when the IntentService reports one (omitted if null).
@Override public ObjectNode encode(Intent intent, CodecContext context) { checkNotNull(intent, "Intent cannot be null"); final ObjectNode result = context.mapper().createObjectNode() .put(TYPE, intent.getClass().getSimpleName()) .put(ID, intent.id().toString()) .put(KEY, intent.key().toString()) .put(APP_ID, UrlEscapers.urlPathSegmentEscaper() .escape(intent.appId().name())); if (intent.resourceGroup() != null) { result.put(RESOURCE_GROUP, intent.resourceGroup().toString()); } final ArrayNode jsonResources = result.putArray(RESOURCES); intent.resources() .forEach(resource -> { if (resource instanceof Link) { jsonResources.add(context.codec(Link.class).encode((Link) resource, context)); } else { jsonResources.add(resource.toString()); } }); IntentService service = context.getService(IntentService.class); IntentState state = service.getIntentState(intent.key()); if (state != null) { result.put(STATE, state.toString()); } return result; }
// Round-trip check: a point-to-point intent encoded by its codec matches the source intent.
@Test
public void pointToPointIntent() {
    ConnectPoint ingress = NetTestTools.connectPoint("ingress", 1);
    ConnectPoint egress = NetTestTools.connectPoint("egress", 2);
    final PointToPointIntent intent = PointToPointIntent.builder()
            .appId(appId)
            .selector(emptySelector)
            .treatment(emptyTreatment)
            .filteredIngressPoint(new FilteredConnectPoint(ingress))
            .filteredEgressPoint(new FilteredConnectPoint(egress))
            .build();
    final JsonCodec<PointToPointIntent> intentCodec = context.codec(PointToPointIntent.class);
    assertThat(intentCodec, notNullValue());
    final ObjectNode intentJson = intentCodec.encode(intent, context);
    assertThat(intentJson, matchesIntent(intent));
}
/**
 * Writes the given rows into {@code tableName} via single-row INSERT statements.
 *
 * <p>Fix: the {@link Statement} is now managed by try-with-resources, so it is closed even
 * when an INSERT fails and the SQLException is rethrown (previously {@code stmt.close()}
 * was skipped on the error path). SQL construction is extracted into a helper.
 *
 * @param tableName target table
 * @param rows list of column-name to value maps; an empty list is a no-op returning false
 * @return true when all rows were written, false when there was nothing to write
 * @throws JDBCResourceManagerException on any connection or insert failure
 */
@Override
@SuppressWarnings("nullness")
public boolean write(String tableName, List<Map<String, Object>> rows)
    throws JDBCResourceManagerException {
  if (rows.size() == 0) {
    return false;
  }
  LOG.info("Attempting to write {} rows to {}.{}.", rows.size(), databaseName, tableName);
  try (Connection con = driver.getConnection(getUri(), username, password);
      Statement stmt = con.createStatement()) {
    for (Map<String, Object> row : rows) {
      String sql = buildInsertStatement(tableName, row);
      try {
        LOG.info("Running SQL statement: " + sql);
        stmt.executeUpdate(sql);
      } catch (SQLException e) {
        throw new JDBCResourceManagerException(
            "Failed to insert values into table with SQL statement: " + sql, e);
      }
    }
  } catch (SQLException e) {
    throw new JDBCResourceManagerException(
        String.format("Exception occurred when trying to write records to %s.", tableName), e);
  }
  LOG.info("Successfully wrote {} rows to {}.{}.", rows.size(), databaseName, tableName);
  return true;
}

/**
 * Builds a single-row INSERT statement. Numeric literals, booleans and ARRAY[ constructs
 * are inlined verbatim; everything else is single-quoted.
 *
 * <p>NOTE: values are concatenated, not parameterized — acceptable only because this is
 * test tooling operating on trusted fixture data, never on external input.
 */
private static String buildInsertStatement(String tableName, Map<String, Object> row) {
  List<String> columns = new ArrayList<>(row.keySet());
  StringBuilder sql =
      new StringBuilder("INSERT INTO ")
          .append(tableName)
          .append("(")
          .append(String.join(",", columns))
          .append(") VALUES (");
  List<String> valueList = new ArrayList<>();
  for (String colName : columns) {
    Object value = row.get(colName);
    if (value == null) {
      // SQL NULL: String.join renders a null element as the literal "null".
      valueList.add(null);
    } else if (NumberUtils.isCreatable(value.toString())
        || "true".equalsIgnoreCase(value.toString())
        || "false".equalsIgnoreCase(value.toString())
        || value.toString().startsWith("ARRAY[")) {
      valueList.add(String.valueOf(value));
    } else {
      valueList.add("'" + value + "'");
    }
  }
  return sql.append(String.join(",", valueList)).append(")").toString();
}
// write() must return true and issue exactly one executeUpdate when JDBC raises no error.
@Test
public void testWriteShouldReturnTrueIfJDBCDoesNotThrowAnyError() throws SQLException {
    when(container.getHost()).thenReturn(HOST);
    when(container.getMappedPort(JDBC_PORT)).thenReturn(MAPPED_PORT);
    assertTrue(testManager.write(TABLE_NAME, ImmutableList.of(ImmutableMap.of("key", "test"))));
    verify(driver.getConnection(any(), any(), any()).createStatement()).executeUpdate(anyString());
}
/**
 * Returns JRE metadata matching the given OS and architecture filters.
 * Pure delegation: all filtering logic lives in the JRE handler.
 */
@Override
public List<JreInfoRestResponse> getJresMetadata(String os, String arch) {
    return this.jresHandler.getJresMetadata(os, arch);
}
// Fix: corrected the typo "shoudl" -> "should" in the test method name (safe rename:
// JUnit discovers tests by annotation, nothing references the method by name).
// Given the handler returns no JRE metadata, the endpoint must answer 200 with "[]".
@Test
void getJres_shouldReturnEmptyJsonArray_whenNoResults() throws Exception {
    when(jresHandler.getJresMetadata(null, null)).thenReturn(List.of());
    mockMvc.perform(get(JRE_ENDPOINT))
            .andExpect(status().isOk())
            .andExpect(content().json("[]"));
}
/**
 * Acknowledges the given messages for the consumer group. Blocking wrapper around ackAsync.
 *
 * @return the number of messages actually acknowledged
 */
@Override
public long ack(String groupName, StreamMessageId... id) {
    return get(ackAsync(groupName, id));
}
// Acknowledging the two delivered messages must report an ack count of 2.
@Test
public void testAck() {
    RStream<String, String> stream = redisson.getStream("test");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    // Only the two messages added after group creation are "never delivered" to the group.
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    assertThat(stream.ack("testGroup", id1, id2)).isEqualTo(2);
}
/**
 * Builds the processing-log record for a serialization error: a struct carrying the
 * SERIALIZATION_ERROR message type id plus the error details shaped per config
 * (e.g. whether the offending row is included).
 */
@Override
public SchemaAndValue get(final ProcessingLogConfig config) {
    final Struct struct = new Struct(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA)
        .put(ProcessingLogMessageSchema.TYPE, MessageType.SERIALIZATION_ERROR.getTypeId())
        .put(ProcessingLogMessageSchema.SERIALIZATION_ERROR, serializationError(config));
    return new SchemaAndValue(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA, struct);
}
// With INCLUDE_ROWS=false the serialization-error struct must omit the record payload.
@Test
public void shouldBuildSerializationErrorWithNullRecordIfIncludeRowFalse() {
    // Given:
    final ProcessingLogConfig config = new ProcessingLogConfig(
        Collections.singletonMap(ProcessingLogConfig.INCLUDE_ROWS, false)
    );
    final SerializationError<GenericRow> serError =
        new SerializationError<>(ERROR, Optional.of(RECORD), TOPIC, false);
    // When:
    final SchemaAndValue msg = serError.get(config);
    // Then:
    final Struct struct = (Struct) msg.value();
    assertThat(
        struct.get(ProcessingLogMessageSchema.TYPE),
        equalTo(ProcessingLogMessageSchema.MessageType.SERIALIZATION_ERROR.getTypeId()));
    final Struct serializationError = struct.getStruct(SERIALIZATION_ERROR);
    assertThat(serializationError.get(SERIALIZATION_ERROR_FIELD_RECORD), is(nullValue()));
}
/**
 * Starts the headless KSQL server: loads UDFs, ensures the processing-log topic exists,
 * configures RocksDB, executes the queries file, then kicks off the version checker.
 * Any startup failure is logged with the query-file path and rethrown.
 */
public void startAsync() {
    try {
        udfLoader.load();
        ProcessingLogServerUtils.maybeCreateProcessingLogTopic(
            serviceContext.getTopicClient(), processingLogConfig, ksqlConfig);
        // Stream auto-create is a REST-mode feature; warn rather than fail in headless mode.
        if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) {
            log.warn("processing log auto-create is enabled, but this is not supported "
                + "for headless mode.");
        }
        rocksDBConfigSetterHandler.accept(ksqlConfig);
        processesQueryFile(readQueriesFile(queriesFile));
        showWelcomeMessage();
        // Version checker wants flattened string properties; skip null values.
        final Properties properties = new Properties();
        ksqlConfig.originals().forEach((key, value) -> {
            if (nonNull(value)) {
                properties.put(key, value.toString());
            }
        });
        versionChecker.start(KsqlModuleType.SERVER, properties);
    } catch (final Exception e) {
        log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
        throw e;
    }
}
// Statements must be prepared lazily: prepare(N+1) only after execute(N) has run.
@Test
public void shouldOnlyPrepareNextStatementOncePreviousStatementHasBeenExecuted() {
    // Given:
    when(ksqlEngine.parse(any())).thenReturn(
        ImmutableList.of(PARSED_STMT_0, PARSED_STMT_1));
    // When:
    standaloneExecutor.startAsync();
    // Then: strict prepare/execute interleaving, verified in order.
    final InOrder inOrder = inOrder(ksqlEngine);
    inOrder.verify(ksqlEngine).prepare(PARSED_STMT_0);
    inOrder.verify(ksqlEngine).execute(serviceContext, CFG_STMT_0);
    inOrder.verify(ksqlEngine).prepare(PARSED_STMT_1);
    inOrder.verify(ksqlEngine).execute(serviceContext, CFG_STMT_1);
}
/**
 * Serializes every element of {@code values} to {@code writer} using the default object
 * mapper; the returned Mono emits the number of records written.
 *
 * @throws IOException if writer setup fails
 */
public static <T> Mono<Long> writeAll(Writer writer, Flux<T> values) throws IOException {
    return writeAll(DEFAULT_OBJECT_MAPPER, writer, values);
}
// Writing a three-element Flux must produce three serialized lines and report count 3.
@Test
void writeAll_fromMultiValuedSource() throws IOException {
    final Path outputTempFilePath = createTempFile();
    final List<SimpleEntry> inputValues = List.of(new SimpleEntry(1, "value1"), new SimpleEntry(2, "value2"), new SimpleEntry(3, "value3"));
    final Long outputCount = FileSerde.writeAll(Files.newBufferedWriter(outputTempFilePath), Flux.fromIterable(inputValues)).block();
    assertThat(outputCount, is(3L));
    final List<String> outputLines = Files.readAllLines(outputTempFilePath);
    assertThat(outputLines, hasSize(3));
    // Serde emits unquoted keys, hence the {id:...} shape rather than strict JSON.
    assertThat(outputLines.getFirst(), equalTo("{id:1,value:\"value1\"}"));
    assertThat(outputLines.get(1), equalTo("{id:2,value:\"value2\"}"));
    assertThat(outputLines.get(2), equalTo("{id:3,value:\"value3\"}"));
}
/**
 * Loads a class under the per-name class-loading lock, routing any throwable raised during
 * loading to the registered handler before re-throwing it unchanged.
 */
@Override
public final Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
    try {
        synchronized (getClassLoadingLock(name)) {
            return loadClassWithoutExceptionHandling(name, resolve);
        }
    } catch (Throwable classLoadingException) {
        // Observe first, then propagate — the handler must never swallow the failure.
        classLoadingExceptionHandler.accept(classLoadingException);
        throw classLoadingException;
    }
}
// A load failure must both reach the registered handler and still propagate to the caller.
@Test
void testExceptionHandling() {
    RuntimeException expectedException = new RuntimeException("Expected exception");
    AtomicReference<Throwable> handledException = new AtomicReference<>();
    assertThatThrownBy(
            () -> {
                try (FlinkUserCodeClassLoader classLoaderWithErrorHandler =
                        new ThrowingURLClassLoader(
                                handledException::set, expectedException)) {
                    classLoaderWithErrorHandler.loadClass("dummy.class");
                }
            })
        .isSameAs(expectedException);
    assertThat(handledException.get()).isSameAs(expectedException);
}
// Total number of persisted users, delegated to the generic entity counter.
@Override
public long count() {
    return totalCount(UserImpl.class);
}
// The fixture seeds five users; count() must report exactly that.
@Test
@MongoDBFixtures("UserServiceImplTest.json")
public void testCount() throws Exception {
    assertThat(userService.count()).isEqualTo(5L);
}
/**
 * Registers a column definition on this builder.
 *
 * @param columnDef the column to add; must not be null
 * @return this builder, for chaining
 */
public CreateTableBuilder addColumn(ColumnDef columnDef) {
    ColumnDef checkedDef = requireNonNull(columnDef, "column def can't be null");
    columnDefs.add(checkedDef);
    return this;
}
// Passing a null column definition must fail fast with the documented NPE message.
@Test
public void addColumn_throws_NPE_if_ColumnDef_is_null() {
    assertThatThrownBy(() -> underTest.addColumn(null))
        .isInstanceOf(NullPointerException.class)
        .hasMessageContaining("column def can't be null");
}
/**
 * Validates a new capacity-scheduler configuration by initializing a throw-away
 * CapacityScheduler from a deep copy of the old configuration, mirroring the live
 * scheduler's nodes, and reinitializing it with the new configuration. An invalid
 * configuration surfaces as an exception from reinitialize.
 *
 * @return true when the new configuration is accepted
 * @throws IOException if the new configuration is invalid
 */
public static boolean validateCSConfiguration(
    final Configuration oldConfParam, final Configuration newConf, final RMContext rmContext)
    throws IOException {
    // Deep-copy so validation cannot mutate the caller's configuration.
    Configuration oldConf = new Configuration(oldConfParam);
    QueueMetrics.setConfigurationValidation(oldConf, true);
    QueueMetrics.setConfigurationValidation(newConf, true);
    CapacityScheduler liveScheduler = (CapacityScheduler) rmContext.getScheduler();
    CapacityScheduler newCs = new CapacityScheduler();
    try {
        //TODO: extract all the validation steps and replace reinitialize with
        //the specific validation steps
        newCs.setConf(oldConf);
        newCs.setRMContext(rmContext);
        newCs.init(oldConf);
        // Mirror live nodes so capacity checks run against the real cluster resources.
        newCs.addNodes(liveScheduler.getAllNodes());
        newCs.reinitialize(newConf, rmContext, true);
        return true;
    } finally {
        // Always tear down the scratch scheduler, even when validation throws.
        newCs.stop();
    }
}
// Raising a leaf's max resources beyond the parent's capacity must be rejected with a
// "Max resource configuration" cause during validation.
@Test
public void testValidateCSConfigDefaultRCAbsoluteModeParentMaxMemoryExceeded()
    throws Exception {
    setUpMockRM(false);
    RMContext rmContext = mockRM.getRMContext();
    CapacitySchedulerConfiguration oldConfiguration = cs.getConfiguration();
    CapacitySchedulerConfiguration newConfiguration =
        new CapacitySchedulerConfiguration(cs.getConfiguration());
    newConfiguration.setMaximumResourceRequirement("", LEAF_A_FULL_PATH, FULL_MAXRES);
    try {
        CapacitySchedulerConfigValidator
            .validateCSConfiguration(oldConfiguration, newConfiguration, rmContext);
        fail("Parent maximum capacity exceeded");
    } catch (IOException e) {
        Assert.assertTrue(e.getCause().getMessage()
            .startsWith("Max resource configuration"));
    } finally {
        mockRM.stop();
    }
}
// Validation key: the INTEGER property type name ("INTEGER").
@Override
public String key() {
    return PropertyType.INTEGER.name();
}
// The validation must be registered under the "INTEGER" key.
@Test
public void key() {
    assertThat(validation.key()).isEqualTo("INTEGER");
}
/**
 * Checks a username/password pair against the configured credentials. Results — both
 * valid and invalid — are cached so repeated attempts skip the expensive PBKDF2 derivation.
 *
 * NOTE(review): {@code passwordHash.equals(...)} is not a constant-time comparison, and
 * the credential caches appear to grow without bound on unique invalid attempts — confirm
 * both are acceptable for this deployment.
 *
 * @return true when the credentials match, false otherwise (including null inputs)
 */
@Override
public boolean checkCredentials(String username, String password) {
    if (username == null || password == null) {
        return false;
    }
    Credentials credentials = new Credentials(username, password);
    // Cache lookups first: avoids re-deriving the key on repeated attempts.
    if (validCredentialsCache.contains(credentials)) {
        return true;
    } else if (invalidCredentialsCache.contains(credentials)) {
        return false;
    }
    boolean isValid =
        this.username.equals(username) &&
        this.passwordHash.equals(
            generatePasswordHash(
                algorithm, salt, iterations, keyLength, password));
    if (isValid) {
        validCredentialsCache.add(credentials);
    } else {
        invalidCredentialsCache.add(credentials);
    }
    return isValid;
}
// The stored hash must be accepted case-insensitively: a lower-cased hex hash still
// authenticates exactly the valid username/password pair and rejects all others.
@Test
public void testPBKDF2WithHmacSHA256_lowerCase() throws Exception {
    String algorithm = "PBKDF2WithHmacSHA256";
    int iterations = 1000;
    int keyLength = 128;
    String hash = "B6:9C:5C:8A:10:3E:41:7B:BA:18:FC:E1:F2:0C:BC:D9:65:70:D3:53:AB:97:EE:2F:3F:A8:88:AF:43:EA:E6:D7:FB"
        + ":70:14:23:F9:51:29:5C:3A:9F:65:C3:20:EE:09:C9:C6:8A:B7:D3:0A:E1:F3:10:2B:9B:36:3F:1F:B6:1D:52:A7"
        + ":9C:CB:AD:55:25:46:C5:73:09:6C:38:9C:F2:FD:82:7F:90:E5:31:EF:7E:3E:6B:B2:0C:38:77:23:EC:3A:CF:29"
        + ":F7:E5:4D:4E:CC:35:7A:C2:E5:CB:E3:B3:E5:09:2B:CC:B9:40:26:A4:28:E9:5F:2D:18:B2:14:41:E7:4D:5B";
    hash = hash.toLowerCase();
    PBKDF2Authenticator PBKDF2Authenticator = new PBKDF2Authenticator(
        "/", VALID_USERNAME, hash, algorithm, SALT, iterations, keyLength);
    // Exhaustive cross-product of test usernames and passwords; only the valid pair passes.
    for (String username : TEST_USERNAMES) {
        for (String password : TEST_PASSWORDS) {
            boolean expectedIsAuthenticated = VALID_USERNAME.equals(username) && VALID_PASSWORD.equals(password);
            boolean actualIsAuthenticated = PBKDF2Authenticator.checkCredentials(username, password);
            assertEquals(expectedIsAuthenticated, actualIsAuthenticated);
        }
    }
}
/**
 * Returns the suffix of the input starting at logical index {@code beginIndex},
 * where logical indices are mapped to char offsets by the split table.
 */
public String substring(final int beginIndex) {
    // Ensure the logical-index -> char-offset table has been built.
    split();
    final int startCharOffset = splitted.get(beginIndex);
    return input.substring(startCharOffset);
}
// CODEPOINTS mode: substring indices count Unicode code points, so supplementary
// characters (emoji) and combining sequences split at code-point boundaries.
@Test
public void testSubstringCPs() {
    final UnicodeHelper lh = new UnicodeHelper("a", Method.CODEPOINTS);
    assertEquals("a", lh.substring(0));
    final UnicodeHelper lh2 = new UnicodeHelper(new String(Character.toChars(0x1f600)), Method.CODEPOINTS);
    assertEquals(new String(Character.toChars(0x1f600)), lh2.substring(0));
    final UnicodeHelper lh3 = new UnicodeHelper(UCSTR, Method.CODEPOINTS);
    assertEquals(UCSTR, lh3.substring(0));
    final UnicodeHelper lh4 = new UnicodeHelper("a" + UCSTR + "A", Method.CODEPOINTS);
    assertEquals(UCSTR + "A", lh4.substring(1));
    // Index 2 lands inside the emoji ZWJ sequence: skin-tone modifier onward.
    assertEquals(new String(Character.toChars(0x1f3ff)) + "\u200d\u2642\ufe0fA", lh4.substring(2));
    final UnicodeHelper lh5 = new UnicodeHelper("k\u035fh", Method.CODEPOINTS);
    assertEquals("\u035fh", lh5.substring(1));
}
/**
 * Reads a PEM-encoded private key file and returns its DER bytes: locates the first key
 * block via KEY_PATTERN, strips all whitespace from the Base64 payload, and decodes it.
 *
 * @throws KeyException if the file cannot be read or contains no private key block
 */
static byte[] readPrivateKey(Path path) throws KeyException {
    final byte[] bytes;
    try {
        bytes = Files.readAllBytes(path);
    } catch (IOException e) {
        throw new KeyException("Couldn't read private key from file: " + path, e);
    }
    // PEM is ASCII by definition.
    final String content = new String(bytes, StandardCharsets.US_ASCII);
    final Matcher m = KEY_PATTERN.matcher(content);
    if (!m.find()) {
        throw new KeyException("No private key found in file: " + path);
    }
    // Base64 payload is line-wrapped; remove every kind of breaking whitespace first.
    final String s = CharMatcher.breakingWhitespace().removeFrom(m.group(1));
    byte[] base64 = s.getBytes(StandardCharsets.US_ASCII);
    return Base64.getDecoder().decode(base64);
}
// A well-formed PEM resource must decode to a non-empty DER byte array.
@Test
public void readPrivateKeyHandlesPrivateKey() throws Exception {
    final URL url = Resources.getResource("org/graylog2/shared/security/tls/private.key");
    final byte[] privateKey = PemReader.readPrivateKey(Paths.get(url.toURI()));
    assertThat(privateKey).isNotEmpty();
}
/**
 * Resolves the application name: first from the project-name property, then from the
 * server home directory, finally falling back to the default name.
 */
public static String getAppName() {
    final String byProjectName = getAppNameByProjectName();
    if (byProjectName != null) {
        return byProjectName;
    }
    final String byServerHome = getAppNameByServerHome();
    if (byServerHome != null) {
        return byServerHome;
    }
    return DEFAULT_APP_NAME;
}
// With catalina.base set, the app name must be derived from the Tomcat home directory name.
@Test
void testGetAppNameByServerTypeForTomcat() {
    System.setProperty("catalina.base", "/home/admin/testAppName/");
    String appName = AppNameUtils.getAppName();
    assertEquals("testAppName", appName);
}
// Convenience overload: push with priority 0.
@Override
public boolean pushIfNotExists(String queueName, String id, long offsetTimeInSecond) {
    return pushIfNotExists(queueName, id, 0, offsetTimeInSecond);
}
// First push of an id succeeds; pushing the same id again is a no-op returning false,
// and the queue still holds exactly one entry.
@Test
public void testPushIfNotExists() {
    String queueName = "test-queue";
    String id = "abcd-1234-defg-5678";
    assertTrue(queueDao.pushIfNotExists(queueName, id, 123));
    assertEquals(1, internalQueue.size());
    assertTrue(internalQueue.containsKey(queueName));
    assertEquals(1, internalQueue.get(queueName).size());
    assertEquals(id, internalQueue.get(queueName).peek());
    assertFalse(queueDao.pushIfNotExists(queueName, id, 123));
    assertEquals(1, internalQueue.size());
    assertTrue(internalQueue.containsKey(queueName));
    assertEquals(1, internalQueue.get(queueName).size());
    assertEquals(id, internalQueue.get(queueName).peek());
}
/**
 * Builds the default load-metadata options: non-recursive, no ancestor creation,
 * no descendant loading, with common defaults taken from the given configuration.
 */
public static LoadMetadataPOptions loadMetadataDefaults(AlluxioConfiguration conf) {
    return LoadMetadataPOptions.newBuilder()
        .setCommonOptions(commonDefaults(conf))
        .setCreateAncestors(false)
        .setLoadDescendantType(LoadDescendantPType.NONE)
        .setRecursive(false)
        .build();
}
// Default load-metadata options must be non-recursive with no ancestors or descendants.
@Test
public void loadMetadataOptionsDefaults() {
    LoadMetadataPOptions options = FileSystemOptionsUtils.loadMetadataDefaults(mConf);
    assertNotNull(options);
    assertFalse(options.getCreateAncestors());
    assertFalse(options.getRecursive());
    assertEquals(options.getLoadDescendantType(), LoadDescendantPType.NONE);
}
/**
 * Returns an encrypted copy of this chain, deriving an AES key from the password via
 * scrypt. Fails fast on a null/empty password, a watching (seedless) chain, or a chain
 * whose seed is already encrypted.
 */
@Override
public DeterministicKeyChain toEncrypted(CharSequence password) {
    Objects.requireNonNull(password);
    checkArgument(password.length() > 0);
    checkState(seed != null, () -> "attempt to encrypt a watching chain");
    checkState(!seed.isEncrypted());
    KeyCrypter scrypt = new KeyCrypterScrypt();
    AesKey derivedKey = scrypt.deriveKey(password);
    return toEncrypted(scrypt, derivedKey);
}
// Encrypting an already-encrypted chain must fail with IllegalStateException.
@Test(expected = IllegalStateException.class)
public void encryptTwice() {
    chain = chain.toEncrypted("once");
    chain = chain.toEncrypted("twice");
}
/**
 * Handles a Py4J constructor command: reads the fully-qualified class name and the
 * arguments from the wire, invokes the constructor, and writes the encoded return
 * object back to the client.
 */
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
    throws Py4JException, IOException {
    String fqn = reader.readLine();
    List<Object> arguments = getArguments(reader);
    ReturnObject returnObject = invokeConstructor(fqn, arguments);
    String returnCommand = Protocol.getOutputCommand(returnObject);
    logger.finest("Returning command: " + returnCommand);
    writer.write(returnCommand);
    // Flush immediately: the client blocks waiting for this response.
    writer.flush();
}
// A zero-arg constructor command must reply with a reference to the new object ("!yro0").
@Test
public void testConstructor0Arg() {
    // Wire format: FQN line, then "e" terminating the (empty) argument list.
    String inputCommand = "py4j.examples.ExampleClass\ne\n";
    try {
        command.execute("i", new BufferedReader(new StringReader(inputCommand)), writer);
        assertEquals("!yro0\n", sWriter.toString());
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * Returns one result row per logical table in the database's default schema; empty when
 * the default schema does not exist.
 */
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowLogicalTablesStatement sqlStatement, final ContextManager contextManager) {
    DialectDatabaseMetaData dialectDatabaseMetaData = new DatabaseTypeRegistry(database.getProtocolType()).getDialectDatabaseMetaData();
    // Dialects without a default schema fall back to the database name itself.
    String schemaName = dialectDatabaseMetaData.getDefaultSchema().orElse(database.getName());
    if (null == database.getSchema(schemaName)) {
        return Collections.emptyList();
    }
    return getTables(schemaName, sqlStatement).stream().map(each -> getRow(each, sqlStatement)).collect(Collectors.toList());
}
// SHOW LOGICAL TABLES must list the two fixture tables in order.
@Test
void assertGetRowData() {
    Collection<LocalDataQueryResultRow> actual = executor.getRows(mock(ShowLogicalTablesStatement.class), mock(ContextManager.class));
    assertThat(actual.size(), is(2));
    Iterator<LocalDataQueryResultRow> iterator = actual.iterator();
    LocalDataQueryResultRow row = iterator.next();
    assertThat(row.getCell(1), is("t_order"));
    row = iterator.next();
    assertThat(row.getCell(1), is("t_order_item"));
}
// Parses a single metadata-update JSON document, delegating to the node-based parser.
public static MetadataUpdate fromJson(String json) {
    return JsonUtil.parse(json, MetadataUpdateParser::fromJson);
}
// set-snapshot-ref JSON with null optional fields omitted must parse to an update whose
// optional values are null.
@Test
public void testSetSnapshotRefTagFromJsonAllFields_NullValuesMissing() {
    String action = MetadataUpdateParser.SET_SNAPSHOT_REF;
    long snapshotId = 1L;
    SnapshotRefType type = SnapshotRefType.TAG;
    String refName = "hank";
    Integer minSnapshotsToKeep = null;
    Long maxSnapshotAgeMs = null;
    Long maxRefAgeMs = 1L;
    String json = "{\"action\":\"set-snapshot-ref\",\"ref-name\":\"hank\","
        + "\"snapshot-id\":1,\"type\":\"tag\",\"max-ref-age-ms\":1}";
    MetadataUpdate expected = new MetadataUpdate.SetSnapshotRef(
        refName, snapshotId, type, minSnapshotsToKeep, maxSnapshotAgeMs, maxRefAgeMs);
    assertEquals(action, expected, MetadataUpdateParser.fromJson(json));
}
/**
 * Renders the sketch's tunable parameters (eps, confidence, depth, width) together with
 * the current total item count.
 */
@Override
public String toString() {
    final StringBuilder rendered = new StringBuilder("CountMinSketch{");
    rendered.append("eps=").append(eps)
            .append(", confidence=").append(confidence)
            .append(", depth=").append(depth)
            .append(", width=").append(width)
            .append(", size=").append(size)
            .append('}');
    return rendered.toString();
}
// toString must expose derived depth/width and a size that tracks added counts.
@Test
public void testToString() {
    double eps = 0.0001;
    double confidence = 0.99;
    int seed = 1;
    final CountMinSketch sketch = new CountMinSketch(eps, confidence, seed);
    assertEquals("CountMinSketch{"
        + "eps=" + eps
        + ", confidence=" + confidence
        + ", depth=" + 7
        + ", width=" + 20000
        + ", size=" + 0
        + '}', sketch.toString());
    sketch.add(12, 145);
    assertEquals("CountMinSketch{"
        + "eps=" + eps
        + ", confidence=" + confidence
        + ", depth=" + 7
        + ", width=" + 20000
        + ", size=" + 145
        + '}', sketch.toString());
}
// Thread-safe lazy singleton via the initialization-on-demand holder idiom.
public static Gson instance() {
    return SingletonHolder.INSTANCE;
}
// Files must serialize as {"path": ...} using the platform's separator convention.
@Test
void serializesFiles() {
    File file = new File("/hello/world");
    assertThatJson(Serialization.instance().toJson(file))
        .isEqualTo(format("{\"path\":\"%s\"}", escapeJson(separatorsToSystem(file.getPath()))));
}
/**
 * Initializes export checkers and task executors for the PENDING and EXPORTING states.
 * Executor pool size follows the configured running-job limit (default 5 when unlimited).
 */
public static void init(long intervalMs) {
    checkers.put(JobState.PENDING, new ExportChecker(JobState.PENDING, intervalMs));
    checkers.put(JobState.EXPORTING, new ExportChecker(JobState.EXPORTING, intervalMs));
    // A limit of 0 means "unlimited"; fall back to a small fixed pool in that case.
    int poolSize = Config.export_running_job_num_limit == 0 ? 5 : Config.export_running_job_num_limit;
    LeaderTaskExecutor pendingTaskExecutor = new LeaderTaskExecutor("export_pending_job", poolSize, true);
    executors.put(JobState.PENDING, pendingTaskExecutor);
    LeaderTaskExecutor exportingTaskExecutor = new LeaderTaskExecutor("export_exporting_job", poolSize, true);
    executors.put(JobState.EXPORTING, exportingTaskExecutor);
    // One export job will be split into multiple exporting sub tasks, the queue size is not determined, so set Integer.MAX_VALUE.
    exportingSubTaskExecutor = new LeaderTaskExecutor("export_exporting_sub_task", Config.export_task_pool_size,
        Integer.MAX_VALUE, true);
}
// checkJobNeedCancel must cancel when the backend is dead, decommissioned, restarted after
// the job started, or missing entirely — and keep the job otherwise.
@Test
public void testCheckBeStatus() throws NoSuchFieldException, SecurityException,
        IllegalArgumentException, IllegalAccessException, NoSuchMethodException, InvocationTargetException {
    new MockUp<ExportJob>() {
        @Mock
        public synchronized void cancel(ExportFailMsg.CancelType type, String msg) throws UserException {
        }
    };
    Backend be = new Backend();
    new MockUp<SystemInfoService>() {
        @Mock
        public ComputeNode getBackendOrComputeNode(long backendId) {
            return be;
        }
    };
    ExportChecker.init(1000L);
    // Reach the private checker map and the private checkJobNeedCancel via reflection.
    Field field = ExportChecker.class.getDeclaredField("checkers");
    field.setAccessible(true);
    Object obj = field.get(ExportChecker.class);
    Map<JobState, ExportChecker> map = (Map<JobState, ExportChecker>) obj;
    ExportChecker checker = map.get(JobState.EXPORTING);
    Method method = ExportChecker.class.getDeclaredMethod("checkJobNeedCancel", ExportJob.class);
    method.setAccessible(true);
    ExportJob job = new ExportJob();
    job.setBeStartTime(1, 1000L);
    // Backend not alive -> cancel.
    boolean cancelled = (boolean) method.invoke(checker, job);
    Assert.assertTrue(cancelled);
    // Alive but decommissioned and restarted after the job's BE start time -> cancel.
    be.setAlive(true);
    be.setDecommissioned(true);
    be.setLastStartTime(1001L);
    cancelled = (boolean) method.invoke(checker, job);
    Assert.assertTrue(cancelled);
    // Restart happened before the job started -> keep running.
    be.setLastStartTime(999L);
    cancelled = (boolean) method.invoke(checker, job);
    Assert.assertTrue(!cancelled);
    // Backend unknown to the cluster -> cancel.
    new MockUp<SystemInfoService>() {
        @Mock
        public ComputeNode getBackendOrComputeNode(long backendId) {
            return null;
        }
    };
    cancelled = (boolean) method.invoke(checker, job);
    Assert.assertTrue(cancelled);
}
/**
 * Builds the ApplicationMaster's Flink configuration: loads the base configuration with
 * dynamic parameter overrides, binds JobManager/REST addresses to this NodeManager host,
 * randomizes web/REST ports unless pinned, migrates deprecated YARN env-prefix keys,
 * wires Kerberos keytab credentials when present, and points tmp directories at the
 * YARN-provided local dirs.
 *
 * @throws IllegalStateException if the NM_HOST environment variable is not set
 */
public static Configuration loadConfiguration(
        String workingDirectory, Configuration dynamicParameters, Map<String, String> env) {
    final Configuration configuration =
            GlobalConfiguration.loadConfiguration(workingDirectory, dynamicParameters);
    final String keytabPrincipal = env.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
    final String hostname = env.get(ApplicationConstants.Environment.NM_HOST.key());
    Preconditions.checkState(
            hostname != null,
            "ApplicationMaster hostname variable %s not set",
            ApplicationConstants.Environment.NM_HOST.key());
    configuration.set(JobManagerOptions.ADDRESS, hostname);
    configuration.set(RestOptions.ADDRESS, hostname);
    configuration.set(RestOptions.BIND_ADDRESS, hostname);
    // if a web monitor shall be started, set the port to random binding
    if (configuration.get(WebOptions.PORT, 0) >= 0) {
        configuration.set(WebOptions.PORT, 0);
    }
    if (!configuration.contains(RestOptions.BIND_PORT)) {
        // set the REST port to 0 to select it randomly
        configuration.set(RestOptions.BIND_PORT, "0");
    }
    // If the user has set the deprecated YARN-specific config keys, we add the
    // corresponding generic config keys instead. That way, later code needs not
    // deal with deprecated config keys.
    BootstrapTools.substituteDeprecatedConfigPrefix(
            configuration,
            ConfigConstants.YARN_APPLICATION_MASTER_ENV_PREFIX,
            ResourceManagerOptions.CONTAINERIZED_MASTER_ENV_PREFIX);
    BootstrapTools.substituteDeprecatedConfigPrefix(
            configuration,
            ConfigConstants.YARN_TASK_MANAGER_ENV_PREFIX,
            ResourceManagerOptions.CONTAINERIZED_TASK_MANAGER_ENV_PREFIX);
    final String keytabPath =
            Utils.resolveKeytabPath(workingDirectory, env.get(YarnConfigKeys.LOCAL_KEYTAB_PATH));
    // Kerberos is configured only when both the keytab file and principal are available.
    if (keytabPath != null && keytabPrincipal != null) {
        configuration.set(SecurityOptions.KERBEROS_LOGIN_KEYTAB, keytabPath);
        configuration.set(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, keytabPrincipal);
    }
    final String localDirs = env.get(ApplicationConstants.Environment.LOCAL_DIRS.key());
    BootstrapTools.updateTmpDirectoriesInConfiguration(configuration, localDirs);
    return configuration;
}
// When both keytab path and principal are present in the env, the loaded configuration
// must carry them into the Kerberos security options.
@Test
void testParsingValidKerberosEnv() throws IOException {
    final Configuration initialConfiguration = new Configuration();
    Map<String, String> env = new HashMap<>();
    File keytabFile = Files.createTempFile(tempBaseDir, UUID.randomUUID().toString(), "").toFile();
    env.put(YarnConfigKeys.LOCAL_KEYTAB_PATH, keytabFile.getAbsolutePath());
    env.put(YarnConfigKeys.KEYTAB_PRINCIPAL, "starlord");
    Configuration configuration = loadConfiguration(initialConfiguration, env);
    assertThat(configuration.get(SecurityOptions.KERBEROS_LOGIN_KEYTAB))
        .isEqualTo(keytabFile.getAbsolutePath());
    assertThat(configuration.get(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL))
        .isEqualTo("starlord");
}
// Empty iff both the root bit set and the overflow tail list hold no elements.
@Override
public synchronized boolean isEmpty() {
    return this.rootSet.isEmpty() && CollectionUtils.isEmpty(tailList);
}
// isEmpty must account for both the bit-set view (A,B,C) and tail additions (D).
@Test
void testIsEmpty() {
    List<String> list = Arrays.asList("A", "B", "C");
    BitList<String> bitList = new BitList<>(list);
    bitList.add("D");
    bitList.removeAll(list);
    // Only the tail element "D" remains.
    Assertions.assertEquals(1, bitList.size());
    bitList.remove("D");
    Assertions.assertTrue(bitList.isEmpty());
}
// Returns a defensive copy so callers cannot mutate the internal metadata map.
public Map<String, String> values() {
    return new HashMap<>(this.data);
}
// values() must return a map with exactly the entries the metadata was constructed with.
@Test
public void testScript_whenMetadataWithoutPropertiesValueNull_returnMetadataWithAllValue() {
    TbMsgMetaData tbMsgMetaData = new TbMsgMetaData(metadataExpected);
    Map<String, String> dataActual = tbMsgMetaData.values();
    assertEquals(metadataExpected.size(), dataActual.size());
}
/**
 * Runs validation on click; the touch event is consumed (blocked) exactly when
 * validation reports an error state.
 */
public boolean performClick() {
    return doValidate(false) == ReturnState.STATE_ERROR;
}
// Validation flow: invalid text disables the button and shows the error; valid text
// ("Pass") enables the button and clears the error.
@Test
public void testValidate() {
    AppCompatEditText textfield = new AppCompatEditText(context);
    WarnableTextInputLayout layout = new WarnableTextInputLayout(context, Robolectric.buildAttributeSet().build());
    AppCompatButton button = new AppCompatButton(context);
    // Only the exact string "Pass" validates; note case sensitivity is exercised below.
    WarnableTextInputValidator.OnTextValidate validator =
        text -> ("Pass".equals(text))
            ? new WarnableTextInputValidator.ReturnState(
                WarnableTextInputValidator.ReturnState.STATE_NORMAL, R.string.ok)
            : new WarnableTextInputValidator.ReturnState(
                WarnableTextInputValidator.ReturnState.STATE_ERROR, R.string.error);
    WarnableTextInputValidator target = new WarnableTextInputValidator(
        ApplicationProvider.getApplicationContext(), textfield, layout, button, validator);
    textfield.setText("");
    target.performClick();
    assertFalse(button.isEnabled());
    assertEquals(context.getString(R.string.error), layout.getError());
    textfield.setText("pass");
    target.performClick();
    assertFalse(button.isEnabled());
    assertEquals(context.getString(R.string.error), layout.getError());
    textfield.setText("Pass");
    target.performClick();
    assertTrue(button.isEnabled());
    assertNull(layout.getError());
}
/**
 * Splits paragraph text into interpreter name, optional local properties, and script body.
 * Text matching REPL_PATTERN ("%intp" optionally followed by "(k=v,...)") yields the
 * interpreter and properties; otherwise the whole text is the script with empty intp.
 */
public static ParseResult parse(String text) {
    Map<String, String> localProperties = new HashMap<>();
    String intpText = "";
    String scriptText = null;
    Matcher matcher = REPL_PATTERN.matcher(text);
    if (matcher.find()) {
        String headingSpace = matcher.group(1);
        intpText = matcher.group(2);
        // Position just past "%<intp>" (the +1 accounts for the '%' character).
        int startPos = headingSpace.length() + intpText.length() + 1;
        if (startPos < text.length() && text.charAt(startPos) == '(') {
            // Consume the "(key=value,...)" block and advance past it.
            startPos = parseLocalProperties(text, startPos, localProperties);
        }
        scriptText = text.substring(startPos);
    } else {
        intpText = "";
        scriptText = text;
    }
    return new ParseResult(intpText, removeLeadingWhiteSpaces(scriptText), localProperties);
}
// "%jupyter(kernel=ir)" must parse to intp "jupyter", property kernel=ir, empty script.
@Test
void testJupyter() {
    ParagraphTextParser.ParseResult parseResult = ParagraphTextParser.parse("%jupyter(kernel=ir)");
    assertEquals("jupyter", parseResult.getIntpText());
    assertEquals(1, parseResult.getLocalProperties().size());
    assertEquals("ir", parseResult.getLocalProperties().get("kernel"));
    assertEquals("", parseResult.getScriptText());
}
// UDF: re-encodes a string between two encodings. The (input, output) pair is resolved
// case-insensitively against ENCODER_MAP; unsupported pairs raise a KsqlFunctionException.
@Udf(description = "Returns a new string encoded using the outputEncoding ")
public String encode(
    @UdfParameter(
        description = "The source string. If null, then function returns null.") final String str,
    @UdfParameter(
        description = "The input encoding."
            + " If null, then function returns null.") final String inputEncoding,
    @UdfParameter(
        description = "The output encoding."
            + " If null, then function returns null.") final String outputEncoding) {
    if (str == null || inputEncoding == null || outputEncoding == null) {
        return null;
    }
    // Map key is the concatenation of the lower-cased encodings, e.g. "hexutf8".
    final String encodedString = inputEncoding.toLowerCase() + outputEncoding.toLowerCase();
    final Encode.Encoder encoder = ENCODER_MAP.get(encodedString);
    if (encoder == null) {
        throw new KsqlFunctionException("Supported input and output encodings are: "
            + "hex, utf8, ascii and base64");
    }
    return encoder.apply(str);
}
// hex -> utf8 must handle plain hex, "0x"-prefixed, and SQL-style X''/x'' literals,
// including multi-byte UTF-8 sequences and empty payloads.
@Test
public void shouldEncodeHexToUtf8() {
    assertThat(udf.encode("4578616d706c6521", "hex", "utf8"), is("Example!"));
    assertThat(udf.encode("506c616e74207472656573", "hex", "utf8"), is("Plant trees"));
    assertThat(udf.encode("31202b2031203d2031", "hex", "utf8"), is("1 + 1 = 1"));
    // Multi-byte UTF-8: Greek and Latin-1-supplement characters.
    assertThat(udf.encode("ce95cebbcebbceacceb4ceb1", "hex", "utf8"), is("Ελλάδα"));
    assertThat(udf.encode("c39c6265726d656e736368", "hex", "utf8"), is("Übermensch"));
    assertThat(udf.encode("0x4578616d706c6521", "hex", "utf8"), is("Example!"));
    assertThat(udf.encode("0x", "hex", "utf8"), is(""));
    assertThat(udf.encode("X'506C6174666F726D2D7C5F5F5F5F5F7C2D'", "hex", "utf8"), is("Platform-|_____|-"));
    assertThat(udf.encode("x'31202b2031203d2031'", "hex", "utf8"), is("1 + 1 = 1"));
    assertThat(udf.encode("X''", "hex", "utf8"), is(""));
    assertThat(udf.encode("x''", "hex", "utf8"), is(""));
}
/**
 * Scans the given class loader's classpath (nested jars excluded) and returns the
 * absolute paths of all classpath entries.
 */
@Override
public List<String> detect(ClassLoader classLoader) {
    List<File> classpathContents =
        classGraph
            .disableNestedJarScanning()
            .addClassLoader(classLoader)
            // scan(1): single-threaded scan is sufficient for path discovery.
            .scan(1)
            .getClasspathFiles();
    return classpathContents.stream().map(File::getAbsolutePath).collect(Collectors.toList());
}
// A directory placed on the classpath must appear in the detected resource paths.
@Test
public void shouldDetectDirectories() throws Exception {
    File folder = tmpFolder.newFolder("folder1");
    ClassLoader classLoader = new URLClassLoader(new URL[] {folder.toURI().toURL()});
    ClasspathScanningResourcesDetector detector =
        new ClasspathScanningResourcesDetector(new ClassGraph());
    List<String> result = detector.detect(classLoader);
    assertThat(result, hasItem(containsString(folder.getCanonicalPath())));
}
/**
 * Deletes every execution matching the given query filters and returns the
 * number of executions deleted.
 */
@Delete(uri = "/by-query")
@ExecuteOn(TaskExecutors.IO)
@Operation(tags = {"Executions"}, summary = "Delete executions filter by query parameters")
public HttpResponse<BulkResponse> deleteByQuery(
    @Parameter(description = "A string filter") @Nullable @QueryValue(value = "q") String query,
    @Parameter(description = "A namespace filter prefix") @Nullable @QueryValue String namespace,
    @Parameter(description = "A flow id filter") @Nullable @QueryValue String flowId,
    @Parameter(description = "The start datetime") @Nullable @Format("yyyy-MM-dd'T'HH:mm[:ss][.SSS][XXX]") @QueryValue ZonedDateTime startDate,
    @Parameter(description = "The end datetime") @Nullable @Format("yyyy-MM-dd'T'HH:mm[:ss][.SSS][XXX]") @QueryValue ZonedDateTime endDate,
    @Parameter(description = "A time range filter relative to the current time", examples = {
        @ExampleObject(name = "Filter last 5 minutes", value = "PT5M"),
        @ExampleObject(name = "Filter last 24 hours", value = "P1D")
    }) @Nullable @QueryValue Duration timeRange,
    @Parameter(description = "A state filter") @Nullable @QueryValue List<State.Type> state,
    @Parameter(description = "A labels filter as a list of 'key:value'") @Nullable @QueryValue @Format("MULTI") List<String> labels,
    @Parameter(description = "The trigger execution id") @Nullable @QueryValue String triggerExecutionId,
    @Parameter(description = "A execution child filter") @Nullable @QueryValue ExecutionRepositoryInterface.ChildFilter childFilter,
    @Parameter(description = "Whether to delete non-terminated executions") @Nullable @QueryValue(defaultValue = "false") Boolean includeNonTerminated,
    @Parameter(description = "Whether to delete execution logs") @QueryValue(defaultValue = "true") Boolean deleteLogs,
    @Parameter(description = "Whether to delete execution metrics") @QueryValue(defaultValue = "true") Boolean deleteMetrics,
    @Parameter(description = "Whether to delete execution files in the internal storage") @QueryValue(defaultValue = "true") Boolean deleteStorage
) throws IOException {
    // Validate the startDate/endDate combination before hitting the repository.
    validateTimeline(startDate, endDate);

    Integer count = executionRepository
        .find(
            query,
            tenantService.resolveTenant(),
            namespace,
            flowId,
            // timeRange (if given) is resolved into an absolute start datetime.
            resolveAbsoluteDateTime(startDate, timeRange, ZonedDateTime.now()),
            endDate,
            state,
            RequestUtils.toMap(labels),
            triggerExecutionId,
            childFilter
        )
        // Non-terminated executions are skipped unless explicitly included.
        .filter(it -> it.getState().isTerminated() || includeNonTerminated)
        // Each matched execution is deleted (with the requested side data) and
        // counted; the reactive pipeline is blocked to sum the total.
        .map(throwFunction(e -> {
            executionService.delete(e, deleteLogs, deleteMetrics, deleteStorage);
            return 1;
        }))
        .reduce(Integer::sum)
        .blockOptional()
        .orElse(0);

    return HttpResponse.ok(BulkResponse.builder().count(count).build());
}
@Test
void deleteByQuery() {
    // Three executions in the same namespace; a namespace-filtered delete must
    // remove all of them.
    Execution result1 = triggerInputsFlowExecution(true);
    Execution result2 = triggerInputsFlowExecution(true);
    Execution result3 = triggerInputsFlowExecution(true);

    BulkResponse response = client.toBlocking().retrieve(
        HttpRequest.DELETE("/api/v1/executions/by-query?namespace=" + result1.getNamespace()),
        BulkResponse.class
    );

    assertThat(response.getCount(), is(3));
}
/**
 * Parses command-line options into a {@code CommandLineOptions}, expanding
 * any params files first. Throws IllegalArgumentException on unknown flags.
 */
static CommandLineOptions parse(Iterable<String> options) {
    CommandLineOptions.Builder optionsBuilder = CommandLineOptions.builder();
    List<String> expandedOptions = new ArrayList<>();
    expandParamsFiles(options, expandedOptions);
    Iterator<String> it = expandedOptions.iterator();
    while (it.hasNext()) {
        String option = it.next();
        if (!option.startsWith("-")) {
            // First non-flag argument: it and everything after it are file names.
            optionsBuilder.filesBuilder().add(option).addAll(it);
            break;
        }
        String flag;
        String value;
        int idx = option.indexOf('=');
        if (idx >= 0) {
            // "--flag=value" form: split the inline value off.
            flag = option.substring(0, idx);
            value = option.substring(idx + 1);
        } else {
            // Value (if the flag needs one) is taken from the next argument.
            flag = option;
            value = null;
        }
        // NOTE: update usage information in UsageException when new flags are added
        switch (flag) {
            case "-i":
            case "-r":
            case "-replace":
            case "--replace":
                optionsBuilder.inPlace(true);
                break;
            case "--lines":
            case "-lines":
            case "--line":
            case "-line":
                parseRangeSet(optionsBuilder.linesBuilder(), getValue(flag, it, value));
                break;
            case "--offset":
            case "-offset":
                optionsBuilder.addOffset(parseInteger(it, flag, value));
                break;
            case "--length":
            case "-length":
                optionsBuilder.addLength(parseInteger(it, flag, value));
                break;
            case "--aosp":
            case "-aosp":
            case "-a":
                optionsBuilder.aosp(true);
                break;
            case "--version":
            case "-version":
            case "-v":
                optionsBuilder.version(true);
                break;
            case "--help":
            case "-help":
            case "-h":
                optionsBuilder.help(true);
                break;
            case "--fix-imports-only":
                optionsBuilder.fixImportsOnly(true);
                break;
            case "--skip-sorting-imports":
                optionsBuilder.sortImports(false);
                break;
            case "--skip-removing-unused-imports":
                optionsBuilder.removeUnusedImports(false);
                break;
            case "--skip-reflowing-long-strings":
                optionsBuilder.reflowLongStrings(false);
                break;
            case "--skip-javadoc-formatting":
                optionsBuilder.formatJavadoc(false);
                break;
            case "-":
                // A lone dash means: read the source from stdin.
                optionsBuilder.stdin(true);
                break;
            case "-n":
            case "--dry-run":
                optionsBuilder.dryRun(true);
                break;
            case "--set-exit-if-changed":
                optionsBuilder.setExitIfChanged(true);
                break;
            case "-assume-filename":
            case "--assume-filename":
                optionsBuilder.assumeFilename(getValue(flag, it, value));
                break;
            default:
                throw new IllegalArgumentException("unexpected flag: " + flag);
        }
    }
    return optionsBuilder.build();
}
@Test
public void lengths() {
    // Both "-length" and "--length" spellings accumulate values in order.
    assertThat(
        CommandLineOptionsParser.parse(Arrays.asList("-length", "1", "--length", "2"))
            .lengths())
        .containsExactly(1, 2);
}
@Override
public <V1, R> KTable<K, R> leftJoin(final KTable<K, V1> other,
                                     final ValueJoiner<? super V, ? super V1, ? extends R> joiner) {
    // Delegate to the named variant, supplying no explicit name.
    final NamedInternal unnamed = NamedInternal.empty();
    return leftJoin(other, joiner, unnamed);
}
@Test
public void shouldNotAllowNullJoinerOnLeftJoin() {
    // A null joiner must be rejected eagerly with an NPE.
    assertThrows(NullPointerException.class, () -> table.leftJoin(table, null));
}
/**
 * Extracts the timestamp column from the row as a string and parses it with
 * the configured format, returning epoch milliseconds.
 *
 * @throws KsqlException if the column value cannot be parsed as a timestamp.
 */
@Override
public long extract(final Object key, final GenericRow value) {
    final String colValue = (String) extractor.extract(key, value);
    try {
        return timestampParser.parse(colValue);
    } catch (final KsqlException e) {
        // Report the actual string that failed to parse — not the whole row —
        // so the offending value is immediately visible in the error message.
        throw new KsqlException("Unable to parse string timestamp."
            + " timestamp=" + colValue
            + " timestamp_format=" + format,
            e);
    }
}
@Test
public void shouldThrowIfStringDoesNotMatchFormat() {
    // Given: a column value that does not match the configured format.
    when(columnExtractor.extract(any(), any())).thenReturn("11-Jan-2010");

    // When:
    final Exception e = assertThrows(
        DateTimeParseException.class,
        () -> extractor.extract(key, value)
    );

    // Then: the offending value appears in the error message.
    assertThat(e.getMessage(), containsString("11-Jan-2010"));
}
/**
 * Decodes an ABI-encoded function result into typed values.
 */
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) {
    // Thin static facade over the shared decoder instance.
    return decoder.decodeFunctionResult(rawInput, outputParameters);
}
@Test
public void testDecodeDynamicStruct2() {
    // ABI-encoded dynamic struct: offset word, uint 1, string offset,
    // string length 2, then "id" padded to 32 bytes.
    String rawInput =
        "0x0000000000000000000000000000000000000000000000000000000000000020"
            + "0000000000000000000000000000000000000000000000000000000000000001"
            + "0000000000000000000000000000000000000000000000000000000000000040"
            + "0000000000000000000000000000000000000000000000000000000000000002"
            + "6964000000000000000000000000000000000000000000000000000000000000";

    assertEquals(
        FunctionReturnDecoder.decode(
            rawInput, AbiV2TestFixture.getBozFunction.getOutputParameters()),
        Collections.singletonList(new AbiV2TestFixture.Boz(BigInteger.ONE, "id")));
}
/**
 * Runs the wrapped health check task through the interceptor chain.
 * Exceptions are logged and swallowed so the scheduler thread survives.
 */
@Override
public void run() {
    try {
        interceptorChain.doInterceptor(task);
    } catch (Exception e) {
        // A failed health check deserves operator attention: log at warn
        // (with the stack trace) instead of info so it is not lost in
        // routine output.
        Loggers.SRV_LOG.warn("Interceptor health check task {} failed", task.getTaskId(), e);
    }
}
@Test
void testRunWithoutResponsibleClient() {
    // When this node is not responsible for the client, the expiry config
    // must never be consulted.
    when(distroMapper.responsible(client.getResponsibleId())).thenReturn(false);
    taskWrapper.run();
    verify(globalConfig, never()).isExpireInstance();
}
/**
 * Lists the contents of an SMB directory, mapping each entry to a Path with
 * timestamps, size and type attributes. "." and ".." entries are skipped.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final AttributedList<Path> result = new AttributedList<>();
    try {
        // Borrow the disk share from the session; it must be released even
        // if the listing fails.
        final SMBSession.DiskShareWrapper share = session.openShare(directory);
        final List<FileIdBothDirectoryInformation> info;
        try {
            info = share.get().list(new SMBPathContainerService(session).getKey(directory));
        }
        finally {
            session.releaseShare(share);
        }
        for(FileIdBothDirectoryInformation f : info) {
            final String filename = f.getFileName();
            // Skip the "." and ".." pseudo entries.
            if(filename.equals(".") || filename.equals("..")) {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Skip %s", f.getFileName()));
                }
                continue;
            }
            final EnumSet<Type> type = EnumSet.noneOf(Type.class);
            long fileAttributes = f.getFileAttributes();
            // check for all relevant file types and add them to the EnumSet
            if((fileAttributes & FileAttributes.FILE_ATTRIBUTE_DIRECTORY.getValue()) != 0) {
                type.add(Type.directory);
            }
            else {
                type.add(Type.file);
            }
            final PathAttributes attr = new PathAttributes();
            // SMB FILETIMEs are converted to epoch milliseconds.
            attr.setAccessedDate(f.getLastAccessTime().toEpochMillis());
            attr.setModificationDate(f.getLastWriteTime().toEpochMillis());
            attr.setCreationDate(f.getCreationTime().toEpochMillis());
            attr.setSize(f.getEndOfFile());
            attr.setDisplayname(f.getFileName());
            result.add(new Path(directory, filename, type, attr));
            // Notify the listener incrementally as entries accumulate.
            listener.chunk(directory, result);
        }
    }
    catch(SMBRuntimeException e) {
        throw new SMBExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
    return result;
}
@Test
public void testListShareNotfound() throws Exception {
    // Listing a nonexistent share must surface as NotfoundException.
    assertThrows(NotfoundException.class, () -> new SMBListService(session).list(
        new Path("/notfound", EnumSet.of(Path.Type.directory, Path.Type.volume)),
        new DisabledListProgressListener()));
}
/**
 * Validates both the host and the port of the plugin configuration.
 * Host is checked first, so a host error takes precedence when both are bad.
 */
public static void validateHostAndPort(final String type, final PluginConfiguration pluginConfig) {
    validateHost(type, pluginConfig);
    validatePort(type, pluginConfig);
}
@Test
void assertValidateHostAndPortWhenHostIsNull() {
    // A null host must be rejected with an IllegalArgumentException.
    assertThrows(IllegalArgumentException.class,
        () -> PluginConfigurationValidator.validateHostAndPort("foo_type", new PluginConfiguration(null, 8080, "pwd", null)));
}
@Override
public MutableAnalysisMetadataHolder setBaseAnalysis(@Nullable Analysis baseAnalysis) {
    // The base analysis may only be initialized once; a second call is a bug.
    if (this.baseProjectSnapshot.isInitialized()) {
        throw new IllegalStateException("Base project snapshot has already been set");
    }
    this.baseProjectSnapshot.setProperty(baseAnalysis);
    return this;
}
@Test
public void setBaseProjectSnapshot_throws_ISE_when_called_twice() {
    AnalysisMetadataHolderImpl underTest = new AnalysisMetadataHolderImpl(editionProvider);
    underTest.setBaseAnalysis(baseProjectAnalysis);

    // Second call must fail with a descriptive IllegalStateException.
    assertThatThrownBy(() -> underTest.setBaseAnalysis(baseProjectAnalysis))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Base project snapshot has already been set");
}
/**
 * Builds a multi-value default from its string representation. OBJECT values
 * are parsed as a JSON map, NULL values carry no map; any other type (or a
 * JSON parse failure) raises IllegalArgumentException.
 */
public static LookupDefaultMultiValue create(String valueString, LookupDefaultValue.Type valueType) {
    requireNonNull(valueString, "valueString cannot be null");
    requireNonNull(valueType, "valueType cannot be null");

    Map<Object, Object> value;
    try {
        if (valueType == LookupDefaultValue.Type.OBJECT) {
            value = OBJECT_MAPPER.readValue(valueString, TypeReferences.MAP_OBJECT_OBJECT);
        } else if (valueType == LookupDefaultValue.Type.NULL) {
            value = null;
        } else {
            throw new IllegalArgumentException("Could not convert <" + valueString + "> to multi value type <" + valueType + ">");
        }
    } catch (IllegalArgumentException e) {
        // Our own IAE above must not be re-wrapped by the generic handler below.
        throw e;
    } catch (Exception e) {
        throw new IllegalArgumentException("Could not parse JSON "
            + valueType.toString().toLowerCase(Locale.ENGLISH)
            + " value <" + valueString + ">", e);
    }

    return builder()
        .valueString(valueString)
        .valueType(valueType)
        .value(value)
        .build();
}
@Test
public void createSingleNumber() throws Exception {
    // NUMBER is not a valid multi-value type, so creation must fail.
    expectedException.expect(IllegalArgumentException.class);
    LookupDefaultMultiValue.create("42", LookupDefaultMultiValue.Type.NUMBER);
}
@Override
public boolean containsActiveConnection(final DataSource dataSource) {
    // Active as soon as at least one connection is checked out of the pool.
    return getActiveConnections(dataSource) != 0;
}
@Test
void assertNotContainsActiveConnectionWhenEmptyPool() {
    // A freshly created pool has no checked-out connections.
    assertFalse(new HikariDataSourcePoolActiveDetector().containsActiveConnection(new HikariDataSource()));
}
/**
 * Resets the local aggregate to the identity for maximum (Long.MIN_VALUE),
 * so any subsequently added value becomes the new maximum.
 */
@Override
public void resetLocal() {
    this.max = Long.MIN_VALUE;
}
@Test
void testResetLocal() {
    LongMaximum max = new LongMaximum();
    long value = 9876543210L;

    max.add(value);
    assertThat(max.getLocalValue().longValue()).isEqualTo(value);

    // After reset, the accumulator returns to the identity Long.MIN_VALUE.
    max.resetLocal();
    assertThat(max.getLocalValue().longValue()).isEqualTo(Long.MIN_VALUE);
}
/**
 * Creates a rendering job for the given tile. The tile must be non-null;
 * the cache key is derived from its zoom level and coordinates.
 */
public Job(Tile tile, boolean hasAlpha) {
    if (null == tile) {
        throw new IllegalArgumentException("tile must not be null");
    }

    this.tile = tile;
    this.hasAlpha = hasAlpha;
    this.key = composeKey(tile.zoomLevel, tile.tileX, tile.tileY);
}
@Test
public void jobTest() {
    // The job stores the tile it was constructed with; a null tile is rejected.
    Job job = createJob(new Tile(0, 1, (byte) 2, TILE_SIZE));
    Assert.assertEquals(new Tile(0, 1, (byte) 2, TILE_SIZE), job.tile);
    verifyInvalidConstructor(null);
}
/**
 * Parses the named property as an int, falling back to zero when the
 * property is missing or not parseable.
 */
public int toInt(String name) {
    final int fallback = 0;
    return toInt(name, fallback);
}
@Test
public void testToInt_String_int() {
    System.out.println("toInt");
    int expResult;
    int result;
    // Fixture covering valid ints, empty, and non-int strings.
    Properties props = new Properties();
    props.put("value1", "123");
    props.put("value2", "-54");
    props.put("empty", "");
    props.put("str", "abc");
    props.put("boolean", "true");
    props.put("float", "24.98");
    props.put("int", "12");
    props.put("char", "a");
    PropertyParser instance = new PropertyParser(props);

    // Parseable values are returned as-is.
    expResult = 123;
    result = instance.toInt("value1", 17);
    assertEquals(expResult, result);
    expResult = -54;
    result = instance.toInt("value2", 17);
    assertEquals(expResult, result);
    // Everything non-parseable (empty, text, boolean, float, char) and
    // missing keys fall back to the supplied default.
    expResult = 17;
    result = instance.toInt("empty", 17);
    assertEquals(expResult, result);
    expResult = 17;
    result = instance.toInt("str", 17);
    assertEquals(expResult, result);
    expResult = 17;
    result = instance.toInt("boolean", 17);
    assertEquals(expResult, result);
    expResult = 17;
    result = instance.toInt("float", 17);
    assertEquals(expResult, result);
    expResult = 12;
    result = instance.toInt("int", 17);
    assertEquals(expResult, result);
    expResult = 17;
    result = instance.toInt("char", 17);
    assertEquals(expResult, result);
    expResult = 17;
    result = instance.toInt("nonexistent", 17);
    assertEquals(expResult, result);
}
Future<RecordMetadata> send(final ProducerRecord<byte[], byte[]> record, final Callback callback) { maybeBeginTransaction(); try { return producer.send(record, callback); } catch (final KafkaException uncaughtException) { if (isRecoverable(uncaughtException)) { // producer.send() call may throw a KafkaException which wraps a FencedException, // in this case we should throw its wrapped inner cause so that it can be // captured and re-wrapped as TaskMigratedException throw new TaskMigratedException( formatException("Producer got fenced trying to send a record"), uncaughtException.getCause() ); } else { throw new StreamsException( formatException(String.format("Error encountered trying to send record to topic %s", record.topic())), uncaughtException ); } } }
@Test
public void shouldThrowTaskMigrateExceptionOnEosBeginTxnFenced() {
    // A fenced producer must surface as TaskMigratedException on send.
    eosAlphaMockProducer.fenceProducer();

    final TaskMigratedException thrown = assertThrows(
        TaskMigratedException.class,
        () -> eosAlphaStreamsProducer.send(null, null)
    );

    assertThat(
        thrown.getMessage(),
        is("Producer got fenced trying to begin a new transaction [test];"
            + " it means all tasks belonging to this thread should be migrated.")
    );
}
/**
 * Validates an external issue report. Reports with a rules section use the
 * new (CCT) format; rule-less reports use the deprecated format and emit a
 * deprecation warning; anything else is rejected.
 */
public void validate(ExternalIssueReport report, Path reportPath) {
    final boolean hasRules = report.rules != null;
    final boolean hasIssues = report.issues != null;

    if (hasRules && hasIssues) {
        Set<String> ruleIds = validateRules(report.rules, reportPath);
        validateIssuesCctFormat(report.issues, ruleIds, reportPath);
    } else if (!hasRules && hasIssues) {
        String documentationLink = documentationLinkGenerator.getDocumentationLink(DOCUMENTATION_SUFFIX);
        LOGGER.warn("External issues were imported with a deprecated format which will be removed soon. "
            + "Please switch to the newest format to fully benefit from Clean Code: {}", documentationLink);
        validateIssuesDeprecatedFormat(report.issues, reportPath);
    } else {
        throw new IllegalStateException(String.format("Failed to parse report '%s': invalid report detected.", reportPath));
    }
}
@Test
public void validate_whenDeprecatedReportMissingMessageForPrimaryLocation_shouldThrowException() throws IOException {
    // A deprecated-format issue without a primary-location message is invalid.
    ExternalIssueReport report = read(DEPRECATED_REPORTS_LOCATION);
    report.issues[0].primaryLocation.message = null;

    assertThatThrownBy(() -> validator.validate(report, reportPath))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Failed to parse report 'report-path': missing mandatory field 'message' in the primary location of the issue.");
    // The deprecation warning must still have been logged.
    assertWarningLog();
}
/**
 * Looks up the removal rule for the given key, falling back to the shared
 * default rule when no key-specific rule is registered.
 */
public static Optional<RemovalRule> getRule(String key) {
    final RemovalRule resolved = RULE_MAP.getOrDefault(key, rule);
    return Optional.ofNullable(resolved);
}
@Test
public void testRule() {
    // The cached rule for KEY must expose the expected thresholds.
    Optional<RemovalRule> removalRuleOptional = RuleCache.getRule(KEY);
    Assert.assertTrue(removalRuleOptional.isPresent());
    RemovalRule rule = removalRuleOptional.get();
    Assert.assertEquals(RATE, rule.getErrorRate(), 0.0);
    Assert.assertEquals(rule.getScaleUpLimit(), SCALE, 0.0);
    Assert.assertEquals(rule.getMinInstanceNum(), NUM);
}
/**
 * Fetches the session iterator for the given key from the first underlying
 * store that has data for it; returns an empty iterator if none do.
 *
 * @throws NullPointerException       if key is null.
 * @throws InvalidStateStoreException if an underlying store has been closed
 *                                    or migrated.
 */
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K key) {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlySessionStore<K, V>> stores = storeProvider.stores(storeName, queryableStoreType);
    for (final ReadOnlySessionStore<K, V> store : stores) {
        try {
            final KeyValueIterator<Windowed<K>, V> result = store.fetch(key);
            if (!result.hasNext()) {
                // No data for this key here; release the iterator and try
                // the next underlying store.
                result.close();
            } else {
                return result;
            }
        } catch (final InvalidStateStoreException ise) {
            // Preserve the original exception as the cause instead of only
            // flattening it into the message, so the full stack trace is kept.
            throw new InvalidStateStoreException("State store [" + storeName + "] is not available anymore"
                + " and may have been migrated to another instance; "
                + "please re-discover its location from the state metadata. "
                + "Original error message: " + ise, ise);
        }
    }
    return KeyValueIterators.emptyIterator();
}
@Test
public void shouldThrowInvalidStateStoreExceptionIfSessionFetchThrows() {
    // A closed underlying store must be surfaced as InvalidStateStoreException.
    underlyingSessionStore.setOpen(false);
    try {
        sessionStore.fetch("key");
        fail("Should have thrown InvalidStateStoreException with session store");
    } catch (final InvalidStateStoreException e) {
        // expected
    }
}
/**
 * Returns the configured eviction settings.
 */
public EvictionConfig getEvictionConfig() {
    return evictionConfig;
}
@Test
public void cacheConfigXmlTest_CustomEvictionPolicyComparator() throws IOException {
    // The XML-declared cache must carry its custom eviction comparator config.
    Config config = new XmlConfigBuilder(configUrl1).build();
    CacheSimpleConfig cacheConfig = config.getCacheConfig("cacheWithCustomEvictionPolicyComparator");
    assertNotNull(cacheConfig);

    EvictionConfig evictionConfig = cacheConfig.getEvictionConfig();
    assertNotNull(evictionConfig);
    assertEquals(50, evictionConfig.getSize());
    assertEquals(MaxSizePolicy.ENTRY_COUNT, evictionConfig.getMaxSizePolicy());
    assertEquals("my-custom-eviction-policy-comparator", evictionConfig.getComparatorClassName());
}
/**
 * Terminates the cluster: stops all persistent queries first (so nothing
 * writes while topics are removed), deletes the sink topics matching the
 * given patterns and the managed topics, then shuts the engine down.
 */
public void terminateCluster(final List<String> deleteTopicPatterns) {
    terminatePersistentQueries();
    deleteSinkTopics(deleteTopicPatterns);
    deleteTopics(managedTopics);
    ksqlEngine.close();
}
@Test
public void shouldNotCleanUpSchemaForNonSchemaInferenceSupportedTopic() throws Exception {
    // Given: a DELIMITED (non-schema-inference) sink topic with a registered schema.
    givenTopicsExistInKafka("K_Foo");
    givenSinkTopicsExistInMetastore(FormatFactory.DELIMITED, "K_Foo");
    givenSchemasForTopicsExistInSchemaRegistry("K_Foo");

    // When:
    clusterTerminator.terminateCluster(ImmutableList.of("K_Foo"));

    // Then: the schema must be left untouched.
    verifySchemaNotDeletedForTopic("K_Foo");
}
/**
 * Int overload: widens to double and delegates to the double assertion.
 */
public final void isGreaterThan(int other) {
    final double widened = other;
    isGreaterThan(widened);
}
@Test
public void isGreaterThan_int_strictly() {
    // 2.0 is not greater than 3, so the assertion must fail.
    expectFailureWhenTestingThat(2.0).isGreaterThan(3);
}
/**
 * Creates a Couchbase endpoint for the URI and applies the remaining query
 * parameters as bean properties.
 */
@Override
protected CouchbaseEndpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
    final CouchbaseEndpoint answer = new CouchbaseEndpoint(uri, remaining, this);
    setProperties(answer, parameters);
    return answer;
}
@Test
public void testCouchbaseURIWithBucket2() throws Exception {
    // Bucket supplied as a query parameter rather than in the path.
    Map<String, Object> params = new HashMap<>();
    params.put("bucket", "bucket");
    String uri = "couchbase:http://localhost";
    String remaining = "http://localhost";

    CouchbaseComponent couchbaseComponent = new CouchbaseComponent(context);
    CouchbaseEndpoint endpoint = couchbaseComponent.createEndpoint(uri, remaining, params);

    // Default port 8091 and the /pools path are applied to the bootstrap URI.
    assertEquals(new URI("http://localhost:8091/pools"), endpoint.makeBootstrapURI()[0]);
    assertEquals("bucket", endpoint.getBucket());
}
/**
 * Opens an output stream on the resolved file object, resolving it first.
 *
 * @throws FileSystemException if the file cannot be resolved or opened.
 */
@Override
public OutputStream getOutputStream() throws FileSystemException {
    return requireResolvedFileObject().getOutputStream();
}
@Test
public void testDelegatesGetOutputStreamWithAppend() throws FileSystemException {
    // The append-variant must be delegated to the resolved file object.
    OutputStream outputStream = mock( OutputStream.class );
    when( resolvedFileObject.getOutputStream( anyBoolean() ) ).thenReturn( outputStream );
    assertSame( outputStream, fileObject.getOutputStream( true ) );
}
/**
 * Resolves a queryable store by name and type. Global stores take precedence;
 * otherwise the local per-task stores are wrapped and returned.
 */
public <T> T getStore(final StoreQueryParameters<T> storeQueryParameters) {
    final String name = storeQueryParameters.storeName();
    final QueryableStoreType<T> type = storeQueryParameters.queryableStoreType();

    if (!globalStoreProvider.stores(name, type).isEmpty()) {
        return type.create(globalStoreProvider, name);
    }

    return type.create(
        new WrappingStoreProvider(storeProviders.values(), storeQueryParameters),
        name
    );
}
@Test
public void shouldFindGlobalStores() {
    // A registered global store must be resolvable by name and type.
    globalStateStores.put("global", new NoOpReadOnlyStore<>());
    assertNotNull(storeProvider.getStore(StoreQueryParameters.fromNameAndType("global", QueryableStoreTypes.keyValueStore())));
}
/**
 * Writes a single record as one message: start, body, end. On failure the
 * offending message is logged before the exception is rethrown.
 */
@Override
public void write(T record) {
    recordConsumer.startMessage();
    try {
        messageWriter.writeTopLevelMessage(record);
    } catch (RuntimeException e) {
        // A Builder must be built before it can be rendered in the log.
        final Message failed = (record instanceof Message.Builder)
            ? ((Message.Builder) record).build()
            : (Message) record;
        LOG.error("Cannot write message {}: {}", e.getMessage(), failed);
        throw e;
    }
    recordConsumer.endMessage();
}
@Test
public void testRepeatedIntMessageEmpty() throws Exception {
    // An empty repeated field must produce only start/end message calls.
    RecordConsumer readConsumerMock = Mockito.mock(RecordConsumer.class);
    ProtoWriteSupport<TestProtobuf.RepeatedIntMessage> instance =
        createReadConsumerInstance(TestProtobuf.RepeatedIntMessage.class, readConsumerMock);

    TestProtobuf.RepeatedIntMessage.Builder msg = TestProtobuf.RepeatedIntMessage.newBuilder();

    instance.write(msg.build());

    InOrder inOrder = Mockito.inOrder(readConsumerMock);

    inOrder.verify(readConsumerMock).startMessage();
    inOrder.verify(readConsumerMock).endMessage();
    Mockito.verifyNoMoreInteractions(readConsumerMock);
}
/**
 * Stores the dialog to be returned by the shadowed error-dialog lookup.
 */
public void setErrorDialog(final Dialog errorDialog) {
    this.errorDialog = errorDialog;
}
@Test
public void setErrorDialog() {
    final ShadowGoogleApiAvailability shadowGoogleApiAvailability =
        Shadows.shadowOf(GoogleApiAvailability.getInstance());
    final Dialog expectedDialog = mock(Dialog.class);
    final Activity mockActivity = mock(Activity.class);
    final int mockErrorCode = ConnectionResult.API_UNAVAILABLE;
    final int mockRequestCode = 1234;

    // The dialog set on the shadow is what getErrorDialog must return.
    shadowGoogleApiAvailability.setErrorDialog(expectedDialog);
    final Dialog actualDialog = GoogleApiAvailability.getInstance()
        .getErrorDialog(mockActivity, mockErrorCode, mockRequestCode);

    assertThat(actualDialog).isEqualTo(expectedDialog);
}
/**
 * Decrypts a Base64-encoded ciphertext with the secret key loaded from the
 * key file, returning the clear text as UTF-8.
 *
 * @throws IllegalStateException wrapping any checked crypto failure.
 */
@Override
public String decrypt(String encryptedText) {
    try {
        // Trim and Base64-decode the input before decryption.
        final byte[] encrypted = Base64.decodeBase64(StringUtils.trim(encryptedText));
        final javax.crypto.Cipher cipher = javax.crypto.Cipher.getInstance(CRYPTO_ALGO);
        cipher.init(javax.crypto.Cipher.DECRYPT_MODE, loadSecretFile());
        final byte[] clearData = cipher.doFinal(encrypted);
        return new String(clearData, StandardCharsets.UTF_8);
    } catch (RuntimeException e) {
        // Runtime failures propagate unchanged.
        throw e;
    } catch (Exception e) {
        throw new IllegalStateException(e);
    }
}
@Test
public void decrypt_bad_key() throws Exception {
    // Decrypting with a malformed key file must fail with InvalidKeyException
    // as the cause.
    URL resource = getClass().getResource("/org/sonar/api/config/internal/AesCipherTest/bad_secret_key.txt");
    AesECBCipher cipher = new AesECBCipher(new File(resource.toURI()).getCanonicalPath());

    assertThatThrownBy(() -> cipher.decrypt("9mx5Zq4JVyjeChTcVjEide4kWCwusFl7P2dSVXtg9IY="))
        .isInstanceOf(RuntimeException.class)
        .hasCauseInstanceOf(InvalidKeyException.class);
}
/**
 * Handles one node heartbeat: validates the node, detects duplicate or
 * out-of-sequence heartbeats, forwards the reported status to the RMNode,
 * and builds the response (keys, labels, attributes, resources, limits).
 */
@SuppressWarnings("unchecked")
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
    throws YarnException, IOException {
  NodeStatus remoteNodeStatus = request.getNodeStatus();
  /**
   * Here is the node heartbeat sequence...
   * 1. Check if it's a valid (i.e. not excluded) node
   * 2. Check if it's a registered node
   * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
   * 4. Send healthStatus to RMNode
   * 5. Update node's labels if distributed Node Labels configuration is enabled
   */
  NodeId nodeId = remoteNodeStatus.getNodeId();

  // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is
  // in decommissioning.
  if (!this.nodesListManager.isValidNode(nodeId.getHost())
      && !isNodeInDecommissioning(nodeId)) {
    String message = "Disallowed NodeManager nodeId: " + nodeId + " hostname: " + nodeId.getHost();
    LOG.info(message);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(
        NodeAction.SHUTDOWN, message);
  }

  // 2. Check if it's a registered node
  RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
  if (rmNode == null) {
    /* node does not exist */
    String message = "Node not found resyncing " + remoteNodeStatus.getNodeId();
    LOG.info(message);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message);
  }

  // Send ping
  this.nmLivelinessMonitor.receivedPing(nodeId);
  this.decommissioningWatcher.update(rmNode, remoteNodeStatus);

  // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
  NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse();
  if (getNextResponseId(
      remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse
          .getResponseId()) {
    // Same heartbeat delivered twice: replay the previous response.
    LOG.info("Received duplicate heartbeat from node "
        + rmNode.getNodeAddress() + " responseId=" + remoteNodeStatus.getResponseId());
    return lastNodeHeartbeatResponse;
  } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse
      .getResponseId()) {
    // Node fell out of sequence: force it to resync.
    String message =
        "Too far behind rm response id:"
            + lastNodeHeartbeatResponse.getResponseId() + " nm response id:"
            + remoteNodeStatus.getResponseId();
    LOG.info(message);
    // TODO: Just sending reboot is not enough. Think more.
    this.rmContext.getDispatcher().getEventHandler().handle(
        new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING));
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message);
  }

  // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED.
  if (rmNode.getState() == NodeState.DECOMMISSIONING
      && decommissioningWatcher.checkReadyToBeDecommissioned(
          rmNode.getNodeID())) {
    String message = "DECOMMISSIONING " + nodeId + " is ready to be decommissioned";
    LOG.info(message);
    this.rmContext.getDispatcher().getEventHandler().handle(
        new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION));
    this.nmLivelinessMonitor.unregister(nodeId);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(
        NodeAction.SHUTDOWN, message);
  }

  if (timelineServiceV2Enabled) {
    // Check & update collectors info from request.
    updateAppCollectorsMap(request);
  }

  // Heartbeat response
  long newInterval = nextHeartBeatInterval;
  if (heartBeatIntervalScalingEnable) {
    newInterval = rmNode.calculateHeartBeatInterval(
        nextHeartBeatInterval, heartBeatIntervalMin,
        heartBeatIntervalMax, heartBeatIntervalSpeedupFactor,
        heartBeatIntervalSlowdownFactor);
  }
  NodeHeartbeatResponse nodeHeartBeatResponse =
      YarnServerBuilderUtils.newNodeHeartbeatResponse(
          getNextResponseId(lastNodeHeartbeatResponse.getResponseId()),
          NodeAction.NORMAL, null, null, null, null, newInterval);
  rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse);

  populateKeys(request, nodeHeartBeatResponse);

  populateTokenSequenceNo(request, nodeHeartBeatResponse);

  if (timelineServiceV2Enabled) {
    // Return collectors' map that NM needs to know
    setAppCollectorsMapToResponse(rmNode.getRunningApps(), nodeHeartBeatResponse);
  }

  // 4. Send status to RMNode, saving the latest response.
  RMNodeStatusEvent nodeStatusEvent =
      new RMNodeStatusEvent(nodeId, remoteNodeStatus);
  if (request.getLogAggregationReportsForApps() != null
      && !request.getLogAggregationReportsForApps().isEmpty()) {
    nodeStatusEvent.setLogAggregationReportsForApps(request
        .getLogAggregationReportsForApps());
  }
  this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent);

  // 5. Update node's labels to RM's NodeLabelManager.
  if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) {
    try {
      updateNodeLabelsFromNMReport(
          NodeLabelsUtils.convertToStringSet(request.getNodeLabels()),
          nodeId);
      nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true);
    } catch (IOException ex) {
      //ensure the error message is captured and sent across in response
      nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage());
      nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false);
    }
  }

  // 6. check if node's capacity is load from dynamic-resources.xml
  // if so, send updated resource back to NM.
  String nid = nodeId.toString();
  Resource capability = loadNodeResourceFromDRConfiguration(nid);
  // sync back with new resource if not null.
  if (capability != null) {
    nodeHeartBeatResponse.setResource(capability);
  }
  // Check if we got an event (AdminService) that updated the resources
  if (rmNode.isUpdatedCapability()) {
    nodeHeartBeatResponse.setResource(rmNode.getTotalCapability());
    rmNode.resetUpdatedCapability();
  }

  // 7. Send Container Queuing Limits back to the Node. This will be used by
  // the node to truncate the number of Containers queued for execution.
  if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) {
    nodeHeartBeatResponse.setContainerQueuingLimit(
        this.rmContext.getNodeManagerQueueLimitCalculator()
            .createContainerQueuingLimit());
  }

  // 8. Get node's attributes and update node-to-attributes mapping
  // in RMNodeAttributeManager.
  if (request.getNodeAttributes() != null) {
    try {
      // update node attributes if necessary then update heartbeat response
      updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
      nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true);
    } catch (IOException ex) {
      //ensure the error message is captured and sent across in response
      String errorMsg =
          nodeHeartBeatResponse.getDiagnosticsMessage() == null
              ? ex.getMessage()
              : nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex.getMessage();
      nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg);
      nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false);
    }
  }

  return nodeHeartBeatResponse;
}
@Test
public void testResponseIdOverflow() throws Exception {
    Configuration conf = new Configuration();
    rm = new MockRM(conf);
    rm.start();

    MockNM nm1 = rm.registerNode("host1:1234", 5120);
    NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
    Assert.assertEquals(NodeAction.NORMAL, nodeHeartbeat.getNodeAction());

    // prepare the responseId that's about to overflow
    RMNode node = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
    node.getLastNodeHeartBeatResponse().setResponseId(Integer.MAX_VALUE);
    nm1.setResponseId(Integer.MAX_VALUE);

    // heartbeat twice and check responseId: it must wrap to 0, then 1,
    // without triggering a resync.
    nodeHeartbeat = nm1.nodeHeartbeat(true);
    Assert.assertEquals(NodeAction.NORMAL, nodeHeartbeat.getNodeAction());
    Assert.assertEquals(0, nodeHeartbeat.getResponseId());

    nodeHeartbeat = nm1.nodeHeartbeat(true);
    Assert.assertEquals(NodeAction.NORMAL, nodeHeartbeat.getNodeAction());
    Assert.assertEquals(1, nodeHeartbeat.getResponseId());
}
@Override public final boolean offer(int ordinal, @Nonnull Object item) { if (ordinal == -1) { return offerInternal(allEdges, item); } else { if (ordinal == bucketCount()) { // ordinal beyond bucketCount will add to snapshot queue, which we don't allow through this method throw new IllegalArgumentException("Illegal edge ordinal: " + ordinal); } singleEdge[0] = ordinal; return offerInternal(singleEdge, item); } }
@Test
public void when_offer4FailsAndDifferentItemOffered_then_fail() {
    // After a rejected offer, offering a different item must fail.
    do_when_offerDifferent_then_fail(e -> outbox.offer(new int[] {0}, e));
}
/**
 * Configures which CSV fields to parse via a boolean mask. The mask is
 * truncated after the last included field; at least one field must be true.
 */
public CsvReader includeFields(boolean... fields) {
    if (fields == null || fields.length == 0) {
        throw new IllegalArgumentException(
            "The set of included fields must not be null or empty.");
    }

    // Find the highest index that is still included.
    int highestIncluded = -1;
    for (int pos = 0; pos < fields.length; pos++) {
        if (fields[pos]) {
            highestIncluded = pos;
        }
    }

    if (highestIncluded < 0) {
        throw new IllegalArgumentException(
            "The description of fields to parse excluded all fields. At least one fields must be included.");
    }

    // Trailing excluded fields carry no information, so drop them.
    this.includedMask = (highestIncluded == fields.length - 1)
        ? fields
        : Arrays.copyOfRange(fields, 0, highestIncluded + 1);
    return this;
}
// The string mask "1t0Tfht" contains 'h', which is not a recognized mask
// character here; the reader must reject the whole mask with
// IllegalArgumentException rather than silently skipping it.
@Test
void testIllegalCharInStringMask() {
    CsvReader reader = getCsvReader();
    assertThatThrownBy(() -> reader.includeFields("1t0Tfht"))
            .withFailMessage("Reader accepted an invalid mask string")
            .isInstanceOf(IllegalArgumentException.class);
}
// Builds the HTTP response that streams a worker log file to the caller.
// The file's permissions are adjusted first so the logviewer can read it;
// authorization and response construction are delegated to the helper.
// The trailing 'false' flag presumably distinguishes worker logs from
// daemon logs -- TODO confirm against logFileDownloadHelper.downloadFile.
public Response downloadLogFile(String host, String fileName, String user) throws IOException {
    workerLogs.setLogFilePermission(fileName);
    return logFileDownloadHelper.downloadFile(host, fileName, user, false);
}
// Downloads worker logs for two topologies and verifies both respond 200
// with a non-null entity and a Content-Disposition filename of the form
// host-topo-port-worker.log (path separators flattened to '-').
@Test
public void testDownloadLogFile() throws IOException {
    try (TmpPath rootPath = new TmpPath()) {
        LogviewerLogDownloadHandler handler = createHandlerTraversalTests(rootPath.getFile().toPath());
        Response topoAResponse = handler.downloadLogFile("host", "topoA/1111/worker.log", "user");
        Response topoBResponse = handler.downloadLogFile("host", "topoB/1111/worker.log", "user");
        // The fixture can be deleted before asserting: responses are already built.
        Utils.forceDelete(rootPath.toString());
        assertThat(topoAResponse.getStatus(), is(Response.Status.OK.getStatusCode()));
        assertThat(topoAResponse.getEntity(), not(nullValue()));
        String topoAContentDisposition = topoAResponse.getHeaderString(HttpHeaders.CONTENT_DISPOSITION);
        assertThat(topoAContentDisposition, containsString("host-topoA-1111-worker.log"));
        assertThat(topoBResponse.getStatus(), is(Response.Status.OK.getStatusCode()));
        assertThat(topoBResponse.getEntity(), not(nullValue()));
        String topoBContentDisposition = topoBResponse.getHeaderString(HttpHeaders.CONTENT_DISPOSITION);
        assertThat(topoBContentDisposition, containsString("host-topoB-1111-worker.log"));
    }
}
/**
 * Returns the __consumer_offsets partition responsible for the given group.
 *
 * @param groupId the consumer group id
 * @return the owning partition index in {@code [0, numPartitions)}
 */
@Override
public int partitionFor(String groupId) {
    throwIfNotActive();
    // Utils.abs yields a non-negative hash even for Integer.MIN_VALUE,
    // so the modulo result is always a valid partition index.
    final int positiveHash = Utils.abs(groupId.hashCode());
    return positiveHash % numPartitions;
}
// partitionFor must throw while the coordinator is not started and, once
// started with 10 partitions, must agree with Utils.abs(hashCode) % 10.
@Test
public void testPartitionFor() {
    CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
    GroupCoordinatorService service = new GroupCoordinatorService(
        new LogContext(),
        createConfig(),
        runtime,
        new GroupCoordinatorMetrics(),
        createConfigManager()
    );
    // Not started yet: lookups must be rejected.
    assertThrows(CoordinatorNotAvailableException.class, () -> service.partitionFor("foo"));
    service.startup(() -> 10);
    assertEquals(Utils.abs("foo".hashCode()) % 10, service.partitionFor("foo"));
}
/**
 * Translates the protobuf NamenodeRole into the internal enum.
 *
 * @param role the protobuf role value
 * @return the matching {@code NamenodeRole}, or {@code null} for any value
 *         not covered by the cases below (matching the original behaviour)
 */
public static NamenodeRole convert(NamenodeRoleProto role) {
    switch (role) {
        case NAMENODE:
            return NamenodeRole.NAMENODE;
        case BACKUP:
            return NamenodeRole.BACKUP;
        case CHECKPOINT:
            return NamenodeRole.CHECKPOINT;
        default:
            return null;
    }
}
// Round-trips a block access token through its protobuf form; identifier,
// password, kind and service must survive unchanged (checked by compare()).
@Test
public void testConvertBlockToken() {
    Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
        "identifier".getBytes(), "password".getBytes(), new Text("kind"), new Text("service"));
    TokenProto tokenProto = PBHelperClient.convert(token);
    Token<BlockTokenIdentifier> token2 = PBHelperClient.convert(tokenProto);
    compare(token, token2);
}
// Maps an OSM via-way turn restriction onto graph edges. For every
// (from, to) pair there must be exactly one edge chain through the
// via-ways: fewer solutions than pairs means some member ways are
// disconnected, more means the member ways do not form a unique path.
// Multiple from-ways and multiple to-ways at the same time are rejected
// up front.
public EdgeResult convertForViaWays(LongArrayList fromWays, LongArrayList viaWays, LongArrayList toWays) throws OSMRestrictionException {
    if (fromWays.isEmpty() || toWays.isEmpty() || viaWays.isEmpty())
        throw new IllegalArgumentException("There must be at least one from-, via- and to-way");
    if (fromWays.size() > 1 && toWays.size() > 1)
        throw new IllegalArgumentException("There can only be multiple from- or to-ways, but not both");
    // One solution (edge chain) is collected per connected (from, to) pair.
    List<IntArrayList> solutions = new ArrayList<>();
    for (LongCursor fromWay : fromWays)
        for (LongCursor toWay : toWays)
            findEdgeChain(fromWay.value, viaWays, toWay.value, solutions);
    if (solutions.size() < fromWays.size() * toWays.size())
        throw new OSMRestrictionException("has disconnected member ways");
    else if (solutions.size() > fromWays.size() * toWays.size())
        throw new OSMRestrictionException("has member ways that do not form a unique path");
    return buildResult(solutions, new EdgeResult(fromWays.size(), viaWays.size(), toWays.size()));
}
// A via-way can map to several edges reported in arbitrary order; the
// converter must still stitch them into the correct 0->4 node chain.
@Test
void convertForViaWays_multipleEdgesForViaWay_oppositeDirection() throws OSMRestrictionException {
    BaseGraph graph = new BaseGraph.Builder(1).create();
    graph.edge(0, 1);
    graph.edge(1, 2);
    graph.edge(2, 3);
    graph.edge(3, 4);
    LongFunction<Iterator<IntCursor>> edgesByWay = way -> {
        if (way == 0) return IntArrayList.from(0).iterator();
        // way 1 is split into the two edges 2, 1 (the wrong order)
        // Accepting an arbitrary order is important, because OSM ways are generally split into multiple edges
        // and a via-way might be pointing in the 'wrong' direction.
        else if (way == 1) return IntArrayList.from(2, 1).iterator();
        else if (way == 2) return IntArrayList.from(3).iterator();
        else throw new IllegalArgumentException();
    };
    WayToEdgeConverter.EdgeResult edgeResult = new WayToEdgeConverter(graph, edgesByWay).convertForViaWays(ways(0), ways(1), ways(2));
    assertEquals(IntArrayList.from(1, 2), edgeResult.getViaEdges());
    assertEquals(IntArrayList.from(1, 2, 3), edgeResult.getNodes());
}
// Asynchronous close using default options: delegates to the two-argument
// overload with (true, false). The exact semantics of the two flags are
// defined by that overload -- TODO confirm what each flag controls there.
public CompletableFuture<Void> close() {
    return close(true, false);
}
// Unloading a bundle of a partitioned topic must succeed: only the topic's
// partitions (never the partitioned-topic name itself) appear in the
// broker's topic map, and no reference to the parent name is held.
@Test
public void testPersistentPartitionedTopicUnload() throws Exception {
    final String topicName = "persistent://prop/ns/failedUnload";
    final String ns = "prop/ns";
    final int partitions = 5;
    final int producers = 1;
    // ensure that the number of bundle is greater than 1
    final int bundles = 2;
    admin.namespaces().createNamespace(ns, bundles);
    admin.topics().createPartitionedTopic(topicName, partitions);
    List<Producer> producerSet = new ArrayList<>();
    for (int i = 0; i < producers; i++) {
        producerSet.add(pulsarClient.newProducer(Schema.STRING).topic(topicName).create());
    }
    assertFalse(pulsar.getBrokerService().getTopics().containsKey(topicName));
    pulsar.getBrokerService().getTopicIfExists(topicName).get();
    // The map topics should only contain partitions, does not contain partitioned topic.
    assertFalse(pulsar.getBrokerService().getTopics().containsKey(topicName));
    // ref of partitioned-topic name should be empty
    assertFalse(pulsar.getBrokerService().getTopicReference(topicName).isPresent());
    NamespaceBundle bundle = pulsar.getNamespaceService().getBundle(TopicName.get(topicName));
    pulsar.getNamespaceService().unloadNamespaceBundle(bundle, 5, TimeUnit.SECONDS).get();
    for (Producer producer : producerSet) {
        producer.close();
    }
}
// Trims the record according to the projection mask. Thin adapter: unwraps
// the template's raw DataMap and schema and delegates to the data-level
// overload; failOnMismatch controls whether mask/schema mismatches throw.
public static void trimRecordTemplate(RecordTemplate recordTemplate, MaskTree override, final boolean failOnMismatch) {
    trimRecordTemplate(recordTemplate.data(), recordTemplate.schema(), override, failOnMismatch);
}
// Trimming (failOnMismatch = false) must strip unknown fields injected into
// array elements, restoring the record to the copy taken before pollution.
@Test
public void testArrayTrim() throws CloneNotSupportedException {
    TyperefTest test = new TyperefTest();
    RecordBarArray array = new RecordBarArray();
    RecordBar recordBar = new RecordBar();
    recordBar.setLocation("mountain view");
    array.add(recordBar);
    RecordBar recordBar2 = new RecordBar();
    recordBar2.setLocation("palo alto");
    array.add(recordBar2);
    test.setRecordArray(array);
    // Generate expected copy.
    TyperefTest expected = test.copy();
    // Introduce bad elements.
    test.getRecordArray().get(0).data().put("troublemaker", "foo");
    test.getRecordArray().get(0).data().put("troublemaker2", "foo");
    test.getRecordArray().get(1).data().put("troublemaker", "foo");
    test.getRecordArray().get(1).data().put("troublemaker2", "foo");
    // Each element now carries location + the two injected keys.
    Assert.assertEquals(test.getRecordArray().get(0).data().size(), 3);
    Assert.assertEquals(test.getRecordArray().get(1).data().size(), 3);
    RestUtils.trimRecordTemplate(test, false);
    Assert.assertEquals(test, expected);
}
// Parses a JSON string into an Avro GenericRecord for the given schema.
// The JSON is first read into a raw Map and then resolved against the
// schema; shouldSanitize/invalidCharMask control field-name sanitization.
// Parse failures are rethrown as the unchecked HoodieIOException.
public GenericRecord convert(String json, Schema schema) {
    try {
        Map<String, Object> jsonObjectMap = mapper.readValue(json, Map.class);
        return convertJsonToAvro(jsonObjectMap, schema, shouldSanitize, invalidCharMask);
    } catch (IOException e) {
        throw new HoodieIOException(e.getMessage(), e);
    }
}
// JSON keys that match a field's Avro aliases (including characters illegal
// in Avro names) must be mapped onto the canonical field names; the
// unmatched field falls back to its schema default.
@Test
public void conversionWithFieldNameAliases() throws IOException {
    String schemaStringWithAliases = "{\"namespace\": \"example.avro\", \"type\": \"record\", \"name\": \"User\", \"fields\": [{\"name\": \"name\", \"type\": \"string\", \"aliases\": [\"$name\"]}, "
        + "{\"name\": \"favorite_number\", \"type\": \"int\", \"aliases\": [\"unused\", \"favorite-number\"]}, {\"name\": \"favorite_color\", \"type\": \"string\", \"aliases\": "
        + "[\"favorite.color!\"]}, {\"name\": \"unmatched\", \"type\": \"string\", \"default\": \"default_value\"}]}";
    Schema sanitizedSchema = Schema.parse(schemaStringWithAliases);
    String name = "John Smith";
    int number = 1337;
    String color = "Blue. No yellow!";
    // Input JSON uses only the alias spellings of the field names.
    Map<String, Object> data = new HashMap<>();
    data.put("$name", name);
    data.put("favorite-number", number);
    data.put("favorite.color!", color);
    String json = MAPPER.writeValueAsString(data);
    GenericRecord rec = new GenericData.Record(sanitizedSchema);
    rec.put("name", name);
    rec.put("favorite_number", number);
    rec.put("favorite_color", color);
    Assertions.assertEquals(rec, CONVERTER.convert(json, sanitizedSchema));
}
/**
 * Converts an array of Spark filters into a single Iceberg expression by
 * AND-ing the individual conversions together.
 *
 * @param filters the Spark filters to convert; must all be convertible
 * @return the conjunction of all converted filters (alwaysTrue for empty input)
 * @throws IllegalArgumentException if any filter has no Iceberg equivalent
 */
public static Expression convert(Filter[] filters) {
    Expression result = Expressions.alwaysTrue();
    for (int i = 0; i < filters.length; i++) {
        Expression converted = convert(filters[i]);
        // A null conversion means the filter is unsupported: fail loudly
        // rather than silently widening the scan.
        Preconditions.checkArgument(
            converted != null, "Cannot convert filter to Iceberg: %s", filters[i]);
        result = Expressions.and(result, converted);
    }
    return result;
}
// Instant and java.sql.Timestamp filter values must both convert to the
// same Iceberg expression expressed in epoch microseconds.
@Test
public void testTimestampFilterConversion() {
    Instant instant = Instant.parse("2018-10-18T00:00:57.907Z");
    Timestamp timestamp = Timestamp.from(instant);
    long epochMicros = ChronoUnit.MICROS.between(Instant.EPOCH, instant);
    Expression instantExpression = SparkFilters.convert(GreaterThan.apply("x", instant));
    Expression timestampExpression = SparkFilters.convert(GreaterThan.apply("x", timestamp));
    Expression rawExpression = Expressions.greaterThan("x", epochMicros);
    Assert.assertEquals(
        "Generated Timestamp expression should be correct",
        rawExpression.toString(),
        timestampExpression.toString());
    Assert.assertEquals(
        "Generated Instant expression should be correct",
        rawExpression.toString(),
        instantExpression.toString());
}
public static void tryShutdown(HazelcastInstance hazelcastInstance) { if (hazelcastInstance == null) { return; } HazelcastInstanceImpl factory = (HazelcastInstanceImpl) hazelcastInstance; closeSockets(factory); try { factory.node.shutdown(true); } catch (Throwable ignored) { ignore(ignored); } }
@Test public void testTryShutdown_shouldDoNothingWithNullInstance() { tryShutdown(null); }
/**
 * Serializes the given map as a JSON object string.
 *
 * <p>Values are rendered by runtime type: nested {@code Map}s recurse,
 * {@code Object[]} and {@code Collection}s become JSON arrays, {@code int[]}
 * is rendered via {@code Arrays.toString} with whitespace stripped,
 * {@code JsonCapableObject} contributes its own map, and anything else is
 * stringified with {@code String.valueOf}. Returns {@code "{}"} if any
 * failure occurs while building the document.
 *
 * @param jsonData key/value pairs to serialize; keys become JSON keys
 * @return the JSON object string, or {@code "{}"} on failure
 */
public static String jsonFromMap(Map<String, Object> jsonData) {
    try {
        JsonDocument json = new JsonDocument();
        json.startGroup();
        // Iterate entries directly instead of keySet() + get(key):
        // one map lookup per entry instead of two.
        for (Map.Entry<String, Object> entry : jsonData.entrySet()) {
            String key = entry.getKey();
            Object data = entry.getValue();
            if (data instanceof Map) {
                /* it's a nested map, so we'll recursively add the JSON of this map to the current JSON */
                json.addValue(key, jsonFromMap((Map<String, Object>) data));
            } else if (data instanceof Object[]) {
                /* it's an object array, so we'll iterate the elements and put them all in here */
                json.addValue(key, "[" + stringArrayFromObjectArray((Object[]) data) + "]");
            } else if (data instanceof Collection) {
                /* it's a collection, so we'll iterate the elements and put them all in here */
                json.addValue(key, "[" + stringArrayFromObjectArray(((Collection) data).toArray()) + "]");
            } else if (data instanceof int[]) {
                /* it's an int array, so we'll get the string representation */
                String intArray = Arrays.toString((int[]) data);
                /* remove whitespace; plain replace() avoids the regex machinery of replaceAll() */
                intArray = intArray.replace(" ", "");
                json.addValue(key, intArray);
            } else if (data instanceof JsonCapableObject) {
                json.addValue(key, jsonFromMap(((JsonCapableObject) data).jsonMap()));
            } else {
                /* all other objects we assume we are to just put the string value in */
                json.addValue(key, String.valueOf(data));
            }
        }
        json.endGroup();
        logger.debug("created json from map => {}", json);
        return json.toString();
    } catch (Exception e) {
        logger.error("Could not create JSON from Map. ", e);
        return "{}";
    }
}
// An int[] value must serialize as a compact JSON number array (no spaces).
@Test
void testArrayOne() {
    Map<String, Object> jsonData = new LinkedHashMap<String, Object>();
    int[] numbers = {1, 2, 3, 4};
    jsonData.put("myKey", numbers);
    String json = JsonUtility.jsonFromMap(jsonData);
    String expected = "{\"myKey\":[1,2,3,4]}";
    assertEquals(expected, json);
}
// If the streams config names a RocksDBConfigSetter class AND that class
// implements Kafka's Configurable interface, instantiate it eagerly and
// hand it the full KSQL originals so it can configure itself up front.
// Any failure is wrapped in a ConfigException naming the offending class.
public static void maybeConfigureRocksDBConfigSetter(final KsqlConfig ksqlConfig) {
    final Map<String, Object> streamsProps = ksqlConfig.getKsqlStreamConfigProps();
    final Class<?> clazz = (Class) streamsProps.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (clazz != null && org.apache.kafka.common.Configurable.class.isAssignableFrom(clazz)) {
        try {
            ((org.apache.kafka.common.Configurable) Utils.newInstance(clazz))
                .configure(ksqlConfig.originals());
        } catch (final Exception e) {
            throw new ConfigException(
                "Failed to configure Configurable RocksDBConfigSetter. "
                    + StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG
                    + ": " + clazz.getName(),
                e);
        }
    }
}
// Given a Configurable RocksDBConfigSetter class in the streams config,
// the handler must instantiate it and call configure() with the KSQL
// originals -- proved via the mock Runnable the test setter invokes.
@Test
public void shouldConfigure() throws Exception {
    // Given:
    when(ksqlConfig.getKsqlStreamConfigProps()).thenReturn(
        ImmutableMap.of(
            StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG,
            Class.forName("io.confluent.ksql.rest.util.RocksDBConfigSetterHandlerTest$ConfigurableTestRocksDBConfigSetter"))
    );
    final Runnable mockRunnable = mock(Runnable.class);
    when(ksqlConfig.originals()).thenReturn(
        ImmutableMap.of(ConfigurableTestRocksDBConfigSetter.TEST_CONFIG, mockRunnable));
    // When:
    RocksDBConfigSetterHandler.maybeConfigureRocksDBConfigSetter(ksqlConfig);
    // Then:
    verify(mockRunnable).run();
}
// Prepares the TaskExecutor configuration from the container environment:
// rewires the tmp directories to YARN's local dirs, then applies the
// remaining env-derived settings (keytab, principal, ...) via
// setupConfigurationFromVariables.
@VisibleForTesting
static void setupAndModifyConfiguration(
        Configuration configuration, String currDir, Map<String, String> variables)
        throws Exception {
    final String localDirs = variables.get(Environment.LOCAL_DIRS.key());
    LOG.info("Current working/local Directory: {}", localDirs);
    BootstrapTools.updateTmpDirectoriesInConfiguration(configuration, localDirs);
    setupConfigurationFromVariables(configuration, currDir, variables);
}
// Pre-installed (local) keytab: when no remote keytab path is provided, the
// local path from the environment must flow through unchanged into both the
// HadoopModule security config and the Flink Configuration.
@Test
public void testPreInstallKerberosKeytabConfiguration() throws Exception {
    final String resourceDirPath = Paths.get("src", "test", "resources").toAbsolutePath().toString();
    final Map<String, String> envs = new HashMap<>(2);
    envs.put(YarnConfigKeys.KEYTAB_PRINCIPAL, "testuser1@domain");
    // Try directly resolving local path when no remote keytab path is provided.
    envs.put(YarnConfigKeys.LOCAL_KEYTAB_PATH, "src/test/resources/krb5.keytab");
    Configuration configuration = new Configuration();
    YarnTaskExecutorRunner.setupAndModifyConfiguration(configuration, resourceDirPath, envs);
    // the SecurityContext is installed on TaskManager startup
    SecurityUtils.install(new SecurityConfiguration(configuration));
    final List<SecurityModule> modules = SecurityUtils.getInstalledModules();
    Optional<SecurityModule> moduleOpt =
        modules.stream().filter(module -> module instanceof HadoopModule).findFirst();
    if (moduleOpt.isPresent()) {
        HadoopModule hadoopModule = (HadoopModule) moduleOpt.get();
        assertThat(hadoopModule.getSecurityConfig().getPrincipal())
            .isEqualTo("testuser1@domain");
        // Using containString verification as the absolute path varies depending on runtime
        // environment
        assertThat(hadoopModule.getSecurityConfig().getKeytab())
            .containsSequence("src/test/resources/krb5.keytab");
    } else {
        fail("Can not find HadoopModule!");
    }
    assertThat(configuration.get(SecurityOptions.KERBEROS_LOGIN_KEYTAB))
        .containsSequence("src/test/resources/krb5.keytab");
    assertThat(configuration.get(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL))
        .isEqualTo("testuser1@domain");
}
// Range endpoints must be of the same runtime class and each individually
// valid. Identity comparison of Class objects is safe: Class instances are
// canonical per class loader. On a type mismatch the listener is notified
// before EndpointOfRangeOfDifferentTypeException is thrown; per-value
// validation (and its own exception) is delegated to valueMustBeValid.
static void validateValues(EvaluationContext ctx, Object start, Object end) {
    if (start.getClass() != end.getClass()) {
        ctx.notifyEvt(() -> new ASTEventBase(FEELEvent.Severity.ERROR,
                Msg.createMessage(Msg.X_TYPE_INCOMPATIBLE_WITH_Y_TYPE, start, end), null));
        throw new EndpointOfRangeOfDifferentTypeException();
    }
    valueMustBeValid(ctx, start);
    valueMustBeValid(ctx, end);
}
// Exercises validateValues error paths: invalid endpoint values and
// endpoints of differing types; each must raise the matching exception and
// notify the FEEL event listener exactly once.
// NOTE(review): there is no fail() after each validateValues(...) call, so
// if no exception is thrown the catch-block assertions are silently
// skipped -- assertThrows would make the intent explicit.
@Test
void validateValuesFalseTest() {
    try {
        validateValues(ctx, "INVALID", "INVALID");
    } catch (Exception e) {
        assertTrue(e instanceof EndpointOfRangeNotValidTypeException);
        final ArgumentCaptor<FEELEvent> captor = ArgumentCaptor.forClass(FEELEvent.class);
        verify(listener, times(1)).onEvent(captor.capture());
        reset(listener);
    }
    try {
        validateValues(ctx, BigDecimal.valueOf(1), LocalDate.of(2021, 1, 1));
    } catch (Exception e) {
        assertTrue(e instanceof EndpointOfRangeOfDifferentTypeException);
        final ArgumentCaptor<FEELEvent> captor = ArgumentCaptor.forClass(FEELEvent.class);
        verify(listener, times(1)).onEvent(captor.capture());
        reset(listener);
    }
    try {
        validateValues(ctx, LocalDate.of(2021, 1, 1), BigDecimal.valueOf(1));
    } catch (Exception e) {
        assertTrue(e instanceof EndpointOfRangeOfDifferentTypeException);
        final ArgumentCaptor<FEELEvent> captor = ArgumentCaptor.forClass(FEELEvent.class);
        verify(listener, times(1)).onEvent(captor.capture());
        reset(listener);
    }
}
// Deserializes JSON bytes from a Kafka topic into the target KSQL type.
// Null payloads map to null (tombstones). Schema-Registry-framed JSON is
// read via readJsonSR, plain JSON via the shared MAPPER; the tree is then
// coerced to the declared schema starting at the root path "$". All
// failures surface as SerializationException.
@Override
public T deserialize(final String topic, final byte[] bytes) {
    try {
        if (bytes == null) {
            return null;
        }
        // don't use the JsonSchemaConverter to read this data because
        // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
        // which is not currently available in the standard converters
        final JsonNode value = isJsonSchema
            ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
            : MAPPER.readTree(bytes);
        final Object coerced = enforceFieldType(
            "$",
            new JsonValueContext(value, schema)
        );
        if (LOG.isTraceEnabled()) {
            LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
        }
        return SerdeUtils.castToTargetType(coerced, targetType);
    } catch (final Exception e) {
        // Clear location in order to avoid logging data, for security reasons
        if (e instanceof JsonParseException) {
            ((JsonParseException) e).clearLocation();
        }
        throw new SerializationException(
            "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
    }
}
// A JSON boolean cannot be coerced to MAP<VARCHAR, INT>: deserialization
// must fail with a SerializationException whose cause names the source and
// required types.
@Test
public void shouldThrowIfNotAnMap() {
    // Given:
    final KsqlJsonDeserializer<Map> deserializer = givenDeserializerForSchema(
        SchemaBuilder
            .map(Schema.OPTIONAL_STRING_SCHEMA, Schema.INT32_SCHEMA)
            .build(),
        Map.class
    );
    final byte[] bytes = serializeJson(BooleanNode.valueOf(true));
    // When:
    final Exception e = assertThrows(
        SerializationException.class,
        () -> deserializer.deserialize(SOME_TOPIC, bytes)
    );
    // Then:
    assertThat(e.getCause(), (hasMessage(startsWith(
        "Can't convert type. sourceType: BooleanNode, requiredType: MAP<VARCHAR, INT>"))));
}
/**
 * Builds the registry node path for a database: {@code /<root>/<databaseName>}.
 *
 * @param databaseName the database name to append
 * @return the absolute node path under the root node
 */
public static String getDatabaseNameNodePath(final String databaseName) {
    // Equivalent to String.join("/", "", ROOT_NODE, databaseName):
    // the leading separator comes from the empty first segment.
    return "/" + ROOT_NODE + "/" + databaseName;
}
// The database node path is "/<root>/<db>"; for this node type the root
// segment is "listener_assisted".
@Test
void assertGetDatabaseNameNodePath() {
    assertThat(ListenerAssistedNodePath.getDatabaseNameNodePath("foo_db"), is("/listener_assisted/foo_db"));
}