focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Builds the audit context message for a resource change. Create events (no
// "before" state) only report the title and lower-cased event type; updates
// additionally append the field-level contrast() diff.
@Override
public String buildContext() {
    final ResourceDO after = (ResourceDO) getAfter();
    if (Objects.isNull(getBefore())) {
        return String.format("the resource [%s] is %s", after.getTitle(),
            StringUtils.lowerCase(getType().getType().toString()));
    }
    return String.format("the resource [%s] is %s : %s", after.getTitle(),
        StringUtils.lowerCase(getType().getType().toString()), contrast());
}
// Verifies buildContext() for an update event: the expected message embeds the
// lower-cased RESOURCE_UPDATE type and a contrast string listing the
// name/component/icon/title/sort/perms before=>after pairs.
@Test
public void resourceChangeBuildContextTest() {
    final StringBuilder contrast = new StringBuilder();
    contrast.append(String.format("name[%s => %s] ", before.getName(), after.getName()));
    contrast.append(String.format("component[%s => %s] ", before.getComponent(), after.getComponent()));
    contrast.append(String.format("match icon[%s => %s] ", before.getIcon(), after.getIcon()));
    contrast.append(String.format("title[%s => %s] ", before.getTitle(), after.getTitle()));
    contrast.append(String.format("sort[%s => %s] ", before.getSort(), after.getSort()));
    contrast.append(String.format("perms[%s => %s] ", before.getPerms(), after.getPerms()));
    String typeStr = StringUtils.lowerCase(EventTypeEnum.RESOURCE_UPDATE.getType().toString());
    String context = String.format("the resource [%s] is %s : %s", after.getTitle(), typeStr, contrast);
    ResourceChangedEvent resourceChangedEvent =
        new ResourceChangedEvent(after, before, EventTypeEnum.RESOURCE_UPDATE, "test-operator");
    assertEquals(context, resourceChangedEvent.buildContext());
}
@Udtf public <T> List<List<T>> cube(final List<T> columns) { if (columns == null) { return Collections.emptyList(); } return createAllCombinations(columns); }
// cube() over two heterogeneous columns must produce all four null/value
// combinations, ordered with nulls first.
@Test
public void shouldCubeColumnsWithDifferentTypes() {
    // Given:
    final Object[] args = {1, "foo"};
    // When:
    final List<List<Object>> result = cubeUdtf.cube(Arrays.asList(args));
    // Then:
    assertThat(result.size(), is(4));
    assertThat(result.get(0), is(Arrays.asList(null, null)));
    assertThat(result.get(1), is(Arrays.asList(null, "foo")));
    assertThat(result.get(2), is(Arrays.asList(1, null)));
    assertThat(result.get(3), is(Arrays.asList(1, "foo")));
}
// INT overload of cot(): widens to the Double overload, preserving null.
@Udf(description = "Returns the cotangent of an INT value")
public Double cot(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the cotangent of."
    ) final Integer value
) {
    if (value == null) {
        return cot((Double) null);
    }
    return cot(value.doubleValue());
}
// Every numeric overload of cot() must map a null input to a null result.
@Test
public void shouldHandleNull() {
    assertThat(udf.cot((Integer) null), is(nullValue()));
    assertThat(udf.cot((Long) null), is(nullValue()));
    assertThat(udf.cot((Double) null), is(nullValue()));
}
// Removes the currently selected model object and moves the selection to its
// predecessor (or to the element now occupying position 0 when the first item
// was removed). Selection becomes null when the list empties.
public void remove() {
    // Capture the index before removal so it still addresses a valid neighbour afterwards.
    int index = this.model.getModelObjects().indexOf( this.model.getSelectedItem() );
    if ( index >= 1 ) {
        index -= 1;
    }
    this.model.getModelObjects().remove( this.model.getSelectedItem() );
    if ( !model.getModelObjects().isEmpty() ) {
        this.model.setSelectedItem( model.getModelObjects().get( index ) );
    } else {
        this.model.setSelectedItem( null );
    }
}
// Removing the selected provider must leave 7 model objects — assumes the
// `providers` fixture contains 8 entries (TODO confirm against the fixture).
@Test
public void testRemove() {
    controller.addProviders( providers );
    controller.getModel().setSelectedItem( controller.getModel().getModelObjects().get( 0 ) );
    controller.remove();
    assertEquals( 7, controller.getModel().getModelObjects().size() );
}
// Parses a metric name of the form "<base><METRIC_NAME_DELIMITER><labels>",
// where labels are key/value pairs separated by LABEL_DELIMITER / joined by
// METRIC_KV_DELIMITER. Empty or base-less names yield empty; a malformed
// label section falls back to the bare base name instead of failing.
public static Optional<ParsedMetricName> parseMetricName(String metricName) {
    if (metricName.isEmpty()) {
        return Optional.empty();
    }
    // limit(2): only the first delimiter separates base name from labels.
    List<String> metricNameSplit = Splitter.on(METRIC_NAME_DELIMITER).limit(2).splitToList(metricName);
    if (metricNameSplit.size() == 0 || metricNameSplit.get(0).isEmpty()) {
        return Optional.empty();
    }
    if (metricNameSplit.size() == 1) {
        return Optional.of(ParsedMetricName.create(metricNameSplit.get(0)));
    }
    Splitter.MapSplitter splitter =
        Splitter.on(LABEL_DELIMITER).omitEmptyStrings().withKeyValueSeparator(METRIC_KV_DELIMITER);
    try {
        Map<String, String> labels = splitter.split(metricNameSplit.get(1));
        return Optional.of(ParsedMetricName.create(metricNameSplit.get(0), labels));
    } catch (IllegalArgumentException e) {
        // Malformed labels are tolerated: keep the base name, drop the labels.
        return Optional.of(ParsedMetricName.create(metricNameSplit.get(0)));
    }
}
// A malformed label section ("*" instead of a key/value separator) must fall
// back to a label-free parse of the base name.
@Test
public void testParseMetricName_malformedMetricLabels() {
    String metricName = "baseLabel*malformed_kv_pair;key2:val2;";
    LabeledMetricNameUtils.ParsedMetricName expectedName =
        LabeledMetricNameUtils.ParsedMetricName.create("baseLabel");
    Optional<LabeledMetricNameUtils.ParsedMetricName> parsedMetricName =
        LabeledMetricNameUtils.parseMetricName(metricName);
    assertThat(parsedMetricName.isPresent(), equalTo(true));
    assertThat(parsedMetricName.get(), equalTo(expectedName));
}
// Computes the classifier's specificity weight: one point per populated
// attribute, small fractional tie-breakers for query-type count and CIDR
// prefix length, and a dominant 10-points-per-database term.
public double weight() {
    double w = 0;
    if (user != null) {
        w += 1;
    }
    if (role != null) {
        w += 1;
    }
    if (planCpuCostRange != null) {
        w += 1;
    }
    if (planMemCostRange != null) {
        w += 1;
    }
    if (queryTypes != null && !queryTypes.isEmpty()) {
        // Fewer query types => more specific => slightly larger weight.
        w += 1 + 0.1 / queryTypes.size();
    }
    if (sourceIp != null) {
        // Longer (more specific) CIDR prefixes weigh slightly more.
        w += 1 + NetUtils.getCidrPrefixLength(sourceIp) / 64.0;
    }
    if (CollectionUtils.isNotEmpty(databaseIds)) {
        w += 10.0 * databaseIds.size();
    }
    return w;
}
// For every CIDR prefix length 0..32, weight() must be 1 + prefix/64.
@Test
public void testWeight() {
    ResourceGroupClassifier classifier = new ResourceGroupClassifier();
    for (int i = 0; i <= 32; i++) {
        classifier.setSourceIp("192.168.0.1/" + i);
        assertThat(classifier.weight()).isCloseTo(1 + i / 64., within(1e-5));
    }
}
// Returns the assembled DirectGraph. Only valid once the pipeline traversal
// has finalized this visitor; fails fast otherwise.
public DirectGraph getGraph() {
    checkState(finalized, "Can't get a graph before the Pipeline has been completely traversed");
    return DirectGraph.create(
        producers, viewWriters, perElementConsumers, rootTransforms, stepNames);
}
// Builds a small three-transform pipeline and checks that consecutive
// producing transforms receive sequential "s#" step names in the graph.
@Test
public void getStepNamesContainsAllTransforms() {
    PCollection<String> created = p.apply(Create.of("1", "2", "3"));
    PCollection<String> transformed = created.apply(
        ParDo.of(
            new DoFn<String, String>() {
                @ProcessElement
                public void processElement(DoFn<String, String>.ProcessContext c) throws Exception {
                    c.output(Integer.toString(c.element().length()));
                }
            }));
    transformed.apply(
        new PTransform<PInput, PDone>() {
            @Override
            public PDone expand(PInput input) {
                return PDone.in(input.getPipeline());
            }
        });
    p.traverseTopologically(visitor);
    DirectGraph graph = visitor.getGraph();
    // Step names are of the format "s#" such as "s0", "s1", ...
    int createdStepIndex =
        Integer.parseInt(graph.getStepName(graph.getProducer(created)).substring(1));
    assertThat(
        graph.getStepName(graph.getProducer(transformed)),
        equalTo("s" + (createdStepIndex + 1)));
    // finished doesn't have a producer, because it's not a PValue.
    // TODO: Demonstrate that PCollectionList/Tuple and other composite PValues are either safe to
    // use, or make them so.
}
// Intentionally empty package-private no-argument constructor.
Interned() {}
// An Interned node built over a cleared WeakReference still reports a live
// (true) value, transitions retire() -> isRetired and die() -> isDead.
@Test
public void interned() {
    var node = new Interned<Object, Boolean>(new WeakReference<>(null));
    assertThat(node.getValue()).isTrue();
    assertThat(node.isRetired()).isFalse();
    assertThat(node.getValueReference()).isTrue();
    node.retire();
    assertThat(node.isRetired()).isTrue();
    node.die();
    assertThat(node.isDead()).isTrue();
}
// Identity may be assumed when anonymous access is allowed, or for system
// connections when VM connections do not require authentication.
protected boolean isAssumeIdentity(ConnectionReference conn) {
    if (isAnonymousAccessAllowed()) {
        return true;
    }
    return isSystemConnection(conn) && !isVmConnectionAuthenticationRequired();
}
// Asserts that a system (vm://) connection is granted assumed identity. The
// anonymous Connection stub's only meaningful override is getRemoteAddress(),
// which reports the VM transport address "vm://localhost".
@Test
public void testIsAssumeIdentityWithSystemConnection() {
    ConnectionContext ctx = new ConnectionContext();
    Connection connection = new Connection() {
        private final long connectedTimestamp = System.currentTimeMillis();
        @Override public Connector getConnector() { return null; }
        @Override public void dispatchSync(Command message) { }
        @Override public void dispatchAsync(Command command) { }
        @Override public Response service(Command command) { return null; }
        @Override public void serviceException(Throwable error) { }
        @Override public boolean isSlow() { return false; }
        @Override public boolean isBlocked() { return false; }
        @Override public boolean isConnected() { return false; }
        @Override public boolean isActive() { return false; }
        @Override public int getDispatchQueueSize() { return 0; }
        @Override public ConnectionStatistics getStatistics() { return null; }
        @Override public boolean isManageable() { return false; }
        // The only override that matters for this test.
        @Override public String getRemoteAddress() { return "vm://localhost"; }
        @Override public void serviceExceptionAsync(IOException e) { }
        @Override public String getConnectionId() { return null; }
        @Override public boolean isNetworkConnection() { return false; }
        @Override public boolean isFaultTolerantConnection() { return false; }
        @Override public void updateClient(ConnectionControl control) { }
        @Override public void start() throws Exception { }
        @Override public void stop() throws Exception { }
        @Override public int getActiveTransactionCount() { return 0; }
        @Override public Long getOldestActiveTransactionDuration() { return null; }
        @Override public Long getConnectedTimestamp() { return connectedTimestamp; }
    };
    ctx.setConnection(connection);
    SubjectConnectionReference sc = new SubjectConnectionReference(ctx, new ConnectionInfo(),
        new DefaultEnvironment(), new SubjectAdapter());
    assertTrue(policy.isAssumeIdentity(sc));
}
// Starts a new audit span and immediately activates it as the current scope.
public ApolloAuditScope startActiveSpan(OpType type, String name, String description) {
    return activate(startSpan(type, name, description));
}
// startActiveSpan must delegate: one startSpan() call with the same
// arguments, and one activate() of the span that call produced.
@Test
public void testStartActiveSpan() {
    ApolloAuditSpan activeSpan = Mockito.mock(ApolloAuditSpan.class);
    {
        doReturn(activeSpan).when(tracer).startSpan(Mockito.eq(opType), Mockito.eq(opName), Mockito.eq(description));
    }
    tracer.startActiveSpan(opType, opName, description);
    Mockito.verify(tracer, Mockito.times(1))
        .startSpan(Mockito.eq(opType), Mockito.eq(opName), Mockito.eq(description));
    Mockito.verify(manager, times(1))
        .activate(Mockito.eq(activeSpan));
}
// Splits a string on a literal (non-regex) delimiter. Null string or null
// delimiter yields null; an empty delimiter splits into single characters
// via the Java regex path.
@Udf(description = "Splits a string into an array of substrings based on a delimiter.")
public List<String> split(
    @UdfParameter(
        description = "The string to be split. If NULL, then function returns NULL.")
    final String string,
    @UdfParameter(
        description = "The delimiter to split a string by. If NULL, then function returns NULL.")
    final String delimiter) {
    if (string == null || delimiter == null) {
        return null;
    }
    // Java split() accepts regular expressions as a delimiter, but the behavior of this UDF split()
    // is to accept only literal strings. This method uses Guava Splitter instead, which does not
    // accept any regex pattern. This is to avoid a confusion to users when splitting by regex
    // special characters, such as '.' and '|'.
    try {
        // Guava Splitter does not accept empty delimiters. Use the Java split() method instead.
        if (delimiter.isEmpty()) {
            return Arrays.asList(EMPTY_DELIMITER.split(string));
        } else {
            return Splitter.on(delimiter).splitToList(string);
        }
    } catch (final Exception e) {
        throw new KsqlFunctionException(
            String.format("Invalid delimiter '%s' in the split() function.", delimiter), e);
    }
}
// Leading/trailing delimiters must produce empty-string elements at the
// corresponding boundary of the result.
@Test
public void shouldSplitAndAddEmptySpacesIfDelimiterStringIsFoundAtTheBeginningOrEnd() {
    assertThat(splitUdf.split("$A", "$"), contains("", "A"));
    assertThat(splitUdf.split("$A$B", "$"), contains("", "A", "B"));
    assertThat(splitUdf.split("A$", "$"), contains("A", ""));
    assertThat(splitUdf.split("A$B$", "$"), contains("A", "B", ""));
    assertThat(splitUdf.split("$A$B$", "$"), contains("", "A", "B", ""));
}
// Wraps the given byte array in an unpooled heap buffer without copying;
// an empty array maps to the shared EMPTY_BUFFER singleton.
public static ByteBuf wrappedBuffer(byte[] array) {
    if (array.length == 0) {
        return EMPTY_BUFFER;
    }
    return new UnpooledHeapByteBuf(ALLOC, array, array.length);
}
// Composing EMPTY_BUFFER components around a 16-byte buffer must yield a
// composite of capacity 16; the buffer is released in finally.
@Test
public void shouldAllowEmptyBufferToCreateCompositeBuffer() {
    ByteBuf buf = wrappedBuffer(
        EMPTY_BUFFER,
        wrappedBuffer(new byte[16]).order(LITTLE_ENDIAN),
        EMPTY_BUFFER);
    try {
        assertEquals(16, buf.capacity());
    } finally {
        buf.release();
    }
}
// Consumer-rebalance callback: logs the revocation, then hands the revoked
// partitions to the task manager — but only when the thread either
// transitioned to PARTITIONS_REVOKED or is already shutting down, and the
// partition set is non-empty.
@Override
public void onPartitionsRevoked(final Collection<TopicPartition> partitions) {
    log.debug("Current state {}: revoked partitions {} because of consumer rebalance.\n" +
            "\tcurrently assigned active tasks: {}\n" +
            "\tcurrently assigned standby tasks: {}\n",
        streamThread.state(),
        partitions,
        taskManager.activeTaskIds(),
        taskManager.standbyTaskIds());
    // We need to still invoke handleRevocation if the thread has been told to shut down, but we shouldn't ever
    // transition away from PENDING_SHUTDOWN once it's been initiated (to anything other than DEAD)
    if ((streamThread.setState(State.PARTITIONS_REVOKED) != null
            || streamThread.state() == State.PENDING_SHUTDOWN) && !partitions.isEmpty()) {
        final long start = time.milliseconds();
        try {
            taskManager.handleRevocation(partitions);
        } finally {
            log.info("partition revocation took {} ms.", time.milliseconds() - start);
        }
    }
}
// When the state transition to PARTITIONS_REVOKED succeeds, the revoked
// partitions must be forwarded to taskManager.handleRevocation.
@Test
public void shouldHandleRevokedPartitions() {
    final Collection<TopicPartition> partitions = Collections.singletonList(new TopicPartition("topic", 0));
    when(streamThread.setState(State.PARTITIONS_REVOKED)).thenReturn(State.RUNNING);
    streamsRebalanceListener.onPartitionsRevoked(partitions);
    verify(taskManager).handleRevocation(partitions);
}
// Deletes a user: rejects read-only providers, validates the username with
// stringprep nodeprep (throws IllegalArgumentException when invalid), fires
// the user_deleting event, delegates deletion to the provider, and finally
// evicts the user from the cache.
public void deleteUser(final User user) {
    if (provider.isReadOnly()) {
        throw new UnsupportedOperationException("User provider is read-only.");
    }
    final String username = user.getUsername();
    // Make sure that the username is valid.
    try {
        /*username =*/ Stringprep.nodeprep(username);
    } catch (final StringprepException se) {
        throw new IllegalArgumentException("Invalid username: " + username, se);
    }
    // Fire event.
    final Map<String,Object> params = Collections.emptyMap();
    UserEventDispatcher.dispatchEvent(user, UserEventDispatcher.EventType.user_deleting, params);
    provider.deleteUser(user.getUsername());
    // Remove the user from cache.
    userCache.remove(user.getUsername());
}
// A username that fails nodeprep ("!@#ED") must make deleteUser throw
// IllegalArgumentException.
@Test
public void deleteInvalidUserWillGetError() throws Exception{
    User user = new User("!@#ED",null,null,null,null);
    assertThrows(IllegalArgumentException.class, () -> userManager.deleteUser(user));
}
/**
 * Returns the 0-based indices of lines in {@code newText} that differ from the
 * corresponding line of {@code oldText}, including lines appended beyond the
 * end of the old text. Lines removed from the end of the old text are not
 * reported (only the new text's lines are indexed).
 *
 * @param oldText the previous text, may be null
 * @param newText the updated text, may be null
 * @return indices of changed or added lines; empty when the texts are equal
 *         or when either text is null
 */
public static List<Integer> changedLines(String oldText, String newText) {
    // Guarding newText avoids the NPE the previous version threw when the
    // new text was null but the old text was not.
    if (oldText == null || newText == null || oldText.equals(newText)) {
        return Collections.emptyList();
    }
    List<Integer> changed = new ArrayList<>();
    String[] oldLines = oldText.split("\n");
    String[] newLines = newText.split("\n");
    for (int i = 0; i < newLines.length; i++) {
        // A line counts as changed when it was appended (no old counterpart,
        // oldLine == null makes equals() false) or its content differs.
        String oldLine = i < oldLines.length ? oldLines[i] : null;
        if (!newLines[i].equals(oldLine)) {
            changed.add(i);
        }
    }
    return changed;
}
// Covers the main changedLines cases: identical texts, a changed last line,
// two changed lines, and appended lines beyond the old text's end.
@Test
public void testChangedLines() {
    String oldText = "Hello\nWorld\nHow are you";
    String newText = "Hello\nWorld\nHow are you";
    List<Integer> changed = StringHelper.changedLines(oldText, newText);
    assertEquals(0, changed.size());
    oldText = "Hello\nWorld\nHow are you";
    newText = "Hello\nWorld\nHow are you today";
    changed = StringHelper.changedLines(oldText, newText);
    assertEquals(1, changed.size());
    assertEquals(2, changed.get(0).intValue());
    oldText = "Hello\nWorld\nHow are you";
    newText = "Hello\nCamel\nHow are you today";
    changed = StringHelper.changedLines(oldText, newText);
    assertEquals(2, changed.size());
    assertEquals(1, changed.get(0).intValue());
    assertEquals(2, changed.get(1).intValue());
    oldText = "Hello\nWorld\nHow are you";
    newText = "Hello\nWorld\nHow are you today\nand tomorrow";
    changed = StringHelper.changedLines(oldText, newText);
    assertEquals(2, changed.size());
    assertEquals(2, changed.get(0).intValue());
    assertEquals(3, changed.get(1).intValue());
}
// Iterates all open tabs; for each one with unsaved changes, selects the tab
// and prompts the user. YES applies the changes, CANCEL aborts and returns
// false, NO moves on. Returns true when it is safe to proceed.
public boolean promptForSave() throws KettleException {
    List<TabMapEntry> list = delegates.tabs.getTabs();
    for ( TabMapEntry mapEntry : list ) {
        TabItemInterface itemInterface = mapEntry.getObject();
        if ( !itemInterface.canBeClosed() ) {
            // Show the tab
            tabfolder.setSelected( mapEntry.getTabItem() );
            // Unsaved work whose changes need to be applied?
            int reply = itemInterface.showChangedWarning();
            if ( reply == SWT.YES ) {
                itemInterface.applyChanges();
            } else if ( reply == SWT.CANCEL ) {
                return false;
            }
        }
    }
    return true;
}
// A YES reply to the save prompt must let promptForSave() return true.
@Test
public void testCanClosePromptToSave() throws Exception {
    setPromptToSave( SWT.YES, true );
    assertTrue( spoon.promptForSave() );
}
// Shuts down the spilling executor, waiting up to 5 minutes for in-flight
// work, then closes the data file channel. Any failure — including the
// await timeout — is rethrown as an unchecked exception.
public void close() {
    try {
        ioExecutor.shutdown();
        if (!ioExecutor.awaitTermination(5L, TimeUnit.MINUTES)) {
            throw new TimeoutException("Shutdown spilling thread timeout.");
        }
        dataFileChannel.close();
    } catch (Exception e) {
        ExceptionUtils.rethrow(e);
    }
}
// After close(): previously spilled data is intact on disk, and any further
// spillAsync is rejected by the shut-down executor.
@Test
void testClose() throws Exception {
    memoryDataSpiller = createMemoryDataSpiller(dataFilePath);
    List<BufferWithIdentity> bufferWithIdentityList =
        new ArrayList<>(
            createBufferWithIdentityList(
                false, 0, Arrays.asList(Tuple2.of(0, 0), Tuple2.of(1, 1), Tuple2.of(2, 2))));
    memoryDataSpiller.spillAsync(bufferWithIdentityList);
    // blocked until spill finished.
    memoryDataSpiller.close();
    checkData(false, Arrays.asList(Tuple2.of(0, 0), Tuple2.of(1, 1), Tuple2.of(2, 2)));
    assertThatThrownBy(() -> memoryDataSpiller.spillAsync(bufferWithIdentityList))
        .isInstanceOf(RejectedExecutionException.class);
}
// Validates a table name against the configured length bounds and the
// illegal-character pattern (which rejects at least periods and slashes).
static void checkValidTableName(String nameToCheck) {
    if (nameToCheck.length() < MIN_TABLE_ID_LENGTH) {
        throw new IllegalArgumentException("Table name cannot be empty. ");
    }
    if (nameToCheck.length() > MAX_TABLE_ID_LENGTH) {
        throw new IllegalArgumentException(
            "Table name " + nameToCheck + " cannot be longer than " + MAX_TABLE_ID_LENGTH + " characters.");
    }
    if (ILLEGAL_TABLE_CHARS.matcher(nameToCheck).find()) {
        throw new IllegalArgumentException(
            "Table name " + nameToCheck + " is not a valid name. Periods and forward slashes are not allowed.");
    }
}
// A table name containing a period must be rejected.
@Test
public void testCheckValidTableNameThrowsErrorWhenContainsPeriod() {
    assertThrows(IllegalArgumentException.class, () -> checkValidTableName("table.name"));
}
// Sorts the first validIndex entries of the encoded set: each value is mapped
// into a sort-friendly representation, sorted numerically, then mapped back.
// The result order therefore follows the sort representation, not the raw
// encoded values.
int[] sortEncodedSet(int[] encodedSet, int validIndex) {
    int[] result = new int[validIndex];
    for (int i = 0; i < validIndex; ++i) {
        result[i] = transformToSortRepresentation(encodedSet[i]);
    }
    Arrays.sort(result);
    for (int i = 0; i < validIndex; ++i) {
        result[i] = transformFromSortRepresentation(result[i]);
    }
    return result;
}
// The expected order (655403, 655425, 655416) reflects the encoded sort
// representation's ordering, not plain numeric order of the raw values.
@Test
public void testSortEncodedSet() {
    int[] testSet = new int[3];
    testSet[0] = 655403;
    testSet[1] = 655416;
    testSet[2] = 655425;
    HyperLogLogPlus hyperLogLogPlus = new HyperLogLogPlus(14, 25);
    testSet = hyperLogLogPlus.sortEncodedSet(testSet, 3);
    assertEquals(655403, testSet[0]);
    assertEquals(655425, testSet[1]);
    assertEquals(655416, testSet[2]);
}
// Queues (does not execute) an insert of a config-history row: blank
// app name/tenant/encrypted key default to empty strings, the content MD5 is
// computed here, and the SQL plus arguments are pushed onto the embedded
// storage context for later atomic execution.
@Override
public void insertConfigHistoryAtomic(long configHistoryId, ConfigInfo configInfo, String srcIp,
        String srcUser, final Timestamp time, String ops) {
    String appNameTmp = StringUtils.defaultEmptyIfBlank(configInfo.getAppName());
    String tenantTmp = StringUtils.defaultEmptyIfBlank(configInfo.getTenant());
    final String md5Tmp = MD5Utils.md5Hex(configInfo.getContent(), Constants.ENCODE);
    String encryptedDataKey = StringUtils.defaultEmptyIfBlank(configInfo.getEncryptedDataKey());
    HistoryConfigInfoMapper historyConfigInfoMapper = mapperManager.findMapper(
        dataSourceService.getDataSourceType(), TableConstant.HIS_CONFIG_INFO);
    final String sql = historyConfigInfoMapper.insert(
        Arrays.asList("id", "data_id", "group_id", "tenant_id", "app_name", "content", "md5", "src_ip", "src_user",
            "gmt_modified", "op_type", "encrypted_data_key"));
    final Object[] args = new Object[] {configHistoryId, configInfo.getDataId(), configInfo.getGroup(), tenantTmp,
        appNameTmp, configInfo.getContent(), md5Tmp, srcIp, srcUser, time, ops, encryptedDataKey};
    EmbeddedStorageContextHolder.addSqlContext(sql, args);
}
// Verifies that insertConfigHistoryAtomic pushes exactly one SQL context with
// all column arguments in order, including the computed MD5 and the
// encrypted data key.
@Test
void testInsertConfigHistoryAtomic() {
    String dataId = "dateId243";
    String group = "group243";
    String tenant = "tenant243";
    String content = "content243";
    String appName = "appName243";
    long id = 123456787765432L;
    String srcUser = "user12345";
    String srcIp = "ip1234";
    String ops = "D";
    Timestamp timestamp = new Timestamp(System.currentTimeMillis());
    ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
    configInfo.setEncryptedDataKey("key23456");
    //expect insert success,verify insert invoked
    embeddedHistoryConfigInfoPersistService.insertConfigHistoryAtomic(id, configInfo, srcIp, srcUser, timestamp, ops);
    //verify insert to be invoked
    embeddedStorageContextHolderMockedStatic.verify(
        () -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(id), eq(dataId), eq(group), eq(tenant),
            eq(appName), eq(content), eq(configInfo.getMd5()), eq(srcIp), eq(srcUser), eq(timestamp), eq(ops),
            eq(configInfo.getEncryptedDataKey())), times(1));
}
// Prints the run summary: failing scenarios first, then either the literal
// zero counts (when no steps ran) or the scenario/step tallies, and finally
// the total duration.
void printStats(PrintStream out) {
    printNonZeroResultScenarios(out);
    if (stepSubCounts.getTotal() == 0) {
        out.println("0 Scenarios");
        out.println("0 Steps");
    } else {
        printScenarioCounts(out);
        printStepCounts(out);
    }
    printDuration(out);
}
// With nothing executed the summary must start with the literal zero counts.
@Test
void should_print_zero_scenarios_zero_steps_if_nothing_has_executed() {
    Stats counter = createMonochromeSummaryCounter();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    counter.printStats(new PrintStream(baos));
    assertThat(baos.toString(), startsWith(String.format(
        "0 Scenarios%n" + "0 Steps%n")));
}
// Decodes and validates a SAML LogoutRequest from the HTTP request, resolves
// the SAML session by NameID (or, failing that, the first session index),
// checks the issuer against the session's connection entity id, verifies the
// signature, and logs the session out. Decoder failures are wrapped in
// SamlParseException.
public LogoutRequestModel parseLogoutRequest(HttpServletRequest request)
        throws SamlValidationException, SamlParseException, SamlSessionException, DienstencatalogusException {
    final LogoutRequestModel logoutRequestModel = new LogoutRequestModel();
    try {
        final BaseHttpServletRequestXMLMessageDecoder decoder = decodeRequest(request);
        var logoutRequest = (LogoutRequest) decoder.getMessageContext().getMessage();
        final SAMLBindingContext bindingContext =
            decoder.getMessageContext().getSubcontext(SAMLBindingContext.class);
        logoutRequestModel.setLogoutRequest(logoutRequest);
        logoutRequestModel.setRequest(request);
        validateRequest(logoutRequestModel);
        // Prefer the NameID; fall back to the first session index.
        var id = logoutRequest.getNameID() != null ? logoutRequest.getNameID().getValue()
            : logoutRequest.getSessionIndexes().get(0).getValue();
        var samlSession = samlSessionRepository.findById(id)
            .orElseThrow(() -> new SamlSessionException("LogoutRequest no saml session found for nameID: " + id));
        logoutRequestModel.setConnectionEntityId(samlSession.getConnectionEntityId());
        logoutRequestModel.setServiceEntityId(samlSession.getServiceEntityId());
        logoutRequestModel.setServiceUuid(samlSession.getServiceUuid());
        logoutRequestModel.setRelayState(bindingContext.getRelayState());
        logoutRequestModel.setEntranceSession(samlSession.getProtocolType().equals(ProtocolType.SAML_COMBICONNECT));
        dcMetadataService.resolveDcMetadata(logoutRequestModel);
        // The request issuer must match the session's registered connection.
        if (!logoutRequestModel.getConnectionEntityId().equals(logoutRequestModel.getLogoutRequest().getIssuer().getValue())) {
            throw new SamlValidationException("Issuer not equal to connectorEntityId");
        }
        verifySignature(logoutRequestModel, logoutRequestModel.getLogoutRequest().getSignature());
        logout(samlSession);
        if (logger.isDebugEnabled())
            OpenSAMLUtils.logSAMLObject((LogoutRequest) decoder.getMessageContext().getMessage());
    } catch (MessageDecodingException e) {
        throw new SamlParseException("Authentication deflate decode exception", e);
    } catch (ComponentInitializationException e) {
        throw new SamlParseException("Authentication deflate initialization exception", e);
    }
    return logoutRequestModel;
}
// Feeds a garbage SAMLRequest parameter and expects validation to fail with
// "LogoutRequest validation error". The payload below is intentionally junk.
@Test public void parseLogoutRequestNoSignature() { httpRequestMock.setParameter("SAMLRequest", "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"); Exception exception = assertThrows(SamlValidationException.class, () -> logoutService.parseLogoutRequest(httpRequestMock)); assertEquals("LogoutRequest validation error", exception.getMessage()); }
// Removes and returns the first element of the set; an empty set yields null
// rather than throwing.
public T pollFirst() {
    if (head == null) {
        return null;
    }
    final T element = head.element;
    // Reuse remove() so the removal bookkeeping stays in one place.
    this.remove(element);
    return element;
}
// Polling a single-element set returns that element, and a second poll on the
// now-empty set returns null.
@Test
public void testPollOneElement() {
    LOG.info("Test poll one element");
    set.add(list.get(0));
    assertEquals(list.get(0), set.pollFirst());
    assertNull(set.pollFirst());
    LOG.info("Test poll one element - DONE");
}
// Derives a QueryId for a statement. Precedence: explicit WITH query id
// (upper-cased, validated) > source-table CST prefix > transient query with a
// random suffix > INSERT prefix for non-create-into sinks > reuse/reject of
// an existing sink query depending on createOrReplaceEnabled > generated
// CTAS/CSAS id for new tables/streams.
static QueryId buildId(
    final Statement statement,
    final EngineContext engineContext,
    final QueryIdGenerator idGenerator,
    final OutputNode outputNode,
    final boolean createOrReplaceEnabled,
    final Optional<String> withQueryId) {
  if (withQueryId.isPresent()) {
    final String queryId = withQueryId.get().toUpperCase();
    validateWithQueryId(queryId);
    return new QueryId(queryId);
  }
  if (statement instanceof CreateTable && ((CreateTable) statement).isSource()) {
    // Use the CST name as part of the QueryID
    final String suffix = ((CreateTable) statement).getName().text().toUpperCase()
        + "_" + idGenerator.getNext().toUpperCase();
    return new QueryId(ReservedQueryIdsPrefixes.CST + suffix);
  }
  if (!outputNode.getSinkName().isPresent()) {
    // No sink => transient query; the random suffix keeps ids unique.
    final String prefix =
        "transient_" + outputNode.getSource().getLeftmostSourceNode().getAlias().text() + "_";
    return new QueryId(prefix + Math.abs(ThreadLocalRandom.current().nextLong()));
  }
  final KsqlStructuredDataOutputNode structured = (KsqlStructuredDataOutputNode) outputNode;
  if (!structured.createInto()) {
    return new QueryId(ReservedQueryIdsPrefixes.INSERT + idGenerator.getNext());
  }
  final SourceName sink = outputNode.getSinkName().get();
  final Set<QueryId> queriesForSink = engineContext.getQueryRegistry().getQueriesWithSink(sink);
  if (queriesForSink.size() > 1) {
    throw new KsqlException("REPLACE for sink " + sink + " is not supported because there are "
        + "multiple queries writing into it: " + queriesForSink);
  } else if (!queriesForSink.isEmpty()) {
    if (!createOrReplaceEnabled) {
      final String type = outputNode.getNodeOutputType().getKsqlType().toLowerCase();
      throw new UnsupportedOperationException(
          String.format(
              "Cannot add %s '%s': A %s with the same name already exists",
              type, sink.text(), type));
    }
    // CREATE OR REPLACE: reuse the single existing query id for the sink.
    return Iterables.getOnlyElement(queriesForSink);
  }
  final String suffix = outputNode.getId().toString().toUpperCase()
      + "_" + idGenerator.getNext().toUpperCase();
  return new QueryId(
      outputNode.getNodeOutputType() == DataSourceType.KTABLE
          ? ReservedQueryIdsPrefixes.CTAS + suffix
          : ReservedQueryIdsPrefixes.CSAS + suffix
  );
}
// An explicit WITH query id must be upper-cased in the resulting QueryId.
@Test
public void shouldReturnWithQueryIdInUppercase(){
    // When:
    final QueryId queryId = QueryIdUtil.buildId(statement, engineContext, idGenerator, plan, false,
        Optional.of("my_query_id"));
    // Then:
    assertThat(queryId, is(new QueryId("MY_QUERY_ID")));
}
// Validates the requested parent department: rejects self-parenting, missing
// parents, and cycles (the parent being a descendant of this department).
@VisibleForTesting
void validateParentDept(Long id, Long parentId) {
    if (parentId == null || DeptDO.PARENT_ID_ROOT.equals(parentId)) {
        return;
    }
    // 1. A department cannot be its own parent.
    if (Objects.equals(id, parentId)) {
        throw exception(DEPT_PARENT_ERROR);
    }
    // 2. The parent department must exist.
    DeptDO parentDept = deptMapper.selectById(parentId);
    if (parentDept == null) {
        throw exception(DEPT_PARENT_NOT_EXITS);
    }
    // 3. Walk up the ancestor chain; if the chosen parent turns out to be a
    // descendant of this department, fail to avoid creating a cycle.
    if (id == null) { // A null id means this is a create: no cycle is possible.
        return;
    }
    for (int i = 0; i < Short.MAX_VALUE; i++) {
        // 3.1 Cycle check.
        parentId = parentDept.getParentId();
        if (Objects.equals(id, parentId)) {
            throw exception(DEPT_PARENT_IS_CHILD);
        }
        // 3.2 Continue with the next ancestor.
        if (parentId == null || DeptDO.PARENT_ID_ROOT.equals(parentId)) {
            break;
        }
        parentDept = deptMapper.selectById(parentId);
        if (parentDept == null) {
            break;
        }
    }
}
// Using the same id as both department and parent must fail with DEPT_PARENT_ERROR.
@Test
public void testValidateParentDept_parentError() {
    // Prepare parameters.
    Long id = randomLongId();
    // Invoke and assert the expected service exception.
    assertServiceException(() -> deptService.validateParentDept(id, id), DEPT_PARENT_ERROR);
}
// Factory for a UWildcard: the kind must be one of the supported wildcard
// kinds, and the bound must be null exactly when the kind is unbounded.
static UWildcard create(Kind kind, @Nullable UTree<?> bound) {
    checkArgument(BOUND_KINDS.containsKey(kind));
    // verify bound is null iff kind is UNBOUNDED_WILDCARD
    checkArgument((bound == null) == (kind == Kind.UNBOUNDED_WILDCARD));
    return new AutoValue_UWildcard(kind, bound);
}
// Wildcards with different kinds or bounds must not be equal to each other.
@Test
public void equality() {
    UExpression objectIdent = UClassIdent.create("java.lang.Object");
    UExpression setIdent = UTypeApply.create("java.util.Set", objectIdent);
    new EqualsTester()
        .addEqualityGroup(UWildcard.create(Kind.UNBOUNDED_WILDCARD, null))
        .addEqualityGroup(UWildcard.create(Kind.EXTENDS_WILDCARD, objectIdent))
        .addEqualityGroup(UWildcard.create(Kind.EXTENDS_WILDCARD, setIdent)) // ? extends Set<Object>
        .addEqualityGroup(UWildcard.create(Kind.SUPER_WILDCARD, setIdent)) // ? super Set<Object>
        .testEquals();
}
// Starts a cogroup: wraps this grouped stream in a CogroupedKStream and
// registers the (non-null) aggregator for it.
@Override
public <VOut> CogroupedKStream<K, VOut> cogroup(final Aggregator<? super K, ? super V, VOut> aggregator) {
    Objects.requireNonNull(aggregator, "aggregator can't be null");
    return new CogroupedKStreamImpl<K, VOut>(name, subTopologySourceNodes, graphNode, builder)
        .cogroup(this, aggregator);
}
// A null aggregator must be rejected with NullPointerException.
@Test
public void shouldNotHaveNullAggregatorOnCogroup() {
    assertThrows(NullPointerException.class, () -> groupedStream.cogroup(null));
}
// Converts an s3a:// URL to its s3:// equivalent by rewriting the scheme.
// Only a leading, case-insensitive "s3a://" prefix is rewritten; any later
// occurrence inside the path is left untouched.
public static String s3aToS3(String s3aUrl) {
    final String prefix = "s3a://";
    if (s3aUrl.regionMatches(true, 0, prefix, 0, prefix.length())) {
        return "s3://" + s3aUrl.substring(prefix.length());
    }
    return s3aUrl;
}
// Covers case-insensitive scheme rewriting at the start of the URL and
// confirms that an embedded "s3a://" later in the path is left untouched.
@Test
void testS3aToS3_AWS() {
    // Test cases for AWS S3 URLs
    assertEquals("s3://my-bucket/path/to/object", S3Utils.s3aToS3("s3a://my-bucket/path/to/object"));
    assertEquals("s3://my-bucket", S3Utils.s3aToS3("s3a://my-bucket"));
    assertEquals("s3://MY-BUCKET/PATH/TO/OBJECT", S3Utils.s3aToS3("s3a://MY-BUCKET/PATH/TO/OBJECT"));
    assertEquals("s3://my-bucket/path/to/object", S3Utils.s3aToS3("S3a://my-bucket/path/to/object"));
    assertEquals("s3://my-bucket/path/to/object", S3Utils.s3aToS3("s3A://my-bucket/path/to/object"));
    assertEquals("s3://my-bucket/path/to/object", S3Utils.s3aToS3("S3A://my-bucket/path/to/object"));
    assertEquals("s3://my-bucket/s3a://another-bucket/another/path",
        S3Utils.s3aToS3("s3a://my-bucket/s3a://another-bucket/another/path"));
}
// Purges log entries: first checks that this flow's namespace is authorized
// for the target namespace (or all namespaces when none is given), then
// delegates deletion to LogService.purge with the rendered filters. endDate
// is required; startDate is optional.
@Override
public Output run(RunContext runContext) throws Exception {
    LogService logService = ((DefaultRunContext)runContext).getApplicationContext().getBean(LogService.class);
    FlowService flowService = ((DefaultRunContext)runContext).getApplicationContext().getBean(FlowService.class);
    // validate that this namespace is authorized on the target namespace / all namespaces
    var flowInfo = runContext.flowInfo();
    if (namespace == null){
        flowService.checkAllowedAllNamespaces(flowInfo.tenantId(), flowInfo.tenantId(), flowInfo.namespace());
    } else if (!runContext.render(namespace).equals(flowInfo.namespace())) {
        flowService.checkAllowedNamespace(flowInfo.tenantId(), runContext.render(namespace), flowInfo.tenantId(), flowInfo.namespace());
    }
    int deleted = logService.purge(
        flowInfo.tenantId(),
        runContext.render(namespace),
        runContext.render(flowId),
        logLevels,
        startDate != null ? ZonedDateTime.parse(runContext.render(startDate)) : null,
        ZonedDateTime.parse(runContext.render(endDate))
    );
    return Output.builder().count(deleted).build();
}
// Saves one log entry, runs PurgeLogs with only an endDate in the future,
// and asserts exactly that one entry was purged.
@Test void run() throws Exception { // create an execution to delete var logEntry = LogEntry.builder() .namespace("namespace") .flowId("flowId") .timestamp(Instant.now()) .level(Level.INFO) .message("Hello World") .build(); logRepository.save(logEntry); var purge = PurgeLogs.builder() .endDate(ZonedDateTime.now().plusMinutes(1).format(DateTimeFormatter.ISO_ZONED_DATE_TIME)) .build(); var runContext = runContextFactory.of(Map.of("flow", Map.of("namespace", "namespace", "id", "flowId"))); var output = purge.run(runContext); assertThat(output.getCount(), is(1)); }
/**
 * Indicates whether a service-error list is defined at the resource level or on
 * any of this resource's method descriptors.
 *
 * @return true if resource-level service errors are set, or any method
 *         descriptor reports a non-null service-error list
 */
public boolean isAnyServiceErrorListDefined() {
    // Resource-level list wins immediately.
    if (_serviceErrors != null) {
        return true;
    }
    // Otherwise scan each method descriptor for a method-level list.
    for (ResourceMethodDescriptor descriptor : _resourceMethodDescriptors) {
        if (descriptor.getServiceErrors() != null) {
            return true;
        }
    }
    return false;
}
// Data-provider driven: builds a dummy ResourceModel with the given resource-level
// service errors and method descriptors, then checks isAnyServiceErrorListDefined().
@Test(dataProvider = "isAnyServiceErrorListDefinedData") public void testIsAnyServiceErrorListDefined(ServiceError[] resourceLevelServiceErrors, ResourceMethodDescriptor[] resourceMethodDescriptors, boolean expected) { // Create dummy resource model final ResourceModel resourceModel = new ResourceModel(EmptyRecord.class, SampleResources.CollectionCollectionResource.class, null, "collectionCollection", ResourceType.COLLECTION, "com.linkedin.restli.internal.server.model", "collectionCollection"); // Add resource-level service errors if (resourceLevelServiceErrors == null) { resourceModel.setServiceErrors(null); } else { resourceModel.setServiceErrors(Arrays.asList(resourceLevelServiceErrors)); } // Add mock resource method descriptors for (ResourceMethodDescriptor resourceMethodDescriptor : resourceMethodDescriptors) { resourceModel.addResourceMethodDescriptor(resourceMethodDescriptor); } Assert.assertEquals(expected, resourceModel.isAnyServiceErrorListDefined(), "Cannot correctly compute whether resource model defines resource-level or method-level service errors."); }
/**
 * Parses a boolean from a string, falling back to a default when the input is null.
 *
 * <p>Non-null input is delegated to {@link Boolean#parseBoolean(String)}: only a
 * case-insensitive "true" yields {@code true}; any other non-null string
 * (including garbage like "xxx") yields {@code false} — the default is NOT used
 * for unparseable values, only for null.
 *
 * @param bool         the string to parse; may be null
 * @param defaultValue the value returned when {@code bool} is null
 * @return the parsed boolean, or {@code defaultValue} if {@code bool} is null
 */
public static boolean parseBoolean(String bool, boolean defaultValue) {
    // Renamed from the misleading "defaultInt": this default is a boolean, not an int.
    if (bool == null) {
        return defaultValue;
    }
    return Boolean.parseBoolean(bool);
}
// Null input returns the default; any non-null input is parsed by Boolean.parseBoolean,
// so unparseable strings like "xxx" yield false regardless of the default.
@Test public void parseBoolean() { Assert.assertTrue(CommonUtils.parseBoolean(null, true)); Assert.assertTrue(CommonUtils.parseBoolean("true", true)); Assert.assertFalse(CommonUtils.parseBoolean("falSE", true)); Assert.assertFalse(CommonUtils.parseBoolean("xxx", true)); Assert.assertFalse(CommonUtils.parseBoolean(null, false)); Assert.assertTrue(CommonUtils.parseBoolean("trUe", false)); Assert.assertFalse(CommonUtils.parseBoolean("falSE", false)); Assert.assertFalse(CommonUtils.parseBoolean("xxx", false)); }
/**
 * Sets a JavaBean-style property on {@code target} via reflection, converting
 * the value to the setter's parameter type when the runtime types differ.
 *
 * @param target the object whose property is set; must not be null
 * @param name   the property name used to locate a setter method
 * @param value  the value to assign; may be null
 * @return true when the setter was found and invoked successfully; false when
 *         no setter exists or any reflection/conversion error occurred (the
 *         error is logged, never propagated)
 */
public static boolean setProperty(Object target, String name, Object value) {
    try {
        Class<?> clazz = target.getClass();
        if (target instanceof SSLServerSocket) {
            // overcome illegal access issues with internal implementation class
            clazz = SSLServerSocket.class;
        } else if (target instanceof javax.net.ssl.SSLSocket) {
            // overcome illegal access issues with internal implementation class
            clazz = javax.net.ssl.SSLSocket.class;
        }
        Method setter = findSetterMethod(clazz, name);
        if (setter == null) {
            return false;
        }

        // JDK 11: class or setter might not be publicly accessible
        setter.setAccessible(true);

        // If the type is null or it matches the needed type, just use the
        // value directly
        if (value == null || value.getClass() == setter.getParameterTypes()[0]) {
            setter.invoke(target, value);
        } else {
            // We need to convert it
            setter.invoke(target, convert(value, setter.getParameterTypes()[0]));
        }
        return true;
    } catch (Exception e) {
        // Deliberate best-effort: callers get a boolean, not an exception.
        LOG.error(String.format("Could not set property %s on %s", name, target), e);
        return false;
    }
}
// A Boolean wrapper value must be assignable to a primitive boolean property setter.
@Test public void testSetPropertyPrimitiveWithWrapperValue() { // Wrapper value Boolean value = Boolean.TRUE; DummyClass dummyClass = new DummyClass(false); dummyClass.setTrace(false); // dummy field expects a primitive IntrospectionSupport.setProperty(dummyClass, "trace", value); assertTrue(dummyClass.isTrace()); }
/**
 * Maintainer that periodically fetches node metrics via the given fetcher.
 *
 * @param nodeRepository the node repository this maintainer operates on
 * @param metricsFetcher source of node metrics
 * @param interval       how often maintenance runs
 * @param metric         metric receiver for maintainer bookkeeping
 */
public NodeMetricsDbMaintainer(NodeRepository nodeRepository, MetricsFetcher metricsFetcher, Duration interval, Metric metric) { super(nodeRepository, interval, metric, false); // No locking because this does not modify shared state this.metricsFetcher = metricsFetcher; }
// End-to-end: provisions two hosts, runs the maintainer once, and checks the metrics DB
// received timeseries for both hosts including in-service and out-of-service snapshots.
@Test public void testNodeMetricsDbMaintainer() { NodeResources resources = new NodeResources(1, 10, 100, 1); ProvisioningTester tester = new ProvisioningTester.Builder().build(); tester.clock().setInstant(Instant.ofEpochMilli(1400)); tester.makeReadyNodes(2, resources); tester.advanceTime(Duration.ofMinutes(5)); // Make sure these are not considered new nodes (metrics will not be fetched for them) tester.activateTenantHosts(); tester.deploy(ProvisioningTester.applicationId("test"), Capacity.from(new ClusterResources(2, 1, resources))); OrchestratorMock orchestrator = new OrchestratorMock(); MockHttpClient httpClient = new MockHttpClient(); MetricsV2MetricsFetcher fetcher = new MetricsV2MetricsFetcher(tester.nodeRepository(), orchestrator, httpClient); NodeMetricsDbMaintainer maintainer = new NodeMetricsDbMaintainer(tester.nodeRepository(), fetcher, Duration.ofHours(1), new TestMetric()); assertEquals(maintainer.maintain(), 0.0, 0.0000001); List<NodeTimeseries> timeseriesList = tester.nodeRepository().metricsDb().getNodeTimeseries(Duration.ofDays(1), Set.of("host-1.yahoo.com", "host-2.yahoo.com")); assertEquals(2, timeseriesList.size()); List<NodeMetricSnapshot> allSnapshots = timeseriesList.stream() .flatMap(timeseries -> timeseries.asList().stream()) .toList(); assertTrue(allSnapshots.stream().anyMatch(snapshot -> snapshot.inService())); assertTrue(allSnapshots.stream().anyMatch(snapshot -> ! snapshot.inService())); }
/**
 * Writes the low 16 bits of {@code i} into {@code bytes} at {@code off} in
 * little-endian order (least-significant byte first).
 *
 * @param i     the value whose low 16 bits are encoded
 * @param bytes the destination array; must have room for two bytes at off
 * @param off   the index at which the first (low) byte is written
 * @return the same {@code bytes} array, for call chaining
 */
public static byte[] shortToBytesLE(int i, byte[] bytes, int off) {
    bytes[off] = (byte) (i & 0xFF);
    bytes[off + 1] = (byte) ((i >> 8) & 0xFF);
    return bytes;
}
// -12345 truncated to 16 bits must match the expected little-endian reference bytes.
@Test public void testShortToBytesLE() { assertArrayEquals(SHORT_12345_LE, ByteUtils.shortToBytesLE(-12345, new byte[2] , 0)); }
/**
 * Unsupported: this node vector is read-only, so positional insertion always fails.
 *
 * @param index   ignored
 * @param element ignored
 * @throws ReadOnlyException always
 */
public void add(int index, NODE element) {
    // Original threw an unresolved variable "e"; throw the exception type the
    // contract (and tests) expect instead.
    throw new ReadOnlyException();
}
// add() on a read-only node vector must throw NodeVector.ReadOnlyException.
@Test void require_that_add_throws_exception() { assertThrows(NodeVector.ReadOnlyException.class, () -> new TestNodeVector("foo").add(barNode())); }
/**
 * Stops this component, then reports the number of requests still open:
 * a debug line always, and a warning if any remain in flight.
 */
@Override
protected void doStop() throws Exception {
    super.doStop();

    final int remaining = openRequests.get();
    log.debug("Stopping with {} open requests", remaining);
    if (remaining > 0) {
        log.warn("There are still {} open requests", remaining);
    }
}
// Smoke test: stopping must complete without throwing.
@Test public void doStop() throws Exception { sut.doStop(); }
/**
 * Describes the requested groups by fanning the group ids out to their owning
 * coordinator partitions and combining the per-partition results.
 *
 * <p>When the coordinator is not active, every group gets a
 * COORDINATOR_NOT_AVAILABLE error entry. Null group ids are answered inline
 * with INVALID_GROUP_ID. Per-partition read failures are translated into
 * error entries for that partition's groups rather than failing the whole call.
 *
 * @param context  the request context forwarded to each coordinator read
 * @param groupIds the group ids to describe; may contain null entries
 * @return a future with one DescribedGroup per requested group
 */
@Override
public CompletableFuture<List<DescribeGroupsResponseData.DescribedGroup>> describeGroups(
    RequestContext context,
    List<String> groupIds
) {
    if (!isActive.get()) {
        // Not active: answer every group with a coordinator-unavailable error.
        return CompletableFuture.completedFuture(DescribeGroupsRequest.getErrorDescribedGroupList(
            groupIds,
            Errors.COORDINATOR_NOT_AVAILABLE
        ));
    }

    final List<CompletableFuture<List<DescribeGroupsResponseData.DescribedGroup>>> futures =
        new ArrayList<>(groupIds.size());
    final Map<TopicPartition, List<String>> groupsByTopicPartition = new HashMap<>();
    groupIds.forEach(groupId -> {
        // For backwards compatibility, we support DescribeGroups for the empty group id.
        if (groupId == null) {
            futures.add(CompletableFuture.completedFuture(Collections.singletonList(
                new DescribeGroupsResponseData.DescribedGroup()
                    .setGroupId(null)
                    .setErrorCode(Errors.INVALID_GROUP_ID.code())
            )));
        } else {
            // Batch groups by their owning coordinator partition so each
            // partition is read exactly once.
            final TopicPartition topicPartition = topicPartitionFor(groupId);
            groupsByTopicPartition
                .computeIfAbsent(topicPartition, __ -> new ArrayList<>())
                .add(groupId);
        }
    });

    groupsByTopicPartition.forEach((topicPartition, groupList) -> {
        CompletableFuture<List<DescribeGroupsResponseData.DescribedGroup>> future =
            runtime.scheduleReadOperation(
                "describe-groups",
                topicPartition,
                (coordinator, lastCommittedOffset) -> coordinator.describeGroups(context, groupList, lastCommittedOffset)
            ).exceptionally(exception -> handleOperationException(
                "describe-groups",
                groupList,
                exception,
                // On failure, map every group of this partition to an error entry.
                (error, __) -> DescribeGroupsRequest.getErrorDescribedGroupList(groupList, error)
            ));
        futures.add(future);
    });

    // Flatten the per-partition lists into one combined result list.
    return FutureUtils.combineFutures(futures, ArrayList::new, List::addAll);
}
// Two groups hash to different partitions; the combined future must only complete
// once both partition reads finish, and must merge both described groups in order.
@Test public void testDescribeGroups() throws Exception { CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime(); GroupCoordinatorService service = new GroupCoordinatorService( new LogContext(), createConfig(), runtime, mock(GroupCoordinatorMetrics.class), createConfigManager() ); int partitionCount = 2; service.startup(() -> partitionCount); DescribeGroupsResponseData.DescribedGroup describedGroup1 = new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-id-1"); DescribeGroupsResponseData.DescribedGroup describedGroup2 = new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-id-2"); List<DescribeGroupsResponseData.DescribedGroup> expectedDescribedGroups = Arrays.asList( describedGroup1, describedGroup2 ); when(runtime.scheduleReadOperation( ArgumentMatchers.eq("describe-groups"), ArgumentMatchers.eq(new TopicPartition("__consumer_offsets", 0)), ArgumentMatchers.any() )).thenReturn(CompletableFuture.completedFuture(Collections.singletonList(describedGroup1))); CompletableFuture<Object> describedGroupFuture = new CompletableFuture<>(); when(runtime.scheduleReadOperation( ArgumentMatchers.eq("describe-groups"), ArgumentMatchers.eq(new TopicPartition("__consumer_offsets", 1)), ArgumentMatchers.any() )).thenReturn(describedGroupFuture); CompletableFuture<List<DescribeGroupsResponseData.DescribedGroup>> future = service.describeGroups(requestContext(ApiKeys.DESCRIBE_GROUPS), Arrays.asList("group-id-1", "group-id-2")); assertFalse(future.isDone()); describedGroupFuture.complete(Collections.singletonList(describedGroup2)); assertEquals(expectedDescribedGroups, future.get()); }
/**
 * Native (JNI) call that fills {@code totalAndAvailMem} with macOS memory figures.
 * Based on the accompanying test, index 0 receives total memory and index 1
 * available/free memory, and 0 is returned on success — confirm the exact units
 * and error codes against the native implementation.
 *
 * @param totalAndAvailMem out-parameter array expected to hold at least 2 longs
 * @return 0 on success; non-zero presumably indicates a native-side failure
 */
public static native int getOsxMemoryInfo(long[] totalAndAvailMem);
// macOS-only sanity check: call succeeds, both figures are non-trivial, and free < total.
@Test @EnabledOnOs(OS.MAC) void testOsxMemory() { long[] mem = new long[2]; assertEquals(0, CLibrary.getOsxMemoryInfo(mem)); assertTrue(mem[0] > 1024, "Total: " + mem[0]); assertTrue(mem[1] > 1024, "Free: " + mem[1]); assertTrue(mem[1] < mem[0], "Free (" + mem[1] + ") < Total (" + mem[0] + ")"); }
/**
 * Semantic analysis entry point for CREATE TABLE statements.
 *
 * <p>Normalizes and validates the table name, checks catalog/database existence,
 * handles temporary-table and IF NOT EXISTS semantics, then runs the individual
 * clause analyzers (engine, charset, keys, sort keys, partition, distribution,
 * columns, generated columns, indexes) in order. Each analyzer reports problems
 * via SemanticException.
 *
 * @param statement the CREATE TABLE statement to analyze (mutated in place)
 * @param context   the session context used for name normalization and catalog lookup
 */
public static void analyze(CreateTableStmt statement, ConnectContext context) {
    final TableName tableNameObject = statement.getDbTbl();
    // Fill in missing catalog/database parts from the session context.
    MetaUtils.normalizationTableName(context, tableNameObject);
    final String catalogName = tableNameObject.getCatalog();
    MetaUtils.checkCatalogExistAndReport(catalogName);
    final String tableName = tableNameObject.getTbl();
    FeNameFormat.checkTableName(tableName);
    Database db = MetaUtils.getDatabase(catalogName, tableNameObject.getDb());

    if (statement instanceof CreateTemporaryTableStmt) {
        // Temporary tables have their own existence/visibility rules.
        analyzeTemporaryTable(statement, context, catalogName, db, tableName);
    } else {
        // Regular table: duplicate names fail unless IF NOT EXISTS was specified.
        if (db.getTable(tableName) != null && !statement.isSetIfNotExists()) {
            ErrorReport.reportSemanticException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
        }
    }

    // Clause-by-clause analysis; order matters (e.g. columns are resolved after
    // keys/partition/distribution descriptors are analyzed).
    analyzeEngineName(statement, catalogName);
    analyzeCharsetName(statement);
    preCheckColumnRef(statement);
    analyzeKeysDesc(statement);
    analyzeSortKeys(statement);
    analyzePartitionDesc(statement);
    analyzeDistributionDesc(statement);
    analyzeColumnRef(statement, catalogName);
    if (statement.isHasGeneratedColumn()) {
        analyzeGeneratedColumn(statement, context);
    }
    analyzeIndexDefs(statement);
}
// Expects a SemanticException containing "doesn't exist" — presumably because the
// key/distribution column `id` is not among the declared columns (confirm which
// entity the analyzer reports as missing).
@Test public void testAnalyze() throws Exception { String sql = "CREATE TABLE test_create_table_db.starrocks_test_table\n" + "(\n" + " `tag_id` string,\n" + " `tag_name` string\n" + ") ENGINE = OLAP PRIMARY KEY(`id`)\n" + "DISTRIBUTED BY HASH(`id`)\n" + "ORDER BY(`id`)\n" + "PROPERTIES (\n" + "\"replication_num\" = \"1\",\n" + "\"in_memory\" = \"false\",\n" + "\"enable_persistent_index\" = \"true\",\n" + "\"replicated_storage\" = \"true\",\n" + "\"compression\" = \"LZ4\"\n" + ")\n"; expectedEx.expect(SemanticException.class); expectedEx.expectMessage("doesn't exist"); CreateTableStmt createTableStmt = (CreateTableStmt) com.starrocks.sql.parser.SqlParser .parse(sql, connectContext.getSessionVariable().getSqlMode()).get(0); CreateTableAnalyzer.analyze(createTableStmt, connectContext); }
/**
 * Notifies every registered connection listener that a connection was established.
 * A listener failure is logged and does not prevent the remaining listeners
 * from being notified.
 *
 * @param connection the newly established connection, handed to each listener
 */
protected void notifyConnected(Connection connection) {
    if (connectionEventListeners.isEmpty()) {
        return;
    }
    LoggerUtils.printIfInfoEnabled(LOGGER, "[{}] Notify connected event to listeners.", rpcClientConfig.name());
    connectionEventListeners.forEach(listener -> {
        try {
            listener.onConnected(connection);
        } catch (Throwable throwable) {
            // Isolate listener failures: log and continue with the next listener.
            LoggerUtils.printIfErrorEnabled(LOGGER, "[{}] Notify connect listener error, listener = {}",
                    rpcClientConfig.name(), listener.getClass().getName());
        }
    });
}
// A registered listener must receive onConnected; config name is read for logging.
@Test void testNotifyConnected() { ConnectionEventListener listener = mock(ConnectionEventListener.class); rpcClient.registerConnectionListener(listener); rpcClient.notifyConnected(null); verify(listener).onConnected(null); verify(rpcClientConfig, times(2)).name(); }
/**
 * Sets the monitor password.
 *
 * @param password the password to use
 * @return this builder, for fluent chaining
 */
public MonitorBuilder password(String password) { this.password = password; return getThis(); }
// The password set on the builder must survive into the built monitor config.
@Test void password() { MonitorBuilder builder = MonitorBuilder.newBuilder(); builder.password("password"); Assertions.assertEquals("password", builder.build().getPassword()); }
/**
 * Merges an ACL modification spec into an existing ACL.
 *
 * <p>Existing entries that have a matching key in the spec are replaced by the
 * spec entry; spec entries with no existing counterpart are added. Mask entries
 * are tracked separately per scope so that masks are either taken from the spec
 * ("dirty") or recomputed only for scopes the spec touched. The result is
 * validated and capped at {@code MAX_ENTRIES}.
 *
 * @param existingAcl the current ACL entries
 * @param inAclSpec   the requested modifications
 * @return the merged, validated ACL
 * @throws AclException if the resulting ACL is invalid
 */
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
    List<AclEntry> inAclSpec) throws AclException {
    ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
    ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
    List<AclEntry> foundAclSpecEntries =
        Lists.newArrayListWithCapacity(MAX_ENTRIES);
    // providedMask remembers the latest mask per scope (existing or from spec);
    // maskDirty marks scopes whose mask came from the spec; scopeDirty marks
    // scopes the spec touched at all (so untouched scopes keep their mask).
    EnumMap<AclEntryScope, AclEntry> providedMask =
        Maps.newEnumMap(AclEntryScope.class);
    EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
    EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
    for (AclEntry existingEntry: existingAcl) {
        AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
        if (aclSpecEntry != null) {
            // Spec replaces this existing entry.
            foundAclSpecEntries.add(aclSpecEntry);
            scopeDirty.add(aclSpecEntry.getScope());
            if (aclSpecEntry.getType() == MASK) {
                providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
                maskDirty.add(aclSpecEntry.getScope());
            } else {
                aclBuilder.add(aclSpecEntry);
            }
        } else {
            // No replacement: keep the existing entry (masks are held aside).
            if (existingEntry.getType() == MASK) {
                providedMask.put(existingEntry.getScope(), existingEntry);
            } else {
                aclBuilder.add(existingEntry);
            }
        }
    }
    // ACL spec entries that were not replacements are new additions.
    for (AclEntry newEntry: aclSpec) {
        if (Collections.binarySearch(foundAclSpecEntries, newEntry,
            ACL_ENTRY_COMPARATOR) < 0) {
            scopeDirty.add(newEntry.getScope());
            if (newEntry.getType() == MASK) {
                providedMask.put(newEntry.getScope(), newEntry);
                maskDirty.add(newEntry.getScope());
            } else {
                aclBuilder.add(newEntry);
            }
        }
    }
    copyDefaultsIfNeeded(aclBuilder);
    calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
    return buildAndValidateAcl(aclBuilder);
}
// Modifying only an access-scope entry must recompute the access mask but leave
// the explicitly set default-scope mask untouched.
@Test public void testMergeAclEntriesDefaultMaskPreserved() throws AclException { List<AclEntry> existing = new ImmutableList.Builder<AclEntry>() .add(aclEntry(ACCESS, USER, ALL)) .add(aclEntry(ACCESS, GROUP, READ)) .add(aclEntry(ACCESS, OTHER, READ)) .add(aclEntry(DEFAULT, USER, ALL)) .add(aclEntry(DEFAULT, USER, "diana", ALL)) .add(aclEntry(DEFAULT, GROUP, READ)) .add(aclEntry(DEFAULT, MASK, READ)) .add(aclEntry(DEFAULT, OTHER, NONE)) .build(); List<AclEntry> aclSpec = Lists.newArrayList( aclEntry(ACCESS, USER, "diana", FsAction.READ_EXECUTE)); List<AclEntry> expected = new ImmutableList.Builder<AclEntry>() .add(aclEntry(ACCESS, USER, ALL)) .add(aclEntry(ACCESS, USER, "diana", READ_EXECUTE)) .add(aclEntry(ACCESS, GROUP, READ)) .add(aclEntry(ACCESS, MASK, READ_EXECUTE)) .add(aclEntry(ACCESS, OTHER, READ)) .add(aclEntry(DEFAULT, USER, ALL)) .add(aclEntry(DEFAULT, USER, "diana", ALL)) .add(aclEntry(DEFAULT, GROUP, READ)) .add(aclEntry(DEFAULT, MASK, READ)) .add(aclEntry(DEFAULT, OTHER, NONE)) .build(); assertEquals(expected, mergeAclEntries(existing, aclSpec)); }
/**
 * Produces a key format compatible with the new key schema.
 *
 * <p>When format changes are allowed, the format is first adjusted for the
 * number of key columns and then for type compatibility; otherwise the input
 * format is used as-is. In both cases the wrapping features are sanitized,
 * with single-column keys treated specially.
 *
 * @param keyFormat                                the current key format
 * @param newKeyColumnSqlTypes                     SQL types of the new key columns
 * @param allowKeyFormatChangeToSupportNewKeySchema whether the format itself may change
 * @return the sanitized key format
 */
public static KeyFormat sanitizeKeyFormat(
    final KeyFormat keyFormat,
    final List<SqlType> newKeyColumnSqlTypes,
    final boolean allowKeyFormatChangeToSupportNewKeySchema
) {
    final KeyFormat compatibleFormat;
    if (allowKeyFormatChangeToSupportNewKeySchema) {
        compatibleFormat = sanitizeKeyFormatForTypeCompatibility(
            sanitizeKeyFormatForMultipleColumns(keyFormat, newKeyColumnSqlTypes.size()),
            newKeyColumnSqlTypes
        );
    } else {
        compatibleFormat = keyFormat;
    }

    final boolean singleKeyColumn = newKeyColumnSqlTypes.size() == 1;
    return sanitizeKeyFormatWrapping(compatibleFormat, singleKeyColumn);
}
// With zero key columns, the UNWRAP_SINGLES feature no longer applies and must be stripped.
@Test public void shouldRemoveUnapplicableKeyWrappingWhenSanitizingNoKeyCols() { // Given: final KeyFormat format = KeyFormat.nonWindowed( FormatInfo.of(JsonFormat.NAME), SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES)); // When: final KeyFormat sanitized = SerdeFeaturesFactory.sanitizeKeyFormat(format, Collections.emptyList(), true); // Then: assertThat(sanitized.getFormatInfo(), equalTo(FormatInfo.of(JsonFormat.NAME))); assertThat(sanitized.getFeatures(), equalTo(SerdeFeatures.of())); }
/**
 * Creates a schema instance for the given primitive type.
 *
 * @param type the primitive schema type
 * @return a fresh schema of the matching concrete class
 * @throws AvroRuntimeException if {@code type} is not a primitive type
 */
public static Schema create(Type type) {
    // Dispatch each primitive type to its dedicated schema implementation.
    if (type == Type.STRING) {
        return new StringSchema();
    }
    if (type == Type.BYTES) {
        return new BytesSchema();
    }
    if (type == Type.INT) {
        return new IntSchema();
    }
    if (type == Type.LONG) {
        return new LongSchema();
    }
    if (type == Type.FLOAT) {
        return new FloatSchema();
    }
    if (type == Type.DOUBLE) {
        return new DoubleSchema();
    }
    if (type == Type.BOOLEAN) {
        return new BooleanSchema();
    }
    if (type == Type.NULL) {
        return new NullSchema();
    }
    throw new AvroRuntimeException("Can't create a: " + type);
}
// A double default value on a LONG field must be rejected with AvroTypeException.
@Test void doubleAsLongDefaultValue() { assertThrows(AvroTypeException.class, () -> { new Schema.Field("myField", Schema.create(Schema.Type.LONG), "doc", 1.0); }); }
/**
 * After the client sorts the menu, rebuilds the option-name index and applies
 * the configured entry swaps; optionally strips menus for dead NPCs.
 */
@Subscribe
public void onPostMenuSort(PostMenuSort postMenuSort) {
    // The menu is not rebuilt when it is open, so don't swap or else it will
    // repeatedly swap entries
    if (client.isMenuOpen()) {
        return;
    }

    MenuEntry[] menuEntries = client.getMenuEntries();

    // Build option map for quick lookup in findIndex
    int idx = 0;
    optionIndexes.clear();
    for (MenuEntry entry : menuEntries) {
        // Keys are lower-cased, tag-stripped option text.
        String option = Text.removeTags(entry.getOption()).toLowerCase();
        optionIndexes.put(option, idx++);
    }

    // Perform swaps
    idx = 0;
    for (MenuEntry entry : menuEntries) {
        swapMenuEntry(null, menuEntries, idx++, entry);
    }

    if (config.removeDeadNpcMenus()) {
        removeDeadNpcs();
    }
}
// With shift held and DEPOSIT_ALL configured, Deposit-All must be swapped ahead of Deposit-1.
@Test public void testShiftDeposit() { when(config.bankDepositShiftClick()).thenReturn(ShiftDepositMode.DEPOSIT_ALL); when(client.isKeyPressed(KeyCode.KC_SHIFT)).thenReturn(true); entries = new MenuEntry[]{ menu("Cancel", "", MenuAction.CANCEL), menu("Wield", "Rune arrow", MenuAction.CC_OP_LOW_PRIORITY, 9), menu("Deposit-All", "Rune arrow", MenuAction.CC_OP_LOW_PRIORITY, 8), menu("Deposit-1", "Rune arrow", MenuAction.CC_OP, 2), }; menuEntrySwapperPlugin.onPostMenuSort(new PostMenuSort()); ArgumentCaptor<MenuEntry[]> argumentCaptor = ArgumentCaptor.forClass(MenuEntry[].class); verify(client).setMenuEntries(argumentCaptor.capture()); assertArrayEquals(new MenuEntry[]{ menu("Cancel", "", MenuAction.CANCEL), menu("Wield", "Rune arrow", MenuAction.CC_OP_LOW_PRIORITY, 9), menu("Deposit-1", "Rune arrow", MenuAction.CC_OP, 2), menu("Deposit-All", "Rune arrow", MenuAction.CC_OP, 8), }, argumentCaptor.getValue()); }
/**
 * Loads a product SPU by primary key. Delegates to the mapper's selectById,
 * which returns null when no row matches (MyBatis-Plus semantics — confirm).
 *
 * @param id the SPU primary key
 * @return the matching SPU, or null if absent
 */
@Override public ProductSpuDO getSpu(Long id) { return productSpuMapper.selectById(id); }
// Round-trip: an inserted SPU row must be returned unchanged by getSpu.
@Test void getSpu() { // 准备参数 (prepare a random SPU with fields constrained to valid ranges) ProductSpuDO createReqVO = randomPojo(ProductSpuDO.class,o->{ o.setCategoryId(generateId()); o.setBrandId(generateId()); o.setDeliveryTemplateId(generateId()); o.setSort(RandomUtil.randomInt(1,100)); // 限制排序范围 o.setGiveIntegral(generaInt()); // 限制范围为正整数 o.setVirtualSalesCount(generaInt()); // 限制范围为正整数 o.setPrice(generaInt()); // 限制范围为正整数 o.setMarketPrice(generaInt()); // 限制范围为正整数 o.setCostPrice(generaInt()); // 限制范围为正整数 o.setStock(generaInt()); // 限制范围为正整数 o.setGiveIntegral(generaInt()); // 限制范围为正整数 o.setSalesCount(generaInt()); // 限制范围为正整数 o.setBrowseCount(generaInt()); // 限制范围为正整数 }); productSpuMapper.insert(createReqVO); ProductSpuDO spu = productSpuService.getSpu(createReqVO.getId()); assertPojoEquals(createReqVO, spu); }
@Override public boolean isSatisfied(int index, TradingRecord tradingRecord) { boolean satisfied = false; // No trading history or no position opened, no gain if (tradingRecord != null) { Position currentPosition = tradingRecord.getCurrentPosition(); if (currentPosition.isOpened()) { Num entryPrice = currentPosition.getEntry().getNetPrice(); Num currentPrice = referencePrice.getValue(index); Num gainThreshold = stopGainThreshold.getValue(index); if (currentPosition.getEntry().isBuy()) { satisfied = currentPrice.isGreaterThanOrEqual(entryPrice.plus(gainThreshold)); } else { satisfied = currentPrice.isLessThanOrEqual(entryPrice.minus(gainThreshold)); } } } return satisfied; }
// With an open long position and prices below entry + 2*ATR, the rule must not fire.
@Test public void testStopGainNotTriggered() { TradingRecord tradingRecord = new BaseTradingRecord(); tradingRecord.enter(0, series.getBar(0).getClosePrice(), series.numOf(1)); AverageTrueRangeStopGainRule rule = new AverageTrueRangeStopGainRule(series, 3, 2.0); assertFalse(rule.isSatisfied(1, tradingRecord)); assertFalse(rule.isSatisfied(2, tradingRecord)); assertFalse(rule.isSatisfied(3, tradingRecord)); }
/**
 * Dispatches an rmadmin CLI invocation: validates argument counts (with special
 * rules for YARN Federation mode), routes the command to its handler, maps
 * server-side and argument errors to exit codes, and stops the local node
 * labels manager before returning.
 *
 * @param args the raw command-line arguments; args[0] is the command
 * @return 0 on success, -1 on usage or execution failure
 */
@Override
public int run(String[] args) throws Exception {
    YarnConfiguration yarnConf = getConf() == null ? new YarnConfiguration() : new YarnConfiguration(getConf());
    boolean isHAEnabled = yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED,
        YarnConfiguration.DEFAULT_RM_HA_ENABLED);

    if (args.length < 1) {
      printUsage("", isHAEnabled);
      return -1;
    }

    int exitCode = -1;
    int i = 0;
    String cmd = args[i++];

    exitCode = 0;
    if ("-help".equals(cmd)) {
      if (i < args.length) {
        printUsage(args[i], isHAEnabled);
      } else {
        printHelp("", isHAEnabled);
      }
      return exitCode;
    }

    // HA-only commands are rejected outright when HA is disabled.
    if (USAGE.containsKey(cmd)) {
      if (isHAEnabled) {
        return super.run(args);
      }
      System.out.println("Cannot run " + cmd + " when ResourceManager HA is not enabled");
      return -1;
    }

    //
    // verify that we have enough command line parameters
    //
    String subClusterId = StringUtils.EMPTY;
    if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd)
        || "-refreshNodesResources".equals(cmd)
        || "-refreshServiceAcl".equals(cmd)
        || "-refreshUserToGroupsMappings".equals(cmd)
        || "-refreshSuperUserGroupsConfiguration".equals(cmd)
        || "-refreshClusterMaxPriority".equals(cmd)) {
      subClusterId = parseSubClusterId(args, isHAEnabled);
      // If we enable Federation mode, the number of args may be either one or three.
      // Example: -refreshQueues or -refreshQueues -subClusterId SC-1
      if (isYarnFederationEnabled(getConf()) && args.length != 1 && args.length != 3) {
        printUsage(cmd, isHAEnabled);
        return exitCode;
      } else if (!isYarnFederationEnabled(getConf()) && args.length != 1) {
        // If Federation mode is not enabled, then the number of args can only be one.
        // Example: -refreshQueues
        printUsage(cmd, isHAEnabled);
        return exitCode;
      }
    }

    // If it is federation mode, we will print federation mode information
    if (isYarnFederationEnabled(getConf())) {
      System.out.println("Using YARN Federation mode.");
    }

    try {
      // Route the command to its handler.
      if ("-refreshQueues".equals(cmd)) {
        exitCode = refreshQueues(subClusterId);
      } else if ("-refreshNodes".equals(cmd)) {
        exitCode = handleRefreshNodes(args, cmd, isHAEnabled);
      } else if ("-refreshNodesResources".equals(cmd)) {
        exitCode = refreshNodesResources(subClusterId);
      } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
        exitCode = refreshUserToGroupsMappings(subClusterId);
      } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
        exitCode = refreshSuperUserGroupsConfiguration(subClusterId);
      } else if ("-refreshAdminAcls".equals(cmd)) {
        exitCode = refreshAdminAcls(subClusterId);
      } else if ("-refreshServiceAcl".equals(cmd)) {
        exitCode = refreshServiceAcls(subClusterId);
      } else if ("-refreshClusterMaxPriority".equals(cmd)) {
        exitCode = refreshClusterMaxPriority(subClusterId);
      } else if ("-getGroups".equals(cmd)) {
        String[] usernames = Arrays.copyOfRange(args, i, args.length);
        exitCode = getGroups(usernames);
      } else if ("-updateNodeResource".equals(cmd)) {
        exitCode = handleUpdateNodeResource(args, cmd, isHAEnabled, subClusterId);
      } else if ("-addToClusterNodeLabels".equals(cmd)) {
        exitCode = handleAddToClusterNodeLabels(args, cmd, isHAEnabled);
      } else if ("-removeFromClusterNodeLabels".equals(cmd)) {
        exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled);
      } else if ("-replaceLabelsOnNode".equals(cmd)) {
        exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled);
      } else {
        exitCode = -1;
        System.err.println(cmd.substring(1) + ": Unknown command");
        printUsage("", isHAEnabled);
      }
    } catch (IllegalArgumentException arge) {
      exitCode = -1;
      System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
      printUsage(cmd, isHAEnabled);
    } catch (RemoteException e) {
      //
      // This is an error returned by the hadoop server. Print
      // out the first line of the error message, ignore the stack trace.
      exitCode = -1;
      try {
        String[] content;
        content = e.getLocalizedMessage().split("\n");
        System.err.println(cmd.substring(1) + ": " + content[0]);
      } catch (Exception ex) {
        System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage());
      }
    } catch (Exception e) {
      exitCode = -1;
      System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage());
    }

    if (null != localNodeLabelsManager) {
      localNodeLabelsManager.stop();
    }
    return exitCode;
}
// Happy path: replaces labels on four nodes (with and without explicit ports);
// then a series of malformed invocations that must all return non-zero.
@Test public void testReplaceLabelsOnNode() throws Exception { // Successfully replace labels dummyNodeLabelsManager .addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y", "Y")); String[] args = { "-replaceLabelsOnNode", "node1:8000,x node2:8000=y node3,x node4=Y", "-directlyAccessNodeLabelStore" }; assertEquals(0, rmAdminCLI.run(args)); assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( NodeId.newInstance("node1", 8000))); assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( NodeId.newInstance("node2", 8000))); assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( NodeId.newInstance("node3", 0))); assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( NodeId.newInstance("node4", 0))); // no labels, should fail args = new String[] { "-replaceLabelsOnNode" }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail args = new String[] { "-replaceLabelsOnNode", "-failOnUnknownNodes" }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail args = new String[] { "-replaceLabelsOnNode", "-directlyAccessNodeLabelStore" }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail args = new String[] { "-replaceLabelsOnNode", " " }; assertTrue(0 != rmAdminCLI.run(args)); args = new String[] { "-replaceLabelsOnNode", ", " }; assertTrue(0 != rmAdminCLI.run(args)); }
/**
 * Refreshes the subscriber's locally cached selector data and then notifies it
 * about each selector in the list, in order.
 *
 * @param dataList the selector data to refresh and publish
 */
@Override
protected void doRefresh(final List<SelectorData> dataList) {
    // Replace the cached state first so subscribers see a consistent snapshot.
    pluginDataSubscriber.refreshSelectorDataSelf(dataList);
    for (final SelectorData selectorData : dataList) {
        pluginDataSubscriber.onSelectorSubscribe(selectorData);
    }
}
// doRefresh must first refresh the subscriber's own cache, then notify once per selector.
@Test public void testDoRefresh() { List<SelectorData> selectorDataList = createFakeSelectorDataObjects(3); selectorDataHandler.doRefresh(selectorDataList); verify(subscriber).refreshSelectorDataSelf(selectorDataList); selectorDataList.forEach(verify(subscriber)::onSelectorSubscribe); }
/**
 * Enumerates file source splits for the given paths by resolving each path's
 * file status and delegating to {@code addSplitsForPath} (which recurses into
 * directories). The {@code minDesiredSplits} hint is not used by this
 * non-splitting enumerator.
 *
 * @param paths            the paths to enumerate
 * @param minDesiredSplits ignored by this implementation
 * @return the collected splits
 * @throws IOException if file system access fails
 */
@Override
public Collection<FileSourceSplit> enumerateSplits(Path[] paths, int minDesiredSplits)
        throws IOException {
    final ArrayList<FileSourceSplit> collected = new ArrayList<>();
    for (Path path : paths) {
        final FileSystem fileSystem = path.getFileSystem();
        final FileStatus fileStatus = fileSystem.getFileStatus(path);
        addSplitsForPath(fileStatus, fileSystem, collected);
    }
    return collected;
}
// A non-splitting enumerator must yield one split spanning the whole file,
// carrying the union of all block hosts.
@Test void testFileWithMultipleBlocks() throws Exception { final Path testPath = new Path("testfs:///dir/file"); testFs = TestingFileSystem.createForFileStatus( "testfs", TestingFileSystem.TestFileStatus.forFileWithBlocks( testPath, 1000L, new TestingFileSystem.TestBlockLocation(0L, 100L, "host1", "host2"), new TestingFileSystem.TestBlockLocation( 100L, 520L, "host2", "host3"), new TestingFileSystem.TestBlockLocation( 620L, 380L, "host3", "host4"))); testFs.register(); final NonSplittingRecursiveEnumerator enumerator = createEnumerator(); final Collection<FileSourceSplit> splits = enumerator.enumerateSplits(new Path[] {new Path("testfs:///dir")}, 0); assertSplitsEqual( new FileSourceSplit( "ignoredId", testPath, 0L, 1000L, 0, 1000L, "host1", "host2", "host3", "host4"), splits.iterator().next()); }
/**
 * Creates a fresh cursor over this structure's slots. Callers advance() it
 * before reading slot data (see the accompanying tests).
 */
@Override public HashSlotCursor12byteKey cursor() { return new CursorIntKey2(); }
// After advancing to the inserted slot, the cursor must expose that slot's value address.
@Test public void testCursor_valueAddress() { final SlotAssignmentResult slot = insert(randomKey(), randomKey()); HashSlotCursor12byteKey cursor = hsa.cursor(); cursor.advance(); assertEquals(slot.address(), cursor.valueAddress()); }
/**
 * Runs this statistic on the visible undirected view of the given graph model,
 * delegating to the Graph-based overload.
 */
@Override public void execute(GraphModel graphModel) { Graph graph = graphModel.getUndirectedGraphVisible(); execute(graph); }
// Regression: Modularity must run even when a modularity-class column already exists
// (with a conflicting String type) on the node table.
@Test public void testColumnReplace() { GraphModel graphModel = GraphGenerator.generateNullUndirectedGraph(1); graphModel.getNodeTable().addColumn(Modularity.MODULARITY_CLASS, String.class); Modularity h = new Modularity(); h.execute(graphModel); }
/**
 * Exports calendars and their events in paged steps.
 *
 * <p>With no export information, starts a fresh calendar-list export. A
 * pagination token prefixed with {@code CALENDAR_TOKEN_PREFIX} continues
 * exporting calendars; otherwise the container resource identifies a calendar
 * whose events are exported next (with any non-calendar pagination token
 * carried along).
 *
 * @param jobId             the transfer job id (unused here)
 * @param authData          OAuth credentials for the Google Calendar API
 * @param exportInformation continuation state from a previous call, if any
 * @return the next chunk of calendars or events
 */
@Override
public ExportResult<CalendarContainerResource> export(
    UUID jobId, TokensAndUrlAuthData authData, Optional<ExportInformation> exportInformation) {
  if (!exportInformation.isPresent()) {
    // First call: begin with the calendar list itself.
    return exportCalendars(authData, Optional.empty());
  } else {
    StringPaginationToken paginationToken =
        (StringPaginationToken) exportInformation.get().getPaginationData();
    if (paginationToken != null && paginationToken.getToken().startsWith(CALENDAR_TOKEN_PREFIX)) {
      // Next thing to export is more calendars
      return exportCalendars(authData, Optional.of(paginationToken));
    } else {
      // Next thing to export is events
      IdOnlyContainerResource idOnlyContainerResource =
          (IdOnlyContainerResource) exportInformation.get().getContainerResource();
      Optional<PaginationData> pageData = Optional.ofNullable(paginationToken);
      return getCalendarEvents(authData, idOnlyContainerResource.getId(), pageData);
    }
  }
}
// First page of a multi-page calendar export: pagination token carries the prefix,
// only calendars (no events) are exported, and each calendar becomes a container resource.
@Test public void exportCalendarFirstSet() throws IOException { setUpSingleCalendarResponse(); // Looking at first page, with at least one page after it calendarListResponse.setNextPageToken(NEXT_TOKEN); // Run test ExportResult<CalendarContainerResource> result = googleCalendarExporter.export(JOB_ID, null, Optional.empty()); // Check results // Verify correct methods were called verify(calendarClient).calendarList(); verify(calendarCalendarList).list(); verify(calendarListRequest).execute(); // Check pagination token ContinuationData continuationData = (ContinuationData) result.getContinuationData(); StringPaginationToken paginationToken = (StringPaginationToken) continuationData.getPaginationData(); assertThat(paginationToken.getToken()).isEqualTo(CALENDAR_TOKEN_PREFIX + NEXT_TOKEN); // Check calendars Collection<CalendarModel> actualCalendars = result.getExportedData().getCalendars(); assertThat(actualCalendars.stream().map(CalendarModel::getId).collect(Collectors.toList())) .containsExactly(CALENDAR_ID); // Check events (should be empty, even though there is an event in the calendar) Collection<CalendarEventModel> actualEvents = result.getExportedData().getEvents(); assertThat(actualEvents).isEmpty(); // Should be one container in the resource list List<ContainerResource> actualResources = continuationData.getContainerResources(); assertThat( actualResources .stream() .map(a -> ((IdOnlyContainerResource) a).getId()) .collect(Collectors.toList())) .containsExactly(CALENDAR_ID); }
/**
 * Creates a KStream consuming the single given topic, by delegating to the
 * collection-based overload with a singleton set. Synchronized, matching the
 * builder's other mutating methods.
 *
 * @param topic the topic to consume
 * @return a KStream over the topic's records
 */
public synchronized <K, V> KStream<K, V> stream(final String topic) { return stream(Collections.singleton(topic)); }
// Subscribing twice to the same topic pattern must not make the topology unbuildable.
@Test public void shouldAllowSubscribingToSamePattern() { builder.stream(Pattern.compile("some-regex")); builder.stream(Pattern.compile("some-regex")); assertBuildDoesNotThrow(builder); }
/**
 * Validates that the step's function is a known rule-builder action.
 *
 * @param step the rule builder step to validate
 * @return a non-failed result for known actions; a failed result with an
 *         explanatory message otherwise
 */
@Override
public ValidationResult validate(RuleBuilderStep step) {
    final boolean knownAction = actions.containsKey(step.function());
    if (knownAction) {
        return new ValidationResult(false, "");
    }
    return new ValidationResult(true,
            "Function " + step.function() + " not available as action for rule builder.");
}
// Known actions validate cleanly; unknown action names produce a failed result.
@Test void validate() { RuleBuilderStep stepWithValidAction = RuleBuilderStep.builder().function(TEST_ACTION).build(); RuleBuilderStep stepWithInvalidAction = RuleBuilderStep.builder().function("invalidAction").build(); assertThat(classUnderTest.validate(stepWithValidAction).failed()).isFalse(); assertThat(classUnderTest.validate(stepWithInvalidAction).failed()).isTrue(); }
/**
 * Converts SoapUI-style {@code ${param}} placeholders in a response template into
 * Microcks/mustache-style <code>{{ param }}</code> placeholders.
 * Templates without the {@code ${} marker are returned unchanged.
 */
protected static String convertSoapUITemplate(String responseTemplate) {
    if (!responseTemplate.contains("${")) {
        // Nothing to convert.
        return responseTemplate;
    }
    return SOAPUI_TEMPLATE_PARAMETER_REPLACE_PATTERN.matcher(responseTemplate).replaceAll("{{ $1 }}");
}
// Covers simple placeholders, placeholders with surrounding whitespace, multiple
// placeholders per template, and multi-line templates.
@Test void testConvertSoapUITemplate() { String soapUITemplate = "<something>${myParam}</something>"; String microcksTemplate = SoapController.convertSoapUITemplate(soapUITemplate); assertEquals("<something>{{ myParam }}</something>", microcksTemplate); soapUITemplate = "<bean><something>${ myParam}</something><else>${myOtherParam }</else></bean>"; microcksTemplate = SoapController.convertSoapUITemplate(soapUITemplate); assertEquals("<bean><something>{{ myParam }}</something><else>{{ myOtherParam }}</else></bean>", microcksTemplate); soapUITemplate = "<bean>\n" + " <something>${myParam}</something>\n" + " <else>${myOtherParam}</else>\n" + "</bean>"; String expectedResult = "<bean>\n" + " <something>{{ myParam }}</something>\n" + " <else>{{ myOtherParam }}</else>\n" + "</bean>"; microcksTemplate = SoapController.convertSoapUITemplate(soapUITemplate); assertEquals(expectedResult, microcksTemplate); }
/**
 * Returns a batch source that reads all entries of the named IMap, delegating to
 * {@code batchFromProcessor} with a descriptive source name.
 */
@Nonnull
public static <K, V> BatchSource<Entry<K, V>> map(@Nonnull String mapName) {
    final String sourceName = "mapSource(" + mapName + ')';
    return batchFromProcessor(sourceName, readMapP(mapName));
}
// Reads a named batch map source with an always-true filter and a single-attribute
// projection, and checks the sink receives exactly the projected values.
// NOTE(review): reformatted from a single mangled line in which '// Given/When/Then'
// comments had swallowed the statements following them; tokens are unchanged.
@Test
public void mapWithFilterAndProjection_byName() {
    // Given
    List<Integer> input = sequence(itemCount);
    putToBatchSrcMap(input);
    // When
    BatchSource<Object> source = Sources.map(srcName, truePredicate(), singleAttribute("value"));
    // Then
    p.readFrom(source).writeTo(sink);
    execute();
    assertEquals(toBag(input), sinkToBag());
}
/**
 * Resolves the icon type for a file path: first via the exact MIME-type lookup table,
 * then by MIME-type prefix category, falling back to NOT_KNOWN.
 */
public static int getTypeOfFile(String path, boolean isDirectory) {
    final String mimeType = MimeTypes.getMimeType(path, isDirectory);
    if (mimeType == null) {
        return NOT_KNOWN;
    }
    // Exact MIME-type mapping takes precedence over category matching.
    final Integer mapped = sMimeIconIds.get(mimeType);
    if (mapped != null) {
        return mapped;
    }
    if (checkType(mimeType, "text")) {
        return TEXT;
    }
    if (checkType(mimeType, "image")) {
        return IMAGE;
    }
    if (checkType(mimeType, "video")) {
        return VIDEO;
    }
    if (checkType(mimeType, "audio")) {
        return AUDIO;
    }
    if (checkType(mimeType, "crypt")) {
        return ENCRYPTED;
    }
    return NOT_KNOWN;
}
// Every supported archive extension (including double extensions such as .tar.gz)
// must map to the COMPRESSED icon type.
@Test
public void testReturnArchiveTypes() {
    assertEquals(Icons.COMPRESSED, Icons.getTypeOfFile("archive.zip", false));
    assertEquals(Icons.COMPRESSED, Icons.getTypeOfFile("archive.rar", false));
    assertEquals(Icons.COMPRESSED, Icons.getTypeOfFile("archive.tar", false));
    assertEquals(Icons.COMPRESSED, Icons.getTypeOfFile("archive.tar.gz", false));
    assertEquals(Icons.COMPRESSED, Icons.getTypeOfFile("archive.tar.lzma", false));
    assertEquals(Icons.COMPRESSED, Icons.getTypeOfFile("archive.tar.xz", false));
    assertEquals(Icons.COMPRESSED, Icons.getTypeOfFile("archive.tar.bz2", false));
    assertEquals(Icons.COMPRESSED, Icons.getTypeOfFile("archive.txt.bz2", false));
    // The previous version asserted "archive.txt.gz" twice in a row; the duplicate
    // added no coverage and was removed.
    assertEquals(Icons.COMPRESSED, Icons.getTypeOfFile("archive.txt.gz", false));
}
/**
 * Unregisters the statement's storage units from the current database.
 * Existence is only verified when the statement lacks IF EXISTS; in-use units are
 * always rejected. Persistence failures are wrapped in StorageUnitsOperateException.
 */
@Override
public void executeUpdate(final UnregisterStorageUnitStatement sqlStatement, final ContextManager contextManager) {
    final boolean skipExistenceCheck = sqlStatement.isIfExists();
    if (!skipExistenceCheck) {
        checkExisted(sqlStatement.getStorageUnitNames());
    }
    checkInUsed(sqlStatement);
    try {
        contextManager.getPersistServiceFacade().getMetaDataManagerPersistService()
                .unregisterStorageUnits(database.getName(), sqlStatement.getStorageUnitNames());
    } catch (final SQLException | ShardingSphereServerException ex) {
        throw new StorageUnitsOperateException("unregister", sqlStatement.getStorageUnitNames(), ex);
    }
}
// Without IF EXISTS, unregistering an unknown storage unit must raise MissingRequiredStorageUnitsException.
@Test void assertExecuteUpdateWithStorageUnitNotExisted() { when(database.getResourceMetaData().getStorageUnits()).thenReturn(Collections.emptyMap()); assertThrows(MissingRequiredStorageUnitsException.class, () -> executor.executeUpdate(new UnregisterStorageUnitStatement(Collections.singleton("foo_ds"), false, false), mock(ContextManager.class))); }
/**
 * Builds the MySQL fetch-rows query for listing all config info of a tenant,
 * using a covering-index subquery ({@code SELECT id ... LIMIT offset,size}) joined
 * back to {@code config_info} for the row data.
 *
 * <p>Fix: the previous concatenation {@code "...md5 " + " FROM ..."} produced a
 * double space ("md5  FROM"), which broke exact-SQL assertions; the two literals
 * are now joined with a single space.
 */
@Override
public MapperResult findAllConfigInfoFetchRows(MapperContext context) {
    String sql = "SELECT t.id,data_id,group_id,tenant_id,app_name,content,md5"
            + " FROM ( SELECT id FROM config_info WHERE tenant_id LIKE ? ORDER BY id LIMIT ?,? )"
            + " g, config_info t WHERE g.id = t.id ";
    return new MapperResult(sql, CollectionUtils.list(context.getWhereParameter(FieldConstant.TENANT_ID),
            context.getStartRow(), context.getPageSize()));
}
// Pins the exact SQL text (note: single space between "md5" and "FROM") and the
// parameter order: tenantId, startRow, pageSize.
@Test void testFindAllConfigInfoFetchRows() { MapperResult mapperResult = configInfoMapperByMySql.findAllConfigInfoFetchRows(context); assertEquals(mapperResult.getSql(), "SELECT t.id,data_id,group_id,tenant_id,app_name,content,md5 FROM ( SELECT id FROM config_info " + "WHERE tenant_id LIKE ? ORDER BY id LIMIT ?,? ) g, config_info t WHERE g.id = t.id "); assertArrayEquals(new Object[] {tenantId, startRow, pageSize}, mapperResult.getParamList().toArray()); }
/**
 * Opens a WebDAV download stream for the given file.
 *
 * <p>For resumed (append) transfers a Range header is added and compression is
 * disabled so byte offsets stay meaningful. If the server ignored the Range header
 * (it answered 200 OK with the full length instead of 206), the requested offset is
 * skipped manually so the caller still reads from the right position.
 *
 * <p>The request is aborted on any IO failure after execution so the connection is
 * not left with a half-consumed entity. Closing the returned stream before EOF
 * aborts the read. SardineException / IOException are mapped to BackgroundException.
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final List<Header> headers = new ArrayList<Header>(this.headers());
    if(status.isAppend()) {
        // Build "bytes=start-" or "bytes=start-end" depending on whether the end is known.
        final HttpRange range = HttpRange.withStatus(status);
        final String header;
        if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
            header = String.format("bytes=%d-", range.getStart());
        }
        else {
            header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
        }
        if(log.isDebugEnabled()) {
            log.debug(String.format("Add range header %s for file %s", header, file));
        }
        headers.add(new BasicHeader(HttpHeaders.RANGE, header));
        // Disable compression
        headers.add(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity"));
    }
    try {
        final HttpRequestBase request = this.toRequest(file, status);
        for(Header header : headers) {
            request.addHeader(header);
        }
        final HttpResponse response = session.getClient().execute(request);
        final VoidResponseHandler handler = new VoidResponseHandler();
        try {
            handler.handleResponse(response);
            // Will abort the read when closed before EOF.
            final ContentLengthStatusInputStream stream = new ContentLengthStatusInputStream(new HttpMethodReleaseInputStream(response, status),
                response.getEntity().getContentLength(),
                response.getStatusLine().getStatusCode());
            if(status.isAppend()) {
                // 200 (instead of 206) means the server did not honor the Range header.
                if(stream.getCode() == HttpStatus.SC_OK) {
                    if(TransferStatus.UNKNOWN_LENGTH != status.getLength()) {
                        if(stream.getLength() != status.getLength()) {
                            log.warn(String.format("Range header not supported. Skipping %d bytes in file %s.", status.getOffset(), file));
                            stream.skip(status.getOffset());
                        }
                    }
                }
            }
            return stream;
        }
        catch(IOException ex) {
            // Release the connection; the response entity was not (fully) consumed.
            request.abort();
            throw ex;
        }
    }
    catch(SardineException e) {
        throw new DAVExceptionMappingService().map("Download {0} failed", e, file);
    }
    catch(IOException e) {
        throw new HttpExceptionMappingService().map("Download {0} failed", e, file);
    }
}
// Opens the same file twice with an unknown-length status, closing each stream early
// (after at most one byte), then cleans up. Exercises abort-on-close behavior.
// NOTE(review): reformatted from a single mangled line in which inline '//' comments
// had swallowed the statements following them; tokens are unchanged.
@Test
public void testReadInterrupt() throws Exception {
    final Path test = new DAVTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(),
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    // Unknown length in status
    final TransferStatus status = new TransferStatus();
    // Read a single byte
    {
        final InputStream in = new DAVReadFeature(session).read(test, status, new DisabledConnectionCallback());
        assertNotNull(in.read());
        in.close();
    }
    {
        final InputStream in = new DAVReadFeature(session).read(test, status, new DisabledConnectionCallback());
        assertNotNull(in);
        in.close();
    }
    new DAVDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Reads terminal lines, silently consuming CLI-specific commands, and returns the
 * first line that is not a CLI command.
 */
public String nextNonCliCommand() {
    while (true) {
        final String line = terminal.readLine();
        if (!maybeHandleCliSpecificCommands(line)) {
            return line;
        }
    }
}
// A CLI-specific command line is consumed; the next non-CLI line is returned.
// NOTE(review): reformatted from a single mangled line in which '// Given/When/Then'
// comments had swallowed the statements following them; tokens are unchanged.
@Test
public void shouldSwallowCliCommandLines() {
    // Given:
    when(lineSupplier.get())
        .thenReturn(CLI_CMD_NAME)
        .thenReturn("not a CLI command;");
    // When:
    final String result = console.nextNonCliCommand();
    // Then:
    assertThat(result, is("not a CLI command;"));
}
/**
 * Computes the input splits for a JDBC-backed table. Three strategies:
 * 1. a single split when splitting is disabled or numPartitions &lt;= 1;
 * 2. interval splits over a configured partition column (bounds taken from config
 *    or queried from the database);
 * 3. offset/limit splits sized from the total record count.
 *
 * <p>NOTE(review): reformatted from a mangled source in which one string literal was
 * broken across a line break ("upperBound of ... cannot be null"); the literal was
 * rejoined, all other tokens are unchanged.
 */
@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
  try {
    String partitionColumn = job.get(Constants.JDBC_PARTITION_COLUMN);
    int numPartitions = job.getInt(Constants.JDBC_NUM_PARTITIONS, -1);
    String lowerBound = job.get(Constants.JDBC_LOW_BOUND);
    String upperBound = job.get(Constants.JDBC_UPPER_BOUND);
    InputSplit[] splits;
    if (!job.getBoolean(Constants.JDBC_SPLIT_QUERY, true) || numPartitions <= 1) {
      // We will not split this query if:
      // 1. hive.sql.query.split is set to false (either manually or automatically by calcite
      // 2. numPartitions == 1
      splits = new InputSplit[1];
      splits[0] = new JdbcInputSplit(FileInputFormat.getInputPaths(job)[0]);
      LOGGER.info("Creating 1 input split " + splits[0]);
      return splits;
    }
    dbAccessor = DatabaseAccessorFactory.getAccessor(job);
    Path[] tablePaths = FileInputFormat.getInputPaths(job);
    // We will split this query into n splits
    LOGGER.debug("Creating {} input splits", numPartitions);
    if (partitionColumn != null) {
      // Strategy 2: interval splits. The partition column must exist and be primitive.
      List<String> columnNames = dbAccessor.getColumnNames(job);
      if (!columnNames.contains(partitionColumn)) {
        throw new IOException("Cannot find partitionColumn:" + partitionColumn + " in " + columnNames);
      }
      List<TypeInfo> hiveColumnTypesList = dbAccessor.getColumnTypes(job);
      TypeInfo typeInfo = hiveColumnTypesList.get(columnNames.indexOf(partitionColumn));
      if (!(typeInfo instanceof PrimitiveTypeInfo)) {
        throw new IOException(partitionColumn + " is a complex type, only primitive type can be a partition column");
      }
      if (lowerBound == null || upperBound == null) {
        // Query the database only for the bound(s) not supplied via configuration.
        Pair<String, String> boundary = dbAccessor.getBounds(job, partitionColumn, lowerBound == null, upperBound == null);
        if (lowerBound == null) {
          lowerBound = boundary.getLeft();
        }
        if (upperBound == null) {
          upperBound = boundary.getRight();
        }
      }
      if (lowerBound == null) {
        throw new IOException("lowerBound of " + partitionColumn + " cannot be null");
      }
      if (upperBound == null) {
        throw new IOException("upperBound of " + partitionColumn + " cannot be null");
      }
      IntervalSplitter intervalSplitter = IntervalSplitterFactory.newIntervalSpitter(typeInfo);
      List<MutablePair<String, String>> intervals = intervalSplitter.getIntervals(lowerBound, upperBound, numPartitions, typeInfo);
      if (intervals.size() <= 1) {
        LOGGER.debug("Creating 1 input splits");
        splits = new InputSplit[1];
        splits[0] = new JdbcInputSplit(FileInputFormat.getInputPaths(job)[0]);
        return splits;
      }
      // Null out the outermost bounds — presumably so rows outside the computed
      // min/max are still covered. TODO(review): confirm against IntervalSplitter semantics.
      intervals.get(0).setLeft(null);
      intervals.get(intervals.size() - 1).setRight(null);
      splits = new InputSplit[intervals.size()];
      for (int i = 0; i < intervals.size(); i++) {
        splits[i] = new JdbcInputSplit(partitionColumn, intervals.get(i).getLeft(), intervals.get(i).getRight(), tablePaths[0]);
      }
    } else {
      // Strategy 3: evenly sized offset/limit splits; the first
      // (numRecords % numPartitions) splits carry one extra record each.
      int numRecords = dbAccessor.getTotalNumberOfRecords(job);
      if (numRecords < numPartitions) {
        numPartitions = numRecords;
      }
      int numRecordsPerSplit = numRecords / numPartitions;
      int numSplitsWithExtraRecords = numRecords % numPartitions;
      LOGGER.debug("Num records = {}", numRecords);
      splits = new InputSplit[numPartitions];
      int offset = 0;
      for (int i = 0; i < numPartitions; i++) {
        int numRecordsInThisSplit = numRecordsPerSplit;
        if (i < numSplitsWithExtraRecords) {
          numRecordsInThisSplit++;
        }
        splits[i] = new JdbcInputSplit(numRecordsInThisSplit, offset, tablePaths[0]);
        offset += numRecordsInThisSplit;
      }
    }
    dbAccessor = null;
    LOGGER.info("Num input splits created {}", splits.length);
    for (InputSplit split : splits) {
      LOGGER.info("split:" + split.toString());
    }
    return splits;
  } catch (Exception e) {
    LOGGER.error("Error while splitting input data.", e);
    throw new IOException(e);
  }
}
// 15 records over 3 partitions divide evenly: expect 3 splits of 5 records each.
@Test public void testLimitSplit_noSpillOver() throws HiveJdbcDatabaseAccessException, IOException { JdbcInputFormat f = new JdbcInputFormat(); when(mockDatabaseAccessor.getTotalNumberOfRecords(any(Configuration.class))).thenReturn(15); JobConf conf = new JobConf(); conf.set("mapred.input.dir", "/temp"); conf.set("hive.sql.numPartitions", "3"); InputSplit[] splits = f.getSplits(conf, -1); assertThat(splits, is(notNullValue())); assertThat(splits.length, is(3)); assertThat(splits[0].getLength(), is(5L)); }
@Udf(description = "Splits a string into an array of substrings based on a regexp.") public List<String> regexpSplit( @UdfParameter( description = "The string to be split. If NULL, then function returns NULL.") final String string, @UdfParameter( description = "The regular expression to split the string by. " + "If NULL, then function returns NULL.") final String regexp) { if (string == null || regexp == null) { return null; } // Use Guava version to be compatible with other splitting functions. final Pattern p = getPattern(regexp); if (regexp.isEmpty() || p.matcher("").matches()) { return Arrays.asList(p.split(string)); } else { return Splitter.on(p).splitToList(string); } }
// Adjacent delimiter occurrences must yield empty strings between them, including at
// the head/tail; greedy multi-char patterns collapse contiguous matches.
@Test public void shouldSplitAndAddEmptySpacesIfRegexIsFoundInContiguousPositions() { assertThat(udf.regexpSplit("A--A", "-"), contains("A", "", "A")); assertThat(udf.regexpSplit("z--A--z", "-"), contains("z", "", "A", "", "z")); assertThat(udf.regexpSplit("--A--A", "-"), contains("", "", "A", "", "A")); assertThat(udf.regexpSplit("A--A--", "-"), contains("A", "", "A", "", "")); assertThat(udf.regexpSplit("aababa", "ab"), contains("a", "", "a")); assertThat(udf.regexpSplit("aababa", "(ab)+"), contains("a", "a")); assertThat(udf.regexpSplit("aabcda", "(ab|cd)"), contains("a", "", "a")); }
/**
 * Two endpoints are equal when they share the same port and the same (possibly null)
 * IP. Strict class equality is used, so subclasses never compare equal to Endpoint.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    final Endpoint other = (Endpoint) obj;
    return port == other.port && Objects.equals(ip, other.ip);
}
// Guava EqualsTester checks the full equals/hashCode contract across equality groups.
@Test public void testEquals() { new EqualsTester() .addEqualityGroup(endpoint1, sameAsEndpoint1) .addEqualityGroup(endpoint2) .addEqualityGroup(endpoint3) .testEquals(); }
public static boolean isOnList(@Nonnull final Set<String> list, @Nonnull final String ipAddress) { Ipv4 remoteIpv4; try { remoteIpv4 = Ipv4.of(ipAddress); } catch (IllegalArgumentException e) { Log.trace("Address '{}' is not an IPv4 address.", ipAddress); remoteIpv4 = null; } Ipv6 remoteIpv6; try { remoteIpv6 = Ipv6.of(ipAddress); } catch (IllegalArgumentException e) { Log.trace("Address '{}' is not an IPv6 address.", ipAddress); remoteIpv6 = null; } if (remoteIpv4 == null && remoteIpv6 == null) { Log.warn("Unable to parse '{}' as an IPv4 or IPv6 address!", ipAddress); } for (final String item : list) { // Check if the remote address is an exact match on the list. if (item.equals(ipAddress)) { return true; } // Check if the remote address is a match for an address range on the list. if (remoteIpv4 != null) { Ipv4Range range; try { range = Ipv4Range.parse(item); } catch (IllegalArgumentException e) { Log.trace("List entry '{}' is not an IPv4 range.", item); range = null; } if (range != null && range.contains(remoteIpv4)) { return true; } } if (remoteIpv6 != null) { Ipv6Range range; try { range = Ipv6Range.parse(item); } catch (IllegalArgumentException e) { Log.trace("List entry '{}' is not an IPv6 range.", item); range = null; } if (range != null && range.contains(remoteIpv6)) { return true; } } } return false; }
// An empty list matches nothing, even for a valid IPv4 address.
// NOTE(review): reformatted from a single mangled line in which inline '//' comments
// had swallowed the statements following them; tokens are unchanged.
@Test
public void ipNotOnEmptyList() throws Exception {
    // Setup test fixture.
    final String input = "203.0.113.251";
    final Set<String> list = new HashSet<>();
    // Execute system under test.
    final boolean result = AuthCheckFilter.isOnList(list, input);
    // Verify result.
    assertFalse(result);
}
/**
 * Maps an ODPS (MaxCompute) {@code TypeInfo} to the engine {@code Type}.
 *
 * <p>Parameterized types (decimal/char/varchar) carry their precision/length over;
 * map/array/struct recurse on their element types. Unrecognized ODPS types fall
 * back to VARCHAR.
 */
public static Type convertType(TypeInfo typeInfo) {
    switch (typeInfo.getOdpsType()) {
        case BIGINT:
            return Type.BIGINT;
        case INT:
            return Type.INT;
        case SMALLINT:
            return Type.SMALLINT;
        case TINYINT:
            return Type.TINYINT;
        case FLOAT:
            return Type.FLOAT;
        case DOUBLE:
            return Type.DOUBLE;
        case DECIMAL: {
            final DecimalTypeInfo decimal = (DecimalTypeInfo) typeInfo;
            return ScalarType.createUnifiedDecimalType(decimal.getPrecision(), decimal.getScale());
        }
        case CHAR: {
            final CharTypeInfo charType = (CharTypeInfo) typeInfo;
            return ScalarType.createCharType(charType.getLength());
        }
        case VARCHAR: {
            final VarcharTypeInfo varcharType = (VarcharTypeInfo) typeInfo;
            return ScalarType.createVarcharType(varcharType.getLength());
        }
        case STRING:
        case JSON:
            // JSON is surfaced as a plain string type.
            return ScalarType.createDefaultCatalogString();
        case BINARY:
            return Type.VARBINARY;
        case BOOLEAN:
            return Type.BOOLEAN;
        case DATE:
            return Type.DATE;
        case TIMESTAMP:
        case DATETIME:
            return Type.DATETIME;
        case MAP: {
            final MapTypeInfo mapType = (MapTypeInfo) typeInfo;
            return new MapType(convertType(mapType.getKeyTypeInfo()), convertType(mapType.getValueTypeInfo()));
        }
        case ARRAY: {
            final ArrayTypeInfo arrayType = (ArrayTypeInfo) typeInfo;
            return new ArrayType(convertType(arrayType.getElementTypeInfo()));
        }
        case STRUCT: {
            final StructTypeInfo structType = (StructTypeInfo) typeInfo;
            final List<Type> fieldTypes = structType.getFieldTypeInfos().stream()
                    .map(EntityConvertUtils::convertType)
                    .collect(Collectors.toList());
            return new StructType(fieldTypes);
        }
        default:
            return Type.VARCHAR;
    }
}
// ODPS DOUBLE must map to the engine's DOUBLE type.
@Test public void testConvertTypeCaseDouble() { TypeInfo typeInfo = TypeInfoFactory.DOUBLE; Type result = EntityConvertUtils.convertType(typeInfo); assertEquals(Type.DOUBLE, result); }
/**
 * Creates and stores a new region. The atomic compute on the repository rejects a
 * second region with the same id via checkArgument(DUPLICATE_REGION).
 */
@Override
public Region createRegion(RegionId regionId, String name, Region.Type type, Annotations annots,
                           List<Set<NodeId>> masterNodeIds) {
    return regionsRepo.compute(regionId, (id, existing) -> {
        checkArgument(existing == null, DUPLICATE_REGION);
        return new DefaultRegion(regionId, name, type, annots, masterNodeIds);
    }).value();
}
// Creating a second region with an already-used id must throw IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void duplicateCreate() { store.createRegion(RID1, "R1", METRO, NO_ANNOTS, MASTERS); store.createRegion(RID1, "R2", CAMPUS, NO_ANNOTS, MASTERS); }
/**
 * Parses a comma-separated list of {@code Name: value} header declarations into a map.
 * Names and values are trimmed; entries without a colon are silently skipped; a null
 * or empty input yields an empty map.
 *
 * <p>Fix: the value is now split with {@code split(":", 2)} so headers whose value
 * itself contains a colon (URLs, timestamps) are parsed instead of being silently
 * dropped by the old exact-two-parts check.
 */
@VisibleForTesting
static Map<String, String> parseHeaders(String headerString) {
    if (isNullOrEmpty(headerString)) {
        return Collections.emptyMap();
    }
    final Map<String, String> headers = Maps.newHashMap();
    for (String headerPart : headerString.trim().split(",")) {
        // Limit 2: only the first colon separates name from value.
        final String[] parts = headerPart.trim().split(":", 2);
        if (parts.length == 2) {
            headers.put(parts[0].trim(), parts[1].trim());
        }
    }
    return headers;
}
// Covers empty/blank/malformed inputs (skipped), single and multiple headers, and
// arbitrary whitespace around names, values, commas and colons.
@Test public void testParseHeaders() throws Exception { assertEquals(0, parseHeaders("").size()); assertEquals(0, parseHeaders(" ").size()); assertEquals(0, parseHeaders(" . ").size()); assertEquals(0, parseHeaders("foo").size()); assertEquals(1, parseHeaders("X-Foo: Bar").size()); Map<String, String> expectedSingle = ImmutableMap.of("Accept", "application/json"); Map<String, String> expectedMulti = ImmutableMap.of( "Accept", "application/json", "X-Foo", "bar"); assertEquals(expectedMulti, parseHeaders("Accept: application/json, X-Foo: bar")); assertEquals(expectedSingle, parseHeaders("Accept: application/json")); assertEquals(expectedMulti, parseHeaders(" Accept: application/json,X-Foo:bar")); assertEquals(expectedMulti, parseHeaders("Accept:application/json, X-Foo: bar ")); assertEquals(expectedMulti, parseHeaders("Accept: application/json, X-Foo: bar")); assertEquals(expectedMulti, parseHeaders("Accept :application/json, X-Foo: bar ")); assertEquals(expectedSingle, parseHeaders(" Accept: application/json")); assertEquals(expectedSingle, parseHeaders("Accept:application/json")); assertEquals(expectedSingle, parseHeaders(" Accept: application/json ")); assertEquals(expectedSingle, parseHeaders(" Accept :application/json ")); }
// This handler always responds with HTTP 200 OK.
@Override public HttpResponseStatus getResponseStatusCode() { return HttpResponseStatus.OK; }
// Pins the handler's fixed response status of 200 OK.
@Test void testResponseStatus() { assertThat(metricsHandlerHeaders.getResponseStatusCode()).isEqualTo(HttpResponseStatus.OK); }
/**
 * Token-based authorization filter. Resolves the request's bearer token against the
 * configured clients and the permission required for the request; an empty Optional
 * means the request is allowed to proceed.
 *
 * <p>Responses: 401 when no bearer token is present; 403 when no permission can be
 * derived for the request, when no client/token matches, or when the token hash
 * ambiguously matches more than one distinct token version.
 */
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest req) {
    var now = clock.instant();
    var bearerToken = requestBearerToken(req).orElse(null);
    if (bearerToken == null) {
        log.fine("Missing bearer token");
        return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Unauthorized"));
    }
    var permission = Permission.getRequiredPermission(req).orElse(null);
    if (permission == null) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
    var requestTokenHash = requestTokenHash(bearerToken);
    var clientIds = new TreeSet<String>();
    var permissions = EnumSet.noneOf(Permission.class);
    var matchedTokens = new HashSet<TokenVersion>();
    for (Client c : allowedClients) {
        // Only clients that hold the required permission are candidates.
        if (!c.permissions().contains(permission)) continue;
        var matchedToken = c.tokens().get(requestTokenHash);
        if (matchedToken == null) continue;
        var expiration = matchedToken.expiration().orElse(null);
        // Skip expired token versions.
        if (expiration != null && now.isAfter(expiration)) continue;
        matchedTokens.add(matchedToken);
        clientIds.add(c.id());
        permissions.addAll(c.permissions());
    }
    if (clientIds.isEmpty()) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
    // The token hash must resolve to exactly one token version; ambiguity is rejected.
    if (matchedTokens.size() > 1) {
        log.warning("Multiple tokens matched for request %s"
                .formatted(matchedTokens.stream().map(TokenVersion::id).toList()));
        return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
    }
    var matchedToken = matchedTokens.stream().findAny().get();
    // Record token identity/fingerprint/expiry in the access log for auditing.
    addAccessLogEntry(req, "token.id", matchedToken.id());
    addAccessLogEntry(req, "token.hash", matchedToken.fingerprint().toDelimitedHexString());
    addAccessLogEntry(req, "token.exp", matchedToken.expiration().map(Instant::toString).orElse("<none>"));
    ClientPrincipal.attachToRequest(req, clientIds, permissions);
    return Optional.empty();
}
// A valid read token passes the filter (no error response), attaches the expected
// ClientPrincipal, and records token id/hash/expiration in the access log.
@Test void accepts_valid_token() { var entry = new AccessLogEntry(); var req = FilterTestUtils.newRequestBuilder() .withMethod(Method.GET) .withAccessLogEntry(entry) .withHeader("Authorization", "Bearer " + READ_TOKEN.secretTokenString()) .build(); var responseHandler = new MockResponseHandler(); newFilterWithClientsConfig().filter(req, responseHandler); assertNull(responseHandler.getResponse()); assertEquals(new ClientPrincipal(Set.of(TOKEN_SEARCH_CLIENT), Set.of(READ)), req.getUserPrincipal()); assertEquals(READ_TOKEN_ID, entry.getKeyValues().get("token.id").get(0)); assertEquals(READ_TOKEN.fingerprint().toDelimitedHexString(), entry.getKeyValues().get("token.hash").get(0)); assertEquals(TOKEN_EXPIRATION.toString(), entry.getKeyValues().get("token.exp").get(0)); }
public QueueCapacityVector parse(String capacityString, QueuePath queuePath) { if (queuePath.isRoot()) { return QueueCapacityVector.of(100f, ResourceUnitCapacityType.PERCENTAGE); } if (capacityString == null) { return new QueueCapacityVector(); } // Trim all spaces from capacity string capacityString = capacityString.replaceAll(" ", ""); for (Parser parser : parsers) { Matcher matcher = parser.regex.matcher(capacityString); if (matcher.find()) { return parser.parser.apply(matcher); } } return new QueueCapacityVector(); }
// Absolute-resource capacity strings parse into ABSOLUTE vector entries for memory,
// vcores and GPU; omitting GPU leaves its entry at zero while keeping three entries.
@Test public void testAbsoluteCapacityVectorConfig() { CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); conf.set(getQueuePrefix(QUEUE_PATH) + CapacitySchedulerConfiguration.CAPACITY, ABSOLUTE_RESOURCE); conf.set(YarnConfiguration.RESOURCE_TYPES, RESOURCE_TYPES); ResourceUtils.resetResourceTypes(conf); QueueCapacityVector absoluteCapacityVector = capacityConfigParser.parse(ABSOLUTE_RESOURCE, QUEUE_PATH); Assert.assertEquals(ResourceUnitCapacityType.ABSOLUTE, absoluteCapacityVector.getResource(MEMORY_URI).getVectorResourceType()); Assert.assertEquals(12 * GB, absoluteCapacityVector.getResource(MEMORY_URI) .getResourceValue(), EPSILON); Assert.assertEquals(ResourceUnitCapacityType.ABSOLUTE, absoluteCapacityVector.getResource(VCORES_URI).getVectorResourceType()); Assert.assertEquals(VCORE_ABSOLUTE, absoluteCapacityVector.getResource(VCORES_URI) .getResourceValue(), EPSILON); Assert.assertEquals(ResourceUnitCapacityType.ABSOLUTE, absoluteCapacityVector.getResource(GPU_URI).getVectorResourceType()); Assert.assertEquals(GPU_ABSOLUTE, absoluteCapacityVector.getResource(GPU_URI) .getResourceValue(), EPSILON); QueueCapacityVector withoutGpuVector = capacityConfigParser .parse(ABSOLUTE_RESOURCE_MEMORY_VCORE, QUEUE_PATH); Assert.assertEquals(3, withoutGpuVector.getResourceCount()); Assert.assertEquals(0f, withoutGpuVector.getResource(GPU_URI).getResourceValue(), EPSILON); }
// Convenience overload: writes the entire slice by delegating to the offset/length variant.
@Override public void writeBytes(Slice source) { writeBytes(source, 0, source.length()); }
// Writes 64 KiB through the buffered output in uneven chunks (crossing buffer
// boundaries), via both the byte[] and the Slice overloads, and checks the bytes
// arrive unchanged after a flush.
// NOTE(review): reformatted from a single mangled line in which inline '//' comments
// had swallowed the statements following them; tokens are unchanged.
@Test
public void testWriteBytes() throws Exception {
    // fill up some input bytes
    int length = 65536;
    byte[] inputArray = new byte[length];
    for (int i = 0; i < length; i++) {
        inputArray[i] = (byte) (i % 128);
    }
    // pick some offsets to make the inputs into different chunks
    int[] offsets = {0, 100, 545, 1024, 2049, 2050, 2051, 2151, 10480, 20042, 20100, 40001, 65536};
    // check byte array version
    MockOutputStream byteOutputStream = new MockOutputStream(length);
    BufferedOutputStreamSliceOutput output = new BufferedOutputStreamSliceOutput(byteOutputStream);
    for (int i = 0; i < offsets.length - 1; i++) {
        output.writeBytes(inputArray, offsets[i], offsets[i + 1] - offsets[i]);
    }
    // ignore the last flush size check
    output.flush();
    assertEquals(byteOutputStream.toByteArray(), inputArray);
    byteOutputStream.close();
    // check slice version
    byteOutputStream = new MockOutputStream(length);
    Slice inputSlice = Slices.wrappedBuffer(inputArray);
    output = new BufferedOutputStreamSliceOutput(byteOutputStream);
    for (int i = 0; i < offsets.length - 1; i++) {
        output.writeBytes(inputSlice, offsets[i], offsets[i + 1] - offsets[i]);
    }
    // ignore the last flush size check
    output.flush();
    assertEquals(byteOutputStream.toByteArray(), inputArray);
    byteOutputStream.close();
}
// Returns the simple (unqualified) class name from the underlying descriptor.
@PublicAPI(usage = ACCESS) public String getSimpleName() { return descriptor.getSimpleClassName(); }
// The containAnyConstructorsThat predicate accepts a class with a matching constructor
// full name and rejects one without, and renders the expected description.
@Test public void predicate_containAnyConstructorsThat() { @SuppressWarnings("unused") class Match { Match(Serializable param) { } } @SuppressWarnings("unused") class Mismatch { Mismatch(String param) { } } JavaClasses classes = new ClassFileImporter().importClasses(Match.class, Mismatch.class); String regex = ".*" + Serializable.class.getSimpleName() + ".*"; assertThat(containAnyConstructorsThat(fullNameMatching(regex))) .hasDescription("contain any constructors that full name matching '" + regex + "'") .accepts(classes.get(Match.class)) .rejects(classes.get(Mismatch.class)); }
public static <T> T instantiateClassDefConstructor(Class<T> clazz) { //if constructor present then it should have a no arg constructor //if not present then default constructor is already their Objects.requireNonNull(clazz, "class to instantiate should not be null"); if (clazz.getConstructors().length > 0 && Arrays.stream(clazz.getConstructors()).noneMatch(c -> c.getParameterCount() == 0)) { throw new InstantiationException( "Default constructor is required to create instance of public class: " + clazz .getName()); } try { return clazz.getConstructor().newInstance(); } catch (Exception e) { throw new InstantiationException(INSTANTIATION_ERROR_PREFIX + clazz.getName(), e); } }
// A class without a no-arg constructor must be rejected with InstantiationException.
@Test public void shouldFailToInstantiateNoDefaultConstructor() { assertThatThrownBy( () -> ClassUtils.instantiateClassDefConstructor(NoDefaultConstructor.class)) .isInstanceOf(InstantiationException.class); }
/**
 * Fetches job info from the cluster, retrying up to the configured maximum with the
 * configured sleep interval between attempts. Returns null when the job is still
 * unknown after all retries (including when retries are disabled with max 0).
 */
@VisibleForTesting
Job getJob(JobID jobid) throws IOException, InterruptedException {
    final int maxRetry = getConf().getInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES,
        MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES);
    final long retryInterval = getConf().getLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL,
        MRJobConfig.DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL);
    Job job = cluster.getJob(jobid);
    int attempt = 0;
    while (job == null && attempt < maxRetry) {
        attempt++;
        LOG.info("Could not obtain job info after " + attempt
            + " attempt(s). Sleeping for " + (retryInterval / 1000)
            + " seconds and retrying.");
        Thread.sleep(retryInterval);
        job = cluster.getJob(jobid);
    }
    return job;
}
// With retries disabled (max 0) and a cluster that never finds the job, getJob must
// return null immediately rather than sleeping/retrying.
@Test public void testGetJobWithoutRetry() throws Exception { Configuration conf = new Configuration(); conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 0); final Cluster mockCluster = mock(Cluster.class); when(mockCluster.getJob(any(JobID.class))).thenReturn(null); CLI cli = new CLI(conf); cli.cluster = mockCluster; Job job = cli.getJob(JobID.forName("job_1234654654_001")); Assert.assertTrue("job is not null", job == null); }
// Mutation is not supported on this vector: unconditionally throws the pre-built
// exception held in field 'e' (declared outside this block; per the accompanying
// test it is a NodeVector.ReadOnlyException).
public boolean retainAll(Collection<?> c) { throw e; }
// retainAll on a read-only node vector must throw ReadOnlyException.
@Test void require_that_retainAll_throws_exception() { assertThrows(NodeVector.ReadOnlyException.class, () -> new TestNodeVector("foo").retainAll(null)); }
/**
 * Sets a description tag on the file at the given path via the Koofr tags/add endpoint.
 *
 * <p>Any 2xx response is a success; 409 (tag already present) is deliberately
 * tolerated. All other status codes raise KoofrClientIOException.
 */
public void addDescription(String path, String description) throws IOException, InvalidTokenException {
    // Payload shape: {"tags": {"description": ["<description>"]}}
    final Map<String, String[]> tags = new LinkedHashMap<>();
    tags.put("description", new String[] {description});
    final Map<String, Object> requestPayload = new LinkedHashMap<>();
    requestPayload.put("tags", tags);
    final String url;
    try {
        url = getUriBuilder()
            .setPath(API_PATH_PREFIX + "/mounts/primary/files/tags/add")
            .setParameter("path", path)
            .build()
            .toString();
    } catch (URISyntaxException e) {
        throw new IllegalStateException("Could not produce url.", e);
    }
    final Request.Builder requestBuilder = getRequestBuilder(url);
    requestBuilder.post(
        RequestBody.create(
            MediaType.parse("application/json"), objectMapper.writeValueAsString(requestPayload)));
    try (Response response = getResponse(requestBuilder)) {
        final int code = response.code();
        final boolean success = code >= 200 && code <= 299;
        if (!success && code != 409) {
            throw new KoofrClientIOException(response);
        }
    }
}
// On a 401 the client refreshes its credential and transparently replays the same
// tags/add request with the new bearer token; both requests carry identical bodies.
@Test public void testAddDescriptionTokenExpired() throws Exception { when(credentialFactory.refreshCredential(credential)) .then( (InvocationOnMock invocation) -> { final Credential cred = invocation.getArgument(0); cred.setAccessToken("acc1"); return cred; }); server.enqueue(new MockResponse().setResponseCode(401)); server.enqueue(new MockResponse().setResponseCode(200)); client.addDescription("/path/to/folder", "Test description"); assertEquals(2, server.getRequestCount()); RecordedRequest recordedRequest = server.takeRequest(); assertEquals("POST", recordedRequest.getMethod()); assertEquals( "/api/v2/mounts/primary/files/tags/add?path=%2Fpath%2Fto%2Ffolder", recordedRequest.getPath()); assertEquals("Bearer acc", recordedRequest.getHeader("Authorization")); assertEquals("2.1", recordedRequest.getHeader("X-Koofr-Version")); assertEquals( "application/json; charset=utf-8", recordedRequest.getHeader("Content-Type")); assertEquals( "{\"tags\":{\"description\":[\"Test description\"]}}", recordedRequest.getBody().readUtf8()); recordedRequest = server.takeRequest(); assertEquals("POST", recordedRequest.getMethod()); assertEquals( "/api/v2/mounts/primary/files/tags/add?path=%2Fpath%2Fto%2Ffolder", recordedRequest.getPath()); assertEquals("Bearer acc1", recordedRequest.getHeader("Authorization")); assertEquals("2.1", recordedRequest.getHeader("X-Koofr-Version")); assertEquals( "application/json; charset=utf-8", recordedRequest.getHeader("Content-Type")); assertEquals( "{\"tags\":{\"description\":[\"Test description\"]}}", recordedRequest.getBody().readUtf8()); }
// Clears both the in-memory cookie map and the backing database so the empty state
// survives a restart. Synchronized to keep the two stores consistent.
public synchronized void clear() { map.clear(); db.clear(); }
// After clear(), the in-memory map is empty AND reopening the repository from the
// same database file shows the persisted state was cleared too.
@Test public void testClear() { Context app = RuntimeEnvironment.application; HttpUrl url = HttpUrl.parse("http://www.ehviewer.com/"); Cookie cookie = new Cookie.Builder() .name("user") .value("1234567890") .domain("ehviewer.com") .path("/") .expiresAt(System.currentTimeMillis() + 3000) .build(); CookieRepository repository = new CookieRepository(app, "cookie.db"); repository.saveFromResponse(url, Collections.singletonList(cookie)); Map<String, CookieSet> map = Reflect.on(repository).field("map").get(); assertEquals(1, map.size()); equals(map.get("ehviewer.com"), Collections.singletonList(cookie)); repository.clear(); map = Reflect.on(repository).field("map").get(); assertEquals(0, map.size()); repository.close(); repository = new CookieRepository(app, "cookie.db"); map = Reflect.on(repository).field("map").get(); assertEquals(0, map.size()); repository.close(); }
/**
 * Evaluates an XPath 2.0 query against an XML string using Saxon and appends the
 * matches to {@code matchStrings}.
 *
 * Match-number semantics:
 * - negative: append every match;
 * - 0: append one randomly chosen match;
 * - N &gt; 0: append the N-th match (1-based), warning if N exceeds the match count.
 *
 * When {@code fragment} is true the full XML fragment is appended; otherwise only
 * the node's string value.
 *
 * @param xmlFile       XML content to parse (despite the name, the content itself)
 * @param xPathQuery    XPath 2.0 expression; empty queries are rejected with a warning
 * @param matchStrings  output list the matches are appended to
 * @param fragment      true to append XML fragments, false for string values
 * @param matchNumber   which match(es) to extract, see above
 * @param namespaces    namespace declarations; part of the compiled-XPath cache key
 */
public static void putValuesForXPathInListUsingSaxon(
        String xmlFile, String xPathQuery,
        List<? super String> matchStrings, boolean fragment,
        int matchNumber, String namespaces)
        throws SaxonApiException, FactoryConfigurationError {
    // Compiled XPath expressions are cached per (query, namespaces) pair.
    final ImmutablePair<String, String> key = ImmutablePair.of(xPathQuery, namespaces);
    // Check the cache; an empty query is a caller error, log and bail out.
    XPathExecutable xPathExecutable;
    if (StringUtils.isNotEmpty(xPathQuery)) {
        xPathExecutable = XPATH_CACHE.get(key);
    } else {
        log.warn("Error : {}", JMeterUtils.getResString("xpath2_extractor_empty_query"));
        return;
    }
    try (StringReader reader = new StringReader(xmlFile)) {
        // We could instantiate it once but might trigger issues in the future.
        // Sharing of a DocumentBuilder across multiple threads is not recommended.
        // However, in the current implementation sharing a DocumentBuilder (once
        // initialized) will only cause problems if a SchemaValidator is used.
        net.sf.saxon.s9api.DocumentBuilder builder = PROCESSOR.newDocumentBuilder();
        XdmNode xdmNode = builder.build(new SAXSource(new InputSource(reader)));
        // NOTE(review): if the cache returned null the document is parsed but the
        // query is silently skipped — confirm the cache is guaranteed to populate.
        if (xPathExecutable != null) {
            XPathSelector selector = null;
            try {
                selector = xPathExecutable.load();
                selector.setContextItem(xdmNode);
                XdmValue nodes = selector.evaluate();
                int length = nodes.size();
                int indexToMatch = matchNumber;
                // Negative match number: extract everything.
                if (matchNumber < 0) {
                    for (XdmItem item : nodes) {
                        if (fragment) {
                            matchStrings.add(item.toString());
                        } else {
                            matchStrings.add(item.getStringValue());
                        }
                    }
                } else {
                    if (indexToMatch <= length) {
                        // 0 means "pick a random match" (1-based index).
                        if (matchNumber == 0 && length > 0) {
                            indexToMatch = JMeterUtils.getRandomInt(length) + 1;
                        }
                        XdmItem item = nodes.itemAt(indexToMatch - 1);
                        matchStrings.add(fragment ? item.toString() : item.getStringValue());
                    } else {
                        // Requested index is past the last match: warn, append nothing.
                        if (log.isWarnEnabled()) {
                            log.warn("Error : {}{}", JMeterUtils.getResString("xpath2_extractor_match_number_failure"), indexToMatch);
                        }
                    }
                }
            } finally {
                // Detach the document from the selector so it can be garbage collected.
                if (selector != null) {
                    try {
                        selector.getUnderlyingXPathContext().setContextItem(null);
                    } catch (Exception e) { // NOSONAR Ignored on purpose
                        // NOOP
                    }
                }
            }
        }
    }
}
/**
 * Exercises putValuesForXPathInListUsingSaxon across its match-number modes:
 * positive index, 0 (random pick from a single match), -1 (all matches), both
 * fragment and string-value extraction, a non-matching query, and an out-of-range
 * match number.
 */
@Test
public void testputValuesForXPathInListUsingSaxon() throws SaxonApiException, FactoryConfigurationError {
    String xPathQuery = "//Employees/Employee/role";
    ArrayList<String> matchStrings = new ArrayList<>();
    boolean fragment = false;
    String namespaces = "age=http://www.w3.org/2003/01/geo/wgs84_pos#";
    int matchNumber = 3;
    XPathUtil.putValuesForXPathInListUsingSaxon(xmlDoc, xPathQuery, matchStrings, fragment, matchNumber, namespaces);
    assertEquals("Manager", matchStrings.get(0));

    // matchNumber 0 on a single-match query: the "random" pick is deterministic.
    matchNumber = 0;
    xPathQuery = "//Employees/Employee[1]/age:ag";
    fragment = true;
    matchStrings.clear();
    XPathUtil.putValuesForXPathInListUsingSaxon(xmlDoc, xPathQuery, matchStrings, fragment, matchNumber, namespaces);
    assertEquals("<age:ag xmlns:age=\"http://www.w3.org/2003/01/geo/wgs84_pos#\">29</age:ag>",
        matchStrings.get(0));
    assertEquals(1, matchStrings.size());

    // matchNumber -1: extract all four employees as fragments.
    matchNumber = -1;
    xPathQuery = "//Employees/Employee/age:ag";
    matchStrings.clear();
    XPathUtil.putValuesForXPathInListUsingSaxon(xmlDoc, xPathQuery, matchStrings, fragment, matchNumber, namespaces);
    assertEquals("<age:ag xmlns:age=\"http://www.w3.org/2003/01/geo/wgs84_pos#\">29</age:ag>",
        matchStrings.get(0));
    assertEquals(4, matchStrings.size());

    // Same query, string values instead of fragments.
    fragment = false;
    matchStrings.clear();
    XPathUtil.putValuesForXPathInListUsingSaxon(xmlDoc, xPathQuery, matchStrings, fragment, matchNumber, namespaces);
    assertEquals("29", matchStrings.get(0));
    assertEquals(4, matchStrings.size());

    // A query matching nothing yields an empty result list.
    matchStrings.clear();
    xPathQuery = "regtsgwsdfstgsdf";
    XPathUtil.putValuesForXPathInListUsingSaxon(xmlDoc, xPathQuery, matchStrings, fragment, matchNumber, namespaces);
    assertEquals(new ArrayList<String>(), matchStrings);
    assertEquals(0, matchStrings.size());

    // An out-of-range match number yields nothing (only a warning is logged).
    matchStrings.clear();
    xPathQuery = "//Employees/Employee[1]/age:ag";
    matchNumber = 555;
    XPathUtil.putValuesForXPathInListUsingSaxon(xmlDoc, xPathQuery, matchStrings, fragment, matchNumber, namespaces);
    assertEquals(new ArrayList<String>(), matchStrings);
    assertEquals(0, matchStrings.size());
}
/**
 * Returns the handshaker stored as a channel attribute during the WebSocket
 * upgrade, or null if no handshake has been performed on this channel.
 */
static WebSocketServerHandshaker getHandshaker(Channel channel) {
    return channel.attr(HANDSHAKER_ATTR_KEY).get();
}
/**
 * Verifies that the WebSocketServerProtocolHandshakeHandler is removed from the
 * pipeline before the HandshakeComplete event is fired, the server answers the
 * upgrade with 101 Switching Protocols, and the handshaker is retrievable from
 * the channel afterwards.
 */
@Test
public void testWebSocketServerProtocolHandshakeHandlerReplacedBeforeHandshake() {
    EmbeddedChannel ch = createChannel(new MockOutboundHandler());
    ChannelHandlerContext handshakerCtx = ch.pipeline().context(WebSocketServerProtocolHandshakeHandler.class);
    ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
        @Override
        public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
            if (evt instanceof WebSocketServerProtocolHandler.HandshakeComplete) {
                // We should have removed the handler already.
                assertNull(ctx.pipeline().context(WebSocketServerProtocolHandshakeHandler.class));
            }
        }
    });
    writeUpgradeRequest(ch);
    FullHttpResponse response = responses.remove();
    assertEquals(SWITCHING_PROTOCOLS, response.status());
    response.release();
    // The handshaker must have been stashed on the channel by the upgrade.
    assertNotNull(WebSocketServerProtocolHandler.getHandshaker(handshakerCtx.channel()));
    assertFalse(ch.finish());
}
/**
 * Reads all elements currently in the queue, blocking synchronously on the
 * asynchronous variant.
 */
@Override
public List<V> readAll() {
    return get(readAllAsync());
}
/** Verifies readAll() returns every queued element in insertion order. */
@Test
public void testReadAll() {
    RTransferQueue<String> queue = redisson.getTransferQueue("queue");
    queue.add("1");
    queue.add("2");
    queue.add("3");
    queue.add("4");

    assertThat(queue.readAll()).containsExactly("1", "2", "3", "4");
}
/**
 * SQL CAST from INTEGER to DOUBLE.
 *
 * <p>INTEGER values arrive as {@code long}; every such value is exactly
 * representable as a {@code double}, so the conversion is lossless.
 */
@ScalarOperator(CAST)
@SqlType(StandardTypes.DOUBLE)
public static double castToDouble(@SqlType(StandardTypes.INTEGER) long value) {
    return (double) value;
}
/** Verifies CAST(INTEGER AS DOUBLE) yields the exact floating-point value. */
@Test
public void testCastToDouble() {
    assertFunction("cast(INTEGER'37' as double)", DOUBLE, 37.0);
    assertFunction("cast(INTEGER'17' as double)", DOUBLE, 17.0);
}
/**
 * Plans the next batch of splits for continuous enumeration.
 *
 * <p>The table metadata is refreshed first so planning always sees the latest
 * snapshots. A {@code null} position means this is the very first discovery;
 * any later call continues incrementally from the given position.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    table.refresh();
    if (lastPosition == null) {
        return discoverInitialSplits();
    }
    return discoverIncrementalSplits(lastPosition);
}
/**
 * Verifies TABLE_SCAN_THEN_INCREMENTAL planning on an empty table: both the
 * initial and the follow-up discovery produce no splits and empty positions,
 * and subsequent snapshot cycles still advance correctly from that empty state.
 */
@Test
public void testTableScanThenIncrementalWithEmptyTable() throws Exception {
    ScanContext scanContext =
        ScanContext.builder()
            .startingStrategy(StreamingStartingStrategy.TABLE_SCAN_THEN_INCREMENTAL)
            .build();
    ContinuousSplitPlannerImpl splitPlanner =
        new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);

    // Initial discovery on an empty table: no splits, no fromPosition, empty toPosition.
    ContinuousEnumerationResult emptyTableInitialDiscoveryResult = splitPlanner.planSplits(null);
    assertThat(emptyTableInitialDiscoveryResult.splits()).isEmpty();
    assertThat(emptyTableInitialDiscoveryResult.fromPosition()).isNull();
    assertThat(emptyTableInitialDiscoveryResult.toPosition().isEmpty()).isTrue();
    assertThat(emptyTableInitialDiscoveryResult.toPosition().snapshotTimestampMs()).isNull();

    // Second discovery continues from the empty position and stays empty.
    ContinuousEnumerationResult emptyTableSecondDiscoveryResult =
        splitPlanner.planSplits(emptyTableInitialDiscoveryResult.toPosition());
    assertThat(emptyTableSecondDiscoveryResult.splits()).isEmpty();
    assertThat(emptyTableSecondDiscoveryResult.fromPosition().isEmpty()).isTrue();
    assertThat(emptyTableSecondDiscoveryResult.fromPosition().snapshotTimestampMs()).isNull();
    assertThat(emptyTableSecondDiscoveryResult.toPosition().isEmpty()).isTrue();
    assertThat(emptyTableSecondDiscoveryResult.toPosition().snapshotTimestampMs()).isNull();

    // next 3 snapshots
    IcebergEnumeratorPosition lastPosition = emptyTableSecondDiscoveryResult.toPosition();
    for (int i = 0; i < 3; ++i) {
        lastPosition = verifyOneCycle(splitPlanner, lastPosition).lastPosition;
    }
}
/**
 * Feeds a watermark from one input channel/subpartition into the valve and, if
 * this advances the minimum watermark across all aligned subpartitions, emits a
 * new combined watermark to {@code output}.
 *
 * @param watermark    incoming watermark; an InternalWatermark addresses a
 *                     specific subpartition, otherwise the channel's current
 *                     subpartition index is used
 * @param channelIndex index of the input channel the watermark arrived on
 * @param output       sink for any newly computed minimum watermark
 */
public void inputWatermark(Watermark watermark, int channelIndex, DataOutput<?> output)
        throws Exception {
    // Resolve which subpartition's bookkeeping this watermark belongs to.
    final SubpartitionStatus subpartitionStatus;
    if (watermark instanceof InternalWatermark) {
        int subpartitionStatusIndex = ((InternalWatermark) watermark).getSubpartitionIndex();
        subpartitionStatus = subpartitionStatuses.get(channelIndex).get(subpartitionStatusIndex);
    } else {
        subpartitionStatus =
                subpartitionStatuses.get(channelIndex).get(subpartitionIndexes[channelIndex]);
    }

    // ignore the input watermark if its subpartition, or all subpartitions are idle (i.e.
    // overall the valve is idle).
    if (lastOutputWatermarkStatus.isActive() && subpartitionStatus.watermarkStatus.isActive()) {
        long watermarkMillis = watermark.getTimestamp();

        // if the input watermark's value is less than the last received watermark for its
        // subpartition, ignore it also.
        if (watermarkMillis > subpartitionStatus.watermark) {
            subpartitionStatus.watermark = watermarkMillis;

            if (subpartitionStatus.isWatermarkAligned) {
                adjustAlignedSubpartitionStatuses(subpartitionStatus);
            } else if (watermarkMillis >= lastOutputWatermark) {
                // previously unaligned subpartitions are now aligned if its watermark has
                // caught up
                markWatermarkAligned(subpartitionStatus);
            }

            // now, attempt to find a new min watermark across all aligned subpartitions
            findAndOutputNewMinWatermarkAcrossAlignedSubpartitions(output);
        }
    }
}
/**
 * Verifies that after the valve has emitted a combined watermark, a later round
 * of strictly lower per-channel watermarks produces no output (watermarks must
 * be monotonically non-decreasing).
 */
@Test
void testMultipleInputDecreasingWatermarksYieldsNoOutput() throws Exception {
    StatusWatermarkOutput valveOutput = new StatusWatermarkOutput();
    StatusWatermarkValve valve = new StatusWatermarkValve(3);

    // First round: the minimum across channels (10) is emitted.
    valve.inputWatermark(new Watermark(25), 0, valveOutput);
    valve.inputWatermark(new Watermark(10), 1, valveOutput);
    valve.inputWatermark(new Watermark(17), 2, valveOutput);
    assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(10));

    // Second round: all values are below each channel's previous watermark → ignored.
    valve.inputWatermark(new Watermark(12), 0, valveOutput);
    valve.inputWatermark(new Watermark(8), 1, valveOutput);
    valve.inputWatermark(new Watermark(15), 2, valveOutput);
    assertThat(valveOutput.popLastSeenOutput()).isNull();
}
/**
 * Resolves a registry alias to its canonical host.
 *
 * @param registry registry name or alias
 * @return the mapped host when an alias entry exists, otherwise the input itself
 */
public static String getHost(String registry) {
    if (REGISTRY_HOST_MAP.containsKey(registry)) {
        return REGISTRY_HOST_MAP.get(registry);
    }
    return registry;
}
/** Verifies that a registry with no alias entry is returned unchanged. */
@Test
public void testGetHost_noAlias() {
    String host = RegistryAliasGroup.getHost("something.gcr.io");
    Assert.assertEquals("something.gcr.io", host);
}
/**
 * Delegates the login request to the user service client and returns its
 * token response unchanged.
 */
@Override
public CustomResponse<TokenResponse> login(LoginRequest loginRequest) {
    return userServiceClient.loginUser(loginRequest);
}
/**
 * Verifies that login() forwards the request to the user service client exactly
 * once and returns the client's successful token response unchanged.
 */
@Test
void givenValidLoginRequest_whenLogin_ReturnsCustomResponse() {
    // Given
    LoginRequest loginRequest = LoginRequest.builder()
        .email("valid.email@example.com")
        .password("validPassword123")
        .build();

    TokenResponse tokenResponse = TokenResponse.builder()
        .accessToken("access-token")
        .accessTokenExpiresAt(System.currentTimeMillis() + 3600)
        .refreshToken("refresh-token")
        .build();

    CustomResponse<TokenResponse> customResponse = CustomResponse.successOf(tokenResponse);

    // When
    when(userServiceClient.loginUser(any(LoginRequest.class))).thenReturn(customResponse);

    // Then
    CustomResponse<TokenResponse> response = userLoginService.login(loginRequest);

    assertNotNull(response);
    assertTrue(response.getIsSuccess());
    assertEquals(HttpStatus.OK, response.getHttpStatus());
    assertEquals(tokenResponse, response.getResponse());

    // Verify
    verify(userServiceClient, times(1)).loginUser(any(LoginRequest.class));
}
/**
 * Executes a WS request in-process: adapts it to a local request, dispatches it
 * through the local connector, and wraps the raw response.
 */
@Override
public WsResponse call(WsRequest wsRequest) {
    final LocalConnector.LocalResponse response =
        localConnector.call(new DefaultLocalRequest(wsRequest));
    return new ByteArrayResponse(wsRequest.getPath(), response);
}
/**
 * Verifies call() forwards method, path, media type, and parameters to the local
 * connector, and that the wrapped response exposes code, content (as string,
 * reader, and stream), content type, request URL, and headers faithfully.
 */
@Test
public void call_request() throws Exception {
    WsRequest wsRequest = new PostRequest("api/issues/search")
        .setMediaType(MediaTypes.JSON)
        .setParam("foo", "bar");
    answer(new DumbLocalResponse(400, MediaTypes.JSON, "{}".getBytes(UTF_8), Collections.<String>emptyList()));

    WsResponse wsResponse = underTest.call(wsRequest);

    Map<String, String> expectedParams = new HashMap<>();
    expectedParams.put("foo", "bar");
    verifyRequested("POST", "api/issues/search", MediaTypes.JSON, expectedParams);
    assertThat(wsResponse.code()).isEqualTo(400);
    // The body should be readable through every accessor and yield identical content.
    assertThat(wsResponse.content()).isEqualTo("{}");
    assertThat(IOUtils.toString(wsResponse.contentReader())).isEqualTo("{}");
    assertThat(IOUtils.toString(wsResponse.contentStream())).isEqualTo("{}");
    assertThat(wsResponse.contentType()).isEqualTo(MediaTypes.JSON);
    assertThat(wsResponse.requestUrl()).isEqualTo("api/issues/search");
    assertThat(wsResponse.headers()).isEmpty();
}
/**
 * Merges an int value into the stat stored under {@code key} using the key's own
 * merge function. Keys typed as LONG are widened and routed to the long overload.
 * A merged result of 0 removes the entry entirely, keeping the map sparse.
 *
 * @return this map, for chaining
 */
public StatMap<K> merge(K key, int value) {
    // LONG-typed keys must go through the long-merging path.
    if (key.getType() == Type.LONG) {
        merge(key, (long) value);
        return this;
    }
    int oldValue = getInt(key);
    int newValue = key.merge(oldValue, value);
    // Zero is the neutral value: drop the entry instead of storing it.
    if (newValue == 0) {
        _map.remove(key);
    } else {
        _map.put(key, newValue);
    }
    return this;
}
/**
 * For every stat type, merges one representative value into a StatMap and
 * verifies the map survives a serialize/deserialize round trip.
 */
@Test(dataProvider = "allTypeStats")
public void singleEncodeDecode(MyStats stat) throws IOException {
    StatMap<MyStats> statMap = new StatMap<>(MyStats.class);
    // Merge a value whose Java type matches the stat's declared type.
    switch (stat.getType()) {
        case BOOLEAN:
            statMap.merge(stat, true);
            break;
        case INT:
            statMap.merge(stat, 1);
            break;
        case LONG:
            statMap.merge(stat, 1L);
            break;
        case STRING:
            statMap.merge(stat, "foo");
            break;
        default:
            throw new IllegalStateException();
    }
    testSerializeDeserialize(statMap);
}
/**
 * Resolves the source-filename field in the input row and stores its index in
 * {@code data.indexOfSourceFileFieldName}.
 *
 * @param sourceFilenameFieldName field name, possibly containing variables to substitute
 * @param data                    step data object receiving the resolved field index
 * @throws KettleStepException if the name is empty or the field is absent from the row
 */
@VisibleForTesting
void checkSourceFileField( String sourceFilenameFieldName, SFTPPutData data ) throws KettleStepException {
    // Sourcefilename field
    sourceFilenameFieldName = environmentSubstitute( sourceFilenameFieldName );
    if ( Utils.isEmpty( sourceFilenameFieldName ) ) {
        // source filename field is missing
        throw new KettleStepException( BaseMessages.getString(
            PKG, "SFTPPut.Error.SourceFileNameFieldMissing" ) );
    }

    data.indexOfSourceFileFieldName = getInputRowMeta().indexOfValue( sourceFilenameFieldName );
    if ( data.indexOfSourceFileFieldName == -1 ) {
        // the named field does not exist in the input row
        throw new KettleStepException( BaseMessages.getString(
            PKG, "SFTPPut.Error.CanNotFindField", sourceFilenameFieldName ) );
    }
}
/**
 * Verifies that when the named field exists in the input row, its index
 * (here 1, the second column) is stored on the step data.
 */
@Test
public void checkSourceFileField_NameIsSet_Found() throws Exception {
    RowMeta rowMeta = rowOfStringsMeta( "some field", "sourceFileFieldName" );
    step.setInputRowMeta( rowMeta );

    SFTPPutData data = new SFTPPutData();
    step.checkSourceFileField( "sourceFileFieldName", data );
    assertEquals( 1, data.indexOfSourceFileFieldName );
}
/**
 * Returns a (possibly shared) Netty partition-request client for the given
 * connection, creating and connecting one if none exists yet.
 *
 * Concurrency: a CompletableFuture is published in {@code clients} via
 * putIfAbsent so that exactly one caller performs the connect while concurrent
 * callers wait on the future. A failed connect removes the future so later
 * attempts can retry (failures are not cached). The outer loop retries when the
 * client obtained from the cache has been invalidated in the meantime.
 *
 * @throws IOException if the connection cannot be established
 */
NettyPartitionRequestClient createPartitionRequestClient(ConnectionID connectionId)
        throws IOException, InterruptedException {
    // We map the input ConnectionID to a new value to restrict the number of tcp connections
    connectionId =
            new ConnectionID(
                    connectionId.getResourceID(),
                    connectionId.getAddress(),
                    connectionId.getConnectionIndex() % maxNumberOfConnections);

    while (true) {
        final CompletableFuture<NettyPartitionRequestClient> newClientFuture =
                new CompletableFuture<>();

        final CompletableFuture<NettyPartitionRequestClient> clientFuture =
                clients.putIfAbsent(connectionId, newClientFuture);

        final NettyPartitionRequestClient client;

        if (clientFuture == null) {
            // This caller won the race: it is responsible for connecting.
            try {
                client = connectWithRetries(connectionId);
            } catch (Throwable e) {
                newClientFuture.completeExceptionally(
                        new IOException("Could not create Netty client.", e));
                // Remove the failed future so the error is not cached for future callers.
                clients.remove(connectionId, newClientFuture);
                throw e;
            }

            newClientFuture.complete(client);
        } else {
            // Another caller is (or was) connecting: wait for its result.
            try {
                client = clientFuture.get();
            } catch (ExecutionException e) {
                ExceptionUtils.rethrowIOException(ExceptionUtils.stripExecutionException(e));
                return null;
            }
        }

        // Make sure to increment the reference count before handing a client
        // out to ensure correct bookkeeping for channel closing.
        if (client.validateClientAndIncrementReferenceCounter()) {
            return client;
        } else if (client.canBeDisposed()) {
            client.closeConnection();
        } else {
            destroyPartitionRequestClient(connectionId, client);
        }
        // Loop again: the cached client was stale, so a fresh one must be created.
    }
}
/**
 * Verifies that a failed connection attempt is not cached: the first request
 * fails with a RemoteTransportException, but a subsequent request for the same
 * connection succeeds.
 */
@TestTemplate
void testExceptionsAreNotCached() throws Exception {
    NettyTestUtil.NettyServerAndClient nettyServerAndClient = createNettyServerAndClient();

    try {
        // UnstableNettyClient fails exactly the first connect attempt.
        final PartitionRequestClientFactory factory =
                new PartitionRequestClientFactory(
                        new UnstableNettyClient(nettyServerAndClient.client(), 1),
                        connectionReuseEnabled);

        final ConnectionID connectionID = nettyServerAndClient.getConnectionID(RESOURCE_ID, 0);
        assertThatThrownBy(() -> factory.createPartitionRequestClient(connectionID))
                .withFailMessage("Expected the first request to fail.")
                .isInstanceOf(RemoteTransportException.class);

        // The failure must not be cached: the second attempt succeeds.
        factory.createPartitionRequestClient(connectionID);
    } finally {
        shutdown(nettyServerAndClient);
    }
}
/**
 * Validates content destined for an incremental publish/delete operation.
 *
 * <p>The content must be non-null, non-empty, and must not contain carriage
 * returns, line feeds, or the word-separator control character, since those
 * characters delimit records in the wire protocol.
 *
 * @param content content to validate
 * @throws IllegalArgumentException if the content is null/empty or contains a
 *         forbidden character
 */
public static void verifyIncrementPubContent(String content) {
    if (content == null || content.isEmpty()) {
        throw new IllegalArgumentException("publish/delete content can not be null");
    }
    // Hoist the loop-invariant separator character out of the scan.
    final char wordSeparator = Constants.WORD_SEPARATOR.charAt(0);
    for (int i = 0; i < content.length(); i++) {
        char c = content.charAt(i);
        if (c == '\r' || c == '\n') {
            throw new IllegalArgumentException("publish/delete content can not contain return and linefeed");
        }
        if (c == wordSeparator) {
            throw new IllegalArgumentException("publish/delete content can not contain(char)2");
        }
    }
}
/**
 * Verifies that null content is rejected with an IllegalArgumentException whose
 * message names the failure.
 */
@Test
void testVerifyIncrementPubContentFail1() {
    Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
        String content = null;
        ContentUtils.verifyIncrementPubContent(content);
    });
    assertTrue(exception.getMessage().contains("publish/delete content can not be null"));
}
/**
 * Generates SQL rewrite tokens for the columns referenced in the statement's
 * WHERE clauses.
 *
 * <p>Resolves the statement's schema (falling back to the default schema when
 * the statement does not name one), maps each column expression to its owning
 * table, and delegates token generation to the columnar overload.
 */
@Override
public Collection<SQLToken> generateSQLTokens(final SQLStatementContext sqlStatementContext) {
    Collection<ColumnSegment> columnSegments = ((WhereAvailable) sqlStatementContext).getColumnSegments();
    Collection<WhereSegment> whereSegments = ((WhereAvailable) sqlStatementContext).getWhereSegments();
    // Use the statement's schema when present, otherwise the default schema.
    ShardingSphereSchema schema = ((TableAvailable) sqlStatementContext).getTablesContext().getSchemaName().map(schemas::get).orElseGet(() -> defaultSchema);
    Map<String, String> columnExpressionTableNames = ((TableAvailable) sqlStatementContext).getTablesContext().findTableNames(columnSegments, schema);
    return generateSQLTokens(columnSegments, columnExpressionTableNames, whereSegments, sqlStatementContext.getDatabaseType());
}
/**
 * Verifies that an UPDATE statement against an encrypted column yields exactly
 * one substitutable-column token that renders as the assisted-query column name.
 */
@Test
void assertGenerateSQLTokenFromGenerateNewSQLToken() {
    generator.setSchemas(Collections.emptyMap());
    Collection<SQLToken> substitutableColumnNameTokens =
        generator.generateSQLTokens(EncryptGeneratorFixtureBuilder.createUpdateStatementContext());
    assertThat(substitutableColumnNameTokens.size(), is(1));
    assertThat(((SubstitutableColumnNameToken) substitutableColumnNameTokens.iterator().next()).toString(null), is("pwd_assist"));
}
/**
 * Fetches a seckill (flash-sale) record by id using a cache-aside pattern:
 * Redis first, then the database on a miss, repopulating the cache before
 * returning. Throws a RuntimeException when the activity does not exist.
 *
 * NOTE(review): concurrent misses can each hit the database and re-populate
 * the cache (cache stampede) — confirm this is acceptable for the load profile.
 */
public Seckill getSeckill(long seckillId) {
    String key = "seckill:" + seckillId;
    Seckill seckill = (Seckill) redisTemplate.opsForValue().get(key);
    if (seckill != null) {
        return seckill;
    } else {
        // Cache miss: load from the database and backfill the cache.
        seckill = seckillMapper.selectById(seckillId);
        if (seckill == null) {
            throw new RuntimeException("秒杀活动不存在!");
        }
        putSeckill(seckill);
        return seckill;
    }
}
/**
 * Verifies the cache-hit path of getSeckill(): when Redis returns a cached
 * Seckill for the key, it is returned directly without touching the database.
 */
@Test
void getSeckillSuccess() {
    long seckillId = 1001L;
    String key = "seckill:" + seckillId;
    ValueOperations valueOperations = mock(ValueOperations.class);
    when(redisTemplate.opsForValue()).thenReturn(valueOperations);
    Seckill t = new Seckill();
    t.setSeckillId(seckillId);
    // Simulate a cache hit for the expected key.
    when(valueOperations.get(key)).thenReturn(t);
    assertEquals((long) redisService.getSeckill(seckillId).getSeckillId(), seckillId);
}
/**
 * Application entry point: boots the Spring context for the dual-storage demo.
 *
 * @param args command-line arguments forwarded to Spring Boot
 */
public static void main(final String[] args) {
    SpringApplication.run(DualStorageDemoApplication.class, args);
}
/**
 * Smoke test: stops both storage instances, then verifies the application can
 * be started again from scratch without errors.
 */
@Test
void checkPossibilityToSimplyStartAndRestartApplication() {
    this.invoiceConfiguration.getStorageInstance().stop();
    this.personConfiguration.getStorageInstance().stop();
    DualStorageDemoApplication.main(new String[]{});
}
/**
 * Converts a memory size expressed in the given unit to bytes.
 *
 * @param memorySize size measured in {@code unit}; negative values are invalid
 * @param unit       number of bytes per unit (e.g. 1024 for KB)
 * @return the size in bytes, or -1 if {@code memorySize} is negative
 */
public static long memorySize2Byte(final long memorySize, @MemoryConst.Unit final int unit) {
    if (memorySize < 0) {
        return -1;
    }
    // NOTE(review): no overflow guard — extremely large sizes could wrap; confirm
    // callers stay within range.
    return memorySize * unit;
}
/** Verifies the zero boundary: a size of 0 in any unit converts to 0 bytes. */
@Test
public void memorySize2ByteInputZeroOutputZero() {
    Assert.assertEquals(0L, ConvertKit.memorySize2Byte(0L, 0));
}
/**
 * Persists the given jobs while running the job-filter lifecycle around the
 * save: state-election filters before, state-applied filters after.
 *
 * <p>If the save detects concurrent modifications, the server's resolver is
 * asked to reconcile them; an unresolvable conflict is escalated as a
 * SevereJobRunrException.
 *
 * @param jobs jobs to save; a no-op when empty
 */
protected void saveAndRunJobFilters(List<Job> jobs) {
    if (jobs.isEmpty()) return;

    try {
        jobFilterUtils.runOnStateElectionFilter(jobs);
        storageProvider.save(jobs);
        jobFilterUtils.runOnStateAppliedFilters(jobs);
    } catch (ConcurrentJobModificationException concurrentJobModificationException) {
        try {
            // Attempt automatic reconciliation of the conflicting job versions.
            backgroundJobServer.getConcurrentJobModificationResolver().resolve(concurrentJobModificationException);
        } catch (UnresolvableConcurrentJobModificationException unresolvableConcurrentJobModificationException) {
            throw new SevereJobRunrException("Could not resolve ConcurrentJobModificationException", unresolvableConcurrentJobModificationException);
        }
    }
}
/**
 * Verifies that repeatedly saving a job whose state has not changed never
 * triggers state-change or processing-related filters.
 */
@Test
void ifNoStateChangeHappensStateChangeFiltersAreNotInvoked() {
    Job aJobInProgress = aJobInProgress().build();

    // Save the unchanged job several times; filters must stay silent throughout.
    for (int i = 0; i <= 5; i++) {
        task.saveAndRunJobFilters(singletonList(aJobInProgress));
    }

    assertThat(logAllStateChangesFilter.getStateChanges(aJobInProgress)).isEmpty();
    assertThat(logAllStateChangesFilter.onProcessingIsCalled(aJobInProgress)).isFalse();
    assertThat(logAllStateChangesFilter.onProcessingSucceededIsCalled(aJobInProgress)).isFalse();
}
/**
 * Explodes one input row into zero or more output rows by applying every table
 * function and zipping their results positionally.
 *
 * <p>Each output row is the original row's values followed by one value from
 * each table function; functions that produced fewer values than the longest
 * one are padded with nulls. A null input row yields null.
 *
 * @param readOnlyKey record key (unused in the expansion)
 * @param value       input row to explode; may be null
 * @param ctx         processing context (unused)
 * @return the exploded rows, or null for a null input
 */
@Override
public Iterable<GenericRow> transform(
    final K readOnlyKey,
    final GenericRow value,
    final KsqlProcessingContext ctx
) {
    if (value == null) {
        return null;
    }

    // Evaluate every table function once, remembering the longest result.
    final List<Iterator<?>> iters = new ArrayList<>(tableFunctionAppliers.size());
    int maxLength = 0;
    for (final TableFunctionApplier applier : tableFunctionAppliers) {
        final List<?> exploded = applier.apply(value, processingLogger);
        iters.add(exploded.iterator());
        maxLength = Math.max(maxLength, exploded.size());
    }

    // Zip the per-function results: shorter ones are null-padded to maxLength.
    final List<GenericRow> rows = new ArrayList<>(maxLength);
    for (int i = 0; i < maxLength; i++) {
        final GenericRow newRow = new GenericRow(value.values().size() + iters.size());
        newRow.appendAll(value.values());

        for (final Iterator<?> iter : iters) {
            if (iter.hasNext()) {
                newRow.append(iter.next());
            } else {
                newRow.append(null);
            }
        }
        rows.add(newRow);
    }
    return rows;
}
/**
 * Verifies that a single table function returning three values explodes one
 * input row into three rows, each carrying the original values plus one
 * function result.
 */
@Test
public void shouldFlatMapOneFunction() {
    // Given:
    final TableFunctionApplier applier = createApplier(Arrays.asList(10, 10, 10));
    final KudtfFlatMapper<String> flatMapper = new KudtfFlatMapper<>(ImmutableList.of(applier), processingLogger);

    // When:
    final Iterable<GenericRow> iterable = flatMapper.transform(KEY, VALUE, ctx);

    // Then:
    final Iterator<GenericRow> iter = iterable.iterator();
    assertThat(iter.next().values(), is(Arrays.asList(1, 2, 3, 10)));
    assertThat(iter.next().values(), is(Arrays.asList(1, 2, 3, 10)));
    assertThat(iter.next().values(), is(Arrays.asList(1, 2, 3, 10)));
    assertThat(iter.hasNext(), is(false));
}
/**
 * Filters a tuple by checking whether its key exists in Redis under the
 * configured data type; matching tuples are emitted downstream, and the input
 * is always acked on success. Any error is reported and the tuple failed.
 *
 * <p>Per data type: STRING uses EXISTS on the key itself; SET/HASH/SORTED_SET
 * test membership under {@code additionalKey}; HYPER_LOG_LOG treats a positive
 * cardinality as a hit; GEO checks for at least one non-null coordinate.
 */
@Override
public void process(Tuple input) {
    String key = filterMapper.getKeyFromTuple(input);

    boolean found;
    JedisCommandsContainer jedisCommand = null;
    try {
        jedisCommand = getInstance();

        switch (dataType) {
            case STRING:
                found = jedisCommand.exists(key);
                break;

            case SET:
                found = jedisCommand.sismember(additionalKey, key);
                break;

            case HASH:
                found = jedisCommand.hexists(additionalKey, key);
                break;

            case SORTED_SET:
                found = jedisCommand.zrank(additionalKey, key) != null;
                break;

            case HYPER_LOG_LOG:
                found = jedisCommand.pfcount(key) > 0;
                break;

            case GEO:
                List<GeoCoordinate> geopos = jedisCommand.geopos(additionalKey, key);
                if (geopos == null || geopos.isEmpty()) {
                    found = false;
                } else {
                    // If any entry is NOT null, then we have a match.
                    found = geopos.stream()
                        .anyMatch(Objects::nonNull);
                }
                break;

            default:
                throw new IllegalArgumentException("Cannot process such data type: " + dataType);
        }

        // Only matching tuples pass through; everything gets acked.
        if (found) {
            collector.emit(input, input.getValues());
        }

        collector.ack(input);
    } catch (Exception e) {
        this.collector.reportError(e);
        this.collector.fail(input);
    }
}
/**
 * End-to-end smoke test for the STRING/EXISTS path: a key present in Redis
 * must cause the filter bolt to emit the tuple downstream.
 */
@Test
void smokeTest_exists_keyFound() {
    // Define input key
    final String inputKey = "ThisIsMyKey";

    // Ensure key does exist in redis
    jedisHelper.set(inputKey, "some-value");
    assertTrue(jedisHelper.exists(inputKey), "Sanity check key exists.");

    // Create an input tuple
    final Map<String, Object> values = new HashMap<>();
    values.put("key", inputKey);
    values.put("value", "ThisIsMyValue");
    final Tuple tuple = new StubTuple(values);

    final JedisPoolConfig config = configBuilder.build();
    final TestMapper mapper = new TestMapper(STRING);

    final RedisFilterBolt bolt = new RedisFilterBolt(config, mapper);
    bolt.prepare(new HashMap<>(), topologyContext, new OutputCollector(outputCollector));
    bolt.process(tuple);

    // Verify Tuple passed through the bolt
    verifyTuplePassed(tuple);
}
/**
 * Creates a new {@link Builder} pre-seeded with the mandatory metric registry.
 *
 * @param metricsRegistry registry the connection manager reports metrics to
 * @return a builder ready for further configuration
 */
public static Builder builder(MetricRegistry metricsRegistry) {
    final Builder builder = new Builder();
    return builder.metricsRegistry(metricsRegistry);
}
/**
 * Verifies builder configuration: the last name() wins, and the built
 * connection manager registers its gauges under that name.
 */
@Test
public void configurableViaBuilder() {
    final MetricRegistry registry = Mockito.mock(MetricRegistry.class);

    InstrumentedHttpClientConnectionManager.builder(registry)
        .name("some-name")
        .name("some-other-name")
        .build()
        .close();

    ArgumentCaptor<String> argumentCaptor = ArgumentCaptor.forClass(String.class);
    Mockito.verify(registry, Mockito.atLeast(1)).registerGauge(argumentCaptor.capture(), any());
    assertTrue(argumentCaptor.getValue().contains("some-other-name"));
}