focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override public CompletableFuture<Void> closeAsync() { ShutdownHookUtil.removeShutdownHook(shutDownHook, getClass().getSimpleName(), LOG); return shutDownAsync( ApplicationStatus.UNKNOWN, ShutdownBehaviour.PROCESS_FAILURE, "Cluster entrypoint has been closed externally.", false) .thenAccept(ignored -> {}); }
@Test public void testCloseAsyncShouldNotDeregisterApp() throws Exception { final CompletableFuture<Void> deregisterFuture = new CompletableFuture<>(); final TestingResourceManagerFactory testingResourceManagerFactory = new TestingResourceManagerFactory.Builder() .setInternalDeregisterApplicationConsumer( (ignored1, ignored2, ignore3) -> deregisterFuture.complete(null)) .build(); final TestingEntryPoint testingEntryPoint = new TestingEntryPoint.Builder() .setConfiguration(flinkConfig) .setResourceManagerFactory(testingResourceManagerFactory) .build(); final CompletableFuture<ApplicationStatus> appStatusFuture = startClusterEntrypoint(testingEntryPoint); testingEntryPoint.closeAsync(); assertThat( appStatusFuture.get(TIMEOUT_MS, TimeUnit.MILLISECONDS), is(ApplicationStatus.UNKNOWN)); assertThat(deregisterFuture.isDone(), is(false)); }
@Override public long getMin() { if (values.length == 0) { return 0; } return values[0]; }
@Test public void calculatesAMinOfZeroForAnEmptySnapshot() { final Snapshot emptySnapshot = new UniformSnapshot(new long[]{}); assertThat(emptySnapshot.getMin()) .isZero(); }
@Override public Object getValue() { return value; }
@Test public void testGetValue() { assertEquals(VALUE, record.getValue()); assertEquals(VALUE, recordSameAttributes.getValue()); assertNotEquals(VALUE, recordOtherKeyAndValue.getValue()); }
public Set<MapperConfig> load(InputStream inputStream) throws IOException { final PrometheusMappingConfig config = ymlMapper.readValue(inputStream, PrometheusMappingConfig.class); return config.metricMappingConfigs() .stream() .flatMap(this::mapMetric) .collect(Collectors.toSet()); }
@Test void inputMetricType() throws Exception { when(messageInputFactory.getAvailableInputs()).thenReturn( ImmutableMap.of("test.input", mock(InputDescription.class))); final Map<String, ImmutableList<Serializable>> config = Collections.singletonMap("metric_mappings", ImmutableList.of( ImmutableMap.of( "type", "input_metric", "metric_name", "test1", "input_metric_name", "foo.bar" ))); assertThat(configLoader.load(new ByteArrayInputStream(objectMapper.writeValueAsBytes(config)))) .containsExactlyInAnyOrder(new MapperConfig( "test.input.*.foo.bar", "gl_test1", ImmutableMap.of( "node", "5ca1ab1e-0000-4000-a000-000000000000", "input_id", "${0}", "input_type", "test.input" ))); }
public static String getTypeName(final int type) { switch (type) { case START_EVENT_V3: return "Start_v3"; case STOP_EVENT: return "Stop"; case QUERY_EVENT: return "Query"; case ROTATE_EVENT: return "Rotate"; case INTVAR_EVENT: return "Intvar"; case LOAD_EVENT: return "Load"; case NEW_LOAD_EVENT: return "New_load"; case SLAVE_EVENT: return "Slave"; case CREATE_FILE_EVENT: return "Create_file"; case APPEND_BLOCK_EVENT: return "Append_block"; case DELETE_FILE_EVENT: return "Delete_file"; case EXEC_LOAD_EVENT: return "Exec_load"; case RAND_EVENT: return "RAND"; case XID_EVENT: return "Xid"; case USER_VAR_EVENT: return "User var"; case FORMAT_DESCRIPTION_EVENT: return "Format_desc"; case TABLE_MAP_EVENT: return "Table_map"; case PRE_GA_WRITE_ROWS_EVENT: return "Write_rows_event_old"; case PRE_GA_UPDATE_ROWS_EVENT: return "Update_rows_event_old"; case PRE_GA_DELETE_ROWS_EVENT: return "Delete_rows_event_old"; case WRITE_ROWS_EVENT_V1: return "Write_rows_v1"; case UPDATE_ROWS_EVENT_V1: return "Update_rows_v1"; case DELETE_ROWS_EVENT_V1: return "Delete_rows_v1"; case BEGIN_LOAD_QUERY_EVENT: return "Begin_load_query"; case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query"; case INCIDENT_EVENT: return "Incident"; case HEARTBEAT_LOG_EVENT: case HEARTBEAT_LOG_EVENT_V2: return "Heartbeat"; case IGNORABLE_LOG_EVENT: return "Ignorable"; case ROWS_QUERY_LOG_EVENT: return "Rows_query"; case WRITE_ROWS_EVENT: return "Write_rows"; case UPDATE_ROWS_EVENT: return "Update_rows"; case DELETE_ROWS_EVENT: return "Delete_rows"; case GTID_LOG_EVENT: return "Gtid"; case ANONYMOUS_GTID_LOG_EVENT: return "Anonymous_Gtid"; case PREVIOUS_GTIDS_LOG_EVENT: return "Previous_gtids"; case PARTIAL_UPDATE_ROWS_EVENT: return "Update_rows_partial"; case TRANSACTION_CONTEXT_EVENT : return "Transaction_context"; case VIEW_CHANGE_EVENT : return "view_change"; case XA_PREPARE_LOG_EVENT : return "Xa_prepare"; case TRANSACTION_PAYLOAD_EVENT : return "transaction_payload"; default: return "Unknown type:" + 
type; } }
@Test public void getTypeNameInputPositiveOutputNotNull28() { // Arrange final int type = 35; // Act final String actual = LogEvent.getTypeName(type); // Assert result Assert.assertEquals("Previous_gtids", actual); }
public static ListenableFuture<EntityFieldsData> findAsync(TbContext ctx, EntityId originatorId) { switch (originatorId.getEntityType()) { // TODO: use EntityServiceRegistry case TENANT: return toEntityFieldsDataAsync(ctx.getTenantService().findTenantByIdAsync(ctx.getTenantId(), (TenantId) originatorId), EntityFieldsData::new, ctx); case CUSTOMER: return toEntityFieldsDataAsync(ctx.getCustomerService().findCustomerByIdAsync(ctx.getTenantId(), (CustomerId) originatorId), EntityFieldsData::new, ctx); case USER: return toEntityFieldsDataAsync(ctx.getUserService().findUserByIdAsync(ctx.getTenantId(), (UserId) originatorId), EntityFieldsData::new, ctx); case ASSET: return toEntityFieldsDataAsync(ctx.getAssetService().findAssetByIdAsync(ctx.getTenantId(), (AssetId) originatorId), EntityFieldsData::new, ctx); case DEVICE: return toEntityFieldsDataAsync(Futures.immediateFuture(ctx.getDeviceService().findDeviceById(ctx.getTenantId(), (DeviceId) originatorId)), EntityFieldsData::new, ctx); case ALARM: return toEntityFieldsDataAsync(ctx.getAlarmService().findAlarmByIdAsync(ctx.getTenantId(), (AlarmId) originatorId), EntityFieldsData::new, ctx); case RULE_CHAIN: return toEntityFieldsDataAsync(ctx.getRuleChainService().findRuleChainByIdAsync(ctx.getTenantId(), (RuleChainId) originatorId), EntityFieldsData::new, ctx); case ENTITY_VIEW: return toEntityFieldsDataAsync(ctx.getEntityViewService().findEntityViewByIdAsync(ctx.getTenantId(), (EntityViewId) originatorId), EntityFieldsData::new, ctx); case EDGE: return toEntityFieldsDataAsync(ctx.getEdgeService().findEdgeByIdAsync(ctx.getTenantId(), (EdgeId) originatorId), EntityFieldsData::new, ctx); default: return Futures.immediateFailedFuture(new TbNodeException("Unexpected originator EntityType: " + originatorId.getEntityType())); } }
@Test public void givenSupportedTypeButEntityDoesNotExist_whenFindAsync_thenException() { for (var entityType : SUPPORTED_ENTITY_TYPES) { var entityId = EntityIdFactory.getByTypeAndUuid(entityType, RANDOM_UUID); initMocks(entityType, true); when(ctxMock.getTenantId()).thenReturn(TENANT_ID); var expectedExceptionMsg = "java.util.NoSuchElementException: Entity not found!"; var exception = assertThrows(ExecutionException.class, () -> EntitiesFieldsAsyncLoader.findAsync(ctxMock, entityId).get()); assertInstanceOf(NoSuchElementException.class, exception.getCause()); assertThat(exception.getMessage()).isEqualTo(expectedExceptionMsg); } }
@Override public synchronized <PS extends Serializer<P>, P> KeyValueIterator<Bytes, byte[]> prefixScan(final P prefix, final PS prefixKeySerializer) { final Bytes from = Bytes.wrap(prefixKeySerializer.serialize(null, prefix)); final Bytes to = Bytes.increment(from); return new DelegatingPeekingKeyValueIterator<>( name, new InMemoryKeyValueIterator(map.subMap(from, true, to, false).keySet(), true) ); }
@Test public void shouldThrowNullPointerIfPrefixKeySerializerIsNull() { assertThrows(NullPointerException.class, () -> byteStore.prefixScan("bb", null)); }
@Override public Set<EntityExcerpt> listEntityExcerpts() { return collectorService.all().stream() .map(this::createExcerpt) .collect(Collectors.toSet()); }
@Test @MongoDBFixtures("SidecarCollectorFacadeTest.json") public void listEntityExcerpts() { final Set<EntityExcerpt> entityExcerpts = facade.listEntityExcerpts(); assertThat(entityExcerpts).containsOnly( EntityExcerpt.builder() .id(ModelId.of("5b4c920b4b900a0024af0001")) .type(ModelTypes.SIDECAR_COLLECTOR_V1) .title("filebeat") .build(), EntityExcerpt.builder() .id(ModelId.of("5b4c920b4b900a0024af0002")) .type(ModelTypes.SIDECAR_COLLECTOR_V1) .title("winlogbeat") .build(), EntityExcerpt.builder() .id(ModelId.of("5b4c920b4b900a0024af0003")) .type(ModelTypes.SIDECAR_COLLECTOR_V1) .title("nxlog") .build() ); }
public boolean isAllTablesInSameDataSource(final Collection<String> logicTableNames) { Collection<String> dataSourceNames = logicTableNames.stream().map(shardingTables::get) .filter(Objects::nonNull).flatMap(each -> each.getActualDataSourceNames().stream()).collect(Collectors.toSet()); return 1 == dataSourceNames.size(); }
@Test void assertIsAllTablesInSameDataSource() { ShardingRuleConfiguration ruleConfig = new ShardingRuleConfiguration(); ruleConfig.getTables().add(new ShardingTableRuleConfiguration("LOGIC_TABLE", "ds_${0}.table_${0..2}")); ShardingRule shardingRule = new ShardingRule(ruleConfig, Maps.of("resource0", new MockedDataSource()), mock(ComputeNodeInstanceContext.class)); assertTrue(shardingRule.isAllTablesInSameDataSource(Collections.singleton("logic_Table"))); }
@Override public int drainPermits() { return get(drainPermitsAsync()); }
@Test public void testDrainPermits() throws InterruptedException { RSemaphore s = redisson.getSemaphore("test"); assertThat(s.drainPermits()).isZero(); s.trySetPermits(10); s.acquire(3); assertThat(s.drainPermits()).isEqualTo(7); assertThat(s.availablePermits()).isEqualTo(0); }
public static void andAckSet(Position currentPosition, Position otherPosition) { if (currentPosition == null || otherPosition == null) { return; } AckSetState currentAckSetState = AckSetStateUtil.getAckSetState(currentPosition); AckSetState otherAckSetState = AckSetStateUtil.getAckSetState(otherPosition); currentAckSetState.setAckSet(andAckSet(currentAckSetState.getAckSet(), otherAckSetState.getAckSet())); }
@Test public void andAckSetTest() { Position positionOne = AckSetStateUtil.createPositionWithAckSet(1, 1, new long[0]); Position positionTwo = AckSetStateUtil.createPositionWithAckSet(1, 2, new long[0]); BitSet bitSetOne = new BitSet(); BitSet bitSetTwo = new BitSet(); bitSetOne.set(0); bitSetOne.set(2); bitSetOne.set(4); bitSetOne.set(6); bitSetOne.set(8); AckSetState positionOneAckSetState = AckSetStateUtil.getAckSetState(positionOne); positionOneAckSetState.setAckSet(bitSetOne.toLongArray()); AckSetState positionTwoAckSetState = AckSetStateUtil.getAckSetState(positionTwo); positionTwoAckSetState.setAckSet(bitSetTwo.toLongArray()); andAckSet(positionOne, positionTwo); BitSetRecyclable bitSetRecyclable = BitSetRecyclable.valueOf(positionOneAckSetState.getAckSet()); assertTrue(bitSetRecyclable.isEmpty()); bitSetTwo.set(2); bitSetTwo.set(4); positionOneAckSetState.setAckSet(bitSetOne.toLongArray()); positionTwoAckSetState.setAckSet(bitSetTwo.toLongArray()); andAckSet(positionOne, positionTwo); bitSetRecyclable = BitSetRecyclable.valueOf(positionOneAckSetState.getAckSet()); BitSetRecyclable bitSetRecyclableTwo = BitSetRecyclable.valueOf(bitSetTwo.toLongArray()); assertEquals(bitSetRecyclable, bitSetRecyclableTwo); }
public WorkflowInstanceRestartResponse toWorkflowRestartResponse() { return WorkflowInstanceRestartResponse.builder() .workflowId(this.workflowId) .workflowVersionId(this.workflowVersionId) .workflowInstanceId(this.workflowInstanceId) .workflowRunId(this.workflowRunId) .workflowUuid(this.workflowUuid) .status(this.status.runStatus) .timelineEvent(this.timelineEvent) .build(); }
@Test public void testToWorkflowRestartResponse() { RunResponse res = RunResponse.from(stepInstance, TimelineLogEvent.info("bar")); WorkflowInstanceRestartResponse response = res.toWorkflowRestartResponse(); Assert.assertEquals(InstanceRunStatus.CREATED, response.getStatus()); res = RunResponse.from(instance, "foo"); response = res.toWorkflowRestartResponse(); Assert.assertEquals(InstanceRunStatus.INTERNAL_ERROR, response.getStatus()); res = RunResponse.from(instance, 0); response = res.toWorkflowRestartResponse(); Assert.assertEquals(InstanceRunStatus.DUPLICATED, response.getStatus()); res = RunResponse.from(instance, -1); response = res.toWorkflowRestartResponse(); Assert.assertEquals(InstanceRunStatus.STOPPED, response.getStatus()); res = RunResponse.from(instance, 1); response = res.toWorkflowRestartResponse(); Assert.assertEquals(InstanceRunStatus.CREATED, response.getStatus()); }
@SuppressWarnings("unchecked") public final void isLessThan(@Nullable T other) { if (checkNotNull((Comparable<Object>) actual).compareTo(checkNotNull(other)) >= 0) { failWithActual("expected to be less than", other); } }
@Test public void isLessThan_failsEqual() { assertThat(4).isLessThan(5); expectFailureWhenTestingThat(4).isLessThan(4); assertFailureValue("expected to be less than", "4"); }
public void command(String primaryCommand, SecureConfig config, String... allArguments) { terminal.writeLine(""); final Optional<CommandLine> commandParseResult; try { commandParseResult = Command.parse(primaryCommand, allArguments); } catch (InvalidCommandException e) { terminal.writeLine(String.format("ERROR: %s", e.getMessage())); return; } if (commandParseResult.isEmpty()) { printHelp(); return; } final CommandLine commandLine = commandParseResult.get(); switch (commandLine.getCommand()) { case CREATE: { if (commandLine.hasOption(CommandOptions.HELP)){ terminal.writeLine("Creates a new keystore. For example: 'bin/logstash-keystore create'"); return; } if (secretStoreFactory.exists(config.clone())) { terminal.write("An Logstash keystore already exists. Overwrite ? [y/N] "); if (isYes(terminal.readLine())) { create(config); } } else { create(config); } break; } case LIST: { if (commandLine.hasOption(CommandOptions.HELP)){ terminal.writeLine("List all secret identifiers from the keystore. For example: " + "`bin/logstash-keystore list`. Note - only the identifiers will be listed, not the secrets."); return; } Collection<SecretIdentifier> ids = secretStoreFactory.load(config).list(); List<String> keys = ids.stream().filter(id -> !id.equals(LOGSTASH_MARKER)).map(id -> id.getKey()).collect(Collectors.toList()); Collections.sort(keys); keys.forEach(terminal::writeLine); break; } case ADD: { if (commandLine.hasOption(CommandOptions.HELP)){ terminal.writeLine("Add secrets to the keystore. For example: " + "`bin/logstash-keystore add my-secret`, at the prompt enter your secret. You will use the identifier ${my-secret} in your Logstash configuration."); return; } if (commandLine.getArguments().isEmpty()) { terminal.writeLine("ERROR: You must supply an identifier to add. (e.g. 
bin/logstash-keystore add my-secret)"); return; } if (secretStoreFactory.exists(config.clone())) { final SecretStore secretStore = secretStoreFactory.load(config); for (String argument : commandLine.getArguments()) { final SecretIdentifier id = new SecretIdentifier(argument); final byte[] existingValue = secretStore.retrieveSecret(id); if (existingValue != null) { SecretStoreUtil.clearBytes(existingValue); terminal.write(String.format("%s already exists. Overwrite ? [y/N] ", argument)); if (!isYes(terminal.readLine())) { continue; } } final String enterValueMessage = String.format("Enter value for %s: ", argument); char[] secret = null; while(secret == null) { terminal.write(enterValueMessage); final char[] readSecret = terminal.readSecret(); if (readSecret == null || readSecret.length == 0) { terminal.writeLine("ERROR: Value cannot be empty"); continue; } if (!ASCII_ENCODER.canEncode(CharBuffer.wrap(readSecret))) { terminal.writeLine("ERROR: Value must contain only ASCII characters"); continue; } secret = readSecret; } add(secretStore, id, SecretStoreUtil.asciiCharToBytes(secret)); } } else { terminal.writeLine("ERROR: Logstash keystore not found. Use 'create' command to create one."); } break; } case REMOVE: { if (commandLine.hasOption(CommandOptions.HELP)){ terminal.writeLine("Remove secrets from the keystore. For example: " + "`bin/logstash-keystore remove my-secret`"); return; } if (commandLine.getArguments().isEmpty()) { terminal.writeLine("ERROR: You must supply a value to remove. (e.g. 
bin/logstash-keystore remove my-secret)"); return; } final SecretStore secretStore = secretStoreFactory.load(config); for (String argument : commandLine.getArguments()) { SecretIdentifier id = new SecretIdentifier(argument); if (secretStore.containsSecret(id)) { secretStore.purgeSecret(id); terminal.writeLine(String.format("Removed '%s' from the Logstash keystore.", id.getKey())); } else { terminal.writeLine(String.format("ERROR: '%s' does not exist in the Logstash keystore.", argument)); } } break; } } }
@Test public void testAddWithNoIdentifiers() { final String expectedMessage = "ERROR: You must supply an identifier to add"; createKeyStore(); String[] nullArguments = null; cli.command("add", newStoreConfig.clone(), nullArguments); assertThat(terminal.out).containsIgnoringCase(expectedMessage); terminal.reset(); cli.command("add", newStoreConfig.clone()); assertThat(terminal.out).containsIgnoringCase(expectedMessage); }
@ConstantFunction(name = "previous_day", argTypes = {DATETIME, VARCHAR}, returnType = DATE, isMonotonic = true) public static ConstantOperator previousDay(ConstantOperator date, ConstantOperator dow) { int dateDowValue = date.getDate().getDayOfWeek().getValue(); switch (dow.getVarchar()) { case "Sunday": case "Sun": case "Su": return ConstantOperator.createDateOrNull(date.getDate().minusDays((dateDowValue - 1L) % 7 + 1L)); case "Monday": case "Mon": case "Mo": return ConstantOperator.createDateOrNull(date.getDate().minusDays((dateDowValue + 5L) % 7 + 1L)); case "Tuesday": case "Tue": case "Tu": return ConstantOperator.createDateOrNull(date.getDate().minusDays((dateDowValue + 4L) % 7 + 1L)); case "Wednesday": case "Wed": case "We": return ConstantOperator.createDateOrNull(date.getDate().minusDays((dateDowValue + 3L) % 7 + 1L)); case "Thursday": case "Thu": case "Th": return ConstantOperator.createDateOrNull(date.getDate().minusDays((dateDowValue + 2L) % 7 + 1L)); case "Friday": case "Fri": case "Fr": return ConstantOperator.createDateOrNull(date.getDate().minusDays((dateDowValue + 1L) % 7 + 1L)); case "Saturday": case "Sat": case "Sa": return ConstantOperator.createDateOrNull(date.getDate().minusDays(dateDowValue % 7 + 1L)); default: throw new IllegalArgumentException(dow + " not supported in previous_day dow_string"); } }
@Test public void previousDay() { assertEquals("2015-03-22T09:23:55", ScalarOperatorFunctions.previousDay(O_DT_20150323_092355, ConstantOperator.createVarchar("Sunday")).getDate().toString()); Assert.assertThrows("undefine_dow not supported in previous_day dow_string", IllegalArgumentException.class, () -> ScalarOperatorFunctions.previousDay(O_DT_20150323_092355, ConstantOperator.createVarchar("undefine_dow")) .getVarchar()); }
@Override public long getIntentCount() { return currentMap.size(); }
@Test public void testGetIntentCount() { assertThat(intentStore.getIntentCount(), is(0L)); generateIntentList(5).forEach(intentStore::write); assertThat(intentStore.getIntentCount(), is(5L)); }
public static Object eval(String expression, Map<String, Object> context) { return eval(expression, context, ListUtil.empty()); }
@Test public void jexlTest(){ final ExpressionEngine engine = new JexlEngine(); final Dict dict = Dict.create() .set("a", 100.3) .set("b", 45) .set("c", -199.100); final Object eval = engine.eval("a-(b-c)", dict, null); assertEquals(-143.8, (double)eval, 0); }
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) { checkArgument( OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp); return new AutoValue_UBinary(binaryOp, lhs, rhs); }
@Test public void divide() { assertUnifiesAndInlines( "4 / 17", UBinary.create(Kind.DIVIDE, ULiteral.intLit(4), ULiteral.intLit(17))); }
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) { return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature); }
@Test public void testStateParameterAlwaysFetched() { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("ReadableStates"); DoFnSignature sig = DoFnSignatures.getSignature( new DoFn<KV<String, Integer>, Long>() { @StateId("my-id") private final StateSpec<MapState<Integer, Integer>> myfield = StateSpecs.map(VarIntCoder.of(), VarIntCoder.of()); @ProcessElement public void myProcessElement( ProcessContext context, @AlwaysFetched @StateId("my-id") MapState<Integer, Integer> one) {} }.getClass()); StateParameter stateParameter = (StateParameter) sig.processElement().extraParameters().get(1); assertTrue(stateParameter.alwaysFetched()); }
@Override public void removeLoginStatus(Long userId) throws Exception { redisTemplate.opsForSet().remove(LOGIN_STATUS_PREFIX,userId.toString()) ; }
@Test public void removeLoginStatus() throws Exception { userInfoCacheService.removeLoginStatus(2000L); }
@Override public Mono<AccessToken> createAccessToken(String clientId, Authentication authentication, boolean singleton) { return singleton ? doCreateSingletonAccessToken(clientId, authentication) : doCreateAccessToken(clientId, authentication, false).map(token -> token.toAccessToken(tokenExpireIn)); }
@Test public void testCreateSingletonAccessToken() { RedisAccessTokenManager tokenManager = new RedisAccessTokenManager(RedisHelper.factory); SimpleAuthentication authentication = new SimpleAuthentication(); authentication.setUser(SimpleUser.builder() .id("test") .build()); Flux .concat(tokenManager .createAccessToken("test", authentication, true), tokenManager .createAccessToken("test", authentication, true)) .doOnNext(System.out::println) .as(StepVerifier::create) .expectNextCount(2) .verifyComplete(); }
public static void main(String[] args) throws Exception { TikaCLI cli = new TikaCLI(); if (cli.testForHelp(args)) { cli.usage(); return; } else if (cli.testForBatch(args)) { String[] batchArgs = BatchCommandLineBuilder.build(args); BatchProcessDriverCLI batchDriver = new BatchProcessDriverCLI(batchArgs); batchDriver.execute(); return; } else if (cli.testForAsync(args)) { async(args); return; } if (args.length > 0) { for (String arg : args) { cli.process(arg); } if (cli.pipeMode) { cli.process("-"); } } else { // Started with no arguments. Wait for up to 0.1s to see if // we have something waiting in standard input and use the // pipe mode if we have. If no input is seen, start the GUI. if (System.in.available() == 0) { Thread.sleep(100); } if (System.in.available() > 0) { cli.process("-"); } else { cli.process("--gui"); } } }
@Test public void testDefaultConfigException() throws Exception { //default xml parser will throw TikaException //this and TestConfig() are broken into separate tests so that //setUp and tearDown() are called each time String[] params = {resourcePrefix + "bad_xml.xml"}; boolean tikaEx = false; try { TikaCLI.main(params); } catch (TikaException e) { tikaEx = true; } assertTrue(tikaEx); }
@Override protected boolean isStepCompleted(@NonNull Context context) { return isContactsPermComplete(context) && isNotificationPermComplete(context); }
@Test public void testKeyboardEnabledAndDefaultButNoPermission() { final String flatASKComponent = new ComponentName(BuildConfig.APPLICATION_ID, SoftKeyboard.class.getName()) .flattenToString(); Settings.Secure.putString( getApplicationContext().getContentResolver(), Settings.Secure.ENABLED_INPUT_METHODS, flatASKComponent); Settings.Secure.putString( getApplicationContext().getContentResolver(), Settings.Secure.DEFAULT_INPUT_METHOD, flatASKComponent); WizardPermissionsFragment fragment = startFragment(); Assert.assertFalse(fragment.isStepCompleted(getApplicationContext())); var contacts = fragment.getView().findViewById(R.id.contacts_permission_group); Assert.assertEquals(View.VISIBLE, contacts.getVisibility()); // no need for this in M var notifications = fragment.getView().findViewById(R.id.notification_permission_group); Assert.assertEquals(View.GONE, notifications.getVisibility()); // can handle wiki? fragment.getView().findViewById(R.id.open_permissions_wiki_action).performClick(); Intent wikiIntent = Shadows.shadowOf((Application) ApplicationProvider.getApplicationContext()) .getNextStartedActivity(); Assert.assertEquals(Intent.ACTION_VIEW, wikiIntent.getAction()); Assert.assertEquals( "https://github.com/AnySoftKeyboard/AnySoftKeyboard/wiki/Why-Does-AnySoftKeyboard-Requires-Extra-Permissions", wikiIntent.getData().toString()); // can disable Contacts fragment.getView().findViewById(R.id.disable_contacts_dictionary).performClick(); Assert.assertFalse( SharedPrefsHelper.getPrefValue(R.string.settings_key_use_contacts_dictionary, true)); // disabling contacts Assert.assertTrue(fragment.isStepCompleted(getApplicationContext())); }
@Override public boolean useIPAddrForServer() { return clientConfig.getPropertyAsBoolean(IClientConfigKey.Keys.UseIPAddrForServer, true); }
@Test void testUseIPAddrForServer() { assertTrue(connectionPoolConfig.useIPAddrForServer()); }
public static <N, E> Set<N> reachableNodes( Network<N, E> network, Set<N> startNodes, Set<N> endNodes) { Set<N> visitedNodes = new HashSet<>(); Queue<N> queuedNodes = new ArrayDeque<>(); queuedNodes.addAll(startNodes); // Perform a breadth-first traversal rooted at the input node. while (!queuedNodes.isEmpty()) { N currentNode = queuedNodes.remove(); // If we have already visited this node or it is a terminal node than do not add any // successors. if (!visitedNodes.add(currentNode) || endNodes.contains(currentNode)) { continue; } queuedNodes.addAll(network.successors(currentNode)); } return visitedNodes; }
@Test public void testReachableNodesWithPathAroundBoundaryNode() { // Since there is a path around J, we will include E, G, and H assertEquals( ImmutableSet.of("I", "J", "E", "G", "H", "K", "L"), Networks.reachableNodes(createNetwork(), ImmutableSet.of("I"), ImmutableSet.of("J"))); }
@Override protected double maintain() { if (!nodeRepository().nodes().isWorking()) return 0.0; if (!nodeRepository().zone().environment().isProduction()) return 1.0; NodeList allNodes = nodeRepository().nodes().list(); // Lockless as strong consistency is not needed if (!zoneIsStable(allNodes)) return 1.0; Move bestMove = findBestMove(allNodes); if (!bestMove.isEmpty()) { LOG.info("Trying " + bestMove + " (" + bestMove.fromHost().switchHostname().orElse("<none>") + " -> " + bestMove.toHost().switchHostname().orElse("<none>") + ")"); } bestMove.execute(false, Agent.SwitchRebalancer, deployer, metric, nodeRepository()); return 1.0; }
@Test public void rebalance_does_not_move_node_already_on_exclusive_switch() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build(); ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("c1")).vespaVersion("1").build(); Capacity capacity = Capacity.from(new ClusterResources(4, 1, new NodeResources(4, 8, 50, 1))); MockDeployer deployer = deployer(tester, capacity, spec); SwitchRebalancer rebalancer = new SwitchRebalancer(tester.nodeRepository(), Duration.ofDays(1), new TestMetric(), deployer); // Provision initial hosts on two switches NodeResources hostResources = new NodeResources(8, 16, 500, 10); String switch0 = "switch0"; String switch1 = "switch1"; provisionHost(switch0, hostResources, tester); provisionHosts(3, switch1, hostResources, tester); // Deploy application deployer.deployFromLocalActive(app).get().activate(); tester.assertSwitches(Set.of(switch0, switch1), app, spec.id()); List<Node> nodesOnExclusiveSwitch = tester.activeNodesOn(switch0, app, spec.id()); assertEquals(1, nodesOnExclusiveSwitch.size()); assertEquals(3, tester.activeNodesOn(switch1, app, spec.id()).size()); // Another host becomes available on a new host String switch2 = "switch2"; provisionHost(switch2, hostResources, tester); // Rebalance tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment); rebalancer.maintain(); NodeList activeNodes = nodesIn(spec.id(), tester).state(Node.State.active); NodeList retired = activeNodes.retired(); assertEquals("Node is retired", 1, retired.size()); assertFalse("Retired node was not on exclusive switch", nodesOnExclusiveSwitch.contains(retired.first().get())); tester.assertSwitches(Set.of(switch0, switch1, switch2), app, spec.id()); // Retired node becomes inactive and makes zone stable deactivate(tester, retired); // Next iteration does nothing tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment); 
assertNoMoves(rebalancer, tester); }
@Override public Optional<Listener> acquire(ContextT context) { final Partition partition = resolvePartition(context); try { lock.lock(); if (shouldBypass(context)){ return createBypassListener(); } if (getInflight() >= getLimit() && partition.isLimitExceeded()) { lock.unlock(); if (partition.backoffMillis > 0 && delayedThreads.get() < maxDelayedThreads) { try { delayedThreads.incrementAndGet(); TimeUnit.MILLISECONDS.sleep(partition.backoffMillis); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } finally { delayedThreads.decrementAndGet(); } } return createRejectedListener(); } partition.acquire(); final Listener listener = createListener(); return Optional.of(new Listener() { @Override public void onSuccess() { listener.onSuccess(); releasePartition(partition); } @Override public void onIgnore() { listener.onIgnore(); releasePartition(partition); } @Override public void onDropped() { listener.onDropped(); releasePartition(partition); } }); } finally { if (lock.isHeldByCurrentThread()) lock.unlock(); } }
@Test public void testBypassSimpleLimiter() { SimpleLimiter<String> limiter = (SimpleLimiter<String>) TestPartitionedLimiter.newBuilder() .limit(FixedLimit.of(10)) .bypassLimitResolverInternal(new ShouldBypassPredicate()) .build(); int inflightCount = 0; for (int i = 0; i < 5; i++) { Assert.assertTrue(limiter.acquire("request").isPresent()); Assert.assertEquals(i+1, limiter.getInflight()); inflightCount++; } for (int i = 0; i < 15; i++) { Assert.assertTrue(limiter.acquire("admin").isPresent()); Assert.assertEquals(inflightCount, limiter.getInflight()); } for (int i = 0; i < 5; i++) { Assert.assertTrue(limiter.acquire("request").isPresent()); Assert.assertEquals(inflightCount+i+1, limiter.getInflight()); } // Calls with passing bypass condition will return a token // whereas remaining calls will be throttled since inflight count is greater than the limit for (int i = 0; i < 10; i++) { Assert.assertFalse(limiter.acquire("request").isPresent()); Assert.assertTrue(limiter.acquire("admin").isPresent()); } }
static <E extends Enum<E>> int encodeReplicationSessionStateChange( final UnsafeBuffer encodingBuffer, final int offset, final int captureLength, final int length, final E from, final E to, final long replicationId, final long srcRecordingId, final long dstRecordingId, final long position, final String reason) { int encodedLength = encodeLogHeader(encodingBuffer, offset, captureLength, length); encodingBuffer.putLong(offset + encodedLength, replicationId, LITTLE_ENDIAN); encodedLength += SIZE_OF_LONG; encodingBuffer.putLong(offset + encodedLength, srcRecordingId, LITTLE_ENDIAN); encodedLength += SIZE_OF_LONG; encodingBuffer.putLong(offset + encodedLength, dstRecordingId, LITTLE_ENDIAN); encodedLength += SIZE_OF_LONG; encodingBuffer.putLong(offset + encodedLength, position, LITTLE_ENDIAN); encodedLength += SIZE_OF_LONG; encodedLength += encodeStateChange(encodingBuffer, offset + encodedLength, from, to); encodedLength += encodeTrailingString(encodingBuffer, offset + encodedLength, captureLength + LOG_HEADER_LENGTH - encodedLength, reason); return encodedLength; }
// Walks the encoded wire layout field by field: log header, four longs, the
// state-change string, then the reason string.
@Test
void testEncodeReplicationSessionStateChange() {
    int offset = 24;
    final int length = replicationSessionStateChangeLength(State.ALPHA, State.BETA, "reason");
    final int captureLength = captureLength(length);

    encodeReplicationSessionStateChange(
            buffer, offset, captureLength, length, State.ALPHA, State.BETA, 1, 2, 3, 4, "reason");

    assertEquals(captureLength, buffer.getInt(offset, LITTLE_ENDIAN));
    assertEquals(length, buffer.getInt(offset + SIZE_OF_INT, LITTLE_ENDIAN));
    // Non-zero header long (presumably the timestamp — confirm against log header layout).
    assertNotEquals(0, buffer.getLong(offset + 2 * SIZE_OF_INT, LITTLE_ENDIAN));
    offset += LOG_HEADER_LENGTH;
    assertEquals(1, buffer.getLong(offset, LITTLE_ENDIAN));
    offset += SIZE_OF_LONG;
    assertEquals(2, buffer.getLong(offset, LITTLE_ENDIAN));
    offset += SIZE_OF_LONG;
    assertEquals(3, buffer.getLong(offset, LITTLE_ENDIAN));
    offset += SIZE_OF_LONG;
    assertEquals(4, buffer.getLong(offset, LITTLE_ENDIAN));
    offset += SIZE_OF_LONG;
    assertEquals("ALPHA -> BETA", buffer.getStringAscii(offset));
    offset += SIZE_OF_INT + "ALPHA -> BETA".length();
    assertEquals("reason", buffer.getStringAscii(offset));
}
/**
 * Creates the directory at {@code path}, including any missing parent directories.
 *
 * @param path the directory path to create
 * @throws IOException if the directory cannot be created
 */
public static void forceMkdir(String path) throws IOException {
    final File directory = new File(path);
    FileUtils.forceMkdir(directory);
}
// Creates a two-level random directory under the Nacos tmp dir and verifies it exists.
@Test
void testForceMkdirWithPath() throws IOException {
    Path path = Paths.get(EnvUtil.getNacosTmpDir(), UUID.randomUUID().toString(), UUID.randomUUID().toString());
    DiskUtils.forceMkdir(path.toString());
    File file = path.toFile();
    assertTrue(file.exists());
    file.deleteOnExit();
}
/**
 * Handles a manually picked suggestion. While the composed word is in tags-search
 * state, index 0 is replaced by the raw typed word (the displayed suggestion is
 * decorated with a magnifying glass and is not valid output), and any other index
 * is treated as a real emoji and stored in the quick-key history. Delegates to the
 * default behaviour afterwards.
 */
@Override
public void pickSuggestionManually(
        int index, CharSequence suggestion, boolean withAutoSpaceEnabled) {
    if (getCurrentComposedWord().isAtTagsSearchState()) {
        if (index == 0) {
            // this is a special case for tags-searcher
            // since we append a magnifying glass to the suggestions, the "suggestion"
            // value is not a valid output suggestion
            suggestion = getCurrentComposedWord().getTypedWord().toString();
        } else {
            // regular emoji. Storing in history.
            getQuickKeyHistoryRecords().store(suggestion.toString(), suggestion.toString());
        }
    }
    super.pickSuggestionManually(index, suggestion, withAutoSpaceEnabled);
}
// Repeating a tags search three times must keep yielding the full suggestion list,
// including after a pick that stores the chosen emoji into history.
@Test
public void testTagsSearchThrice() throws Exception {
    verifyNoSuggestionsInteractions();
    mAnySoftKeyboardUnderTest.simulateTextTyping(":face");
    List suggestions = verifyAndCaptureSuggestion(true);
    Assert.assertNotNull(suggestions);
    Assert.assertEquals(131, suggestions.size());

    mAnySoftKeyboardUnderTest.simulateKeyPress(' ');
    mAnySoftKeyboardUnderTest.simulateTextTyping(":face");
    suggestions = verifyAndCaptureSuggestion(true);
    Assert.assertNotNull(suggestions);
    Assert.assertEquals(131, suggestions.size());

    mAnySoftKeyboardUnderTest.pickSuggestionManually(1, "\uD83D\uDE00");
    mAnySoftKeyboardUnderTest.simulateTextTyping(":face");
    suggestions = verifyAndCaptureSuggestion(true);
    Assert.assertNotNull(suggestions);
    Assert.assertEquals(131, suggestions.size());
}
/**
 * Removes the entry for {@code recordingId} and compacts the index by shifting all
 * later [recordingId, descriptorOffset] pairs left by one pair.
 *
 * @param recordingId id to remove; must be positive
 * @return the recording-descriptor offset of the removed entry, or NULL_VALUE if absent
 */
long remove(final long recordingId) {
    ensurePositive(recordingId, "recordingId");
    final long[] index = this.index;
    final int lastPosition = lastPosition();
    final int position = find(index, recordingId, lastPosition);
    if (position < 0) {
        // Not present; nothing removed.
        return NULL_VALUE;
    }
    final long recordingDescriptorOffset = index[position + 1];
    count--;
    // Shift data to the left (entries are two-slot pairs, hence the stride of 2).
    for (int i = position; i < lastPosition; i += 2) {
        index[i] = index[i + 2];
        index[i + 1] = index[i + 3];
    }
    // Reset last copied element
    index[lastPosition] = 0;
    index[lastPosition + 1] = 0;
    return recordingDescriptorOffset;
}
// remove() must reject negative recording ids.
@Test
void removeThrowsIllegalArgumentExceptionIfNegativeRecordingIdIsProvided() {
    assertThrows(IllegalArgumentException.class, () -> catalogIndex.remove(-1));
}
/**
 * Delegates directly to the base-class merge-group builder; presumably exists so the
 * protected method is invocable from tests in this package — confirm intent.
 */
@Override
protected Triple<List<HoodieClusteringGroup>, Integer, List<FileSlice>> buildMergeClusteringGroup(
        ConsistentBucketIdentifier identifier, List<FileSlice> fileSlices, int mergeSlot) {
    return super.buildMergeClusteringGroup(identifier, fileSlices, mergeSlot);
}
// End-to-end check of buildMergeClusteringGroup: merge-candidate selection by file
// size, handling of zero-size slices, and merge-slot accounting, on an 8-node and
// a 4-node consistent-hashing ring.
@Test
public void testBuildMergeClusteringGroup() throws Exception {
    setup();
    int maxFileSize = 5120;
    Properties props = new Properties();
    props.setProperty(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid");
    HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
        .withIndexConfig(HoodieIndexConfig.newBuilder().fromProperties(props).withIndexType(HoodieIndex.IndexType.BUCKET)
            .withBucketIndexEngineType(HoodieIndex.BucketIndexEngineType.CONSISTENT_HASHING)
            .withBucketMinNum(4)
            .withBucketNum("4").build())
        .withStorageConfig(HoodieStorageConfig.newBuilder()
            .parquetMaxFileSize(maxFileSize).build())
        .build();
    HoodieTable hoodieTable = HoodieSparkTable.create(config, context, metaClient);
    SparkConsistentBucketClusteringPlanStrategy planStrategy =
        new SparkConsistentBucketClusteringPlanStrategy(hoodieTable, context, config);

    HoodieConsistentHashingMetadata metadata = new HoodieConsistentHashingMetadata("partition", 8);
    ConsistentBucketIdentifier identifier = new ConsistentBucketIdentifier(metadata);
    int mergeSize = (int) (maxFileSize * BUCKET_MERGE_THRESHOLD.defaultValue());
    // Mix of empty, full, and sub-threshold slices across the 8 nodes.
    int[] fsSize = {0, maxFileSize, mergeSize / 2, mergeSize / 2 + 10, mergeSize / 2, maxFileSize, mergeSize / 4, mergeSize / 4};
    List<FileSlice> fileSlices = IntStream.range(0, metadata.getNodes().size()).mapToObj(
        i -> createFileSliceWithSize(metadata.getNodes().get(i).getFileIdPrefix(), fsSize[i] / 2, fsSize[i] / 2)
    ).collect(Collectors.toList());

    // 1. Test merge candidate selection based on file size
    // 2. Test empty file size
    // 3. Test merge slot
    Triple res = planStrategy.buildMergeClusteringGroup(identifier, fileSlices, 4);
    Assertions.assertEquals(3, res.getMiddle());
    List<HoodieClusteringGroup> groups = (List<HoodieClusteringGroup>) res.getLeft();
    Assertions.assertEquals(2, groups.size());

    // Check group 0
    Assertions.assertEquals(fileSlices.get(0).getFileId(), groups.get(0).getSlices().get(2).getFileId());
    Assertions.assertEquals(fileSlices.get(7).getFileId(), groups.get(0).getSlices().get(1).getFileId());
    Assertions.assertEquals(fileSlices.get(6).getFileId(), groups.get(0).getSlices().get(0).getFileId());
    Assertions.assertEquals(3, groups.get(0).getSlices().size());
    List<ConsistentHashingNode> nodes = ConsistentHashingNode.fromJsonString(groups.get(0).getExtraMetadata().get(BaseConsistentHashingBucketClusteringPlanStrategy.METADATA_CHILD_NODE_KEY));
    Assertions.assertEquals(3, nodes.size());
    Assertions.assertEquals(ConsistentHashingNode.NodeTag.DELETE, nodes.get(0).getTag());
    Assertions.assertEquals(ConsistentHashingNode.NodeTag.DELETE, nodes.get(1).getTag());
    Assertions.assertEquals(ConsistentHashingNode.NodeTag.REPLACE, nodes.get(2).getTag());
    Assertions.assertEquals(metadata.getNodes().get(0).getValue(), nodes.get(2).getValue());

    // Check group 1
    Assertions.assertEquals(fileSlices.get(2).getFileId(), groups.get(1).getSlices().get(0).getFileId());
    Assertions.assertEquals(fileSlices.get(3).getFileId(), groups.get(1).getSlices().get(1).getFileId());
    Assertions.assertEquals(2, groups.get(1).getSlices().size());
    nodes = ConsistentHashingNode.fromJsonString(groups.get(1).getExtraMetadata().get(BaseConsistentHashingBucketClusteringPlanStrategy.METADATA_CHILD_NODE_KEY));
    Assertions.assertEquals(2, nodes.size());
    Assertions.assertEquals(ConsistentHashingNode.NodeTag.DELETE, nodes.get(0).getTag());
    Assertions.assertEquals(ConsistentHashingNode.NodeTag.REPLACE, nodes.get(1).getTag());
    Assertions.assertEquals(metadata.getNodes().get(3).getValue(), nodes.get(1).getValue());

    // Smaller ring: only slices below the merge threshold are offered for merging.
    HoodieConsistentHashingMetadata metadata1 = new HoodieConsistentHashingMetadata("partition", 4);
    ConsistentBucketIdentifier identifier1 = new ConsistentBucketIdentifier(metadata1);
    int[] fsSize1 = {mergeSize / 4, mergeSize / 4, maxFileSize, mergeSize / 4};
    List<FileSlice> fileSlices1 = IntStream.range(0, metadata1.getNodes().size()).mapToObj(
        i -> createFileSliceWithSize(metadata1.getNodes().get(i).getFileIdPrefix(), fsSize1[i] / 2, fsSize1[i] / 2)
    ).collect(Collectors.toList());
    Triple<List<HoodieClusteringGroup>, Integer, List<FileSlice>> res1 =
        planStrategy.buildMergeClusteringGroup(identifier1,
            fileSlices1.stream().filter(fs -> fs.getTotalFileSize() < mergeSize).collect(Collectors.toList()), 4);
    Assertions.assertEquals(1, res1.getLeft().size(), "should have 1 clustering group");
    Assertions.assertEquals(3, res1.getLeft().get(0).getSlices().size(), "should have 3 input files");
}
/**
 * Prometheus HTTP service-discovery endpoint: collects every instance of every
 * service across all namespaces and returns them serialized as a JSON array.
 */
@GetMapping(value = ApiConstants.PROMETHEUS_CONTROLLER_PATH, produces = "application/json; charset=UTF-8")
public ResponseEntity<String> metric() throws NacosException {
    ArrayNode arrayNode = JacksonUtils.createEmptyArrayNode();
    // Set de-duplicates instances that appear under multiple services.
    Set<Instance> targetSet = new HashSet<>();
    Set<String> allNamespaces = serviceManager.getAllNamespaces();
    for (String namespace : allNamespaces) {
        Set<Service> singletons = serviceManager.getSingletons(namespace);
        for (Service service : singletons) {
            List<? extends Instance> instances = instanceServiceV2.listAllInstances(namespace, service.getGroupedServiceName());
            targetSet.addAll(instances);
        }
    }
    PrometheusUtils.assembleArrayNodes(targetSet, arrayNode);
    return ResponseEntity.ok().body(arrayNode.toString());
}
// The Prometheus endpoint must return HTTP 200 and one JSON element per instance.
@Test
public void testMetric() throws Exception {
    when(instanceServiceV2.listAllInstances(nameSpace, NamingUtils.getGroupedName(name, group))).thenReturn(testInstanceList);
    MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(ApiConstants.PROMETHEUS_CONTROLLER_PATH);
    MockHttpServletResponse response = mockMvc.perform(builder).andReturn().getResponse();
    assertEquals(200, response.getStatus());
    assertEquals(testInstanceList.size(), JacksonUtils.toObj(response.getContentAsString()).size());
}
// Advances the game by moving the bullet; elapsedTime is in milliseconds and the
// factor 0.5f/1000 converts it to the per-update displacement.
// NOTE(review): unboxing a null elapsedTime would NPE — confirm callers never pass null.
protected void update(Long elapsedTime) {
    controller.moveBullet(0.5f * elapsedTime / 1000);
}
// 20 ms at factor 0.5f/1000 must move the bullet to exactly 0.01.
@Test
void testUpdate() {
    gameLoop.update(20L);
    Assertions.assertEquals(0.01f, gameLoop.controller.getBulletPosition(), 0);
}
/**
 * Locates the per-user Application Support directory via Foundation's search-path
 * API and appends the configured application name. Falls back to the literal
 * "~/Library/Application Support" path when the search returns no directories.
 */
@Override
public Local find() {
    final NSArray directories = FoundationKitFunctions.library.NSSearchPathForDirectoriesInDomains(
        FoundationKitFunctions.NSSearchPathDirectory.NSApplicationSupportDirectory,
        FoundationKitFunctions.NSSearchPathDomainMask.NSUserDomainMask, true);
    final String application = preferences.getProperty("application.name");
    if(directories.count().intValue() == 0) {
        log.error("Failed searching for application support directory");
        return new FinderLocal("~/Library/Application Support", application);
    }
    else {
        // First hit in the user domain is the canonical location.
        final String directory = directories.objectAtIndex(new NSUInteger(0)).toString();
        if(log.isInfoEnabled()) {
            log.info(String.format("Found application support directory in %s", directory));
        }
        final Local folder = new FinderLocal(directory, application);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Use folder %s for application support directory", folder));
        }
        return folder;
    }
}
// The finder must resolve the user's Application Support folder for the application.
@Test
public void testFind() {
    assertNotNull(new ApplicationSupportDirectoryFinder().find());
    assertEquals("~/Library/Application Support/Cyberduck",
        new ApplicationSupportDirectoryFinder().find().getAbbreviatedPath());
}
/** Indexes all permission authorizations for the given index types at startup. */
@Override
public void indexOnStartup(Set<IndexType> uninitializedIndexTypes) {
    // TODO do not load everything in memory. Db rows should be scrolled.
    List<IndexPermissions> authorizations = getAllAuthorizations();
    Stream<AuthorizationScope> scopes = getScopes(uninitializedIndexTypes);
    index(authorizations, scopes, Size.LARGE);
}
// Startup indexing must not grant anonymous, user, or group access to a private project.
@Test
public void indexOnStartup_does_not_grant_access_to_anybody_on_private_project() {
    ProjectDto project = createAndIndexPrivateProject();
    UserDto user = db.users().insertUser();
    GroupDto group = db.users().insertGroup();

    indexOnStartup();

    verifyAnyoneNotAuthorized(project);
    verifyNotAuthorized(project, user);
    verifyNotAuthorized(project, user, group);
}
/** Returns the fixed creation timestamp of this migration. */
public ZonedDateTime createdAt() {
    final String creationTimestamp = "2018-07-18T15:58:00Z";
    return ZonedDateTime.parse(creationTimestamp);
}
// createdAt() must report the fixed migration creation timestamp.
@Test
public void createdAt() {
    assertThat(migration.createdAt()).isEqualTo(ZonedDateTime.parse("2018-07-18T15:58:00Z"));
}
/**
 * Validates and type-converts discovery-strategy properties against their definitions.
 * A dash-less spelling of a key (e.g. "customproperty" for "custom-property") is
 * accepted and renamed in place to the canonical key. A missing non-optional property
 * raises InvalidConfigurationException; unknown leftovers are rejected at the end.
 * NOTE(review): the input {@code properties} map is mutated (dash-less keys renamed
 * in place) — confirm callers do not rely on the original keys.
 *
 * @return a new map of canonical keys to converted, validated values
 */
static Map<String, Comparable> prepareProperties(Map<String, Comparable> properties,
        Collection<PropertyDefinition> propertyDefinitions) {
    Map<String, Comparable> mappedProperties = createHashMap(propertyDefinitions.size());
    for (PropertyDefinition propertyDefinition : propertyDefinitions) {
        String propertyKey = propertyDefinition.key();
        // Canonicalize the dash-less spelling, if present.
        if (properties.containsKey(propertyKey.replace("-", ""))) {
            properties.put(propertyKey, properties.remove(propertyKey.replace("-", "")));
        }
        if (!properties.containsKey(propertyKey)) {
            if (!propertyDefinition.optional()) {
                throw new InvalidConfigurationException(
                    String.format("Missing property '%s' on discovery strategy", propertyKey));
            }
            continue;
        }
        Comparable value = properties.get(propertyKey);
        TypeConverter typeConverter = propertyDefinition.typeConverter();
        Comparable mappedValue = typeConverter.convert(value);
        ValueValidator validator = propertyDefinition.validator();
        if (validator != null) {
            validator.validate(mappedValue);
        }
        mappedProperties.put(propertyKey, mappedValue);
    }
    verifyNoUnknownProperties(mappedProperties, properties);
    return mappedProperties;
}
// A dash-less key ("customproperty") must map onto its canonical dashed definition.
@Test
public void correctDashlessPropertyConversion() {
    // given
    Map<String, Comparable> properties = new HashMap<>(singletonMap("customproperty", PROPERTY_VALUE_1));
    Collection<PropertyDefinition> propertyDefinitions =
        singletonList(new SimplePropertyDefinition("custom-property", STRING));

    // when
    Map<String, Comparable> result = prepareProperties(properties, propertyDefinitions);

    // then
    assertEquals(PROPERTY_VALUE_1, result.get("custom-property"));
}
/**
 * Prints or packages the JVM classpath. With -glob the (already globbed) classpath
 * is printed to stdout; with -jar it is written into the manifest of a jar at the
 * given path; with no args or -h/--help the usage text is printed.
 */
public static void main(String[] args) {
    if (args.length < 1 || args[0].equals("-h") || args[0].equals("--help")) {
        System.out.println(usage);
        return;
    }

    // Copy args, because CommandFormat mutates the list.
    List<String> argsList = new ArrayList<String>(Arrays.asList(args));
    CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "-glob", "-jar");
    try {
        cf.parse(argsList);
    } catch (UnknownOptionException e) {
        terminate(1, "unrecognized option");
        return;
    }

    String classPath = System.getProperty("java.class.path");

    if (cf.getOpt("-glob")) {
        // The classpath returned from the property has been globbed already.
        System.out.println(classPath);
    } else if (cf.getOpt("-jar")) {
        // -jar needs a destination path as its first remaining argument.
        if (argsList.isEmpty() || argsList.get(0) == null || argsList.get(0).isEmpty()) {
            terminate(1, "-jar option requires path of jar file to write");
            return;
        }

        // Write the classpath into the manifest of a temporary jar file.
        Path workingDir = new Path(System.getProperty("user.dir"));
        final String tmpJarPath;
        try {
            tmpJarPath = FileUtil.createJarWithClassPath(classPath, workingDir, System.getenv())[0];
        } catch (IOException e) {
            terminate(1, "I/O error creating jar: " + e.getMessage());
            return;
        }

        // Rename the temporary file to its final location.
        String jarPath = argsList.get(0);
        try {
            FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
        } catch (IOException e) {
            terminate(1, "I/O error renaming jar temporary file to path: " + e.getMessage());
            return;
        }
    }
}
// --jar without a path must terminate with an error mentioning the missing jar path.
@Test
public void testJarFileMissing() throws IOException {
    try {
        Classpath.main(new String[] { "--jar" });
        fail("expected exit");
    } catch (ExitUtil.ExitException e) {
        // Nothing on stdout; error text on stderr.
        assertTrue(stdout.toByteArray().length == 0);
        String strErr = new String(stderr.toByteArray(), UTF8);
        assertTrue(strErr.contains("requires path of jar"));
    }
}
/**
 * Closes all cached connections, optionally rolling back in-transaction connections
 * first. Session variables are reset before closing. Every SQLException encountered
 * is collected and returned (not thrown) so each connection still gets a close attempt.
 *
 * @param forceRollback whether to roll back open transactions before closing
 * @return the exceptions raised during reset/rollback/close, possibly empty
 */
public Collection<SQLException> closeConnections(final boolean forceRollback) {
    Collection<SQLException> result = new LinkedList<>();
    synchronized (cachedConnections) {
        resetSessionVariablesIfNecessary(cachedConnections.values(), result);
        for (Connection each : cachedConnections.values()) {
            try {
                if (forceRollback && connectionSession.getTransactionStatus().isInTransaction()) {
                    each.rollback();
                }
                each.close();
            } catch (final SQLException ex) {
                result.add(ex);
            }
        }
        cachedConnections.clear();
    }
    if (!forceRollback) {
        // NOTE(review): post-processors are retained on forced rollback — confirm intended.
        connectionPostProcessors.clear();
    }
    return result;
}
// A SQLException thrown by Connection.close() must be collected and returned.
@Test
void assertCloseConnectionsCorrectlyWhenSQLExceptionThrown() throws SQLException {
    Connection connection = prepareCachedConnections();
    SQLException sqlException = new SQLException("");
    doThrow(sqlException).when(connection).close();
    assertTrue(databaseConnectionManager.closeConnections(false).contains(sqlException));
}
/** Returns the direct dependencies from other classes to this class. */
@PublicAPI(usage = ACCESS)
public Set<Dependency> getDirectDependenciesToSelf() {
    return reverseDependencies.getDirectDependenciesTo(this);
}
// Type-parameter bounds referencing a class must appear among that class's
// direct dependencies to self.
@Test
@UseDataProvider
public void test_direct_dependencies_to_self_by_code_unit_type_parameters(JavaClass firstOrigin, JavaClass secondOrigin, JavaClass expectedTarget) {
    assertThatDependencies(expectedTarget.getDirectDependenciesToSelf())
        .contain(from(firstOrigin).to(expectedTarget).inLocation(getClass(), 0)
                .withDescriptionContaining("type parameter 'T' depending on")
            .from(secondOrigin).to(expectedTarget).inLocation(getClass(), 0)
                .withDescriptionContaining("type parameter 'U' depending on")
            .from(secondOrigin).to(expectedTarget).inLocation(getClass(), 0)
                .withDescriptionContaining("type parameter 'V' depending on")
        );
}
/**
 * Adds a topic, given as a hex string, to this bloom filter.
 *
 * @param topic hex-encoded topic; must not be null
 * @throws IllegalArgumentException if {@code topic} is null
 */
public void add(String topic) {
    if (topic != null) {
        add(Numeric.hexStringToByteArray(topic));
    } else {
        throw new IllegalArgumentException("topic can not be null");
    }
}
// Adding every topic of the sample logs must reproduce the sample logsBloom exactly.
@Test
public void testEthereumSampleLogsBloomReconstructionFromItsTopics() {
    Bloom reconstructedBloom = new Bloom();
    for (String topic : ethereumSampleLogs) {
        reconstructedBloom.add(topic);
    }
    assertEquals(
        new Bloom(ethereumSampleLogsBloom),
        reconstructedBloom,
        "reconstructed logsBloom should be equal");
}
/**
 * mTLS authorization filter. A request without a client certificate is rejected with
 * 401. In legacy mode any certificate is accepted with READ+WRITE. Otherwise the
 * request's required permission is resolved and the leaf certificate is matched
 * against configured clients holding that permission; on success the matching client
 * ids and their permissions are attached to the request principal, otherwise 403.
 *
 * @return an error response to short-circuit with, or empty to continue the chain
 */
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest req) {
    var certs = req.getClientCertificateChain();
    log.fine(() -> "Certificate chain contains %d elements".formatted(certs.size()));
    if (certs.isEmpty()) {
        log.fine("Missing client certificate");
        return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Unauthorized"));
    }
    if (legacyMode) {
        log.fine("Legacy mode validation complete");
        ClientPrincipal.attachToRequest(req, Set.of(), Set.of(READ, WRITE));
        return Optional.empty();
    }
    var permission = Permission.getRequiredPermission(req).orElse(null);
    if (permission == null) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
    // Only the first (leaf) certificate in the chain is matched.
    var clientCert = certs.get(0);
    var clientIds = new TreeSet<String>();
    var permissions = new TreeSet<Permission>();
    for (Client c : allowedClients) {
        if (!c.permissions().contains(permission)) continue;
        if (!c.certificates().contains(clientCert)) continue;
        clientIds.add(c.id());
        permissions.addAll(c.permissions());
    }
    if (clientIds.isEmpty()) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
    ClientPrincipal.attachToRequest(req, clientIds, permissions);
    return Optional.empty();
}
// A handler spec may remap POST to the READ action; a read-permitted cert then passes.
@Test
void supports_handler_with_custom_request_spec() {
    // Spec that maps POST as action 'read'
    var spec = RequestHandlerSpec.builder()
        .withAclMapping(HttpMethodAclMapping.standard()
            .override(Method.POST, Action.READ).build())
        .build();
    var req = FilterTestUtils.newRequestBuilder()
        .withMethod(Method.POST)
        .withClientCertificate(SEARCH_CERT)
        .withAttribute(RequestHandlerSpec.ATTRIBUTE_NAME, spec)
        .build();
    var responseHandler = new MockResponseHandler();
    newFilterWithClientsConfig().filter(req, responseHandler);
    assertNull(responseHandler.getResponse());
    assertEquals(new ClientPrincipal(Set.of(MTLS_SEARCH_CLIENT_ID), Set.of(READ)), req.getUserPrincipal());
}
/**
 * Repairs the commit position of the last file segment after a restart: when the
 * segment's physical size disagrees with its commit position, the position is
 * re-initialized from the real size and the segment metadata is flushed.
 * Index-type files and empty tables are skipped; size-read errors are logged only.
 */
public void recoverFileSize() {
    if (fileSegmentTable.isEmpty() || FileSegmentType.INDEX.equals(fileType)) {
        return;
    }
    // Only the last segment can be left in an inconsistent state.
    FileSegment fileSegment = fileSegmentTable.get(fileSegmentTable.size() - 1);
    long fileSize = fileSegment.getSize();
    if (fileSize == GET_FILE_SIZE_ERROR) {
        log.warn("FlatAppendFile get last file size error, filePath: {}", this.filePath);
        return;
    }
    if (fileSegment.getCommitPosition() != fileSize) {
        // Trust the physical size and persist the corrected position.
        fileSegment.initPosition(fileSize);
        flushFileSegmentMeta(fileSegment);
        log.warn("FlatAppendFile last file size not correct, filePath: {}", this.filePath);
    }
}
// Smoke test: roll a new file, append past it, commit, and flush segment metadata.
@Test
public void recoverFileSizeTest() {
    String filePath = MessageStoreUtil.toFilePath(queue);
    FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(filePath);
    flatFile.rollingNewFile(500L);
    FileSegment fileSegment = flatFile.getFileToWrite();
    flatFile.append(allocateBuffer(1000), 1L);
    flatFile.commitAsync().join();
    flatFile.flushFileSegmentMeta(fileSegment);
}
/**
 * Convenience overload that reuses the vip as the third argument of the full
 * factory (presumably the target/niws name — confirm against the three-arg overload).
 */
public static OriginName fromVipAndApp(String vip, String appName) {
    return fromVipAndApp(vip, appName, vip);
}
// Every factory argument is mandatory: nulls must raise NullPointerException.
@Test
void noNull() {
    assertThrows(NullPointerException.class, () -> OriginName.fromVipAndApp(null, "app"));
    assertThrows(NullPointerException.class, () -> OriginName.fromVipAndApp("vip", null));
    assertThrows(NullPointerException.class, () -> OriginName.fromVipAndApp(null, "app", "niws"));
    assertThrows(NullPointerException.class, () -> OriginName.fromVipAndApp("vip", null, "niws"));
    assertThrows(NullPointerException.class, () -> OriginName.fromVipAndApp("vip", "app", null));
}
/** Returns the file configuration row with the given id. */
@Override
public FileConfigDO getFileConfig(Long id) {
    return fileConfigMapper.selectById(id);
}
// getFileConfig must return the row previously inserted with the same id.
@Test
public void testGetFileConfig() {
    // mock data
    FileConfigDO dbFileConfig = randomFileConfigDO().setMaster(false);
    fileConfigMapper.insert(dbFileConfig);// insert an existing row first
    // prepare parameters
    Long id = dbFileConfig.getId();
    // invoke and assert
    assertPojoEquals(dbFileConfig, fileConfigService.getFileConfig(id));
}
/**
 * Computes the intersection of this bounding box with {@code bBox}.
 *
 * @return the overlapping box, or null when the boxes do not intersect
 */
public BBox calculateIntersection(BBox bBox) {
    // Disjoint boxes have no intersection.
    if (!this.intersects(bBox)) {
        return null;
    }

    // The intersection takes the tighter of the two bounds on every side.
    final double lonLow = Math.max(this.minLon, bBox.minLon);
    final double lonHigh = Math.min(this.maxLon, bBox.maxLon);
    final double latLow = Math.max(this.minLat, bBox.minLat);
    final double latHigh = Math.min(this.maxLat, bBox.maxLat);
    return new BBox(lonLow, lonHigh, latLow, latHigh);
}
// Overlapping boxes, disjoint boxes (null), and a fully contained box.
@Test
public void testCalculateIntersection() {
    BBox b1 = new BBox(0, 2, 0, 1);
    BBox b2 = new BBox(-1, 1, -1, 2);
    BBox expected = new BBox(0, 1, 0, 1);
    assertEquals(expected, b1.calculateIntersection(b2));

    //No intersection
    b2 = new BBox(100, 200, 100, 200);
    assertNull(b1.calculateIntersection(b2));

    //Real Example
    b1 = new BBox(8.8591,9.9111,48.3145,48.8518);
    b2 = new BBox(5.8524,17.1483,46.3786,55.0653);
    assertEquals(b1, b1.calculateIntersection(b2));
}
/**
 * Extracts a (possibly nested) field value from a record value, where the field
 * name is a dot-separated path. Struct and Map values are supported; any other
 * value type is rejected.
 *
 * @throws UnsupportedOperationException for unsupported record value types
 */
@SuppressWarnings("unchecked")
static Object extractFromRecordValue(Object recordValue, String fieldName) {
    final List<String> path = Splitter.on('.').splitToList(fieldName);
    if (recordValue instanceof Struct) {
        return valueFromStruct((Struct) recordValue, path);
    }
    if (recordValue instanceof Map) {
        return valueFromMap((Map<String, ?>) recordValue, path);
    }
    throw new UnsupportedOperationException(
            "Cannot extract value from type: " + recordValue.getClass().getName());
}
// Extracting an empty or unknown key path from a Map record value yields null.
@Test
public void testExtractFromRecordValueMapNull() {
    Map<String, Object> val = ImmutableMap.of("key", 123L);
    Object result = RecordUtils.extractFromRecordValue(val, "");
    assertThat(result).isNull();
    result = RecordUtils.extractFromRecordValue(val, "xkey");
    assertThat(result).isNull();
}
/**
 * Builds a BufferDebloatConfiguration from the Flink configuration, validating that
 * buffer sizes, the sample count and the target duration are positive and that the
 * maximum buffer size is at least the minimum.
 *
 * @throws IllegalArgumentException if any constraint is violated
 */
public static BufferDebloatConfiguration fromConfiguration(ReadableConfig config) {
    Duration targetTotalBufferSize = config.get(BUFFER_DEBLOAT_TARGET);
    int maxBufferSize = Math.toIntExact(config.get(TaskManagerOptions.MEMORY_SEGMENT_SIZE).getBytes());
    int minBufferSize = Math.toIntExact(config.get(TaskManagerOptions.MIN_MEMORY_SEGMENT_SIZE).getBytes());
    int bufferDebloatThresholdPercentages = config.get(BUFFER_DEBLOAT_THRESHOLD_PERCENTAGES);
    final int numberOfSamples = config.get(BUFFER_DEBLOAT_SAMPLES);
    // Right now the buffer size can not be greater than integer max value according to
    // MemorySegment and buffer implementation.
    checkArgument(maxBufferSize > 0);
    checkArgument(minBufferSize > 0);
    checkArgument(numberOfSamples > 0);
    checkArgument(maxBufferSize >= minBufferSize);
    checkArgument(targetTotalBufferSize.toMillis() > 0.0);
    return new BufferDebloatConfiguration(
        config.get(TaskManagerOptions.BUFFER_DEBLOAT_ENABLED),
        targetTotalBufferSize,
        maxBufferSize,
        minBufferSize,
        bufferDebloatThresholdPercentages,
        numberOfSamples);
}
// A non-positive debloat target duration must be rejected.
@Test
public void testNegativeConsumptionTime() {
    final Configuration config = new Configuration();
    config.set(TaskManagerOptions.BUFFER_DEBLOAT_TARGET, Duration.ofMillis(-1));
    assertThrows(
        IllegalArgumentException.class,
        () -> BufferDebloatConfiguration.fromConfiguration(config));
}
/**
 * A word is currently being predicted when prediction is enabled and the
 * composing word is non-empty.
 */
protected boolean isCurrentlyPredicting() {
    if (!isPredictionOn()) {
        return false;
    }
    return !mWord.isEmpty();
}
// Backspace while actively predicting must not schedule a suggestions-restart
// message, neither immediately nor after a short delay.
@Test
public void testDoesNotPostRestartOnBackspaceWhilePredicting() {
    simulateFinishInputFlow();
    SharedPrefsHelper.setPrefsValue(R.string.settings_key_allow_suggestions_restart, true);
    simulateOnStartInputFlow();
    mAnySoftKeyboardUnderTest.simulateTextTyping("hel");
    Assert.assertTrue(mAnySoftKeyboardUnderTest.isCurrentlyPredicting());
    Assert.assertFalse(
        ((AnySoftKeyboardSuggestions) mAnySoftKeyboardUnderTest)
            .mKeyboardHandler.hasMessages(MSG_RESTART_NEW_WORD_SUGGESTIONS));
    mAnySoftKeyboardUnderTest.simulateKeyPress(KeyCodes.DELETE, false);
    Assert.assertFalse(
        ((AnySoftKeyboardSuggestions) mAnySoftKeyboardUnderTest)
            .mKeyboardHandler.hasMessages(MSG_RESTART_NEW_WORD_SUGGESTIONS));
    SystemClock.sleep(5);
    Assert.assertFalse(
        ((AnySoftKeyboardSuggestions) mAnySoftKeyboardUnderTest)
            .mKeyboardHandler.hasMessages(MSG_RESTART_NEW_WORD_SUGGESTIONS));
}
/**
 * Builds the tier factories for hybrid shuffle. When an external remote tier
 * factory class is configured, only that single factory is created; otherwise the
 * built-in ephemeral tier factories are returned.
 */
public static List<TierFactory> initializeTierFactories(Configuration configuration) {
    final String externalTierFactoryClass =
            configuration.get(
                    NettyShuffleEnvironmentOptions
                            .NETWORK_HYBRID_SHUFFLE_EXTERNAL_REMOTE_TIER_FACTORY_CLASS_NAME);
    if (externalTierFactoryClass == null) {
        return getEphemeralTierFactories(configuration);
    }
    return Collections.singletonList(
            createExternalTierFactory(configuration, externalTierFactoryClass));
}
// Configuring an external remote tier factory class must yield exactly that factory.
@Test
void testInitDurableTiersWithExternalRemoteTier() {
    Configuration configuration = new Configuration();
    configuration.set(
        NettyShuffleEnvironmentOptions
            .NETWORK_HYBRID_SHUFFLE_EXTERNAL_REMOTE_TIER_FACTORY_CLASS_NAME,
        ExternalRemoteTierFactory.class.getName());
    List<TierFactory> tierFactories = TierFactoryInitializer.initializeTierFactories(configuration);
    assertThat(tierFactories).hasSize(1);
    assertThat(tierFactories.get(0)).isInstanceOf(ExternalRemoteTierFactory.class);
}
/**
 * Asserts that the table under test has no mapping for the given row/column pair,
 * failing with the contained value and the full table contents otherwise.
 */
public void doesNotContain(@Nullable Object rowKey, @Nullable Object columnKey) {
    if (checkNotNull(actual).contains(rowKey, columnKey)) {
        failWithoutActual(
            simpleFact("expected not to contain mapping for row-column key pair"),
            fact("row key", rowKey),
            fact("column key", columnKey),
            fact("but contained value", actual.get(rowKey, columnKey)),
            fact("full contents", actual));
    }
}
// A doesNotContain failure must report the key pair, the contained value, and contents.
@Test
public void doesNotContainFailure() {
    ImmutableTable<String, String, String> table = ImmutableTable.of("row", "col", "val");
    expectFailureWhenTestingThat(table).doesNotContain("row", "col");
    assertThat(expectFailure.getFailure())
        .factKeys()
        .containsExactly(
            "expected not to contain mapping for row-column key pair",
            "row key",
            "column key",
            "but contained value",
            "full contents");
    assertThat(expectFailure.getFailure()).factValue("row key").isEqualTo("row");
    assertThat(expectFailure.getFailure()).factValue("column key").isEqualTo("col");
    assertThat(expectFailure.getFailure()).factValue("but contained value").isEqualTo("val");
}
/**
 * Deny directive: returns DENY when action, entity type, and resource all match,
 * or when the request targets elastic agent profiles and the action plus the
 * enclosing resource match (presumably so a cluster-profile deny also covers its
 * elastic agent profiles — confirm). Otherwise SKIP so later directives decide.
 */
@Override
public Result apply(String action, Class<? extends Validatable> aClass, String resource, String resourceToOperateWithin) {
    if (matchesAction(action) && matchesType(aClass) && matchesResource(resource)) {
        return Result.DENY;
    }
    if (isRequestForElasticAgentProfiles(aClass) && matchesAction(action) && matchesResource(resourceToOperateWithin)) {
        return Result.DENY;
    }
    return Result.SKIP;
}
// Denying 'view' on all cluster profiles also denies viewing elastic agent profiles,
// while 'administer' requests fall through to later directives (SKIP).
@Test
void forViewOfAllClusterProfiles() {
    Deny directive = new Deny("view", "cluster_profile", "*");

    Result viewAllElasticAgentProfiles = directive.apply("view", ElasticProfile.class, "*", null);
    Result viewAllClusterProfiles = directive.apply("view", ClusterProfile.class, "*", null);
    Result administerAllElasticAgentProfiles = directive.apply("administer", ElasticProfile.class, "*", null);
    Result administerAllClusterProfiles = directive.apply("administer", ClusterProfile.class, "*", null);

    assertThat(viewAllElasticAgentProfiles).isEqualTo(Result.DENY);
    assertThat(viewAllClusterProfiles).isEqualTo(Result.DENY);
    assertThat(administerAllElasticAgentProfiles).isEqualTo(Result.SKIP);
    assertThat(administerAllClusterProfiles).isEqualTo(Result.SKIP);
}
/**
 * Rolls back the current transaction. Before-rollback hooks always run; the actual
 * rollback only happens when a transaction is open, choosing the local or the
 * distributed manager from the transaction type. The finally block guarantees that
 * after-rollback hooks run, savepoints are cleared, the in-transaction flag is
 * reset and the connection context is closed even when the rollback throws.
 */
@Override
public void rollback() throws SQLException {
    for (TransactionHook each : transactionHooks) {
        each.beforeRollback(connection.getCachedConnections().values(), getTransactionContext());
    }
    if (connection.getConnectionSession().getTransactionStatus().isInTransaction()) {
        try {
            if (TransactionType.LOCAL == TransactionUtils.getTransactionType(getTransactionContext()) || null == distributionTransactionManager) {
                localTransactionManager.rollback();
            } else {
                distributionTransactionManager.rollback();
            }
        } finally {
            for (TransactionHook each : transactionHooks) {
                each.afterRollback(connection.getCachedConnections().values(), getTransactionContext());
            }
            for (Connection each : connection.getCachedConnections().values()) {
                ConnectionSavepointManager.getInstance().transactionFinished(each);
            }
            connection.getConnectionSession().getTransactionStatus().setInTransaction(false);
            connection.getConnectionSession().getConnectionContext().close();
        }
    }
}
// rollback() outside a transaction must not touch any manager or transaction status.
@Test
void assertRollbackWithoutTransaction() throws SQLException {
    ContextManager contextManager = mockContextManager(TransactionType.LOCAL);
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    newBackendTransactionManager(TransactionType.LOCAL, false);
    backendTransactionManager.rollback();
    verify(transactionStatus, times(0)).setInTransaction(false);
    verify(localTransactionManager, times(0)).rollback();
    verify(distributionTransactionManager, times(0)).rollback();
}
/**
 * Builds the HTTP request for an ENS offchain gateway. The url template must
 * contain {sender}; when it also contains {data} a GET request with both values
 * substituted is built, otherwise the data is POSTed as a JSON body.
 *
 * @throws EnsResolutionException for an invalid sender address, null data, or a
 *         template lacking the {sender} placeholder
 */
protected Request buildRequest(String url, String sender, String data) throws JsonProcessingException {
    if (sender == null || !WalletUtils.isValidAddress(sender)) {
        throw new EnsResolutionException("Sender address is null or not valid");
    }
    if (data == null) {
        throw new EnsResolutionException("Data is null");
    }
    if (!url.contains("{sender}")) {
        throw new EnsResolutionException("Url is not valid, sender parameter is not exist");
    }

    // URL expansion
    String href = url.replace("{sender}", sender).replace("{data}", data);

    Request.Builder builder = new Request.Builder().url(href);

    if (url.contains("{data}")) {
        return builder.get().build();
    } else {
        // No {data} placeholder: send the data as a JSON POST body instead.
        EnsGatewayRequestDTO requestDTO = new EnsGatewayRequestDTO(data);
        ObjectMapper om = ObjectMapperFactory.getObjectMapper();
        return builder.post(RequestBody.create(om.writeValueAsString(requestDTO), JSON))
            .addHeader("Content-Type", "application/json")
            .build();
    }
}
// A null sender must be rejected before any request is built.
@Test
void buildRequestWhenWithoutSenderTest() throws IOException {
    String url = "https://example.com/gateway/{sender}.json";
    String data = "0xd5fa2b00";
    assertThrows(EnsResolutionException.class, () -> ensResolver.buildRequest(url, null, data));
}
/**
 * Validates the application's validation overrides, if present: parses the XML and
 * checks each override against the deploy state's current time, reporting
 * violations through context::illegal.
 */
@Override
public void validate(Context context) {
    Optional<Reader> overrides = context.deployState().getApplicationPackage().getValidationOverrides();
    if (overrides.isEmpty()) return;
    ValidationOverrides validationOverrides = ValidationOverrides.fromXml(overrides.get());
    validationOverrides.validate(context.deployState().now(), context::illegal);
}
// A deployment-removal override valid for ten more days must pass validation.
@Test
void testValidationOverride() throws IOException, SAXException {
    String tenDays = dateTimeFormatter.format(Instant.now().plus(Duration.ofDays(10)));
    var validationOverridesXml = "<?xml version='1.0' encoding='UTF-8'?>\n"
        + " <validation-overrides>\n"
        + " <allow until='" + tenDays + "'>deployment-removal</allow>\n"
        + " </validation-overrides>";
    var deployState = createDeployState(validationOverridesXml);
    VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
    new Validation().validate(model, new ValidationParameters(), deployState);
}
/**
 * Resolves a possibly-relative URL against a base URL. Handles protocol-relative
 * (//host/...), domain-relative (/path), query/fragment-relative (?..., #...),
 * and last-segment-relative forms; URLs already containing "://" are returned as is.
 */
public static String toAbsolute(String baseURL, String relativeURL) {
    String relURL = relativeURL;
    // Relative to protocol
    if (relURL.startsWith("//")) {
        return StringUtils.substringBefore(baseURL, "//") + "//"
                + StringUtils.substringAfter(relURL, "//");
    }
    // Relative to domain name
    if (relURL.startsWith("/")) {
        return getRoot(baseURL) + relURL;
    }
    // Relative to full page URL minus ? or #
    if (relURL.startsWith("?") || relURL.startsWith("#")) {
        // this is a relative url and should have the full page base
        return baseURL.replaceFirst("(.*?)([\\?\\#])(.*)", "$1") + relURL;
    }
    // Relative to last directory/segment
    if (!relURL.contains("://")) {
        String base = baseURL.replaceFirst("(.*?)([\\?\\#])(.*)", "$1");
        // Keep at least "scheme://host" intact before trimming to the last segment.
        if (StringUtils.countMatches(base, '/') > 2) {
            base = base.replaceFirst("(.*/)(.*)", "$1");
        }
        if (base.endsWith("/")) {
            // This is a URL relative to the last URL segment
            relURL = base + relURL;
        } else {
            relURL = base + "/" + relURL;
        }
    }
    // Not detected as relative, so return as is
    return relURL;
}
// A bare domain without a trailing slash plus a segment-relative URL must
// resolve to domain + "/" + segment.
@Test
public void testFromDomainNoTrailSlashToRelativeNoLeadSlash() {
    s = "http://www.sample.com";
    t = "http://www.sample.com/xyz.html";
    assertEquals(t, HttpURL.toAbsolute(s, "xyz.html"));
}
/**
 * Asynchronous getData that transparently resolves symlink nodes.
 * Plain paths are delegated straight to ZooKeeper; symlinked paths are
 * resolved via a composite callback before the data is fetched.
 */
@Override
public void getData(final String path, final boolean watch, final AsyncCallback.DataCallback cb, final Object ctx) {
    // Fast path: no symlink in the path.
    if (!SymlinkUtil.containsSymlink(path)) {
        _zk.getData(path, watch, cb, ctx);
        return;
    }
    // Symlink present: resolve through a composite callback; install it as a
    // watcher only when watching was actually requested.
    SymlinkDataCallback resolver = new SymlinkDataCallback(path, _defaultWatcher, cb);
    getData0(path, watch ? resolver : null, resolver, ctx);
}
// Reads data through a symlinked path (/foo/$link/1 -> /foo/bar/1) and checks
// the callback receives OK, the expected payload, and a non-null Stat.
@Test
public void testSymlinkGetData() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    AsyncCallback.DataCallback callback = new AsyncCallback.DataCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
            String value = null;
            try {
                value = new String(data, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                Assert.fail(e.getMessage());
            } finally {
                // Assertions live in finally so the latch is always released.
                KeeperException.Code result = KeeperException.Code.get(rc);
                Assert.assertEquals(result, KeeperException.Code.OK);
                Assert.assertEquals(value, (String) ctx);
                Assert.assertNotNull(stat);
                latch.countDown();
            }
        }
    };
    // symlink: /foo/$link/1 -> /foo/bar/1
    _zkClient.getZooKeeper().getData("/foo/$link/1", null, callback, "1");
    latch.await(30, TimeUnit.SECONDS);
}
/**
 * Fluent setter: installs the partitioner used to derive per-tuple output
 * sub-paths, and returns this bolt for call chaining.
 */
public HdfsBolt withPartitioner(Partitioner partitioner) {
    this.partitioner = partitioner;
    return this;
}
// Each tuple should land in a partition directory derived from its "city"
// field, producing exactly one non-empty file per city.
@Test
public void testPartitionedOutput() throws IOException {
    HdfsBolt bolt = makeHdfsBolt(hdfsURI, 1, 1000f);
    Partitioner partitoner = new Partitioner() {
        @Override
        public String getPartitionPath(Tuple tuple) {
            return Path.SEPARATOR + tuple.getStringByField("city");
        }
    };
    // NOTE(review): the partitioner is installed AFTER prepare(); this
    // presumably works because the bolt reads the field lazily per tuple —
    // confirm against HdfsBolt's implementation.
    bolt.prepare(new Config(), topologyContext, collector);
    bolt.withPartitioner(partitoner);
    bolt.execute(tuple1);
    bolt.execute(tuple2);
    verify(collector).ack(tuple1);
    verify(collector).ack(tuple2);
    assertEquals(1, countNonZeroLengthFiles(testRoot + "/SFO"));
    assertEquals(1, countNonZeroLengthFiles(testRoot + "/SJO"));
}
/**
 * Creates the HighAvailabilityServices matching the HA mode configured in
 * {@code config}: embedded (NONE), ZooKeeper, Kubernetes (loaded by factory
 * class name), or a user-supplied factory class.
 *
 * @throws Exception if the configured mode is unsupported or the services
 *     cannot be instantiated
 */
public static HighAvailabilityServices createAvailableOrEmbeddedServices(
        Configuration config, Executor executor, FatalErrorHandler fatalErrorHandler)
        throws Exception {
    HighAvailabilityMode mode = HighAvailabilityMode.fromConfig(config);
    if (mode == HighAvailabilityMode.NONE) {
        return new EmbeddedHaServices(executor);
    }
    if (mode == HighAvailabilityMode.ZOOKEEPER) {
        return createZooKeeperHaServices(config, executor, fatalErrorHandler);
    }
    if (mode == HighAvailabilityMode.KUBERNETES) {
        // The Kubernetes factory lives in a separate module; load it by name.
        return createCustomHAServices(
                "org.apache.flink.kubernetes.highavailability.KubernetesHaServicesFactory",
                config,
                executor);
    }
    if (mode == HighAvailabilityMode.FACTORY_CLASS) {
        return createCustomHAServices(config, executor);
    }
    throw new Exception("High availability mode " + mode + " is not supported.");
}
// Setting HA mode to FACTORY_CLASS without configuring a factory class name
// must make service creation fail.
@Test(expected = Exception.class)
public void testCustomHAServicesFactoryNotDefined() throws Exception {
    Configuration config = new Configuration();
    Executor executor = Executors.directExecutor();
    config.set(
            HighAvailabilityOptions.HA_MODE,
            HighAvailabilityMode.FACTORY_CLASS.name().toLowerCase());
    // expect
    HighAvailabilityServicesUtils.createAvailableOrEmbeddedServices(
            config, executor, NoOpFatalErrorHandler.INSTANCE);
}
/**
 * Deserializes a consumer protocol Assignment from the given buffer,
 * clamping the requested version to a supported one. Wraps buffer underflow
 * in a SchemaException with context.
 */
public static Assignment deserializeAssignment(final ByteBuffer buffer, short version) {
    version = checkAssignmentVersion(version);
    try {
        final ConsumerProtocolAssignment data =
                new ConsumerProtocolAssignment(new ByteBufferAccessor(buffer), version);
        // Flatten the per-topic partition lists into TopicPartition instances.
        final List<TopicPartition> assigned = new ArrayList<>();
        for (ConsumerProtocolAssignment.TopicPartition tp : data.assignedPartitions()) {
            for (Integer partition : tp.partitions()) {
                assigned.add(new TopicPartition(tp.topic(), partition));
            }
        }
        // Duplicate user data so callers cannot disturb the source buffer's position.
        final ByteBuffer userData = data.userData() == null ? null : data.userData().duplicate();
        return new Assignment(assigned, userData);
    } catch (BufferUnderflowException e) {
        throw new SchemaException("Buffer underflow while parsing consumer protocol's assignment", e);
    }
}
// Forward compatibility: an assignment written with a hypothetical future
// schema version (extra "foo" field) must still parse; unknown fields are ignored.
@Test
public void deserializeFutureAssignmentVersion() {
    // verify that a new version which adds a field is still parseable
    short version = 100;
    Schema assignmentSchemaV100 = new Schema(
            new Field("assigned_partitions", new ArrayOf(ConsumerProtocolAssignment.TopicPartition.SCHEMA_0)),
            new Field("user_data", Type.BYTES),
            new Field("foo", Type.STRING));
    Struct assignmentV100 = new Struct(assignmentSchemaV100);
    assignmentV100.set("assigned_partitions",
            new Object[]{new Struct(ConsumerProtocolAssignment.TopicPartition.SCHEMA_0)
                    .set("topic", tp1.topic())
                    .set("partitions", new Object[]{tp1.partition()})});
    assignmentV100.set("user_data", ByteBuffer.wrap(new byte[0]));
    assignmentV100.set("foo", "bar");
    Struct headerV100 = new Struct(new Schema(new Field("version", Type.INT16)));
    headerV100.set("version", version);
    ByteBuffer buffer = ByteBuffer.allocate(assignmentV100.sizeOf() + headerV100.sizeOf());
    headerV100.writeTo(buffer);
    assignmentV100.writeTo(buffer);
    buffer.flip();
    Assignment assignment = ConsumerProtocol.deserializeAssignment(buffer);
    assertEquals(toSet(Collections.singletonList(tp1)), toSet(assignment.partitions()));
}
void invokeMain(BootstrappedInstanceProxy instanceProxy, ExecuteJobParameters executeJobParameters, Method mainMethod, List<String> args) throws IllegalAccessException, InvocationTargetException { try { instanceProxy.setExecuteJobParameters(executeJobParameters); String[] jobArgs = args.toArray(new String[0]); // upcast args to Object, so it's passed as a single array-typed argument mainMethod.invoke(null, (Object) jobArgs); } finally { instanceProxy.removeExecuteJobParameters(); } }
// After invokeMain returns, the thread-local ExecuteJobParameters on the
// bootstrapped proxy must be cleared (all fields back to null).
@Test
public void testInvokeMain() throws InvocationTargetException, IllegalAccessException {
    HazelcastInstance hazelcastInstance = mock(HazelcastInstance.class);
    AbstractJetInstance abstractJetInstance = mock(AbstractJetInstance.class);
    when(hazelcastInstance.getJet()).thenReturn(abstractJetInstance);
    when(abstractJetInstance.getHazelcastInstance()).thenReturn(hazelcastInstance);
    // Parameter for test
    BootstrappedInstanceProxy instanceProxy = createWithMemberJetProxy(hazelcastInstance);
    // Parameter for test. Empty main method
    MainMethodFinder mainMethodFinder = new MainMethodFinder();
    mainMethodFinder.getMainMethodOfClass(MemberExecuteJarTest.class);
    // Parameter for test
    String jarPath = "jarPath";
    String snapshotName = "snapshotName";
    String jobName = "jobName";
    ExecuteJobParameters executeJobParameters = new ExecuteJobParameters(jarPath, snapshotName, jobName);
    // Test that invokeMain removes thread local values in BootstrappedInstanceProxy
    MemberExecuteJar memberExecuteJar = new MemberExecuteJar();
    memberExecuteJar.invokeMain(instanceProxy, executeJobParameters, mainMethodFinder.mainMethod,
            Collections.singletonList("jobArgs"));
    BootstrappedJetProxy bootstrappedJetProxy = instanceProxy.getJet();
    ExecuteJobParameters parameters = bootstrappedJetProxy.getExecuteJobParameters();
    assertThat(parameters.getJarPath()).isNull();
    assertThat(parameters.getSnapshotName()).isNull();
    assertThat(parameters.getJobName()).isNull();
}
/**
 * Builds a ProjectMeasuresQuery from the given filter criteria, optionally
 * restricted to a set of project UUIDs (null means no restriction).
 */
static ProjectMeasuresQuery newProjectMeasuresQuery(List<Criterion> criteria, @Nullable Set<String> projectUuids) {
    ProjectMeasuresQuery result = new ProjectMeasuresQuery();
    if (projectUuids != null) {
        result.setProjectUuids(projectUuids);
    }
    for (Criterion criterion : criteria) {
        processCriterion(criterion, result);
    }
    return result;
}
// A non-null projectUuids set must be carried into the query even when
// criteria are also supplied.
@Test
public void filter_on_projectUuids_if_projectUuid_is_non_empty_and_criteria_non_empty() {
    ProjectMeasuresQuery query = newProjectMeasuresQuery(
            singletonList(Criterion.builder().setKey("ncloc").setOperator(GT).setValue("10").build()),
            Collections.singleton("foo"));
    assertThat(query.getProjectUuids()).isPresent();
}
// REST endpoint returning LwM2M objects parsed from LWM2M_MODEL resources,
// sorted by the given property/order and optionally filtered by object ids.
// Note: objectIds is documented as required but declared required=false.
@ApiOperation(value = "Get LwM2M Objects (getLwm2mListObjects)",
        notes = "Returns a page of LwM2M objects parsed from Resources with type 'LWM2M_MODEL' owned by tenant or sysadmin. " +
                "You can specify parameters to filter the results. " +
                LWM2M_OBJECT_DESCRIPTION + TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('TENANT_ADMIN')")
@GetMapping(value = "/resource/lwm2m")
public List<LwM2mObject> getLwm2mListObjects(
        @Parameter(description = SORT_ORDER_DESCRIPTION, schema = @Schema(allowableValues = {"ASC", "DESC"}, requiredMode = Schema.RequiredMode.REQUIRED))
        @RequestParam String sortOrder,
        @Parameter(description = SORT_PROPERTY_DESCRIPTION, schema = @Schema(allowableValues = {"id", "name"}, requiredMode = Schema.RequiredMode.REQUIRED))
        @RequestParam String sortProperty,
        @Parameter(description = "LwM2M Object ids.", array = @ArraySchema(schema = @Schema(type = "string")), required = true)
        @RequestParam(required = false) String[] objectIds) throws ThingsboardException {
    // checkNotNull converts a missing result into a ThingsboardException.
    return checkNotNull(tbResourceService.findLwM2mObject(getTenantId(), sortOrder, sortProperty, objectIds));
}
// Loads three LwM2M model resources and checks the endpoint returns exactly
// the three requested objects, then cleans the fixtures up.
@Test
public void testGetLwm2mListObjects() throws Exception {
    loginTenantAdmin();
    List<TbResource> resources = loadLwm2mResources();
    List<LwM2mObject> objects = doGetTyped(
            "/api/resource/lwm2m?sortProperty=id&sortOrder=ASC&objectIds=3_1.2,5_1.2,19_1.1",
            new TypeReference<>() {});
    Assert.assertNotNull(objects);
    Assert.assertEquals(3, objects.size());
    removeLoadResources(resources);
}
/**
 * Reflectively invokes {@code methodName} on {@code instance}, walking up the
 * class hierarchy until a declared method with the given parameter types is
 * found. Target exceptions that are RuntimeException or Error are rethrown
 * as-is; any other cause is wrapped in a RuntimeException.
 */
public static <R> R callInstanceMethod(
        final Object instance, final String methodName, ClassParameter<?>... classParameters) {
    // Instrumentation: count invocations per (class, method) pair.
    perfStatsCollector.incrementCount(
            String.format(
                    "ReflectionHelpers.callInstanceMethod-%s_%s",
                    instance.getClass().getName(), methodName));
    try {
        final Class<?>[] classes = ClassParameter.getClasses(classParameters);
        final Object[] values = ClassParameter.getValues(classParameters);
        // Walk superclasses until the declared method is found (NoSuchMethodException
        // triggers the next hop up the hierarchy).
        return traverseClassHierarchy(
                instance.getClass(),
                NoSuchMethodException.class,
                traversalClass -> {
                    Method declaredMethod = traversalClass.getDeclaredMethod(methodName, classes);
                    declaredMethod.setAccessible(true);
                    return (R) declaredMethod.invoke(instance, values);
                });
    } catch (InvocationTargetException e) {
        // Unwrap the reflective layer: preserve unchecked throwables verbatim.
        if (e.getTargetException() instanceof RuntimeException) {
            throw (RuntimeException) e.getTargetException();
        }
        if (e.getTargetException() instanceof Error) {
            throw (Error) e.getTargetException();
        }
        throw new RuntimeException(e.getTargetException());
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
// Inherited (superclass-declared) methods must be reachable through
// callInstanceMethod's hierarchy traversal.
@Test
public void callInstanceMethodReflectively_callsInheritedMethods() {
    ExampleDescendant example = new ExampleDescendant();
    assertThat((int) ReflectionHelpers.callInstanceMethod(example, "returnNegativeNumber"))
            .isEqualTo(-46);
}
/**
 * Returns the total event count, delegating to the wrapped counter — the
 * meter view itself stores no count of its own.
 */
@Override
public long getCount() {
    return this.counter.getCount();
}
// The meter view must report exactly the count accumulated by its backing counter.
@Test
void testGetCount() {
    Counter c = new SimpleCounter();
    c.inc(5);
    Meter m = new MeterView(c);
    assertThat(m.getCount()).isEqualTo(5);
}
/**
 * Deserializes JSON bytes from a topic into the target type, coercing the
 * parsed tree to the configured schema. Null payloads map to null. Parse
 * locations are scrubbed from errors so payload data is never logged.
 */
@Override
public T deserialize(final String topic, final byte[] bytes) {
    try {
        if (bytes == null) {
            return null;
        }
        // don't use the JsonSchemaConverter to read this data because
        // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
        // which is not currently available in the standard converters
        final JsonNode value = isJsonSchema
                ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
                : MAPPER.readTree(bytes);
        // Coerce the raw tree to the declared schema, starting at the root path "$".
        final Object coerced = enforceFieldType(
                "$",
                new JsonValueContext(value, schema)
        );
        if (LOG.isTraceEnabled()) {
            LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
        }
        return SerdeUtils.castToTargetType(coerced, targetType);
    } catch (final Exception e) {
        // Clear location in order to avoid logging data, for security reasons
        if (e instanceof JsonParseException) {
            ((JsonParseException) e).clearLocation();
        }
        throw new SerializationException(
                "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
    }
}
// Fields not present in the schema must be silently ignored during
// deserialization rather than causing a failure.
@Test
public void shouldDeserializeJsonObjectWithRedundantFields() {
    // Given:
    final Map<String, Object> orderRow = new HashMap<>(AN_ORDER);
    orderRow.put("extraField", "should be ignored");
    final byte[] bytes = serializeJson(orderRow);
    // When:
    final Struct result = deserializer.deserialize(SOME_TOPIC, bytes);
    // Then:
    assertThat(result, is(expectedOrder));
}
/**
 * Multi-get: fetches the values for all given keys. Inside a MULTI or
 * pipeline the reads are only queued and null is returned (results are
 * delivered when the transaction/pipeline completes); otherwise all GETs are
 * batched into a single round trip.
 */
@Override
public List<byte[]> mGet(byte[]... keys) {
    if (isQueueing() || isPipelined()) {
        // Queue one GET per key; values arrive with the deferred results.
        for (byte[] key : keys) {
            read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
        }
        return null;
    }
    CommandBatchService batch = new CommandBatchService(executorService);
    for (byte[] key : keys) {
        batch.readAsync(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
    }
    BatchResult<byte[]> result = (BatchResult<byte[]>) batch.execute();
    return result.getResponses();
}
// mGet must return the values for all requested keys in key order.
// Note: HashMap iteration order is unspecified, but keySet() and values()
// iterate consistently within the same map instance, so the comparison holds.
@Test
public void testMGet() {
    Map<byte[], byte[]> map = new HashMap<>();
    for (int i = 0; i < 10; i++) {
        map.put(("test" + i).getBytes(), ("test" + i * 100).getBytes());
    }
    connection.mSet(map);
    List<byte[]> r = connection.mGet(map.keySet().toArray(new byte[0][]));
    assertThat(r).containsExactly(map.values().toArray(new byte[0][]));
}
/**
 * Pre-invocation interceptor: replaces the candidate service-instance list
 * (first argument) with the subset selected by the load-balancer rules for
 * the current request. No-ops when the service id cannot be resolved or the
 * candidate list is empty.
 */
@Override
public ExecuteContext before(ExecuteContext context) {
    String serviceId = getServiceId(context.getObject()).orElse(null);
    if (StringUtils.isBlank(serviceId)) {
        return context;
    }
    Object[] arguments = context.getArguments();
    List<Object> candidates = (List<Object>) arguments[0];
    if (CollectionUtils.isEmpty(candidates)) {
        return context;
    }
    // Swap in the filtered instances for the downstream load balancer.
    RequestData requestData = ThreadLocalUtils.getRequestData();
    arguments[0] = loadBalancerService.getTargetInstances(serviceId, candidates, requestData);
    return context;
}
// After the interceptor runs, the first argument must hold the single
// instance selected by the routing rules.
@Test
public void testBefore() {
    ThreadLocalUtils.setRequestData(new RequestData(Collections.emptyMap(), "", ""));
    interceptor.before(context);
    List<ServiceInstance> instances = (List<ServiceInstance>) context.getArguments()[0];
    Assert.assertNotNull(instances);
    Assert.assertEquals(1, instances.size());
}
/**
 * Returns the distribution's entropy. The value is pre-computed and cached
 * in the {@code entropy} field, so this is a constant-time accessor.
 */
@Override
public double entropy() {
    return this.entropy;
}
// Entropy of Gamma(shape=3, scale=2.1) must match the closed-form value to 1e-6.
@Test
public void testEntropy() {
    System.out.println("entropy");
    GammaDistribution instance = new GammaDistribution(3, 2.1);
    instance.rand();
    assertEquals(2.589516, instance.entropy(), 1E-6);
}
/**
 * Compiles a single-point-to-multi-point intent into one LinkCollectionIntent.
 * Computes a path from the ingress device to each egress device, allocates
 * bandwidth over all traversed connect points, and fails according to the
 * intent's partial-failure policy when paths are missing.
 *
 * @throws IntentException if no path exists at all, or if some path is
 *     missing and partial failure is not allowed
 */
@Override
public List<Intent> compile(SinglePointToMultiPointIntent intent, List<Intent> installable) {
    Set<Link> links = new HashSet<>();
    final boolean allowMissingPaths = intentAllowsPartialFailure(intent);
    boolean hasPaths = false;
    boolean missingSomePaths = false;
    for (ConnectPoint egressPoint : intent.egressPoints()) {
        if (egressPoint.deviceId().equals(intent.ingressPoint().deviceId())) {
            // Do not need to look for paths, since ingress and egress
            // devices are the same.
            if (deviceService.isAvailable(egressPoint.deviceId())) {
                hasPaths = true;
            } else {
                missingSomePaths = true;
            }
            continue;
        }
        Path path = getPath(intent, intent.ingressPoint().deviceId(), egressPoint.deviceId());
        if (path != null) {
            hasPaths = true;
            links.addAll(path.links());
        } else {
            missingSomePaths = true;
        }
    }
    // Allocate bandwidth if a bandwidth constraint is set
    ConnectPoint ingressCP = intent.filteredIngressPoint().connectPoint();
    List<ConnectPoint> egressCPs =
            intent.filteredEgressPoints().stream()
                    .map(fcp -> fcp.connectPoint())
                    .collect(Collectors.toList());
    // Bandwidth is reserved on every connect point the tree traverses:
    // both ends of each link, plus the ingress and all egress points.
    List<ConnectPoint> pathCPs =
            links.stream()
                    .flatMap(l -> Stream.of(l.src(), l.dst()))
                    .collect(Collectors.toList());
    pathCPs.add(ingressCP);
    pathCPs.addAll(egressCPs);
    allocateBandwidth(intent, pathCPs);
    if (!hasPaths) {
        throw new IntentException("Cannot find any path between ingress and egress points.");
    } else if (!allowMissingPaths && missingSomePaths) {
        throw new IntentException("Missing some paths between ingress and egress points.");
    }
    Intent result = LinkCollectionIntent.builder()
            .appId(intent.appId())
            .key(intent.key())
            .selector(intent.selector())
            .treatment(intent.treatment())
            .links(links)
            .filteredIngressPoints(ImmutableSet.of(intent.filteredIngressPoint()))
            .filteredEgressPoints(intent.filteredEgressPoints())
            .priority(intent.priority())
            .applyTreatmentOnEgress(true)
            .constraints(intent.constraints())
            .resourceGroup(intent.resourceGroup())
            .build();
    return Collections.singletonList(result);
}
// Compiling an intent whose paths cannot satisfy the constraints must throw
// an IntentException when partial failure is not allowed.
//
// Fix: the original ended with `assertThat(result, null)`, passing a null
// Hamcrest matcher. That line was dead code behind the ExpectedException rule
// and, if ever reached, would throw NullPointerException instead of a
// meaningful assertion failure. The compile call is now the final statement.
@Test
public void testPartialFailureConstraintFailure() {
    FilteredConnectPoint ingress = new FilteredConnectPoint(new ConnectPoint(DID_1, PORT_1));
    Set<FilteredConnectPoint> egress = ImmutableSet.of(
            new FilteredConnectPoint(new ConnectPoint(DID_4, PORT_2)),
            new FilteredConnectPoint(new ConnectPoint(DID_5, PORT_2)));
    SinglePointToMultiPointIntent intent = makeIntent(ingress, egress);
    String[] hops = {S3};
    SinglePointToMultiPointIntentCompiler compiler =
            makeCompiler(null, new IntentTestsMocks.FixedMP2MPMockPathService(hops), null);
    assertThat(compiler, is(notNullValue()));
    intentException.expect(IntentException.class);
    compiler.compile(intent, null);
}
/**
 * Deprecated transform: adapts the single-record Transformer API onto
 * flatTransform by wrapping each emitted KeyValue into a singleton iterable.
 *
 * @throws NullPointerException if {@code transformerSupplier} is null
 */
@Override
@Deprecated
public <KR, VR> KStream<KR, VR> transform(final org.apache.kafka.streams.kstream.TransformerSupplier<? super K, ? super V, KeyValue<KR, VR>> transformerSupplier,
                                          final String... stateStoreNames) {
    Objects.requireNonNull(transformerSupplier, "transformerSupplier can't be null");
    return flatTransform(
            new TransformerSupplierAdapter<>(transformerSupplier),
            Named.as(builder.newProcessorName(TRANSFORM_NAME)),
            stateStoreNames);
}
// Passing a null String[] (not an empty one) for state store names must be
// rejected with a descriptive NullPointerException.
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullStoreNamesOnTransform() {
    final NullPointerException exception = assertThrows(
            NullPointerException.class,
            () -> testStream.transform(transformerSupplier, (String[]) null));
    assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array"));
}
/**
 * FEEL any(): returns true if any element in the list is Boolean TRUE.
 * A null list yields false; null elements are skipped; any non-Boolean,
 * non-null element is an error — even after a true has been seen.
 */
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
    if (list == null) {
        return FEELFnResult.ofResult(false);
    }
    boolean anyTrue = false;
    for (final Object item : list) {
        if (item == null) {
            // Nulls do not contribute to the result.
            continue;
        }
        if (!(item instanceof Boolean)) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not" +
                    " a Boolean"));
        }
        anyTrue = anyTrue || (Boolean) item;
    }
    return FEELFnResult.ofResult(anyTrue);
}
// A list containing only false and null elements must evaluate to false —
// nulls are skipped rather than treated as errors.
@Test
void invokeListParamReturnNull() {
    FunctionTestUtil.assertResult(anyFunction.invoke(Arrays.asList(Boolean.FALSE, null, Boolean.FALSE)), false);
}
/**
 * Looks up a shard by its numeric id.
 *
 * @return the registered shard, or null when no shard has that id
 */
public Shard getShardById(final int shardId) {
    return this.shardMap.get(shardId);
}
// A shard added to the manager must be retrievable by its id as the same object.
@Test
void testGetShardById() {
    var shard = new Shard(1);
    shardManager.addNewShard(shard);
    var tmpShard = shardManager.getShardById(1);
    assertEquals(shard, tmpShard);
}
/**
 * Lists consumer groups cluster-wide: first fetches broker metadata, then
 * fans out a ListGroups request to every broker, aggregating listings and
 * per-node errors into a single future. Groups are included when their
 * protocol type is the consumer protocol or empty (simple groups).
 */
@Override
public ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options) {
    final KafkaFutureImpl<Collection<Object>> all = new KafkaFutureImpl<>();
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    // Step 1: discover all brokers via a metadata request to the least-loaded node.
    runnable.call(new Call("findAllBrokers", deadline, new LeastLoadedNodeProvider()) {
        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new MetadataRequestData()
                    .setTopics(Collections.emptyList())
                    .setAllowAutoTopicCreation(true));
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse metadataResponse = (MetadataResponse) abstractResponse;
            Collection<Node> nodes = metadataResponse.brokers();
            if (nodes.isEmpty())
                throw new StaleMetadataException("Metadata fetch failed due to missing broker list");
            HashSet<Node> allNodes = new HashSet<>(nodes);
            // Aggregator that completes `all` once every node has reported.
            final ListConsumerGroupsResults results = new ListConsumerGroupsResults(allNodes, all);
            // Step 2: ask each broker for the groups it coordinates.
            for (final Node node : allNodes) {
                final long nowList = time.milliseconds();
                runnable.call(new Call("listConsumerGroups", deadline, new ConstantNodeIdProvider(node.id())) {
                    @Override
                    ListGroupsRequest.Builder createRequest(int timeoutMs) {
                        List<String> states = options.states()
                                .stream()
                                .map(ConsumerGroupState::toString)
                                .collect(Collectors.toList());
                        List<String> groupTypes = options.types()
                                .stream()
                                .map(GroupType::toString)
                                .collect(Collectors.toList());
                        return new ListGroupsRequest.Builder(new ListGroupsRequestData()
                                .setStatesFilter(states)
                                .setTypesFilter(groupTypes)
                        );
                    }

                    // Adds the group to the results if it uses the consumer
                    // protocol (empty protocol type denotes a simple group).
                    private void maybeAddConsumerGroup(ListGroupsResponseData.ListedGroup group) {
                        String protocolType = group.protocolType();
                        if (protocolType.equals(ConsumerProtocol.PROTOCOL_TYPE) || protocolType.isEmpty()) {
                            final String groupId = group.groupId();
                            final Optional<ConsumerGroupState> state = group.groupState().isEmpty()
                                    ? Optional.empty()
                                    : Optional.of(ConsumerGroupState.parse(group.groupState()));
                            final Optional<GroupType> type = group.groupType().isEmpty()
                                    ? Optional.empty()
                                    : Optional.of(GroupType.parse(group.groupType()));
                            final ConsumerGroupListing groupListing = new ConsumerGroupListing(
                                    groupId,
                                    protocolType.isEmpty(),
                                    state,
                                    type
                            );
                            results.addListing(groupListing);
                        }
                    }

                    @Override
                    void handleResponse(AbstractResponse abstractResponse) {
                        final ListGroupsResponse response = (ListGroupsResponse) abstractResponse;
                        // `results` is shared across per-node callbacks; guard all mutation.
                        synchronized (results) {
                            Errors error = Errors.forCode(response.data().errorCode());
                            if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS || error == Errors.COORDINATOR_NOT_AVAILABLE) {
                                // Retriable: rethrow so the call machinery retries this node.
                                throw error.exception();
                            } else if (error != Errors.NONE) {
                                results.addError(error.exception(), node);
                            } else {
                                for (ListGroupsResponseData.ListedGroup group : response.data().groups()) {
                                    maybeAddConsumerGroup(group);
                                }
                            }
                            results.tryComplete(node);
                        }
                    }

                    @Override
                    void handleFailure(Throwable throwable) {
                        synchronized (results) {
                            results.addError(throwable, node);
                            results.tryComplete(node);
                        }
                    }
                }, nowList);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            // Broker discovery failed entirely: surface a single wrapped error.
            KafkaException exception = new KafkaException("Failed to find brokers to send ListGroups", throwable);
            all.complete(Collections.singletonList(exception));
        }
    }, nowMetadata);
    return new ListConsumerGroupsResult(all);
}
// Against a broker that only supports ListGroups v4 (no types filter):
// filtering by state must still work, while filtering by type must fail
// with UnsupportedVersionException.
@Test
public void testListConsumerGroupsWithTypesOlderBrokerVersion() throws Exception {
    ApiVersion listGroupV4 = new ApiVersion()
            .setApiKey(ApiKeys.LIST_GROUPS.id)
            .setMinVersion((short) 0)
            .setMaxVersion((short) 4);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singletonList(listGroupV4)));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE));
        // Check if we can list groups with older broker if we specify states and don't specify types.
        env.kafkaClient().prepareResponseFrom(
                expectListGroupsRequestWithFilters(singleton(ConsumerGroupState.STABLE.toString()), Collections.emptySet()),
                new ListGroupsResponse(new ListGroupsResponseData()
                        .setErrorCode(Errors.NONE.code())
                        .setGroups(Collections.singletonList(
                                new ListGroupsResponseData.ListedGroup()
                                        .setGroupId("group-1")
                                        .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
                                        .setGroupState(ConsumerGroupState.STABLE.toString())))),
                env.cluster().nodeById(0));
        ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inStates(singleton(ConsumerGroupState.STABLE));
        ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options);
        Collection<ConsumerGroupListing> listing = result.all().get();
        assertEquals(1, listing.size());
        List<ConsumerGroupListing> expected = Collections.singletonList(
                new ConsumerGroupListing("group-1", false, Optional.of(ConsumerGroupState.STABLE))
        );
        assertEquals(expected, listing);
        // Check that we cannot set a type filter with an older broker.
        env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE));
        // Reject any ListGroups request that carries a non-empty types filter.
        env.kafkaClient().prepareUnsupportedVersionResponse(request ->
                request instanceof ListGroupsRequest && !((ListGroupsRequest) request).data().typesFilter().isEmpty()
        );
        options = new ListConsumerGroupsOptions().withTypes(singleton(GroupType.CLASSIC));
        result = env.adminClient().listConsumerGroups(options);
        TestUtils.assertFutureThrows(result.all(), UnsupportedVersionException.class);
    }
}
/**
 * Handles one ChildPartitionsRecord: claims the record's start timestamp in
 * the restriction, advances the watermark to it, and registers each child
 * partition. Returns stop() if the claim fails, otherwise empty to continue.
 */
@VisibleForTesting
public Optional<ProcessContinuation> run(
        PartitionMetadata partition,
        ChildPartitionsRecord record,
        RestrictionTracker<TimestampRange, Timestamp> tracker,
        ManualWatermarkEstimator<Instant> watermarkEstimator) {
    final String token = partition.getPartitionToken();
    LOG.debug("[{}] Processing child partition record {}", token, record);
    final Timestamp startTimestamp = record.getStartTimestamp();
    // Failing to claim means the restriction was checkpointed/split past this point.
    if (!tracker.tryClaim(startTimestamp)) {
        LOG.debug("[{}] Could not claim queryChangeStream({}), stopping", token, startTimestamp);
        return Optional.of(ProcessContinuation.stop());
    }
    watermarkEstimator.setWatermark(new Instant(startTimestamp.toSqlTimestamp().getTime()));
    for (ChildPartition childPartition : record.getChildPartitions()) {
        processChildPartition(partition, record, childPartition);
    }
    LOG.debug("[{}] Child partitions action completed successfully", token);
    return Optional.empty();
}
// When the claim succeeds and the record carries two unknown child partitions
// (a split), both must be inserted in CREATED state inheriting the parent's
// end timestamp, heartbeat, and the record's start timestamp as watermark.
@Test
public void testRestrictionClaimedAndIsSplitCase() {
    final String partitionToken = "partitionToken";
    final long heartbeat = 30L;
    final Timestamp startTimestamp = Timestamp.ofTimeMicroseconds(10L);
    final Timestamp endTimestamp = Timestamp.ofTimeMicroseconds(20L);
    final PartitionMetadata partition = mock(PartitionMetadata.class);
    final ChildPartitionsRecord record = new ChildPartitionsRecord(
            startTimestamp,
            "recordSequence",
            Arrays.asList(
                    new ChildPartition("childPartition1", partitionToken),
                    new ChildPartition("childPartition2", partitionToken)),
            null);
    when(partition.getEndTimestamp()).thenReturn(endTimestamp);
    when(partition.getHeartbeatMillis()).thenReturn(heartbeat);
    when(partition.getPartitionToken()).thenReturn(partitionToken);
    when(tracker.tryClaim(startTimestamp)).thenReturn(true);
    // Neither child exists yet in the metadata table.
    when(transaction.getPartition("childPartition1")).thenReturn(null);
    when(transaction.getPartition("childPartition2")).thenReturn(null);
    final Optional<ProcessContinuation> maybeContinuation =
            action.run(partition, record, tracker, watermarkEstimator);
    assertEquals(Optional.empty(), maybeContinuation);
    verify(watermarkEstimator).setWatermark(new Instant(startTimestamp.toSqlTimestamp().getTime()));
    verify(transaction)
            .insert(
                    PartitionMetadata.newBuilder()
                            .setPartitionToken("childPartition1")
                            .setParentTokens(Sets.newHashSet(partitionToken))
                            .setStartTimestamp(startTimestamp)
                            .setEndTimestamp(endTimestamp)
                            .setHeartbeatMillis(heartbeat)
                            .setState(CREATED)
                            .setWatermark(startTimestamp)
                            .build());
    verify(transaction)
            .insert(
                    PartitionMetadata.newBuilder()
                            .setPartitionToken("childPartition2")
                            .setParentTokens(Sets.newHashSet(partitionToken))
                            .setStartTimestamp(startTimestamp)
                            .setEndTimestamp(endTimestamp)
                            .setHeartbeatMillis(heartbeat)
                            .setState(CREATED)
                            .setWatermark(startTimestamp)
                            .build());
}
/**
 * Convenience overload: builds a MetricName with no scope.
 */
public static MetricName getMetricName(String group, String typeName, String name) {
    return getMetricName(group, typeName, name, null);
}
// A metric created without tags must have a null scope and an MBean name
// containing only type and name.
@Test
public void testUntaggedMetric() {
    MetricName metricName = KafkaYammerMetrics.getMetricName(
            "kafka.metrics",
            "TestMetrics",
            "UntaggedMetric");
    assertEquals("kafka.metrics", metricName.getGroup());
    assertEquals("TestMetrics", metricName.getType());
    assertEquals("UntaggedMetric", metricName.getName());
    assertEquals("kafka.metrics:type=TestMetrics,name=UntaggedMetric", metricName.getMBeanName());
    assertNull(metricName.getScope());
}
/**
 * Converts the flows stored in the issue's serialized locations into Flow
 * objects for the given component. A null locations container yields an
 * empty list; otherwise a mutable LinkedList of converted flows is returned.
 */
public List<Flow> convertFlows(String componentName, @Nullable DbIssues.Locations issueLocations) {
    if (issueLocations == null) {
        return Collections.emptyList();
    }
    List<Flow> flows = new LinkedList<>();
    for (DbIssues.Flow sourceFlow : issueLocations.getFlowList()) {
        flows.add(toFlow(componentName, sourceFlow));
    }
    return flows;
}
// A single DB flow with one location must convert to exactly one Flow
// containing one matching Location.
@Test
public void convertFlows_withSingleDbLocations_returnsCorrectFlow() {
    DbIssues.Location location = createDbLocation("comp_id_1");
    DbIssues.Locations issueLocations = DbIssues.Locations.newBuilder()
            .addFlow(createFlow(location))
            .build();
    List<Flow> flows = flowGenerator.convertFlows(COMPONENT_NAME, issueLocations);
    assertThat(flows).hasSize(1);
    Flow singleFlow = flows.iterator().next();
    assertThat(singleFlow.getLocations()).hasSize(1);
    Location singleLocation = singleFlow.getLocations().iterator().next();
    assertLocationMatches(singleLocation, location);
}
/**
 * Returns the DataTableType built for this definition's annotated method.
 */
@Override
public DataTableType dataTableType() {
    return this.dataTableType;
}
// A static converter method must be invocable without an instance (lookup
// returns no body), and its result must flow through the data table type.
@Test
void static_methods_are_invoked_without_a_body() throws NoSuchMethodException {
    Method method = JavaDataTableTypeDefinitionTest.class.getMethod("static_convert_data_table_to_string", DataTable.class);
    JavaDataTableTypeDefinition definition =
            new JavaDataTableTypeDefinition(method, lookupForStaticMethod, new String[0]);
    assertThat(definition.dataTableType().transform(dataTable.cells()),
            is("static_convert_data_table_to_string=[[a, b], [c, d]]"));
}
/**
 * Factory for the watermark trigger that fires once the watermark passes the
 * end of the window.
 */
public static FromEndOfWindow pastEndOfWindow() {
    return new FromEndOfWindow();
}
// With late firings configured: no firing before the watermark (even if the
// unused early trigger would fire), one firing at the watermark, then the
// late trigger governs subsequent firings; the window is never finished.
@Test
public void testAtWatermarkAndLate() throws Exception {
    tester = TriggerStateMachineTester.forTrigger(
            AfterWatermarkStateMachine.pastEndOfWindow().withLateFirings(mockLate),
            FixedWindows.of(Duration.millis(100)));
    injectElements(1);
    IntervalWindow window = new IntervalWindow(new Instant(0), new Instant(100));
    // No early firing, just double checking
    when(mockEarly.shouldFire(anyTriggerContext())).thenReturn(true);
    assertFalse(tester.shouldFire(window));
    tester.fireIfShouldFire(window);
    assertFalse(tester.isMarkedFinished(window));
    // Fire due to watermark
    when(mockEarly.shouldFire(anyTriggerContext())).thenReturn(false);
    tester.advanceInputWatermark(new Instant(100));
    assertTrue(tester.shouldFire(window));
    tester.fireIfShouldFire(window);
    assertFalse(tester.isMarkedFinished(window));
    testRunningAsTrigger(mockLate, window);
}
/**
 * Applies this template's entries to the given child queue path.
 * Delegates to the three-argument variant with the boolean flag set to
 * false — presumably the "leaf queue" handling switch; confirm against the
 * full overload's documentation.
 */
public void setTemplateEntriesForChild(CapacitySchedulerConfiguration conf, QueuePath childQueuePath) {
    setTemplateEntriesForChild(conf, childQueuePath, false);
}
// With auto-created queue depth capped at 1 per level, each wildcard
// template must apply exactly one level down: root.* sets a.b's weight,
// a.* sets a.b.c's weight.
@Test
public void testWildcardTemplateWithLimitedAutoCreatedQueueDepth() {
    conf.set(getTemplateKey(TEST_QUEUE_ROOT_WILDCARD, "capacity"), "6w");
    conf.set(getTemplateKey(TEST_QUEUE_A_WILDCARD, "capacity"), "5w");
    conf.setMaximumAutoCreatedQueueDepth(TEST_QUEUE_A, 1);
    conf.setMaximumAutoCreatedQueueDepth(TEST_QUEUE_AB, 1);
    new AutoCreatedQueueTemplate(conf, TEST_QUEUE_A)
            .setTemplateEntriesForChild(conf, TEST_QUEUE_AB);
    new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB)
            .setTemplateEntriesForChild(conf, TEST_QUEUE_ABC);
    Assert.assertEquals("weight is not set", 6f,
            conf.getNonLabeledQueueWeight(TEST_QUEUE_AB), 10e-6);
    Assert.assertEquals("weight is not set", 5f,
            conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6);
}
/**
 * Synchronizes the target directory with the source using snapshot diffs:
 * runs pre-checks, computes diffs, then applies renames/deletes on the
 * target via a temporary directory. Returns false (falling back to a
 * regular distcp) on any failure. On exit — success or not — the source
 * path is rewritten to the "to" snapshot path.
 */
public boolean sync() throws IOException {
    if (!preSyncCheck()) {
        return false;
    }
    if (!getAllDiffs()) {
        return false;
    }
    List<Path> sourcePaths = context.getSourcePaths();
    final Path sourceDir = sourcePaths.get(0);
    final Path targetDir = context.getTargetPath();
    final FileSystem tfs = targetDir.getFileSystem(conf);
    Path tmpDir = null;
    try {
        tmpDir = createTargetTmpDir(tfs, targetDir);
        DiffInfo[] renameAndDeleteDiffs = getRenameAndDeleteDiffsForSync(targetDir);
        if (renameAndDeleteDiffs.length > 0) {
            // do the real sync work: deletion and rename
            syncDiff(renameAndDeleteDiffs, tfs, tmpDir);
        }
        return true;
    } catch (Exception e) {
        // Best-effort: log and fall back to a non-diff copy instead of failing distcp.
        DistCp.LOG.warn("Failed to use snapshot diff for distcp", e);
        return false;
    } finally {
        deleteTargetTmpDir(tfs, tmpDir);
        // TODO: since we have tmp directory, we can support "undo" with failures
        // set the source path using the snapshot path
        context.setSourcePaths(Arrays.asList(getSnapshotPath(sourceDir, context.getToSnapshot())));
    }
}
// The sync must reject snapshot pairs where the "from" snapshot (s2) was
// created before the "to" snapshot (s1) on the source — creation-time
// ordering is validated, not snapshot names.
@Test
public void testSyncSnapshotTimeStampChecking() throws Exception {
    initData(source);
    initData(target);
    dfs.allowSnapshot(source);
    dfs.allowSnapshot(target);
    dfs.createSnapshot(source, "s2");
    dfs.createSnapshot(target, "s1");
    // Sleep one second to make snapshot s1 created later than s2
    Thread.sleep(1000);
    dfs.createSnapshot(source, "s1");
    boolean threwException = false;
    try {
        DistCpSync distCpSync = new DistCpSync(context, conf);
        // do the sync
        distCpSync.sync();
    } catch (HadoopIllegalArgumentException e) {
        threwException = true;
        GenericTestUtils.assertExceptionContains(
                "Snapshot s2 should be newer than s1", e);
    }
    Assert.assertTrue(threwException);
}
/**
 * Withdraws the intent previously installed for the given route's prefix,
 * if one exists. Synchronized so the routeIntents map and the intent
 * synchronizer stay consistent under concurrent route updates.
 */
private void withdraw(ResolvedRoute route) {
    synchronized (this) {
        IpPrefix prefix = route.prefix();
        MultiPointToSinglePointIntent existing = routeIntents.remove(prefix);
        if (existing == null) {
            log.trace("No intent in routeIntents to delete for prefix: {}", prefix);
            return;
        }
        intentSynchronizer.withdraw(existing);
    }
}
// Removing the egress interface of an installed route must withdraw the
// corresponding multipoint-to-single-point intent (EasyMock verifies the
// withdraw call recorded between reset() and replay()).
@Test
public void testRemoveEgressInterface() {
  // Add a route first
  testRouteAddToNoVlan();

  // Create existing intent
  MultiPointToSinglePointIntent removedIntent =
      createIntentToThreeSrcOneTwo(PREFIX1);

  // Set up expectation
  reset(intentSynchronizer);
  // Setup the expected intents
  intentSynchronizer.withdraw(eqExceptId(removedIntent));
  replay(intentSynchronizer);

  // Define the existing egress interface and remove it
  Interface intf = new Interface("sw3-eth1", SW3_ETH1,
      Collections.singletonList(IIP3),
      MAC3, VlanId.NONE);
  InterfaceEvent intfEvent =
      new InterfaceEvent(InterfaceEvent.Type.INTERFACE_REMOVED, intf);
  interfaceListener.event(intfEvent);

  verify(intentSynchronizer);
}
/**
 * Two keys are equal when the superclass considers them equal (same name,
 * per the base implementation) and they carry the same {@code uniqueId}.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    // Type check first, then defer to the superclass comparison.
    if (!(o instanceof DurableExecutorWaitNotifyKey) || !super.equals(o)) {
        return false;
    }
    return uniqueId == ((DurableExecutorWaitNotifyKey) o).uniqueId;
}
// Exercises the equals() contract: reflexivity, attribute-wise equality,
// inequality against null / foreign types / differing uniqueId or name.
@Test
public void testEquals() {
    assertEquals(notifyKey, notifyKey);
    assertEquals(notifyKey, notifyKeySameAttributes);

    assertNotEquals(null, notifyKey);
    assertNotEquals(new Object(), notifyKey);

    assertNotEquals(notifyKey, notifyKeyOtherUniqueId);
    assertNotEquals(notifyKey, notifyKeyOtherName);
}
/**
 * Creates a meeting: persists the meeting, validates and persists its
 * available dates, registers the host attendee, and issues a JWT for the
 * host. Past dates are rejected before the dates are saved (the thrown
 * exception presumably rolls the transaction back — see @Transactional).
 */
@Transactional
public MeetingCreateResponse create(MeetingCreateRequest request) {
    Meeting meeting = saveMeeting(request.meetingName(), request.toMeetingStartTime(), request.toMeetingEndTime());
    AvailableDates meetingDates = new AvailableDates(request.toAvailableMeetingDates(), meeting);
    // Validate before saving the dates: past meeting dates are not allowed.
    validateNotPast(meetingDates);
    availableDateRepository.saveAll(meetingDates.getAvailableDates());
    Attendee attendee = saveHostAttendee(meeting, request.hostName(), request.hostPassword());
    // Token authenticates the host attendee for subsequent requests.
    String token = jwtManager.generate(attendee.getId());
    return MeetingCreateResponse.from(meeting, attendee, meetingDates, token);
}
@DisplayName("약속을 생성할 때 과거 날짜를 보내면 예외가 발생합니다.") @Test void throwExceptionWhenDatesHavePast() { //given setFixedClock(); LocalDate today = LocalDate.now(clock); LocalDate yesterday = today.minusDays(1); MeetingCreateRequest request = new MeetingCreateRequest( "momoHost", "momo", "momoMeeting", List.of(yesterday.toString(), today.toString()), "08:00", "22:00" ); //when //then assertThatThrownBy(() -> meetingService.create(request)) .isInstanceOf(MomoException.class) .hasMessage(MeetingErrorCode.PAST_NOT_PERMITTED.message()); }
/**
 * Sets the analysis UUID this property belongs to.
 *
 * @param analysisUuid non-null analysis UUID
 * @return this, for chaining
 * @throws NullPointerException if {@code analysisUuid} is null
 */
public AnalysisPropertyDto setAnalysisUuid(String analysisUuid) {
    // requireNonNull returns its argument, so check and assign in one step.
    this.analysisUuid = requireNonNull(analysisUuid, "analysisUuid cannot be null");
    return this;
}
// setAnalysisUuid(null) must fail fast with a descriptive NPE.
@Test
void null_analysis_uuid_should_throw_NPE() {
    underTest = new AnalysisPropertyDto();

    assertThatThrownBy(() -> underTest.setAnalysisUuid(null))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("analysisUuid cannot be null");
}
/**
 * Returns whether {@code input} is exactly one quoted token: it starts and
 * ends with the quote character and contains at least one non-quote
 * character in between. {@code null}, blank, and empty-quoted ({@code ""})
 * inputs return {@code false}.
 *
 * @param input the candidate string, may be null
 * @return true if the whole string is a single non-empty quoted token
 */
protected static boolean isSingleQuoted(String input) {
    if (input == null || input.isBlank()) {
        return false;
    }
    // String.matches() already anchors the pattern to the whole input, so
    // the original explicit '^' and redundant '{1}' quantifiers are dropped.
    return input.matches(QUOTE_CHAR + "[^" + QUOTE_CHAR + "]+" + QUOTE_CHAR);
}
// An empty quoted string ("") has no content between the quotes, so it
// must NOT count as single-quoted.
@Test
public void testEmptySingleQuotedNegative() {
    assertFalse(isSingleQuoted("\"\""));
}
/**
 * Returns the join-candidate addresses reported by the discovery SPI,
 * excluding this member's own addresses. Depending on {@code
 * usePublicAddress} either the public or private address of each
 * discovered node is used. Side effect: when running on private addresses
 * and the local node is found with a public address, that public address
 * is recorded in the local member's address map under the CLIENT "public"
 * endpoint qualifier.
 */
@Override
protected Collection<Address> getPossibleAddresses() {
    Iterable<DiscoveryNode> discoveredNodes =
        checkNotNull(discoveryService.discoverNodes(), "Discovered nodes cannot be null!");
    MemberImpl localMember = node.nodeEngine.getLocalMember();
    Set<Address> localAddresses = node.getLocalAddressRegistry().getLocalAddresses();
    Collection<Address> possibleMembers = new ArrayList<>();
    for (DiscoveryNode discoveryNode : discoveredNodes) {
        Address discoveredAddress =
            usePublicAddress ? discoveryNode.getPublicAddress() : discoveryNode.getPrivateAddress();
        if (localAddresses.contains(discoveredAddress)) {
            if (!usePublicAddress && discoveryNode.getPublicAddress() != null) {
                // enrich member with client public address
                localMember.getAddressMap().put(EndpointQualifier.resolve(ProtocolType.CLIENT, "public"),
                        publicAddress(localMember, discoveryNode));
            }
            // Skip ourselves: the local node is not a remote join candidate.
            continue;
        }
        possibleMembers.add(discoveredAddress);
    }
    return possibleMembers;
}
// With usePublicAddress=false, getPossibleAddresses() should still enrich
// the local member's address map with the discovered client public address.
@Test
public void test_DiscoveryJoiner_enriches_member_with_public_address() {
    DiscoveryJoiner joiner = new DiscoveryJoiner(getNode(hz), service, false);
    doReturn(discoveryNodes).when(service).discoverNodes();

    // Return value unused here; the call is invoked for its side effect.
    Collection<Address> addresses = joiner.getPossibleAddresses();

    Address clientPublicAddress =
        getNode(hz).getLocalMember().getAddressMap().get(CLIENT_PUBLIC_ENDPOINT_QUALIFIER);
    assertEquals(Address.createUnresolvedAddress("127.0.0.2", 6701), clientPublicAddress);
}
/**
 * Collects the remaining split segments into a list.
 *
 * @param trim whether each segment should be trimmed
 * @return the list of segments, trimmed when requested
 */
public List<String> toList(boolean trim) {
    // Decide the mapping function once instead of re-testing the flag
    // for every element inside the lambda.
    if (trim) {
        return toList(str -> StrUtil.trim(str));
    }
    return toList(str -> str);
}
// Splitting "a, ,,efedsfs, ddf," on separator "e" (case-sensitive false,
// unlimited parts, ignore-empty=true) should yield exactly 3 segments.
@Test
public void splitByStrTest() {
    String str1 = "a, ,,efedsfs, ddf,";
    SplitIter splitIter = new SplitIter(str1,
            new StrFinder("e", false),
            Integer.MAX_VALUE,
            true
    );
    // toList(false): keep segments untrimmed.
    final List<String> strings = splitIter.toList(false);
    assertEquals(3, strings.size());
}
/**
 * Instantiates a connector by class name or registered alias.
 *
 * @param connectorClassOrAlias fully-qualified class name or alias
 * @return a new connector instance
 */
public Connector newConnector(String connectorClassOrAlias) {
    // Resolve the alias/class first, then instantiate via the plugin loader.
    return newPlugin(connectorClass(connectorClassOrAlias));
}
// A connector class whose default constructor is private cannot be
// instantiated by the plugin loader and must surface as a ConnectException.
@Test
public void shouldThrowIfDefaultConstructorPrivate() {
    assertThrows(ConnectException.class, () -> plugins.newConnector(
            TestPlugin.BAD_PACKAGING_DEFAULT_CONSTRUCTOR_PRIVATE_CONNECTOR.className()
    ));
}
/**
 * Finds pipeline-breaker nodes in the stage plan and, when any exist,
 * executes them ahead of the main OpChain.
 *
 * @return {@code null} when the plan contains no pipeline breakers; a
 *         {@link PipelineBreakerResult} carrying an error block when
 *         execution throws; otherwise the result of {@code execute}.
 */
@Nullable
public static PipelineBreakerResult executePipelineBreakers(OpChainSchedulerService scheduler,
    MailboxService mailboxService, WorkerMetadata workerMetadata, StagePlan stagePlan,
    Map<String, String> opChainMetadata, long requestId, long deadlineMs) {
  PipelineBreakerContext pipelineBreakerContext = new PipelineBreakerContext();
  PipelineBreakerVisitor.visitPlanRoot(stagePlan.getRootNode(), pipelineBreakerContext);
  if (!pipelineBreakerContext.getPipelineBreakerMap().isEmpty()) {
    try {
      // TODO: This PlanRequestContext needs to indicate it is a pre-stage opChain and only listens to pre-stage
      //     OpChain receive-mail callbacks.
      // see also: MailboxIdUtils TODOs, de-couple mailbox id from query information
      OpChainExecutionContext opChainExecutionContext =
          new OpChainExecutionContext(mailboxService, requestId, deadlineMs, opChainMetadata,
              stagePlan.getStageMetadata(), workerMetadata, null);
      return execute(scheduler, pipelineBreakerContext, opChainExecutionContext);
    } catch (Exception e) {
      // Surface the failure as an error block so the caller can propagate it
      // instead of crashing the stage.
      LOGGER.error("Caught exception executing pipeline breaker for request: {}, stage: {}", requestId,
          stagePlan.getStageMetadata().getStageId(), e);
      return new PipelineBreakerResult(pipelineBreakerContext.getNodeIdMap(), Collections.emptyMap(),
          TransferableBlockUtils.getErrorTransferableBlock(e), null);
    }
  } else {
    return null;
  }
}
// If one pipeline-breaker mailbox delivers an error block, the combined
// result must surface as an error even though the other mailbox ended
// cleanly with EOS.
@Test
public void shouldReturnErrorBlocksWhenReceivedErrorFromSender() {
  MailboxReceiveNode mailboxReceiveNode1 = getPBReceiveNode(1);
  MailboxReceiveNode incorrectlyConfiguredMailboxNode = getPBReceiveNode(2);
  JoinNode joinNode = new JoinNode(0, DATA_SCHEMA, PlanNode.NodeHint.EMPTY,
      List.of(mailboxReceiveNode1, incorrectlyConfiguredMailboxNode),
      JoinRelType.INNER, List.of(0), List.of(0), List.of());
  StagePlan stagePlan = new StagePlan(joinNode, _stageMetadata);

  // when
  when(_mailboxService.getReceivingMailbox(MAILBOX_ID_1)).thenReturn(_mailbox1);
  when(_mailboxService.getReceivingMailbox(MAILBOX_ID_2)).thenReturn(_mailbox2);
  Object[] row1 = new Object[]{1, 1};
  Object[] row2 = new Object[]{2, 3};
  // Mailbox 1: one data block then an error; mailbox 2: data then clean EOS.
  when(_mailbox1.poll()).thenReturn(OperatorTestUtil.block(DATA_SCHEMA, row1),
      TransferableBlockUtils.getErrorTransferableBlock(new RuntimeException("ERROR ON 1")));
  when(_mailbox2.poll()).thenReturn(OperatorTestUtil.block(DATA_SCHEMA, row2),
      TransferableBlockTestUtils.getEndOfStreamTransferableBlock(0));

  PipelineBreakerResult pipelineBreakerResult =
      PipelineBreakerExecutor.executePipelineBreakers(_scheduler, _mailboxService, _workerMetadata, stagePlan,
          ImmutableMap.of(), 0, Long.MAX_VALUE);

  // then
  // should fail even if one of the 2 PB doesn't contain error block from sender.
  Assert.assertNotNull(pipelineBreakerResult);
  TransferableBlock errorBlock = pipelineBreakerResult.getErrorBlock();
  Assert.assertNotNull(errorBlock);
  Assert.assertTrue(errorBlock.isErrorBlock());
}
/**
 * Builds the GET response envelope: unwraps {@link GetResult} (custom
 * status) or uses the record directly with 200 OK, optionally fills in
 * schema defaults, applies field projection (timed for framework metrics),
 * and wraps the projected data in an AnyRecord.
 */
@Override
public RestLiResponseData<GetResponseEnvelope> buildRestLiResponseData(Request request, RoutingResult routingResult,
    Object result, Map<String, String> headers, List<HttpCookie> cookies) {
  final RecordTemplate record;
  final HttpStatus status;
  if (result instanceof GetResult) {
    // GetResult lets the resource override the HTTP status.
    final GetResult<?> getResult = (GetResult<?>) result;
    record = getResult.getValue();
    status = getResult.getStatus();
  } else {
    record = (RecordTemplate) result;
    status = HttpStatus.S_200_OK;
  }
  final ResourceContext resourceContext = routingResult.getContext();
  DataMap rawData = record.data();
  RecordDataSchema schema = record.schema();
  if (resourceContext.isFillInDefaultsRequested()) {
    // Populate absent fields with their schema-declared defaults.
    rawData = (DataMap) ResponseUtils.fillInDataDefault(schema, rawData);
  }
  TimingContextUtil.beginTiming(resourceContext.getRawRequestContext(),
      FrameworkTimingKeys.SERVER_RESPONSE_RESTLI_PROJECTION_APPLY.key());
  final DataMap data = RestUtils.projectFields(rawData, resourceContext);
  TimingContextUtil.endTiming(resourceContext.getRawRequestContext(),
      FrameworkTimingKeys.SERVER_RESPONSE_RESTLI_PROJECTION_APPLY.key());
  return new RestLiResponseDataImpl<>(new GetResponseEnvelope(status, new AnyRecord(data)), headers, cookies);
}
// With AUTOMATIC projection and a positive mask selecting only
// "fruitsField", the response record must contain exactly that one field.
@Test
public void testProjectionInBuildRestliResponseData() {
  MaskTree maskTree = new MaskTree();
  maskTree.addOperation(new PathSpec("fruitsField"), MaskOperation.POSITIVE_MASK_OP);

  ServerResourceContext mockContext = getMockResourceContext(maskTree, ProjectionMode.AUTOMATIC);
  RoutingResult routingResult = new RoutingResult(mockContext, getMockResourceMethodDescriptor());

  Foo value = new Foo().setStringField("value").setFruitsField(Fruits.APPLE);
  GetResponseBuilder responseBuilder = new GetResponseBuilder();
  RestLiResponseData<GetResponseEnvelope> responseData =
      responseBuilder.buildRestLiResponseData(null, routingResult, value,
          Collections.emptyMap(), Collections.emptyList());
  RecordTemplate record = responseData.getResponseEnvelope().getRecord();
  // "stringField" must have been projected away.
  Assert.assertEquals(record.data().size(), 1);
  Assert.assertEquals(record.data().get("fruitsField"), Fruits.APPLE.toString());
  EasyMock.verify(mockContext);
}
/**
 * Validates the given tags, strips null entries and system tags, and
 * applies the remainder to the rule.
 *
 * @return true if the rule's tag set actually changed
 * @throws IllegalArgumentException if any tag has an invalid format
 */
static boolean applyTags(RuleDto rule, Set<String> tags) {
  // Reject the whole call if any tag is malformed.
  for (String tag : tags) {
    RuleTagFormat.validate(tag);
  }

  Set<String> previousTags = rule.getTags();
  final Set<String> reserved = rule.getSystemTags();
  // Lazy Guava view: drop nulls and anything already covered by system tags.
  Set<String> newTags = Sets.filter(tags, t -> t != null && !reserved.contains(t));
  rule.setTags(newTags);

  boolean changed = newTags.size() != previousTags.size() || !newTags.containsAll(previousTags);
  return changed;
}
// Well-formed tags are applied; a malformed tag ("Java Eight") must be
// rejected by RuleTagFormat.validate with a descriptive message.
@Test
public void applyTags_validate_format() {
  RuleDto rule = new RuleDto();
  boolean changed = RuleTagHelper.applyTags(rule, Sets.newHashSet("java8", "security"));
  assertThat(rule.getTags()).containsOnly("java8", "security");
  assertThat(changed).isTrue();

  try {
    RuleTagHelper.applyTags(rule, Sets.newHashSet("Java Eight"));
    fail();
  } catch (IllegalArgumentException e) {
    assertThat(e.getMessage()).startsWith("Entry 'Java Eight' is invalid");
  }
}
/**
 * Renders a metadata record for logging, redacting sensitive content:
 * sensitive ConfigRecord values and all SCRAM credential material are
 * replaced with "(redacted)". All other record types are rendered via
 * their own {@code toString()}.
 */
public String toLoggableString(ApiMessage message) {
    MetadataRecordType type = MetadataRecordType.fromId(message.apiKey());
    if (type == MetadataRecordType.CONFIG_RECORD) {
        ConfigRecord config = (ConfigRecord) message;
        if (!configSchema.isSensitive(config)) {
            return config.toString();
        }
        // Redact on a copy so the original record is left untouched.
        ConfigRecord duplicate = config.duplicate();
        duplicate.setValue("(redacted)");
        return duplicate.toString();
    }
    if (type == MetadataRecordType.USER_SCRAM_CREDENTIAL_RECORD) {
        UserScramCredentialRecord record = (UserScramCredentialRecord) message;
        StringBuilder sb = new StringBuilder("UserScramCredentialRecord(");
        sb.append("name=").append(record.name() == null ? "null" : "'" + record.name() + "'");
        sb.append(", mechanism=").append(record.mechanism());
        sb.append(", salt=(redacted)");
        sb.append(", storedKey=(redacted)");
        sb.append(", serverKey=(redacted)");
        sb.append(", iterations=").append(record.iterations());
        sb.append(")");
        return sb.toString();
    }
    return message.toString();
}
// A sensitive ConfigRecord must have its value replaced with "(redacted)"
// in the loggable rendering, while all other fields print normally.
@Test
public void testSensitiveConfigRecordToString() {
    assertEquals("ConfigRecord(resourceType=4, resourceName='0', name='quux', "
        + "value='(redacted)')",
        REDACTOR.toLoggableString(new ConfigRecord().
            setResourceType(BROKER.id()).
            setResourceName("0").
            setName("quux").
            setValue("mysecret")));
}
/**
 * Runs the Spark segment-generation job: registers the configured PinotFS
 * implementations, lists matched input files, distributes them to Spark
 * executors, and builds + uploads one Pinot segment (plus optional
 * metadata tarball) per input file. When a staging directory is
 * configured, segments are written there first and copied to the output
 * directory at the end; the staging directory is always cleaned up.
 *
 * @throws Exception on filesystem, Spark, or segment-generation failure
 */
@Override
public void run()
    throws Exception {
  //init all file systems
  List<PinotFSSpec> pinotFSSpecs = _spec.getPinotFSSpecs();
  for (PinotFSSpec pinotFSSpec : pinotFSSpecs) {
    PinotFSFactory.register(pinotFSSpec.getScheme(), pinotFSSpec.getClassName(), new PinotConfiguration(pinotFSSpec));
  }

  //Get list of files to process
  URI inputDirURI = new URI(_spec.getInputDirURI());
  if (inputDirURI.getScheme() == null) {
    // Scheme-less paths are treated as local files.
    inputDirURI = new File(_spec.getInputDirURI()).toURI();
  }
  PinotFS inputDirFS = PinotFSFactory.create(inputDirURI.getScheme());
  List<String> filteredFiles = SegmentGenerationUtils.listMatchedFilesWithRecursiveOption(inputDirFS, inputDirURI,
      _spec.getIncludeFileNamePattern(), _spec.getExcludeFileNamePattern(), _spec.isSearchRecursively());
  LOGGER.info("Found {} files to create Pinot segments!", filteredFiles.size());

  //Get outputFS for writing output pinot segments
  URI outputDirURI = new URI(_spec.getOutputDirURI());
  if (outputDirURI.getScheme() == null) {
    outputDirURI = new File(_spec.getOutputDirURI()).toURI();
  }
  PinotFS outputDirFS = PinotFSFactory.create(outputDirURI.getScheme());
  outputDirFS.mkdir(outputDirURI);

  //Get staging directory for temporary output pinot segments
  String stagingDir = _spec.getExecutionFrameworkSpec().getExtraConfigs().get(STAGING_DIR);
  URI stagingDirURI = null;
  if (stagingDir != null) {
    stagingDirURI = URI.create(stagingDir);
    if (stagingDirURI.getScheme() == null) {
      stagingDirURI = new File(stagingDir).toURI();
    }
    // Staging and output must be on the same filesystem so the final
    // copyDir between them works.
    if (!outputDirURI.getScheme().equals(stagingDirURI.getScheme())) {
      throw new RuntimeException(String
          .format("The scheme of staging directory URI [%s] and output directory URI [%s] has to be same.",
              stagingDirURI, outputDirURI));
    }
    outputDirFS.mkdir(stagingDirURI);
  }
  try {
    JavaSparkContext sparkContext = JavaSparkContext.fromSparkContext(SparkContext.getOrCreate());

    // Pinot plugins are necessary to launch Pinot ingestion job from every mapper.
    // In order to ensure pinot plugins would be loaded to each worker, this method
    // tars entire plugins directory and set this file into Distributed cache.
    // Then each executor job will untar the plugin tarball, and set system properties accordingly.
    packPluginsToDistributedCache(sparkContext);

    // Add dependency jars
    if (_spec.getExecutionFrameworkSpec().getExtraConfigs().containsKey(DEPS_JAR_DIR)) {
      addDepsJarToDistributedCache(sparkContext, _spec.getExecutionFrameworkSpec().getExtraConfigs().get(DEPS_JAR_DIR));
    }
    // Build "<path> <sequenceId>" work items.
    List<String> pathAndIdxList = new ArrayList<>();
    if (!SegmentGenerationJobUtils.useGlobalDirectorySequenceId(_spec.getSegmentNameGeneratorSpec())) {
      // Per-directory sequence ids: number files within their parent
      // directory, sorted for determinism.
      Map<String, List<String>> localDirIndex = new HashMap<>();
      for (String filteredFile : filteredFiles) {
        Path filteredParentPath = Paths.get(filteredFile).getParent();
        if (!localDirIndex.containsKey(filteredParentPath.toString())) {
          localDirIndex.put(filteredParentPath.toString(), new ArrayList<>());
        }
        localDirIndex.get(filteredParentPath.toString()).add(filteredFile);
      }
      for (String parentPath : localDirIndex.keySet()) {
        List<String> siblingFiles = localDirIndex.get(parentPath);
        Collections.sort(siblingFiles);
        for (int i = 0; i < siblingFiles.size(); i++) {
          pathAndIdxList.add(String.format("%s %d", siblingFiles.get(i), i));
        }
      }
    } else {
      // Global sequence ids: number files across the whole input set.
      for (int i = 0; i < filteredFiles.size(); i++) {
        pathAndIdxList.add(String.format("%s %d", filteredFiles.get(i), i));
      }
    }
    int numDataFiles = pathAndIdxList.size();
    int jobParallelism = _spec.getSegmentCreationJobParallelism();
    if (jobParallelism <= 0 || jobParallelism > numDataFiles) {
      jobParallelism = numDataFiles;
    }
    JavaRDD<String> pathRDD = sparkContext.parallelize(pathAndIdxList, jobParallelism);

    final String pluginsInclude =
        (sparkContext.getConf().contains(PLUGINS_INCLUDE_PROPERTY_NAME)) ? sparkContext.getConf()
            .get(PLUGINS_INCLUDE_PROPERTY_NAME) : null;
    // Effectively-final copies for capture by the executor closure.
    final URI finalInputDirURI = inputDirURI;
    final URI finalOutputDirURI = (stagingDirURI == null) ? outputDirURI : stagingDirURI;
    // Prevent using lambda expression in Spark to avoid potential serialization exceptions, use inner function
    // instead.
    pathRDD.foreach(new VoidFunction<String>() {
      @Override
      public void call(String pathAndIdx)
          throws Exception {
        PluginManager.get().init();
        // Re-register PinotFS implementations inside the executor JVM.
        for (PinotFSSpec pinotFSSpec : _spec.getPinotFSSpecs()) {
          PinotFSFactory
              .register(pinotFSSpec.getScheme(), pinotFSSpec.getClassName(), new PinotConfiguration(pinotFSSpec));
        }
        PinotFS finalOutputDirFS = PinotFSFactory.create(finalOutputDirURI.getScheme());
        // Work item format is "<path> <sequenceId>".
        String[] splits = pathAndIdx.split(" ");
        String path = splits[0];
        int idx = Integer.valueOf(splits[1]);
        // Load Pinot Plugins copied from Distributed cache.
        File localPluginsTarFile = new File(PINOT_PLUGINS_TAR_GZ);
        if (localPluginsTarFile.exists()) {
          File pluginsDirFile = new File(PINOT_PLUGINS_DIR + "-" + idx);
          try {
            TarCompressionUtils.untar(localPluginsTarFile, pluginsDirFile);
          } catch (Exception e) {
            LOGGER.error("Failed to untar local Pinot plugins tarball file [{}]", localPluginsTarFile, e);
            throw new RuntimeException(e);
          }
          LOGGER.info("Trying to set System Property: [{}={}]", PLUGINS_DIR_PROPERTY_NAME,
              pluginsDirFile.getAbsolutePath());
          System.setProperty(PLUGINS_DIR_PROPERTY_NAME, pluginsDirFile.getAbsolutePath());
          if (pluginsInclude != null) {
            LOGGER.info("Trying to set System Property: [{}={}]", PLUGINS_INCLUDE_PROPERTY_NAME, pluginsInclude);
            System.setProperty(PLUGINS_INCLUDE_PROPERTY_NAME, pluginsInclude);
          }
          LOGGER.info("Pinot plugins System Properties are set at [{}], plugins includes [{}]",
              System.getProperty(PLUGINS_DIR_PROPERTY_NAME), System.getProperty(PLUGINS_INCLUDE_PROPERTY_NAME));
        } else {
          LOGGER.warn("Cannot find local Pinot plugins tar file at [{}]", localPluginsTarFile.getAbsolutePath());
        }
        URI inputFileURI = URI.create(path);
        if (inputFileURI.getScheme() == null) {
          inputFileURI =
              new URI(finalInputDirURI.getScheme(), inputFileURI.getSchemeSpecificPart(), inputFileURI.getFragment());
        }

        //create localTempDir for input and output
        File localTempDir = new File(FileUtils.getTempDirectory(), "pinot-" + UUID.randomUUID());
        File localInputTempDir = new File(localTempDir, "input");
        FileUtils.forceMkdir(localInputTempDir);
        File localOutputTempDir = new File(localTempDir, "output");
        FileUtils.forceMkdir(localOutputTempDir);

        //copy input path to local
        File localInputDataFile = new File(localInputTempDir, getFileName(inputFileURI));
        LOGGER.info("Trying to copy input file from {} to {}", inputFileURI, localInputDataFile);
        PinotFSFactory.create(inputFileURI.getScheme()).copyToLocalFile(inputFileURI, localInputDataFile);

        //create task spec
        SegmentGenerationTaskSpec taskSpec = new SegmentGenerationTaskSpec();
        taskSpec.setInputFilePath(localInputDataFile.getAbsolutePath());
        taskSpec.setOutputDirectoryPath(localOutputTempDir.getAbsolutePath());
        taskSpec.setRecordReaderSpec(_spec.getRecordReaderSpec());
        taskSpec
            .setSchema(SegmentGenerationUtils.getSchema(_spec.getTableSpec().getSchemaURI(), _spec.getAuthToken()));
        taskSpec.setTableConfig(
            SegmentGenerationUtils.getTableConfig(_spec.getTableSpec().getTableConfigURI(), _spec.getAuthToken()));
        taskSpec.setSequenceId(idx);
        taskSpec.setSegmentNameGeneratorSpec(_spec.getSegmentNameGeneratorSpec());
        taskSpec.setFailOnEmptySegment(_spec.isFailOnEmptySegment());
        taskSpec.setCreateMetadataTarGz(_spec.isCreateMetadataTarGz());
        taskSpec.setCustomProperty(BatchConfigProperties.INPUT_DATA_FILE_URI_KEY, inputFileURI.toString());

        SegmentGenerationTaskRunner taskRunner = new SegmentGenerationTaskRunner(taskSpec);
        String segmentName = taskRunner.run();

        // Tar segment directory to compress file
        File localSegmentDir = new File(localOutputTempDir, segmentName);
        String segmentTarFileName = URIUtils.encode(segmentName + Constants.TAR_GZ_FILE_EXT);
        File localSegmentTarFile = new File(localOutputTempDir, segmentTarFileName);
        LOGGER.info("Tarring segment from: {} to: {}", localSegmentDir, localSegmentTarFile);
        TarCompressionUtils.createCompressedTarFile(localSegmentDir, localSegmentTarFile);

        long uncompressedSegmentSize = FileUtils.sizeOf(localSegmentDir);
        long compressedSegmentSize = FileUtils.sizeOf(localSegmentTarFile);
        LOGGER.info("Size for segment: {}, uncompressed: {}, compressed: {}", segmentName,
            DataSizeUtils.fromBytes(uncompressedSegmentSize), DataSizeUtils.fromBytes(compressedSegmentSize));

        // Move segment to output PinotFS
        URI relativeOutputPath =
            SegmentGenerationUtils.getRelativeOutputPath(finalInputDirURI, inputFileURI, finalOutputDirURI);
        URI outputSegmentTarURI = relativeOutputPath.resolve(segmentTarFileName);
        SegmentGenerationJobUtils.moveLocalTarFileToRemote(localSegmentTarFile, outputSegmentTarURI,
            _spec.isOverwriteOutput());

        // Create and upload segment metadata tar file
        String metadataTarFileName = URIUtils.encode(segmentName + Constants.METADATA_TAR_GZ_FILE_EXT);
        URI outputMetadataTarURI = relativeOutputPath.resolve(metadataTarFileName);
        // Drop a stale metadata tarball when overwriting or when metadata
        // generation is disabled.
        if (finalOutputDirFS.exists(outputMetadataTarURI) && (_spec.isOverwriteOutput()
            || !_spec.isCreateMetadataTarGz())) {
          LOGGER.info("Deleting existing metadata tar gz file: {}", outputMetadataTarURI);
          finalOutputDirFS.delete(outputMetadataTarURI, true);
        }
        if (taskSpec.isCreateMetadataTarGz()) {
          File localMetadataTarFile = new File(localOutputTempDir, metadataTarFileName);
          SegmentGenerationJobUtils.createSegmentMetadataTarGz(localSegmentDir, localMetadataTarFile);
          SegmentGenerationJobUtils.moveLocalTarFileToRemote(localMetadataTarFile, outputMetadataTarURI,
              _spec.isOverwriteOutput());
        }
        FileUtils.deleteQuietly(localSegmentDir);
        FileUtils.deleteQuietly(localInputDataFile);
      }
    });

    if (stagingDirURI != null) {
      LOGGER.info("Trying to copy segment tars from staging directory: [{}] to output directory [{}]", stagingDirURI,
          outputDirURI);
      outputDirFS.copyDir(stagingDirURI, outputDirURI);
    }
  } finally {
    if (stagingDirURI != null) {
      LOGGER.info("Trying to clean up staging directory: [{}]", stagingDirURI);
      outputDirFS.delete(stagingDirURI, true);
    }
  }
}
// End-to-end Spark segment generation: first run with overwrite disabled
// must leave pre-existing (empty) output files untouched; second run with
// overwrite enabled must produce a non-empty segment tarball.
@Test
public void testSegmentGeneration()
    throws Exception {
  // TODO use common resource definitions & code shared with Hadoop unit test.
  // So probably need a pinot-batch-ingestion-common tests jar that we depend on.

  File testDir = Files.createTempDirectory("testSegmentGeneration-").toFile();
  testDir.delete();
  testDir.mkdirs();

  File inputDir = new File(testDir, "input");
  inputDir.mkdirs();
  File inputFile = new File(inputDir, "input.csv");
  FileUtils.writeLines(inputFile, Lists.newArrayList("col1,col2", "value1,1", "value2,2"));

  // Create an output directory, with two empty files in it. One we'll overwrite,
  // and one we'll leave alone.
  final String outputFilename = "myTable_OFFLINE_0.tar.gz";
  final String existingFilename = "myTable_OFFLINE_100.tar.gz";
  File outputDir = new File(testDir, "output");
  FileUtils.touch(new File(outputDir, outputFilename));
  FileUtils.touch(new File(outputDir, existingFilename));

  // Set up schema file.
  final String schemaName = "myTable";
  File schemaFile = new File(testDir, "myTable.schema");
  Schema schema = new SchemaBuilder()
      .setSchemaName(schemaName)
      .addSingleValueDimension("col1", DataType.STRING)
      .addMetric("col2", DataType.INT)
      .build();
  FileUtils.write(schemaFile, schema.toPrettyJsonString(), StandardCharsets.UTF_8);

  // Set up table config file.
  File tableConfigFile = new File(testDir, "myTable.table");
  TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE)
      .setTableName("myTable")
      .setNumReplicas(1)
      .build();
  FileUtils.write(tableConfigFile, tableConfig.toJsonString(), StandardCharsets.UTF_8);

  SegmentGenerationJobSpec jobSpec = new SegmentGenerationJobSpec();
  jobSpec.setJobType("SegmentCreation");
  jobSpec.setInputDirURI(inputDir.toURI().toString());
  jobSpec.setOutputDirURI(outputDir.toURI().toString());
  jobSpec.setOverwriteOutput(false);

  RecordReaderSpec recordReaderSpec = new RecordReaderSpec();
  recordReaderSpec.setDataFormat("csv");
  recordReaderSpec.setClassName(CSVRecordReader.class.getName());
  recordReaderSpec.setConfigClassName(CSVRecordReaderConfig.class.getName());
  jobSpec.setRecordReaderSpec(recordReaderSpec);

  TableSpec tableSpec = new TableSpec();
  tableSpec.setTableName("myTable");
  tableSpec.setSchemaURI(schemaFile.toURI().toString());
  tableSpec.setTableConfigURI(tableConfigFile.toURI().toString());
  jobSpec.setTableSpec(tableSpec);

  ExecutionFrameworkSpec efSpec = new ExecutionFrameworkSpec();
  efSpec.setName("standalone");
  efSpec.setSegmentGenerationJobRunnerClassName(SparkSegmentGenerationJobRunner.class.getName());
  jobSpec.setExecutionFrameworkSpec(efSpec);

  PinotFSSpec pfsSpec = new PinotFSSpec();
  pfsSpec.setScheme("file");
  pfsSpec.setClassName(LocalPinotFS.class.getName());
  jobSpec.setPinotFSSpecs(Collections.singletonList(pfsSpec));

  SparkSegmentGenerationJobRunner jobRunner = new SparkSegmentGenerationJobRunner(jobSpec);
  jobRunner.run();

  // The output directory should still have the original file in it.
  File oldSegmentFile = new File(outputDir, existingFilename);
  Assert.assertTrue(oldSegmentFile.exists());

  // The output directory should have the original file in it (since we aren't overwriting)
  File newSegmentFile = new File(outputDir, outputFilename);
  Assert.assertTrue(newSegmentFile.exists());
  Assert.assertTrue(newSegmentFile.isFile());
  Assert.assertTrue(newSegmentFile.length() == 0);

  // Now run again, but this time with overwriting of output files, and confirm we got a valid segment file.
  jobSpec.setOverwriteOutput(true);
  jobRunner = new SparkSegmentGenerationJobRunner(jobSpec);
  jobRunner.run();

  // The original file should still be there.
  Assert.assertTrue(oldSegmentFile.exists());

  Assert.assertTrue(newSegmentFile.exists());
  Assert.assertTrue(newSegmentFile.isFile());
  Assert.assertTrue(newSegmentFile.length() > 0);

  // FUTURE - validate contents of file?
}
/**
 * Returns true when {@code source} is a valid FEEL variable name, i.e.
 * {@code checkVariableName} reports no problems for it.
 */
public static boolean isVariableNameValid( String source ) {
    return checkVariableName( source ).isEmpty();
}
// A single emoji character (horse, U+1F40E — a surrogate pair in UTF-16)
// must be accepted as a valid FEEL variable name.
@Test
void variableNameWithValidCharactersHorseEmoji() {
    String var = "🐎";
    assertThat(FEELParser.isVariableNameValid(var)).isEqualTo(true);
}
/**
 * Escapes every character of {@code command} that appears in
 * {@code SPECIAL_CHARACTER} by prefixing it with a backslash; all other
 * characters are copied through unchanged.
 *
 * @param command the raw command string (must not be null)
 * @return the escaped string
 */
public static String escapeSpecialCharacter(String command) {
    // Worst case every character is escaped, so reserve twice the input
    // length up front to avoid StringBuilder re-allocation; append the
    // escape as a char rather than a one-character String.
    StringBuilder builder = new StringBuilder(command.length() * 2);
    for (char c : command.toCharArray()) {
        if (SPECIAL_CHARACTER.indexOf(c) != -1) {
            builder.append('\\');
        }
        builder.append(c);
    }
    return builder.toString();
}
// Each special character of "{}." must be individually backslash-escaped.
@Test
public void testEscapeSpecialCharacters() {
    String cmd = "{}.";
    assertEquals("\\{\\}\\.", InterpreterLauncher.escapeSpecialCharacter(cmd));
}
/**
 * Looks up the given variable in the PMML result variables.
 *
 * @param variableName     key to look up
 * @param resultsVariables PMML result variables by name
 * @return the value wrapped in an Optional, or empty when absent or null
 */
static Optional<Object> getValueFromPMMLResultByVariableName(final String variableName,
                                                             final Map<String, Object> resultsVariables) {
    // A missing key and a null value are both reported as empty.
    Object value = resultsVariables.get(variableName);
    return Optional.ofNullable(value);
}
// Absent key -> empty Optional; present key -> Optional holding the value.
@Test
void getValueFromPMMLResultByVariableName() {
    final String variableName = "variableName";
    final Map<String, Object> resultsVariables = new HashMap<>();
    Optional<Object> retrieved =
        KiePMMLOutputField.getValueFromPMMLResultByVariableName(variableName, resultsVariables);
    assertThat(retrieved).isNotPresent();
    final Object variableValue = 243.94;
    resultsVariables.put(variableName, variableValue);
    retrieved = KiePMMLOutputField.getValueFromPMMLResultByVariableName(variableName, resultsVariables);
    assertThat(retrieved).isPresent();
    assertThat(retrieved.get()).isEqualTo(variableValue);
}
/**
 * Flattens server-side {@code Settings.Setting} messages into a flat
 * key/value property map. Inherited settings are skipped except the
 * file-suffix/pattern keys needed for language detection. Multi-value
 * settings are joined into one CSV cell; property sets are expanded via
 * {@code convertPropertySetToProps}. Secured settings may legitimately
 * arrive without any value; any other value-less setting is an error.
 */
static Map<String, String> toMap(List<Settings.Setting> settingsList) {
  Map<String, String> result = new LinkedHashMap<>();
  for (Settings.Setting s : settingsList) {
    // we need the "*.file.suffixes" and "*.file.patterns" properties for language detection
    // see DefaultLanguagesRepository.populateFileSuffixesAndPatterns()
    if (!s.getInherited() || s.getKey().endsWith(".file.suffixes") || s.getKey().endsWith(".file.patterns")) {
      // Dispatch on which protobuf oneof case carries the value.
      switch (s.getValueOneOfCase()) {
        case VALUE:
          result.put(s.getKey(), s.getValue());
          break;
        case VALUES:
          // CSV-escape each value before joining so embedded commas survive.
          result.put(s.getKey(),
              s.getValues().getValuesList().stream().map(StringEscapeUtils::escapeCsv).collect(Collectors.joining(",")));
          break;
        case FIELDVALUES:
          convertPropertySetToProps(result, s);
          break;
        default:
          if (!s.getKey().endsWith(".secured")) {
            throw new IllegalStateException("Unknown property value for " + s.getKey());
          }
      }
    }
  }
  return result;
}
// A non-secured setting arriving without any value case set must be
// rejected with a descriptive IllegalStateException.
@Test
public void should_throw_exception_when_no_value_of_non_secured_settings() {
  Setting setting = Setting.newBuilder().setKey("sonar.open.setting").build();
  List<Setting> singletonList = singletonList(setting);

  assertThatThrownBy(() -> AbstractSettingsLoader.toMap(singletonList))
    .isInstanceOf(IllegalStateException.class)
    .hasMessage("Unknown property value for sonar.open.setting");
}
/**
 * Serializes the object via {@code toJson} and renders it as a string.
 * {@code String.valueOf} guards against a null result ("null").
 */
public String toJsonString(Object object) {
    Object json = toJson(object);
    return String.valueOf(json);
}
// The obfuscator's JSON rendering must keep ordinary values ("key") while
// hiding values under secret-looking keys ("my.secret").
@Test
public void testJsonNode()
    throws IOException {
  JsonNode node = JsonUtils.stringToJsonNode("{\"key\":\"VALUE\",\"my.secret\":\"SECRET\"}");
  String output = _obfuscator.toJsonString(node);
  Assert.assertTrue(output.contains(VALUE));
  Assert.assertFalse(output.contains(SECRET));
}