focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Returns the shared single-thread scheduled executor, initializing the shared
 * executors on first access via {@code setup()}.
 */
public static SharedScheduledExecutorService getSingleThreadExecutor() {
    setup();
    return singleThreadExecutor;
}
@Test
public void singleThread() {
    // Two lookups must return the very same singleton instance.
    final ScheduledExecutorService first = SharedScheduledExecutors.getSingleThreadExecutor();
    assertNotNull("ScheduledExecutorService must not be null", first);
    final ScheduledExecutorService second = SharedScheduledExecutors.getSingleThreadExecutor();
    assertSame("factories should be same", first, second);
}
@Override public Duration convertFrom(String value) { // ISO8601 format if (value.startsWith("P")) { final Period period = Period.parse(value); return Duration.parse(period.toString()); } // number + unit formats final com.github.joschi.jadconfig.util.Duration jadDuration = com.github.joschi.jadconfig.util.Duration.parse(value); final ChronoUnit chronoUnit = toChronoUnit(jadDuration.getUnit()); return Duration.of(jadDuration.getQuantity(), chronoUnit); }
@Test
public void convertFrom() {
    // "number + unit" shorthand forms.
    assertThat(converter.convertFrom("10ms")).isEqualTo(Duration.ofMillis(10));
    assertThat(converter.convertFrom("10s")).isEqualTo(Duration.ofSeconds(10));
    // ISO-8601 time-based forms.
    assertThat(converter.convertFrom("PT0.01S")).isEqualTo(Duration.ofMillis(10));
    assertThat(converter.convertFrom("PT10S")).isEqualTo(Duration.ofSeconds(10));
}
/**
 * Reads the next eight bytes of {@code buf} as a little-endian signed 64-bit value.
 *
 * <p>Side effect: the buffer's byte order is switched to little-endian and its
 * position advances by eight bytes (as in the original implementation).
 *
 * @throws BufferUnderflowException if fewer than eight bytes remain
 */
public static long readInt64(ByteBuffer buf) throws BufferUnderflowException {
    buf.order(ByteOrder.LITTLE_ENDIAN);
    return buf.getLong();
}
@Test(expected = ArrayIndexOutOfBoundsException.class)
public void testReadInt64ThrowsException1() {
    // Only five bytes are available from offset 2 of a seven-byte array, so an
    // eight-byte read must run past the end of the array.
    final byte[] tooShort = {1, 2, 3, 4, 5, 6, 7};
    ByteUtils.readInt64(tooShort, 2);
}
/**
 * Routes the given query: statements that need every schema use the all-schema
 * executor, everything else goes through partial routing driven by the rules.
 */
public RouteContext route(final ConnectionContext connectionContext, final QueryContext queryContext, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database) {
    final SQLRouteExecutor executor;
    if (isNeedAllSchemas(queryContext.getSqlStatementContext().getSqlStatement())) {
        executor = new AllSQLRouteExecutor();
    } else {
        executor = new PartialSQLRouteExecutor(rules, props);
    }
    return executor.route(connectionContext, queryContext, globalRuleMetaData, database);
}
@Test
void assertRouteSuccess() {
    // A query routed through SQLRouteEngine configured with a single RouteRuleFixture
    // must produce exactly one route unit mapping logic data source "ds" to "ds_0".
    ConnectionContext connectionContext = mock(ConnectionContext.class);
    when(connectionContext.getCurrentDatabaseName()).thenReturn(Optional.of("logic_schema"));
    ShardingSphereMetaData metaData = mock(ShardingSphereMetaData.class);
    RuleMetaData ruleMetaData = new RuleMetaData(Collections.singleton(new RouteRuleFixture()));
    ShardingSphereDatabase database = new ShardingSphereDatabase("logic_schema", mock(DatabaseType.class),
            mock(ResourceMetaData.class, RETURNS_DEEP_STUBS), ruleMetaData, Collections.emptyMap());
    when(metaData.containsDatabase("logic_schema")).thenReturn(true);
    when(metaData.getDatabase("logic_schema")).thenReturn(database);
    QueryContext queryContext = new QueryContext(mock(CommonSQLStatementContext.class), "SELECT 1",
            Collections.emptyList(), new HintValueContext(), connectionContext, metaData);
    SQLRouteEngine sqlRouteEngine = new SQLRouteEngine(Collections.singleton(new RouteRuleFixture()),
            new ConfigurationProperties(new Properties()));
    // NOTE(review): a fresh ConnectionContext is passed to route(), not the mocked one
    // stubbed above — confirm the stubbing is actually needed.
    RouteContext actual = sqlRouteEngine.route(new ConnectionContext(Collections::emptySet), queryContext,
            mock(RuleMetaData.class), database);
    assertThat(actual.getRouteUnits().size(), is(1));
    RouteUnit routeUnit = actual.getRouteUnits().iterator().next();
    assertThat(routeUnit.getDataSourceMapper().getLogicName(), is("ds"));
    assertThat(routeUnit.getDataSourceMapper().getActualName(), is("ds_0"));
    assertTrue(routeUnit.getTableMappers().isEmpty());
}
/**
 * Delegates to the underlying metadata after translating catalog, schema and
 * table-name patterns to their actual values, wrapping the raw result set.
 */
@Override
public ResultSet getSuperTables(final String catalog, final String schemaPattern, final String tableNamePattern) throws SQLException {
    final ResultSet superTables = getDatabaseMetaData().getSuperTables(
            getActualCatalog(catalog), getActualSchema(schemaPattern), getActualTableNamePattern(tableNamePattern));
    return createDatabaseMetaDataResultSet(superTables);
}
@Test
void assertGetSuperTables() throws SQLException {
    // The sharded metadata must wrap the delegate's result set in DatabaseMetaDataResultSet.
    when(databaseMetaData.getSuperTables("test", null, null)).thenReturn(resultSet);
    assertThat(shardingSphereDatabaseMetaData.getSuperTables("test", null, null),
            instanceOf(DatabaseMetaDataResultSet.class));
}
/**
 * On startup, warns once if the deprecated CE worker-count property is still set;
 * the value itself is never used.
 */
@Override
public void start() {
    configuration.get(PROPERTY_SONAR_CE_WORKER_COUNT)
        .ifPresent(ignoredValue -> LOG.warn(
            "Property {} is not supported anymore and will be ignored."
                + " Remove it from sonar.properties to remove this warning.",
            PROPERTY_SONAR_CE_WORKER_COUNT));
}
@Test
public void start_logs_a_warning_if_property_ceWorkerCount_exists_with_a_value() {
    // Any non-empty value for the deprecated property must trigger the warning.
    settings.setProperty(PROPERTY_SONAR_CE_WORKER_COUNT, randomAlphabetic(12));

    underTest.start();

    verifyWarnMessage();
}
/**
 * Logs a verbose message, interpolating the format args before delegating to
 * the Android verbose channel.
 */
@Override
public void v(String tag, String message, Object... args) {
    final String formatted = formatString(message, args);
    Log.v(tag, formatted);
}
/**
 * Renamed from {@code infoLoggedCorrectly}: this test exercises the verbose channel
 * ({@code logger.v}) and asserts on the VERBOSE level, so the old name was misleading.
 */
@Test
public void verboseLoggedCorrectly() {
    String expectedMessage = "Hello World";
    logger.v(tag, "Hello %s", "World");
    assertLogged(VERBOSE, tag, expectedMessage, null);
}
/**
 * Builds the JDBC write transform from its configuration, failing fast if the
 * configuration is invalid.
 */
@Override
protected @UnknownKeyFor @NonNull @Initialized SchemaTransform from(
    JdbcWriteSchemaTransformConfiguration configuration) {
  // Validate before construction so a bad config never produces a transform.
  configuration.validate();
  return new JdbcWriteSchemaTransform(configuration);
}
@Test
public void testWriteToTable() throws SQLException {
  // End-to-end: discover the provider via ServiceLoader, write two rows through the
  // schema transform, and verify the row count in the target table.
  JdbcWriteSchemaTransformProvider provider = null;
  // Locate the JDBC write provider among all registered SchemaTransformProviders.
  for (SchemaTransformProvider p : ServiceLoader.load(SchemaTransformProvider.class)) {
    if (p instanceof JdbcWriteSchemaTransformProvider) {
      provider = (JdbcWriteSchemaTransformProvider) p;
      break;
    }
  }
  assertNotNull(provider);
  Schema schema =
      Schema.of(
          Schema.Field.of("id", Schema.FieldType.INT64),
          Schema.Field.of("name", Schema.FieldType.STRING));
  List<Row> rows =
      ImmutableList.of(
          Row.withSchema(schema).attachValues(1L, "name1"),
          Row.withSchema(schema).attachValues(2L, "name2"));
  // Feed the rows into the transform under the tag "input".
  PCollectionRowTuple.of("input", pipeline.apply(Create.of(rows).withRowSchema(schema)))
      .apply(
          provider.from(
              JdbcWriteSchemaTransformProvider.JdbcWriteSchemaTransformConfiguration.builder()
                  .setDriverClassName(DATA_SOURCE_CONFIGURATION.getDriverClassName().get())
                  .setJdbcUrl(DATA_SOURCE_CONFIGURATION.getUrl().get())
                  .setLocation(writeTableName)
                  .build()));
  pipeline.run();
  // Both rows must have landed in the table.
  DatabaseTestHelper.assertRowCount(DATA_SOURCE, writeTableName, 2);
}
/**
 * Copies a large (segmented) object by first resolving the source's segments and
 * then delegating to the segment-aware copy overload.
 */
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
    final List<Path> segments = segmentService.list(source);
    return copy(source, segments, target, status, callback, listener);
}
@Test
public void testCopyLargeObjectDifferentBucket() throws Exception {
    // Copies a segmented (large) object to a different bucket, then verifies the copy is
    // fully independent: deleting the source must leave the copy and its segments intact.
    final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
    container.attributes().setRegion("IAD");
    final Path originFolder = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory));
    final Path sourceFile = new Path(originFolder, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final SwiftRegionService regionService = new SwiftRegionService(session);
    final SwiftSegmentService segmentService = new SwiftSegmentService(session, ".segments-test/");
    // Upload a segmented source object first.
    prepareFile(sourceFile, regionService, segmentService);
    final SwiftFindFeature findFeature = new SwiftFindFeature(session);
    assertTrue(findFeature.find(sourceFile));
    // NOTE(review): the target uses the same bucket name "test.cyberduck.ch" — confirm this
    // still exercises the "different bucket" path the test name claims.
    final Path targetBucket = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
    targetBucket.attributes().setRegion("IAD");
    final Path targetFolder = new Path(targetBucket, UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory));
    final Path targetFile = new Path(targetFolder, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final SwiftObjectListService listService = new SwiftObjectListService(session, regionService);
    final Path copiedFile = new SwiftLargeObjectCopyFeature(session, regionService, segmentService)
        .copy(sourceFile, targetFile, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener());
    // copied file exists
    assertTrue(findFeature.find(copiedFile));
    final List<Path> targetSegments = segmentService.list(targetFile);
    // delete source, without deleting copy
    new SwiftDeleteFeature(session, segmentService, regionService).delete(
        Collections.singletonMap(sourceFile, new TransferStatus()), new DisabledPasswordCallback(), new Delete.DisabledCallback(), true);
    assertFalse(findFeature.find(sourceFile));
    // Every segment of the copy must survive the source deletion.
    assertTrue(targetSegments.stream().allMatch(p -> {
        try {
            return findFeature.find(p);
        }
        catch(BackgroundException e) {
            e.printStackTrace();
            return false;
        }
    }));
    // Clean up the copy.
    new SwiftDeleteFeature(session, segmentService, regionService).delete(
        Collections.singletonMap(copiedFile, new TransferStatus()), new DisabledPasswordCallback(), new Delete.DisabledCallback(), true);
    assertFalse(findFeature.find(copiedFile));
}
@Override public void processElement(StreamRecord<RowData> element) throws Exception { RowData inputRow = element.getValue(); long timestamp; if (windowAssigner.isEventTime()) { if (inputRow.isNullAt(rowtimeIndex)) { // null timestamp would be dropped numNullRowTimeRecordsDropped.inc(); return; } timestamp = inputRow.getTimestamp(rowtimeIndex, 3).getMillisecond(); } else { timestamp = getProcessingTimeService().getCurrentProcessingTime(); } timestamp = toUtcTimestampMills(timestamp, shiftTimeZone); Collection<TimeWindow> elementWindows = windowAssigner.assignWindows(inputRow, timestamp); collect(inputRow, elementWindows); }
@Test
public void testCumulativeWindows() throws Exception {
    // Cumulative windows: max size 3s, step 1s — each element lands in every cumulative
    // window [start, start+1s), [start, start+2s), [start, start+3s) of its 3s cycle.
    final CumulativeWindowAssigner assigner =
            CumulativeWindowAssigner.of(Duration.ofSeconds(3), Duration.ofSeconds(1));
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
            createTestHarness(assigner, shiftTimeZone);
    testHarness.setup(OUT_SERIALIZER);
    testHarness.open();
    // process elements
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.processElement(insertRecord("key1", 1, 20L));
    testHarness.processElement(insertRecord("key2", 1, 3999L));
    testHarness.processWatermark(new Watermark(999));
    // append 3 fields: window_start, window_end, window_time
    expectedOutput.add(insertRecord("key1", 1, 20L, localMills(0), localMills(1000L), 999L));
    expectedOutput.add(insertRecord("key1", 1, 20L, localMills(0), localMills(2000L), 1999L));
    expectedOutput.add(insertRecord("key1", 1, 20L, localMills(0L), localMills(3000L), 2999L));
    expectedOutput.add(
            insertRecord("key2", 1, 3999L, localMills(3000L), localMills(4000L), 3999L));
    expectedOutput.add(
            insertRecord("key2", 1, 3999L, localMills(3000L), localMills(5000L), 4999L));
    expectedOutput.add(
            insertRecord("key2", 1, 3999L, localMills(3000L), localMills(6000L), 5999L));
    expectedOutput.add(new Watermark(999));
    ASSERTER.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput());
    // late element would not be dropped — this operator only assigns windows, it does
    // not apply watermark-based lateness filtering.
    testHarness.processElement(insertRecord("key2", 1, 80L));
    expectedOutput.add(insertRecord("key2", 1, 80L, localMills(0), localMills(1000L), 999L));
    expectedOutput.add(insertRecord("key2", 1, 80L, localMills(0), localMills(2000L), 1999L));
    expectedOutput.add(insertRecord("key2", 1, 80L, localMills(0L), localMills(3000L), 2999L));
    ASSERTER.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.close();
}
/**
 * Reports this node's own health as the name of its cluster state (e.g. "UP").
 */
public String selfHealth() {
    return memberManager.getSelf()
            .getState()
            .name();
}
@Test
void testSelfHealth() {
    // A node in state UP must report "UP" as its health string.
    Member self = new Member();
    self.setIp("1.1.1.1");
    self.setPort(8848);
    self.setState(NodeState.UP);
    when(serverMemberManager.getSelf()).thenReturn(self);

    String health = nacosClusterOperationService.selfHealth();
    assertEquals(NodeState.UP.name(), health);
}
@Override
public void expand(
    ExpansionApi.ExpansionRequest request,
    StreamObserver<ExpansionApi.ExpansionResponse> responseObserver) {
  // One-time readiness gate before serving the first expansion request.
  // NOTE(review): the check-then-set on checkedAllServices is unsynchronized — confirm
  // this handler cannot be invoked concurrently before the flag is set.
  if (!checkedAllServices) {
    try {
      waitForAllServicesToBeReady();
    } catch (TimeoutException e) {
      // A readiness timeout fails the RPC outright; it is NOT reported via the
      // in-band error payload used below.
      throw new RuntimeException(e);
    }
    checkedAllServices = true;
  }
  try {
    responseObserver.onNext(processExpand(request));
    responseObserver.onCompleted();
  } catch (RuntimeException exn) {
    // Expansion failures are reported in-band: a normal response whose error field
    // carries the stack trace, followed by normal completion of the stream.
    responseObserver.onNext(
        ExpansionApi.ExpansionResponse.newBuilder()
            .setError(Throwables.getStackTraceAsString(exn))
            .build());
    responseObserver.onCompleted();
  }
}
@Test
public void testExpandFirstEndpoint() {
  // The service must forward the expansion request to the client obtained from the
  // factory exactly once and relay that client's response to the observer.
  ExpansionServiceClient expansionServiceClient = Mockito.mock(ExpansionServiceClient.class);
  Mockito.when(clientFactory.getExpansionServiceClient(Mockito.any()))
      .thenReturn(expansionServiceClient);
  Mockito.when(expansionServiceClient.expand(Mockito.any()))
      .thenReturn(
          ExpansionResponse.newBuilder()
              .setTransform(
                  PTransform.newBuilder()
                      .setSpec(FunctionSpec.newBuilder().setUrn("dummy_urn_1")))
              .build());
  ExpansionRequest request = ExpansionRequest.newBuilder().build();
  StreamObserver<ExpansionResponse> responseObserver = Mockito.mock(StreamObserver.class);
  expansionService.expand(request, responseObserver);
  Mockito.verify(expansionServiceClient, Mockito.times(1)).expand(request);
  // Capture what was pushed to the observer and check the URN survived the round trip.
  ArgumentCaptor<ExpansionResponse> expansionResponseCapture =
      ArgumentCaptor.forClass(ExpansionResponse.class);
  Mockito.verify(responseObserver).onNext(expansionResponseCapture.capture());
  assertEquals(
      "dummy_urn_1", expansionResponseCapture.getValue().getTransform().getSpec().getUrn());
}
/**
 * Lists the directory using the standard path delimiter ("/") as the listing delimiter.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final String delimiter = String.valueOf(Path.DELIMITER);
    return this.list(directory, listener, delimiter);
}
@Test
public void testListPlaceholderPlusCharacter() {
    // Directory names containing '+' must round-trip through listing (the '+' must not
    // be mangled by URL encoding).
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    final String directoryName = String.format("test+%s", new AlphanumericRandomStringService().random());
    final Path placeholder = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(
        new Path(container, directoryName, EnumSet.of(Path.Type.directory)), new TransferStatus());
    final S3ObjectListService listService = new S3ObjectListService(session, acl);
    assertTrue(listService.list(container, new DisabledListProgressListener()).contains(placeholder));
    assertTrue(listService.list(placeholder, new DisabledListProgressListener()).isEmpty());
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(placeholder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
@Override
public void unregisterSuperProperty(String superPropertyName) {
    // Intentionally a no-op in this implementation: the property name is ignored.
    // NOTE(review): confirm this variant (e.g. a disabled/empty SDK) is meant to
    // silently drop unregister requests.
}
@Test
public void unregisterSuperProperty() {
    // After registering and then unregistering the property, no super properties remain.
    JSONObject superProperties = new JSONObject();
    try {
        superProperties.put("super", "super");
    } catch (JSONException e) {
        e.printStackTrace();
    }
    mSensorsAPI.registerSuperProperties(superProperties);
    mSensorsAPI.unregisterSuperProperty("super");
    Assert.assertEquals(0, mSensorsAPI.getSuperProperties().length());
}
@Override
public String getFileId(final DriveItem.Metadata metadata) {
    // Builds a composite file id of "/"-joined drive-id and item-id pairs.
    final ItemReference parent = metadata.getParentReference();
    if(metadata.getRemoteItem() != null) {
        // Item shared from another drive: identify it by the remote drive/item ids.
        final DriveItem.Metadata remoteMetadata = metadata.getRemoteItem();
        final ItemReference remoteParent = remoteMetadata.getParentReference();
        if(parent == null) {
            // No local parent reference: remote pair only.
            return String.join(String.valueOf(Path.DELIMITER), remoteParent.getDriveId(), remoteMetadata.getId());
        }
        else {
            // Local pair first, remote pair second.
            return String.join(String.valueOf(Path.DELIMITER), parent.getDriveId(), metadata.getId(), remoteParent.getDriveId(), remoteMetadata.getId());
        }
    }
    else {
        // NOTE(review): parent is dereferenced unconditionally here — a non-remote item
        // whose parentReference is null would throw NPE. Confirm the API always sets it
        // for non-remote items.
        return String.join(String.valueOf(Path.DELIMITER), parent.getDriveId(), metadata.getId());
    }
}
@Test
public void testParentReferenceFileIdUnknownId() throws Exception {
    // Parse a canned Graph API response and verify the composite id "driveId/itemId".
    final DriveItem.Metadata metadata;
    try (final InputStream fixture = getClass().getResourceAsStream("/ParentReferenceFileIdUnknownId.json")) {
        metadata = DriveItem.parseJson(session.getClient(), (JsonObject) Json.parse(new InputStreamReader(fixture)));
    }
    assertEquals("ParentDriveId/MyId", session.getFileId(metadata));
}
/**
 * Computes the 256-bit product of two 128-bit values into {@code result}, using
 * schoolbook multiplication over four 32-bit limbs per operand. Each limb is
 * zero-extended into a long; {@code LONG_MASK} keeps the low 32 bits of each
 * accumulator step and the high 32 bits carry into the next limb.
 */
public static void multiply256(Slice left, Slice right, Slice result) {
    // The result slice must hold eight 32-bit limbs (256 bits).
    checkArgument(result.length() >= NUMBER_OF_LONGS * Long.BYTES * 2);
    // Load the four limbs of each operand, zero-extended.
    long l0 = getInt(left, 0) & LONG_MASK;
    long l1 = getInt(left, 1) & LONG_MASK;
    long l2 = getInt(left, 2) & LONG_MASK;
    long l3 = getInt(left, 3) & LONG_MASK;
    long r0 = getInt(right, 0) & LONG_MASK;
    long r1 = getInt(right, 1) & LONG_MASK;
    long r2 = getInt(right, 2) & LONG_MASK;
    long r3 = getInt(right, 3) & LONG_MASK;
    long z0 = 0;
    long z1 = 0;
    long z2 = 0;
    long z3 = 0;
    long z4 = 0;
    long z5 = 0;
    long z6 = 0;
    long z7 = 0;
    // Partial-product row for limb l0 (skipped entirely when the limb is zero).
    if (l0 != 0) {
        long accumulator = r0 * l0;
        z0 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r1 * l0;
        z1 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r2 * l0;
        z2 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r3 * l0;
        z3 = accumulator & LONG_MASK;
        z4 = (accumulator >>> 32) & LONG_MASK;
    }
    // Row for l1, shifted one limb, added onto the running z values.
    if (l1 != 0) {
        long accumulator = r0 * l1 + z1;
        z1 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r1 * l1 + z2;
        z2 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r2 * l1 + z3;
        z3 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r3 * l1 + z4;
        z4 = accumulator & LONG_MASK;
        z5 = (accumulator >>> 32) & LONG_MASK;
    }
    // Row for l2, shifted two limbs.
    if (l2 != 0) {
        long accumulator = r0 * l2 + z2;
        z2 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r1 * l2 + z3;
        z3 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r2 * l2 + z4;
        z4 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r3 * l2 + z5;
        z5 = accumulator & LONG_MASK;
        z6 = (accumulator >>> 32) & LONG_MASK;
    }
    // Row for l3, shifted three limbs; its final carry lands in the top limb z7.
    if (l3 != 0) {
        long accumulator = r0 * l3 + z3;
        z3 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r1 * l3 + z4;
        z4 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r2 * l3 + z5;
        z5 = accumulator & LONG_MASK;
        accumulator = (accumulator >>> 32) + r3 * l3 + z6;
        z6 = accumulator & LONG_MASK;
        z7 = (accumulator >>> 32) & LONG_MASK;
    }
    // Store the eight 32-bit limbs of the 256-bit product, least significant first.
    setRawInt(result, 0, (int) z0);
    setRawInt(result, 1, (int) z1);
    setRawInt(result, 2, (int) z2);
    setRawInt(result, 3, (int) z3);
    setRawInt(result, 4, (int) z4);
    setRawInt(result, 5, (int) z5);
    setRawInt(result, 6, (int) z6);
    setRawInt(result, 7, (int) z7);
}
@Test
public void testMultiply256() {
    // 256-bit products of 128-bit operands; expected values given as four 64-bit words,
    // least significant first.
    assertMultiply256(MAX_DECIMAL, MAX_DECIMAL, wrappedLongArray(0xECEBBB8000000001L, 0xE0FF0CA0BC87870BL, 0x0764B4ABE8652978L, 0x161BCCA7119915B5L));
    // Same expected product as above: MIN * MIN yields the same magnitude as MAX * MAX.
    assertMultiply256(MIN_DECIMAL, MIN_DECIMAL, wrappedLongArray(0xECEBBB8000000001L, 0xE0FF0CA0BC87870BL, 0x0764B4ABE8652978L, 0x161BCCA7119915B5L));
    assertMultiply256(wrappedLongArray(0xFFFFFFFFFFFFFFFFL, 0x0FFFFFFFFFFFFFFFL), wrappedLongArray(0xFFFFFFFFFFFFFFFFL, 0x0FFFFFFFFFFFFFFFL),
        wrappedLongArray(0x0000000000000001L, 0xE000000000000000L, 0xFFFFFFFFFFFFFFFFL, 0x00FFFFFFFFFFFFFFL));
    // NOTE(review): 0x1234567890ABCDEL below has 15 hex digits while every sibling
    // literal has 16 — verify it was not truncated from 0x1234567890ABCDEFL (the
    // expected product would need to change with it).
    assertMultiply256(wrappedLongArray(0x1234567890ABCDEFL, 0x0EDCBA0987654321L), wrappedLongArray(0xFEDCBA0987654321L, 0x1234567890ABCDEL),
        wrappedLongArray(0xC24A442FE55618CFL, 0xAA71A60D0DA49DDAL, 0x7C163D5A13DF8695L, 0x0010E8EEF9BD1294L));
}
@Delete(uri = "{namespace}/{id}")
@ExecuteOn(TaskExecutors.IO)
@Operation(tags = {"Flows"}, summary = "Delete a flow")
@ApiResponse(responseCode = "204", description = "On success")
public HttpResponse<Void> delete(
    @Parameter(description = "The flow namespace") @PathVariable String namespace,
    @Parameter(description = "The flow id") @PathVariable String id
) {
    // Guard clause: unknown flows yield 404 without touching the repository further.
    final Optional<Flow> existing = flowRepository.findById(tenantService.resolveTenant(), namespace, id);
    if (!existing.isPresent()) {
        return HttpResponse.status(HttpStatus.NOT_FOUND);
    }
    flowRepository.delete(existing.get());
    return HttpResponse.status(HttpStatus.NO_CONTENT);
}
@Test
void importFlowsWithYaml() throws IOException {
    // Imports three flows as one multi-document YAML upload and expects 204 NO_CONTENT.
    // NOTE(review): "---" is concatenated with no surrounding newlines; this only forms a
    // valid document separator if generateFlowAsString() ends with a newline — confirm.
    var yaml = generateFlowAsString("io.kestra.unittest","a") + "---" +
        generateFlowAsString("io.kestra.unittest","b") + "---" +
        generateFlowAsString("io.kestra.unittest","c");

    var temp = File.createTempFile("flows", ".yaml");
    Files.writeString(temp.toPath(), yaml);
    var body = MultipartBody.builder()
        .addPart("fileUpload", "flows.yaml", temp)
        .build();
    var response = client.toBlocking().exchange(POST("/api/v1/flows/import", body).contentType(MediaType.MULTIPART_FORM_DATA));
    assertThat(response.getStatus(), is(NO_CONTENT));
    // NOTE(review): the temp file is not deleted if the assertion above fails (and the
    // delete() result is ignored) — consider a finally block or deleteOnExit().
    temp.delete();
}
/**
 * Converts a stored pipeline into a content-pack entity: resolves the connected
 * streams, wraps the pipeline fields in value references and serializes the
 * result as the data payload of an {@code EntityV1}.
 */
@VisibleForTesting
Entity exportNativeEntity(PipelineDao pipelineDao, EntityDescriptorIds entityDescriptorIds) {
    final Set<ValueReference> streams = connectedStreams(pipelineDao.id(), entityDescriptorIds);
    final PipelineEntity contentPackEntity = PipelineEntity.create(
            ValueReference.of(pipelineDao.title()),
            ValueReference.of(pipelineDao.description()),
            ValueReference.of(pipelineDao.source()),
            streams);
    final JsonNode data = objectMapper.convertValue(contentPackEntity, JsonNode.class);
    final ModelId entityId = ModelId.of(entityDescriptorIds.getOrThrow(pipelineDao.id(), ModelTypes.PIPELINE_V1));
    return EntityV1.builder()
            .id(entityId)
            .type(ModelTypes.PIPELINE_V1)
            .data(data)
            .build();
}
@Test
@MongoDBFixtures("PipelineFacadeTest/pipelines.json")
public void exportNativeEntity() {
    // Exports the fixture pipeline and verifies id, type and every translated field,
    // including the connected-stream reference mapped through EntityDescriptorIds.
    final EntityDescriptor descriptor = EntityDescriptor.create("5a85c4854b900afd5d662be3", ModelTypes.PIPELINE_V1);
    final EntityDescriptor streamDescriptor = EntityDescriptor.create("5adf23894b900a0fdb4e517d", ModelTypes.STREAM_V1);
    final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor, streamDescriptor);
    final Entity entity = facade.exportEntity(descriptor, entityDescriptorIds).orElseThrow(AssertionError::new);
    assertThat(entity).isInstanceOf(EntityV1.class);
    assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
    assertThat(entity.type()).isEqualTo(ModelTypes.PIPELINE_V1);
    // Deserialize the payload back into a PipelineEntity to inspect the exported fields.
    final EntityV1 entityV1 = (EntityV1) entity;
    final PipelineEntity pipelineEntity = objectMapper.convertValue(entityV1.data(), PipelineEntity.class);
    assertThat(pipelineEntity.title()).isEqualTo(ValueReference.of("Test"));
    assertThat(pipelineEntity.description()).isEqualTo(ValueReference.of("Description"));
    assertThat(pipelineEntity.source().asString(Collections.emptyMap())).startsWith("pipeline \"Test\"");
    assertThat(pipelineEntity.connectedStreams()).containsOnly(ValueReference.of(entityDescriptorIds.get(streamDescriptor).orElse(null)));
}
@Override
public V remove(Object key) {
    // Only CharSequence keys can exist in this map; any other key type maps to null.
    if (key instanceof CharSequence) {
        // Reuses a shared wrapper (presumably thread-local, via WRAPPERS.get() — confirm)
        // so lookups don't allocate a fresh wrapper per call.
        CharSequenceWrapper wrapper = WRAPPERS.get();
        V result = wrapperMap.remove(wrapper.set((CharSequence) key));
        wrapper.set(null); // don't hold a reference to the caller's key CharSequence
        return result;
    }
    return null;
}
@Test
public void testRemove() {
    // Removal must match on CharSequence content, not identity: a StringBuilder with the
    // same characters removes an entry that was put with a String key.
    CharSequenceMap<String> map = CharSequenceMap.create();
    map.put("key1", "value1");

    map.remove(new StringBuilder("key1"));

    assertThat(map).doesNotContainKey("key1");
    assertThat(map).isEmpty();
}
/**
 * Recursively substitutes config variables (e.g. "${VAR}") inside a boolean
 * expression tree. Nodes are rebuilt (reflectively, via the concrete subclass's
 * constructor) only when a child actually changed, compared by identity; untouched
 * subtrees are returned as-is.
 */
public static Expression substituteBoolExpression(ConfigVariableExpander cve, Expression expression) {
    try {
        if (expression instanceof BinaryBooleanExpression) {
            BinaryBooleanExpression binaryBoolExp = (BinaryBooleanExpression) expression;
            Expression substitutedLeftExp = substituteBoolExpression(cve, binaryBoolExp.getLeft());
            Expression substitutedRightExp = substituteBoolExpression(cve, binaryBoolExp.getRight());
            // Identity comparison: rebuild only if substitution produced a new child.
            if (substitutedLeftExp != binaryBoolExp.getLeft() || substitutedRightExp != binaryBoolExp.getRight()) {
                // Reinstantiate the same concrete type (Eq, And, Or, ...) via its
                // (SourceWithMetadata, Expression, Expression) constructor.
                Constructor<? extends BinaryBooleanExpression> constructor =
                        binaryBoolExp.getClass().getConstructor(SourceWithMetadata.class, Expression.class, Expression.class);
                return constructor.newInstance(binaryBoolExp.getSourceWithMetadata(), substitutedLeftExp, substitutedRightExp);
            }
        } else if (expression instanceof UnaryBooleanExpression) {
            UnaryBooleanExpression unaryBoolExp = (UnaryBooleanExpression) expression;
            Expression substitutedExp = substituteBoolExpression(cve, unaryBoolExp.getExpression());
            if (substitutedExp != unaryBoolExp.getExpression()) {
                Constructor<? extends UnaryBooleanExpression> constructor =
                        unaryBoolExp.getClass().getConstructor(SourceWithMetadata.class, Expression.class);
                return constructor.newInstance(unaryBoolExp.getSourceWithMetadata(), substitutedExp);
            }
        } else if (expression instanceof ValueExpression && !(expression instanceof RegexValueExpression)
                && (((ValueExpression) expression).get() != null)) {
            // Leaf value (excluding regex literals and nulls): expand variables while
            // keeping secret values masked.
            Object expanded = CompiledPipeline.expandConfigVariableKeepingSecrets(cve, ((ValueExpression) expression).get());
            return new ValueExpression(expression.getSourceWithMetadata(), expanded);
        }
        return expression;
    } catch (NoSuchMethodException | InstantiationException | IllegalAccessException
            | InvocationTargetException | InvalidIRException e) {
        // Reflection failures indicate a subclass without the expected constructor.
        throw new IllegalStateException("Unable to instantiate substituted condition expression", e);
    }
}
@Test
public void nestedBinaryBooleanSubstitution() throws InvalidIRException {
    // Builds (("${SMALL}" == 1 AND 100 == "${BIG}") OR [version] == 1) and verifies that
    // variable substitution reaches leaves nested two binary-expression levels deep.
    SourceWithMetadata swm = new SourceWithMetadata("proto", "path", 1, 8,
            "if \"${SMALL}\" == 1 and 100 == \"${BIG}\" or [version] == 1 { add_tag => \"pass\" }");
    ValueExpression smallLeft = new ValueExpression(swm, "${SMALL}");
    ValueExpression smallRight = new ValueExpression(swm, "1");
    BinaryBooleanExpression smallExp = new Eq(swm, smallLeft, smallRight);
    ValueExpression bigLeft = new ValueExpression(swm, "100");
    ValueExpression bigRight = new ValueExpression(swm, "${BIG}");
    BinaryBooleanExpression bigExp = new Eq(swm, bigLeft, bigRight);
    EventValueExpression versionLeft = new EventValueExpression(swm, "version");
    ValueExpression versionRight = new ValueExpression(swm, "1");
    BinaryBooleanExpression versionExp = new Eq(swm, versionLeft, versionRight);
    And andExp = new And(swm, smallExp, bigExp);
    Or orExp = new Or(swm, andExp, versionExp);
    ConfigVariableExpander cve = getConfigVariableExpander();
    BinaryBooleanExpression substituted =
            (BinaryBooleanExpression) ExpressionSubstitution.substituteBoolExpression(cve, orExp);
    // Walk back down the rebuilt tree and check the expanded values
    // (the expander maps SMALL -> "1" and BIG -> "100").
    And subAnd = (And) substituted.getLeft();
    BinaryBooleanExpression subSmallExp = (BinaryBooleanExpression) subAnd.getLeft();
    BinaryBooleanExpression subBigExp = (BinaryBooleanExpression) subAnd.getRight();
    assertEquals(((ValueExpression) subSmallExp.getLeft()).get(), "1");
    assertEquals(((ValueExpression) subBigExp.getRight()).get(), "100");
}
/**
 * Builds the populator that wires the issue-sync task container: the task itself,
 * the processing steps, and the executor that runs them.
 */
static ContainerPopulator<TaskContainer> newContainerPopulator(CeTask task) {
    return container -> {
        container.add(task);
        container.add(IgnoreOrphanBranchStep.class);
        container.add(IndexIssuesStep.class);
        container.add(new SyncComputationSteps(container));
        container.add(ComputationStepExecutor.class);
    };
}
@Test
public void newContainerPopulator() {
    // The populator must register exactly five components with the container.
    CeTask task = new CeTask.Builder()
        .setUuid("TASK_UUID")
        .setType("Type")
        .build();

    IssueSyncTaskProcessor.newContainerPopulator(task).populateContainer(container);

    Mockito.verify(container, Mockito.times(5)).add(any());
}
/**
 * Indexes the intent key by each of its network resources: links go into the
 * per-link index, element ids into the per-device index; other resource types
 * are ignored.
 */
@Override
public void addTrackedResources(Key intentKey, Collection<NetworkResource> resources) {
    for (NetworkResource resource : resources) {
        if (resource instanceof Link) {
            intentsByLink.put(linkKey((Link) resource), intentKey);
            continue;
        }
        if (resource instanceof ElementId) {
            intentsByDevice.put((ElementId) resource, intentKey);
        }
    }
}
@Test
public void testEventLinkDownMatch() throws Exception {
    // A LINK_REMOVED topology event for a tracked link must recompile exactly the
    // intent that tracked that link.
    final Link link = link("src", 1, "dst", 2);
    final LinkEvent linkEvent = new LinkEvent(LinkEvent.Type.LINK_REMOVED, link);
    reasons.add(linkEvent);
    final TopologyEvent event = new TopologyEvent(
            TopologyEvent.Type.TOPOLOGY_CHANGED, topology, reasons);
    final Key key = Key.of(0x333L, APP_ID);
    Collection<NetworkResource> resources = ImmutableSet.of(link);
    // Register the link as a tracked resource of intent 0x333 before firing the event.
    tracker.addTrackedResources(key, resources);
    listener.event(event);
    // The delegate is notified asynchronously; wait for its latch.
    assertThat(
            delegate.latch.await(WAIT_TIMEOUT_SECONDS, TimeUnit.SECONDS),
            is(true));
    assertThat(delegate.intentIdsFromEvent, hasSize(1));
    // A matched link means a targeted recompile, not a compile-all.
    assertThat(delegate.compileAllFailedFromEvent, is(false));
    assertThat(delegate.intentIdsFromEvent.get(0).toString(), equalTo("0x333"));
}
/**
 * Resolves a property by its translated name and binds it to the requested type;
 * empty when the binder could not bind the key.
 */
@Override
public <T> Optional<T> getProperty(String key, Class<T> targetType) {
    var bindResult = binder.bind(targetPropertyName(key), Bindable.of(targetType));
    if (!bindResult.isBound()) {
        return Optional.empty();
    }
    return Optional.of(bindResult.get());
}
@Test
void resolvesCustomConfigClassProperties() {
    // Nested keys under a common prefix must bind into the fields of a custom config class.
    env.setProperty("prop.0.custProps.f1", "f1val");
    env.setProperty("prop.0.custProps.f2", "1234");

    var resolver = new PropertyResolverImpl(env);

    assertThat(resolver.getProperty("prop.0.custProps", CustomPropertiesClass.class))
        .hasValue(new CustomPropertiesClass("f1val", 1234));
}
/**
 * Returns the (possibly absent) definition of a known view, blocking on its
 * pending computation.
 *
 * @throws PrestoException VIEW_NOT_FOUND when the view was never registered
 */
public Optional<ViewDefinition> getViewDefinition(QualifiedObjectName viewName) {
    if (!viewDefinitions.containsKey(viewName)) {
        throw new PrestoException(VIEW_NOT_FOUND,
                format("View %s not found, the available view names are: %s", viewName, viewDefinitions.keySet()));
    }
    try {
        return viewDefinitions.get(viewName).get();
    }
    catch (InterruptedException ex) {
        // Restore the interrupt flag before surfacing the failure.
        Thread.currentThread().interrupt();
        throw new RuntimeException(ex);
    }
    catch (ExecutionException ex) {
        // Unwrap: rethrow the original unchecked cause, otherwise wrap the checked one.
        throwIfUnchecked(ex.getCause());
        throw new RuntimeException(ex.getCause());
    }
}
@Test
public void testGetViewDefinitionNotPresent() {
    // Looking up a view that was never registered must surface VIEW_NOT_FOUND with a
    // message listing the views that do exist.
    try {
        metadataHandle.getViewDefinition(QualifiedObjectName.valueOf("tpch.s1.t2"));
        fail("PrestoException not thrown for invalid view");
    }
    catch (PrestoException e) {
        // Catching the expected type directly replaces the former broad
        // catch (Exception) + instanceof check; any other exception now fails loudly.
        assertEquals(e.getErrorCode(), VIEW_NOT_FOUND.toErrorCode());
        assertEquals(e.getMessage(), "View tpch.s1.t2 not found, the available view names are: [tpch.s1.t1]");
    }
}
/**
 * Configures the multicast socket used for member auto-discovery: binds it, sets
 * TTL/loopback mode, optionally pins the outgoing interface, sizes the buffers and
 * joins the (possibly property-overridden) multicast group.
 */
protected static void configureMulticastSocket(MulticastSocket multicastSocket, Address bindAddress,
        HazelcastProperties hzProperties, MulticastConfig multicastConfig, ILogger logger)
        throws SocketException, IOException, UnknownHostException {
    multicastSocket.setReuseAddress(true);
    // bind to receive interface
    multicastSocket.bind(new InetSocketAddress(multicastConfig.getMulticastPort()));
    multicastSocket.setTimeToLive(multicastConfig.getMulticastTimeToLive());
    try {
        boolean loopbackBind = bindAddress.getInetAddress().isLoopbackAddress();
        Boolean loopbackModeEnabled = multicastConfig.getLoopbackModeEnabled();
        if (loopbackModeEnabled != null) {
            // setting loopbackmode is just a hint - and the argument means "disable"!
            // to check the real value we call getLoopbackMode() (and again - return value means "disabled")
            multicastSocket.setLoopbackMode(!loopbackModeEnabled);
        }
        // If LoopBack mode is not enabled (i.e. getLoopbackMode return true) and bind address is a loopback one,
        // then print a warning
        if (loopbackBind && multicastSocket.getLoopbackMode()) {
            logger.warning("Hazelcast is bound to " + bindAddress.getHost() + " and loop-back mode is "
                    + "disabled. This could cause multicast auto-discovery issues "
                    + "and render it unable to work. Check your network connectivity, try to enable the "
                    + "loopback mode and/or force -Djava.net.preferIPv4Stack=true on your JVM.");
        }
        // warning: before modifying lines below, take a look at these links:
        // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4417033
        // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6402758
        // https://github.com/hazelcast/hazelcast/pull/19251#issuecomment-891375270
        boolean callSetInterface = OS.isMac() || !loopbackBind;
        String propSetInterface = hzProperties.getString(ClusterProperty.MULTICAST_SOCKET_SET_INTERFACE);
        if (propSetInterface != null) {
            // An explicit property always overrides the OS/bind-address heuristic above.
            callSetInterface = Boolean.parseBoolean(propSetInterface);
        }
        if (callSetInterface) {
            multicastSocket.setInterface(bindAddress.getInetAddress());
        }
    } catch (Exception e) {
        // Best effort: log and continue with socket defaults rather than abort discovery.
        logger.warning(e);
    }
    multicastSocket.setReceiveBufferSize(SOCKET_BUFFER_SIZE);
    multicastSocket.setSendBufferSize(SOCKET_BUFFER_SIZE);
    // The group can be overridden by system property; the effective value is written
    // back into the config before joining.
    String multicastGroup = hzProperties.getString(ClusterProperty.MULTICAST_GROUP);
    if (multicastGroup == null) {
        multicastGroup = multicastConfig.getMulticastGroup();
    }
    multicastConfig.setMulticastGroup(multicastGroup);
    multicastSocket.joinGroup(InetAddress.getByName(multicastGroup));
    multicastSocket.setSoTimeout(SOCKET_TIMEOUT);
}
@Test
public void testSetInterfaceDisabled() throws Exception {
    // With the set-interface property explicitly FALSE, setInterface() must never be called.
    Config config = createConfig(Boolean.FALSE);
    MulticastConfig multicastConfig = config.getNetworkConfig().getJoin().getMulticastConfig();
    MulticastSocket multicastSocket = mock(MulticastSocket.class);
    Address bindAddress = new Address("127.0.0.1", 5701);
    HazelcastProperties hzProperties = new HazelcastProperties(config);

    MulticastService.configureMulticastSocket(multicastSocket, bindAddress, hzProperties, multicastConfig, mock(ILogger.class));

    verify(multicastSocket, never()).setInterface(any());
}
public void installAll() { for (AvailableInterpreterInfo info : availableInterpreters) { install(info.name, info.artifact); } }
@Test void installAll() { installer.installAll(); assertTrue(new File(interpreterBaseDir, "intp1").isDirectory()); assertTrue(new File(interpreterBaseDir, "intp2").isDirectory()); }
public void close() { runWithLock( () -> { closed = true; if (!udfFinished) { cacheNotEmpty.signalAll(); waitUDFFinished(); } udfExecutor.shutdown(); }); }
@Test void testClose() throws ExecutionException, InterruptedException { // 1.Test close() when the cache is not empty in the MapPartitionIterator. CompletableFuture<?> udfFinishTrigger1 = new CompletableFuture<>(); MapPartitionIterator<String> iterator1 = new MapPartitionIterator<>( ignored -> { try { udfFinishTrigger1.get(); } catch (InterruptedException | ExecutionException e) { ExceptionUtils.rethrow(e); } }); iterator1.addRecord(RECORD); CompletableFuture<Object> mockedTaskThread1 = new CompletableFuture<>(); CompletableFuture<Object> iteratorCloseIdentifier1 = new CompletableFuture<>(); mockedTaskThread1.thenRunAsync( () -> { iterator1.close(); iteratorCloseIdentifier1.complete(null); }); mockedTaskThread1.complete(null); assertThat(iteratorCloseIdentifier1).isNotCompleted(); udfFinishTrigger1.complete(null); iteratorCloseIdentifier1.get(); assertThat(iteratorCloseIdentifier1).isCompleted(); // 2.Test close() when the cache is empty in the MapPartitionIterator. CompletableFuture<?> udfFinishTrigger2 = new CompletableFuture<>(); MapPartitionIterator<String> iterator2 = new MapPartitionIterator<>( ignored -> { try { udfFinishTrigger2.get(); } catch (InterruptedException | ExecutionException e) { ExceptionUtils.rethrow(e); } }); CompletableFuture<Object> mockedTaskThread2 = new CompletableFuture<>(); CompletableFuture<Object> iteratorCloseIdentifier2 = new CompletableFuture<>(); mockedTaskThread1.thenRunAsync( () -> { iterator2.close(); iteratorCloseIdentifier2.complete(null); }); mockedTaskThread2.complete(null); assertThat(iteratorCloseIdentifier2).isNotCompleted(); udfFinishTrigger2.complete(null); iteratorCloseIdentifier2.get(); assertThat(iteratorCloseIdentifier2).isCompleted(); // 2.Test close() when the udf is finished in the MapPartitionIterator. MapPartitionIterator<String> iterator3 = new MapPartitionIterator<>(ignored -> {}); iterator3.close(); }
public static Object[][] toObjectArray(Map<?, ?> map) { if (map == null) { return null; } final Object[][] result = new Object[map.size()][2]; if (map.isEmpty()) { return result; } int index = 0; for (Entry<?, ?> entry : map.entrySet()) { result[index][0] = entry.getKey(); result[index][1] = entry.getValue(); index++; } return result; }
@Test public void toObjectArrayTest() { final Map<String, String> map = MapUtil.newHashMap(true); map.put("a", "1"); map.put("b", "2"); map.put("c", "3"); map.put("d", "4"); final Object[][] objectArray = MapUtil.toObjectArray(map); assertEquals("a", objectArray[0][0]); assertEquals("1", objectArray[0][1]); assertEquals("b", objectArray[1][0]); assertEquals("2", objectArray[1][1]); assertEquals("c", objectArray[2][0]); assertEquals("3", objectArray[2][1]); assertEquals("d", objectArray[3][0]); assertEquals("4", objectArray[3][1]); }
@VisibleForTesting static Optional<String> findEntryClass(File jarFile) throws IOException { return findFirstManifestAttribute( jarFile, PackagedProgram.MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS, PackagedProgram.MANIFEST_ATTRIBUTE_MAIN_CLASS); }
@Test void testFindEntryClassAssemblerClassAndMainClass() throws IOException { // We want the assembler class entry to have precedence over main class File jarFile = createJarFileWithManifest( ImmutableMap.of( PackagedProgram.MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS, "AssemblerClass", PackagedProgram.MANIFEST_ATTRIBUTE_MAIN_CLASS, "MainClass")); Optional<String> entry = JarManifestParser.findEntryClass(jarFile); assertThat(entry).isPresent().get().isEqualTo("AssemblerClass"); }
public Schema mergeTables( Map<FeatureOption, MergingStrategy> mergingStrategies, Schema sourceSchema, List<SqlNode> derivedColumns, List<SqlWatermark> derivedWatermarkSpecs, SqlTableConstraint derivedPrimaryKey) { SchemaBuilder schemaBuilder = new SchemaBuilder( mergingStrategies, sourceSchema, (FlinkTypeFactory) validator.getTypeFactory(), dataTypeFactory, validator, escapeExpression); schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns); schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs); schemaBuilder.appendDerivedPrimaryKey(derivedPrimaryKey); return schemaBuilder.build(); }
@Test void mergeExcludingGeneratedColumnsDuplicate() { Schema sourceSchema = Schema.newBuilder() .column("one", DataTypes.INT()) .columnByExpression("two", "one + 1") .build(); List<SqlNode> derivedColumns = Collections.singletonList(computedColumn("two", plus("one", "3"))); Map<FeatureOption, MergingStrategy> mergingStrategies = getDefaultMergingStrategies(); mergingStrategies.put(FeatureOption.GENERATED, MergingStrategy.EXCLUDING); Schema mergedSchema = util.mergeTables( mergingStrategies, sourceSchema, derivedColumns, Collections.emptyList(), null); Schema expectedSchema = Schema.newBuilder() .column("one", DataTypes.INT()) .columnByExpression("two", "`one` + 3") .build(); assertThat(mergedSchema).isEqualTo(expectedSchema); }
public static void addSupplier(String interfaceName, Function<Invoker<?>, Object> supplier) { STUB_SUPPLIERS.put(interfaceName, supplier); }
@Test void addSupplier() { Invoker<?> invoker = Mockito.mock(Invoker.class); ServiceDescriptor descriptor = Mockito.mock(ServiceDescriptor.class); StubSuppliers.addSupplier(serviceName, i -> invoker); Assertions.assertEquals(invoker, StubSuppliers.createStub(serviceName, invoker)); }
/**
 * Main driver of the data sink task: initializes the OutputFormat and input readers, wires up
 * I/O metrics (falling back to a SimpleCounter on metric-setup failure), applies the configured
 * local input strategy (NONE or SORT via an external sorter), streams every input record into
 * the output format (with or without object reuse), and closes everything. On failure it makes
 * a best-effort cleanup via CleanupWhenUnsuccessful and rethrows unless the task was canceled.
 * NOTE(review): two string literals below appeared split by raw line breaks in the dump; they
 * are rejoined here — confirm against the original source.
 */
@Override
public void invoke() throws Exception {
    // --------------------------------------------------------------------
    // Initialize
    // --------------------------------------------------------------------
    LOG.debug(getLogString("Start registering input and output"));
    // initialize OutputFormat
    initOutputFormat();
    // initialize input readers
    try {
        initInputReaders();
    } catch (Exception e) {
        throw new RuntimeException(
                "Initializing the input streams failed"
                        + (e.getMessage() == null ? "." : ": " + e.getMessage()),
                e);
    }
    LOG.debug(getLogString("Finished registering input and output"));
    // --------------------------------------------------------------------
    // Invoke
    // --------------------------------------------------------------------
    LOG.debug(getLogString("Starting data sink operator"));
    RuntimeContext ctx = createRuntimeContext();
    final Counter numRecordsIn;
    {
        Counter tmpNumRecordsIn;
        try {
            InternalOperatorIOMetricGroup ioMetricGroup =
                    ((InternalOperatorMetricGroup) ctx.getMetricGroup()).getIOMetricGroup();
            ioMetricGroup.reuseInputMetricsForTask();
            ioMetricGroup.reuseOutputMetricsForTask();
            tmpNumRecordsIn = ioMetricGroup.getNumRecordsInCounter();
        } catch (Exception e) {
            LOG.warn("An exception occurred during the metrics setup.", e);
            tmpNumRecordsIn = new SimpleCounter();
        }
        numRecordsIn = tmpNumRecordsIn;
    }
    if (RichOutputFormat.class.isAssignableFrom(this.format.getClass())) {
        ((RichOutputFormat) this.format).setRuntimeContext(ctx);
        LOG.debug(getLogString("Rich Sink detected. Initializing runtime context."));
    }
    ExecutionConfig executionConfig = getExecutionConfig();
    boolean objectReuseEnabled = executionConfig.isObjectReuseEnabled();
    try {
        // initialize local strategies
        MutableObjectIterator<IT> input1;
        switch (this.config.getInputLocalStrategy(0)) {
            case NONE:
                // nothing to do
                localStrategy = null;
                input1 = reader;
                break;
            case SORT:
                // initialize sort local strategy
                try {
                    // get type comparator
                    TypeComparatorFactory<IT> compFact =
                            this.config.getInputComparator(0, getUserCodeClassLoader());
                    if (compFact == null) {
                        throw new Exception(
                                "Missing comparator factory for local strategy on input " + 0);
                    }
                    // initialize sorter
                    Sorter<IT> sorter =
                            ExternalSorter.newBuilder(
                                            getEnvironment().getMemoryManager(),
                                            this,
                                            this.inputTypeSerializerFactory.getSerializer(),
                                            compFact.createComparator())
                                    .maxNumFileHandles(this.config.getFilehandlesInput(0))
                                    .enableSpilling(
                                            getEnvironment().getIOManager(),
                                            this.config.getSpillingThresholdInput(0))
                                    .memoryFraction(this.config.getRelativeMemoryInput(0))
                                    .objectReuse(
                                            this.getExecutionConfig().isObjectReuseEnabled())
                                    .largeRecords(this.config.getUseLargeRecordHandler())
                                    .build(this.reader);
                    this.localStrategy = sorter;
                    input1 = sorter.getIterator();
                } catch (Exception e) {
                    throw new RuntimeException(
                            "Initializing the input processing failed"
                                    + (e.getMessage() == null ? "." : ": " + e.getMessage()),
                            e);
                }
                break;
            default:
                throw new RuntimeException("Invalid local strategy for DataSinkTask");
        }
        // read the reader and write it to the output
        final TypeSerializer<IT> serializer = this.inputTypeSerializerFactory.getSerializer();
        final MutableObjectIterator<IT> input = input1;
        final OutputFormat<IT> format = this.format;
        // check if task has been canceled
        if (this.taskCanceled) {
            return;
        }
        LOG.debug(getLogString("Starting to produce output"));
        // open
        format.open(
                new InitializationContext() {
                    @Override
                    public int getNumTasks() {
                        return getEnvironment().getTaskInfo().getNumberOfParallelSubtasks();
                    }

                    @Override
                    public int getTaskNumber() {
                        return getEnvironment().getTaskInfo().getIndexOfThisSubtask();
                    }

                    @Override
                    public int getAttemptNumber() {
                        return getEnvironment().getTaskInfo().getAttemptNumber();
                    }
                });
        if (objectReuseEnabled) {
            IT record = serializer.createInstance();
            // work!
            while (!this.taskCanceled && ((record = input.next(record)) != null)) {
                numRecordsIn.inc();
                format.writeRecord(record);
            }
        } else {
            IT record;
            // work!
            while (!this.taskCanceled && ((record = input.next()) != null)) {
                numRecordsIn.inc();
                format.writeRecord(record);
            }
        }
        // close. We close here such that a regular close throwing an exception marks a task as
        // failed.
        if (!this.taskCanceled) {
            this.format.close();
            this.format = null;
        }
    } catch (Exception ex) {
        // make a best effort to clean up
        try {
            if (!cleanupCalled && format instanceof CleanupWhenUnsuccessful) {
                cleanupCalled = true;
                ((CleanupWhenUnsuccessful) format).tryCleanupOnError();
            }
        } catch (Throwable t) {
            LOG.error("Cleanup on error failed.", t);
        }
        ex = ExceptionInChainedStubException.exceptionUnwrap(ex);
        if (ex instanceof CancelTaskException) {
            // forward canceling exception
            throw ex;
        }
        // drop, if the task was canceled
        else if (!this.taskCanceled) {
            if (LOG.isErrorEnabled()) {
                LOG.error(getLogString("Error in user code: " + ex.getMessage()), ex);
            }
            throw ex;
        }
    } finally {
        if (this.format != null) {
            // close format, if it has not been closed, yet.
            // This should only be the case if we had a previous error, or were canceled.
            try {
                this.format.close();
            } catch (Throwable t) {
                if (LOG.isWarnEnabled()) {
                    LOG.warn(getLogString("Error closing the output format"), t);
                }
            }
        }
        // close local strategy if necessary
        if (localStrategy != null) {
            try {
                this.localStrategy.close();
            } catch (Throwable t) {
                LOG.error("Error closing local strategy", t);
            }
        }
        BatchTask.clearReaders(new MutableReader<?>[] {inputReader});
    }
    if (!this.taskCanceled) {
        LOG.debug(getLogString("Finished data sink operator"));
    } else {
        LOG.debug(getLogString("Data sink operator cancelled"));
    }
}
@Test @SuppressWarnings("unchecked") void testFailingSortingDataSinkTask() { int keyCnt = 100; int valCnt = 20; double memoryFraction = 1.0; super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE); super.addInput(new UniformRecordGenerator(keyCnt, valCnt, true), 0); DataSinkTask<Record> testTask = new DataSinkTask<>(this.mockEnv); Configuration stubParams = new Configuration(); // set sorting super.getTaskConfig().setInputLocalStrategy(0, LocalStrategy.SORT); super.getTaskConfig() .setInputComparator( new RecordComparatorFactory(new int[] {1}, (new Class[] {IntValue.class})), 0); super.getTaskConfig().setRelativeMemoryInput(0, memoryFraction); super.getTaskConfig().setFilehandlesInput(0, 8); super.getTaskConfig().setSpillingThresholdInput(0, 0.8f); File tempTestFile = new File(tempFolder.toFile(), UUID.randomUUID().toString()); super.registerFileOutputTask( MockFailingOutputFormat.class, tempTestFile.toURI().toString(), stubParams); boolean stubFailed = false; try { testTask.invoke(); } catch (Exception e) { stubFailed = true; } assertThat(stubFailed).withFailMessage("Function exception was not forwarded.").isTrue(); // assert that temp file was removed assertThat(tempTestFile) .withFailMessage("Temp output file has not been removed") .doesNotExist(); }
@Override public String generatePoetStringTypes() { StringBuilder symbolBuilder = new StringBuilder(); if (getMethodReturnType().equals(theContract)) { symbolBuilder.append("$L = $T."); } else { symbolBuilder.append("$T $L = $L."); } symbolBuilder .append(method.getName()) .append("(") .append(getPoetFormatSpecifier()) .append(").send()"); return symbolBuilder.toString(); }
@Test public void testGenerateJavaPoetStringTypesWhenReturnTypeIsContract() { List<Method> listOfFilteredMethods = MethodFilter.extractValidMethods(greeterContractClass); Method deploy = listOfFilteredMethods.stream() .filter(m -> m.getName().equals("deploy")) .collect(Collectors.toList()) .get(0); JavaParser parser = new JavaParser(greeterContractClass, deploy, new JavaMappingHelper()); assertEquals("$L = $T.deploy($L, $L, $L, $S).send()", parser.generatePoetStringTypes()); }
@Override public void configure(final KsqlConfig config) { if (!config.getKsqlStreamConfigProps().containsKey(StreamsConfig.APPLICATION_SERVER_CONFIG)) { throw new IllegalArgumentException("Need KS application server set"); } this.primaryContext.configure(config); }
@Test public void shouldThrowIfConfigureNotCalledWithAppServerConfig() { // When/Then: setupKsqlEngineWithSharedRuntimeEnabled(); assertThrows(IllegalArgumentException.class, () -> ksqlEngine.configure(KsqlConfig.empty())); }
@SuppressWarnings("nls") private void initialize() { LOG.debug("ObjectStore, initialize called"); // if this method fails, PersistenceManagerProvider will retry for the configured number of times // before giving up boolean isForCompactor = MetastoreConf.getBoolVar(conf, COMPACTOR_USE_CUSTOM_POOL); pm = PersistenceManagerProvider.getPersistenceManager(isForCompactor); LOG.info("RawStore: {}, with PersistenceManager: {}" + " created in the thread with id: {}", this, pm, Thread.currentThread().getId()); String productName = MetaStoreDirectSql.getProductName(pm); sqlGenerator = new SQLGenerator(DatabaseProduct.determineDatabaseProduct(productName, conf), conf); isInitialized = pm != null; if (isInitialized) { dbType = determineDatabaseProduct(); expressionProxy = PartFilterExprUtil.createExpressionProxy(conf); if (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL)) { String schema = PersistenceManagerProvider.getProperty("javax.jdo.mapping.Schema"); schema = org.apache.commons.lang3.StringUtils.defaultIfBlank(schema, null); directSql = new MetaStoreDirectSql(pm, conf, schema); } } if (propertyStore == null) { propertyStore = new CachingPropertyStore(new JdoPropertyStore(this), conf); } }
@Test public void testDirectSqlErrorMetrics() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true); Metrics.initialize(conf); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES, "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " + "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter" ); // recall setup so that we get an object store with the metrics initalized setUp(); Counter directSqlErrors = Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS); objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) { @Override protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException { return null; } @Override protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException, NoSuchObjectException { return null; } }.run(false); Assert.assertEquals(0, directSqlErrors.getCount()); objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) { @Override protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException { throw new RuntimeException(); } @Override protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException, NoSuchObjectException { return null; } }.run(false); Assert.assertEquals(1, directSqlErrors.getCount()); }
@Override public void transform(Message message, DataType fromType, DataType toType) { ProtobufSchema schema = message.getExchange().getProperty(SchemaHelper.CONTENT_SCHEMA, ProtobufSchema.class); if (schema == null) { throw new CamelExecutionException("Missing proper Protobuf schema for data type processing", message.getExchange()); } try { byte[] marshalled; String contentClass = SchemaHelper.resolveContentClass(message.getExchange(), null); if (contentClass != null) { Class<?> contentType = message.getExchange().getContext().getClassResolver().resolveMandatoryClass(contentClass); marshalled = Protobuf.mapper().writer().forType(contentType).with(schema) .writeValueAsBytes(message.getBody()); } else { marshalled = Protobuf.mapper().writer().forType(JsonNode.class).with(schema) .writeValueAsBytes(getBodyAsJsonNode(message, schema)); } message.setBody(marshalled); message.setHeader(Exchange.CONTENT_TYPE, MimeType.PROTOBUF_BINARY.type()); message.setHeader(SchemaHelper.CONTENT_SCHEMA, schema.getSource().toString()); } catch (InvalidPayloadException | IOException | ClassNotFoundException e) { throw new CamelExecutionException( "Failed to apply Protobuf binary data type on exchange", message.getExchange(), e); } }
@Test void shouldHandleExplicitContentClass() throws Exception { Exchange exchange = new DefaultExchange(camelContext); ProtobufSchema protobufSchema = getSchema(); exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, protobufSchema); exchange.setProperty(SchemaHelper.CONTENT_CLASS, Person.class.getName()); exchange.getMessage().setBody(new Person("Donald", 19)); transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY); JSONAssert.assertEquals(""" {"name":"Donald","age":19} """, Json.mapper().writeValueAsString( Protobuf.mapper().reader().with(protobufSchema).readTree(exchange.getMessage().getBody(byte[].class))), true); }
@Override Object getValue(Object obj) throws Exception { return NULL_MULTIVALUE_RESULT; }
@Test public void test_getValue() throws Exception { Object value = NullMultiValueGetter.NULL_MULTIVALUE_GETTER.getValue("anything"); assertInstanceOf(MultiResult.class, value); assertTrue(((MultiResult) value).isNullEmptyTarget()); }
@Override @DataPermission(enable = false) // 禁用数据权限,避免建立不正确的缓存 @Cacheable(cacheNames = RedisKeyConstants.DEPT_CHILDREN_ID_LIST, key = "#id") public Set<Long> getChildDeptIdListFromCache(Long id) { List<DeptDO> children = getChildDeptList(id); return convertSet(children, DeptDO::getId); }
@Test public void testGetChildDeptListFromCache() { // mock 数据(1 级别子节点) DeptDO dept1 = randomPojo(DeptDO.class, o -> o.setName("1")); deptMapper.insert(dept1); DeptDO dept2 = randomPojo(DeptDO.class, o -> o.setName("2")); deptMapper.insert(dept2); // mock 数据(2 级子节点) DeptDO dept1a = randomPojo(DeptDO.class, o -> o.setName("1-a").setParentId(dept1.getId())); deptMapper.insert(dept1a); DeptDO dept2a = randomPojo(DeptDO.class, o -> o.setName("2-a").setParentId(dept2.getId())); deptMapper.insert(dept2a); // 准备参数 Long id = dept1.getParentId(); // 调用 Set<Long> result = deptService.getChildDeptIdListFromCache(id); // 断言 assertEquals(result.size(), 2); assertTrue(result.contains(dept1.getId())); assertTrue(result.contains(dept1a.getId())); }
UuidGenerator loadUuidGenerator() { Class<? extends UuidGenerator> objectFactoryClass = options.getUuidGeneratorClass(); ClassLoader classLoader = classLoaderSupplier.get(); ServiceLoader<UuidGenerator> loader = ServiceLoader.load(UuidGenerator.class, classLoader); if (objectFactoryClass == null) { return loadSingleUuidGeneratorOrDefault(loader); } return loadSelectedUuidGenerator(loader, objectFactoryClass); }
@Test void test_case_11() { Options options = () -> null; UuidGeneratorServiceLoader loader = new UuidGeneratorServiceLoader( () -> new ServiceLoaderTestClassLoader(UuidGenerator.class, OtherGenerator.class), options); assertThat(loader.loadUuidGenerator(), instanceOf(OtherGenerator.class)); }
@Override public boolean decide(final SelectStatementContext selectStatementContext, final List<Object> parameters, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final SingleRule rule, final Collection<DataNode> includedDataNodes) { Collection<QualifiedTable> singleTables = getSingleTables(selectStatementContext, database, rule); if (singleTables.isEmpty()) { return false; } if (containsView(database, singleTables)) { return true; } if (!includedDataNodes.isEmpty() && !isInnerCommaJoin(selectStatementContext.getSqlStatement())) { return true; } boolean result = rule.isAllTablesInSameComputeNode(includedDataNodes, singleTables); includedDataNodes.addAll(getTableDataNodes(rule, singleTables)); return !result; }
@Test void assertDecideWhenAllTablesInSameComputeNode() { Collection<QualifiedTable> qualifiedTables = Arrays.asList(new QualifiedTable(DefaultDatabase.LOGIC_NAME, "t_order"), new QualifiedTable(DefaultDatabase.LOGIC_NAME, "t_order_item")); SingleRule rule = createSingleRule(qualifiedTables); SelectStatementContext select = createStatementContext(); Collection<DataNode> includedDataNodes = new HashSet<>(Collections.singleton(new DataNode("ds_0", "t_user"))); when(rule.isAllTablesInSameComputeNode(includedDataNodes, qualifiedTables)).thenReturn(true); assertFalse(new SingleSQLFederationDecider().decide(select, Collections.emptyList(), mock(RuleMetaData.class), createDatabase(), rule, includedDataNodes)); assertThat(includedDataNodes.size(), is(3)); }
@Override @SuppressWarnings("unchecked") public void onApplicationEvent(@NotNull final DataChangedEvent event) { for (DataChangedListener listener : listeners) { if ((!(listener instanceof AbstractDataChangedListener)) && clusterProperties.isEnabled() && Objects.nonNull(shenyuClusterSelectMasterService) && !shenyuClusterSelectMasterService.isMaster()) { LOG.info("received DataChangedEvent, not master, pass"); return; } if (LOG.isDebugEnabled()) { LOG.debug("received DataChangedEvent, dispatching, event:{}", JsonUtils.toJson(event)); } switch (event.getGroupKey()) { case APP_AUTH: listener.onAppAuthChanged((List<AppAuthData>) event.getSource(), event.getEventType()); break; case PLUGIN: listener.onPluginChanged((List<PluginData>) event.getSource(), event.getEventType()); break; case RULE: listener.onRuleChanged((List<RuleData>) event.getSource(), event.getEventType()); break; case SELECTOR: listener.onSelectorChanged((List<SelectorData>) event.getSource(), event.getEventType()); break; case META_DATA: listener.onMetaDataChanged((List<MetaData>) event.getSource(), event.getEventType()); break; case PROXY_SELECTOR: listener.onProxySelectorChanged((List<ProxySelectorData>) event.getSource(), event.getEventType()); break; case DISCOVER_UPSTREAM: listener.onDiscoveryUpstreamChanged((List<DiscoverySyncData>) event.getSource(), event.getEventType()); applicationContext.getBean(LoadServiceDocEntry.class).loadDocOnUpstreamChanged((List<DiscoverySyncData>) event.getSource(), event.getEventType()); break; default: throw new IllegalStateException("Unexpected value: " + event.getGroupKey()); } } }
@Test public void onApplicationEventWithRuleConfigGroupTest() { when(clusterProperties.isEnabled()).thenReturn(true); when(shenyuClusterSelectMasterService.isMaster()).thenReturn(true); ConfigGroupEnum configGroupEnum = ConfigGroupEnum.RULE; DataChangedEvent dataChangedEvent = new DataChangedEvent(configGroupEnum, null, new ArrayList<>()); dataChangedEventDispatcher.onApplicationEvent(dataChangedEvent); verify(httpLongPollingDataChangedListener, times(1)).onRuleChanged(anyList(), any()); verify(nacosDataChangedListener, times(1)).onRuleChanged(anyList(), any()); verify(websocketDataChangedListener, times(1)).onRuleChanged(anyList(), any()); verify(zookeeperDataChangedListener, times(1)).onRuleChanged(anyList(), any()); }
public static String padString(int level) { return padString(level, 2); }
@Test public void testPad() { assertEquals(" ", StringHelper.padString(1)); assertEquals(" ", StringHelper.padString(2)); assertEquals(" ", StringHelper.padString(3)); assertEquals(" ", StringHelper.padString(3, 1)); assertEquals(" ", StringHelper.padString(1, 1)); assertEquals("", StringHelper.padString(0)); assertEquals("", StringHelper.padString(0, 2)); }
@Override public void acquirePermissionToRemove(OrchestratorContext context, ApplicationApi applicationApi) throws HostStateChangeDeniedException { ApplicationInstanceStatus applicationStatus = applicationApi.getApplicationStatus(); if (applicationStatus == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) { throw new HostStateChangeDeniedException( applicationApi.getNodeGroup(), HostedVespaPolicy.APPLICATION_SUSPENDED_CONSTRAINT, "Unable to test availability constraints as the application " + applicationApi.applicationId() + " is allowed to be down"); } // Apply per-cluster policy for (ClusterApi cluster : applicationApi.getClusters()) { clusterPolicy.verifyGroupGoingDownPermanentlyIsFine(cluster); } // Get permission from the Cluster Controller to remove the content nodes. for (StorageNode storageNode : applicationApi.getStorageNodesInGroupInClusterOrder()) { // Consider changing the semantics of setting storage node state to DOWN in cluster controller, to avoid 2 calls. storageNode.setStorageNodeState(context.createSubcontextForSingleAppOp(true), ClusterControllerNodeState.DOWN); storageNode.forceDistributorState(context, ClusterControllerNodeState.DOWN); } // Ensure all nodes in the group are marked as permanently down for (HostName hostName : applicationApi.getNodesInGroupWith(status -> status != HostStatus.PERMANENTLY_DOWN)) { applicationApi.setHostState(context, hostName, HostStatus.PERMANENTLY_DOWN); } }
// End-to-end ordering test for acquirePermissionToRemove: cluster policies first, then storage
// node state changes, then host-state updates.
@Test
public void testAcquirePermissionToRemove() throws OrchestrationException {
    final HostedVespaClusterPolicy clusterPolicy = mock(HostedVespaClusterPolicy.class);
    final HostedVespaPolicy policy = new HostedVespaPolicy(clusterPolicy, clientFactory, applicationApiFactory, flagSource);
    final ApplicationApi applicationApi = mock(ApplicationApi.class);
    when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("tenant:app:default"));
    ClusterApi clusterApi1 = mock(ClusterApi.class);
    ClusterApi clusterApi2 = mock(ClusterApi.class);
    ClusterApi clusterApi3 = mock(ClusterApi.class);
    List<ClusterApi> clusterApis = List.of(clusterApi1, clusterApi2, clusterApi3);
    when(applicationApi.getClusters()).thenReturn(clusterApis);
    StorageNode storageNode1 = mock(StorageNode.class);
    HostName hostName1 = new HostName("storage-1");
    when(storageNode1.hostName()).thenReturn(hostName1);
    HostName hostName2 = new HostName("host-2");
    StorageNode storageNode3 = mock(StorageNode.class);
    HostName hostName3 = new HostName("storage-3");
    // BUGFIX: this stub previously targeted storageNode1 again (copy-paste), overwriting its
    // host name with hostName3 and leaving storageNode3 unstubbed.
    when(storageNode3.hostName()).thenReturn(hostName3);
    List<StorageNode> upStorageNodes = List.of(storageNode1, storageNode3);
    when(applicationApi.getStorageNodesInGroupInClusterOrder()).thenReturn(upStorageNodes);
    List<HostName> noRemarksHostNames = List.of(hostName1, hostName2, hostName3);
    when(applicationApi.getNodesInGroupWith(any())).thenReturn(noRemarksHostNames);
    InOrder order = inOrder(applicationApi, clusterPolicy, storageNode1, storageNode3);
    OrchestratorContext context = mock(OrchestratorContext.class);
    OrchestratorContext probeContext = mock(OrchestratorContext.class);
    when(context.createSubcontextForSingleAppOp(true)).thenReturn(probeContext);
    policy.acquirePermissionToRemove(context, applicationApi);
    order.verify(applicationApi).getClusters();
    order.verify(clusterPolicy).verifyGroupGoingDownPermanentlyIsFine(clusterApi1);
    order.verify(clusterPolicy).verifyGroupGoingDownPermanentlyIsFine(clusterApi2);
    order.verify(clusterPolicy).verifyGroupGoingDownPermanentlyIsFine(clusterApi3);
    order.verify(applicationApi).getStorageNodesInGroupInClusterOrder();
    order.verify(storageNode1).setStorageNodeState(probeContext, ClusterControllerNodeState.DOWN);
    // The policy also forces the distributor state DOWN per node; without these verifications
    // InOrder.verifyNoMoreInteractions() would fail on the unverified calls.
    order.verify(storageNode1).forceDistributorState(context, ClusterControllerNodeState.DOWN);
    order.verify(storageNode3).setStorageNodeState(probeContext, ClusterControllerNodeState.DOWN);
    order.verify(storageNode3).forceDistributorState(context, ClusterControllerNodeState.DOWN);
    order.verify(applicationApi).getNodesInGroupWith(any());
    order.verify(applicationApi).setHostState(context, hostName1, HostStatus.PERMANENTLY_DOWN);
    order.verify(applicationApi).setHostState(context, hostName2, HostStatus.PERMANENTLY_DOWN);
    order.verify(applicationApi).setHostState(context, hostName3, HostStatus.PERMANENTLY_DOWN);
    order.verifyNoMoreInteractions();
}
@Override public @Nullable String getFilename() { if (gcsPath.getNameCount() <= 1) { return null; } else { GcsPath gcsFilename = gcsPath.getFileName(); return gcsFilename == null ? null : gcsFilename.toString(); } }
@Test public void testGetFilename() { assertNull(toResourceIdentifier("gs://my_bucket/").getFilename()); assertEquals("abc", toResourceIdentifier("gs://my_bucket/abc").getFilename()); assertEquals("abc", toResourceIdentifier("gs://my_bucket/abc/").getFilename()); assertEquals("xyz.txt", toResourceIdentifier("gs://my_bucket/abc/xyz.txt").getFilename()); }
@Override public String getLogChannelId() { return log.getLogChannelId(); }
@Test public void testTwoTransformationsGetSameLogChannelId() throws Exception { Trans trans1 = new Trans( meta ); Trans trans2 = new Trans( meta ); assertEquals( trans1.getLogChannelId(), trans2.getLogChannelId() ); }
public void triggerRemoveOne(final String selectorId, final Upstream upstream) { removeFromMap(healthyUpstream, selectorId, upstream); removeFromMap(unhealthyUpstream, selectorId, upstream); }
@Test public void testTriggerRemoveOne() { final String selectorId = "s1"; Upstream upstream = mock(Upstream.class); healthCheckTask.triggerAddOne(selectorId, upstream); healthCheckTask.triggerRemoveOne(selectorId, upstream); assertThat(healthCheckTask.getHealthyUpstream().get(selectorId).size(), is(0)); healthCheckTask.triggerAddOne(selectorId, upstream); healthCheckTask.triggerRemoveOne(selectorId, upstream); assertThat(healthCheckTask.getHealthyUpstream().get(selectorId).size(), is(0)); }
public synchronized void start() throws IllegalStateException, StreamsException { if (setState(State.REBALANCING)) { log.debug("Starting Streams client"); if (globalStreamThread != null) { globalStreamThread.start(); } final int numThreads = processStreamThread(StreamThread::start); log.info("Started {} stream threads", numThreads); final Long cleanupDelay = applicationConfigs.getLong(StreamsConfig.STATE_CLEANUP_DELAY_MS_CONFIG); stateDirCleaner.scheduleAtFixedRate(() -> { // we do not use lock here since we only read on the value and act on it if (state == State.RUNNING) { stateDirectory.cleanRemovedTasks(cleanupDelay); } }, cleanupDelay, cleanupDelay, TimeUnit.MILLISECONDS); final long recordingDelay = 0; final long recordingInterval = 1; if (rocksDBMetricsRecordingService != null) { rocksDBMetricsRecordingService.scheduleAtFixedRate( streamsMetrics.rocksDBMetricsRecordingTrigger(), recordingDelay, recordingInterval, TimeUnit.MINUTES ); } } else { throw new IllegalStateException("The client is either already started or already stopped, cannot re-start"); } }
@Test public void shouldTriggerRecordingOfRocksDBMetricsIfRecordingLevelIsDebug() throws Exception { prepareStreams(); prepareStreamThread(streamThreadOne, 1); prepareStreamThread(streamThreadTwo, 2); prepareTerminableThread(streamThreadOne); try (final MockedStatic<Executors> executorsMockedStatic = mockStatic(Executors.class)) { final ScheduledExecutorService cleanupSchedule = mock(ScheduledExecutorService.class); final ScheduledExecutorService rocksDBMetricsRecordingTriggerThread = mock(ScheduledExecutorService.class); executorsMockedStatic.when(() -> Executors.newSingleThreadScheduledExecutor( any(ThreadFactory.class))).thenReturn(cleanupSchedule, rocksDBMetricsRecordingTriggerThread); final StreamsBuilder builder = new StreamsBuilder(); builder.table("topic", Materialized.as("store")); props.setProperty(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, RecordingLevel.DEBUG.name()); try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) { streams.start(); } executorsMockedStatic.verify(() -> Executors.newSingleThreadScheduledExecutor(any(ThreadFactory.class)), times(2)); verify(rocksDBMetricsRecordingTriggerThread).scheduleAtFixedRate(any(RocksDBMetricsRecordingTrigger.class), eq(0L), eq(1L), eq(TimeUnit.MINUTES)); verify(rocksDBMetricsRecordingTriggerThread).shutdownNow(); } }
// Shuts down the underlying ThreadPoolExecutor, wakes the task-launching loop so it
// can observe the shutdown and exit, then propagates any failure from that loop.
// NOTE(review): tasks.notify() wakes only one waiter — this assumes exactly one
// thread ever waits on `tasks` (the launchTasks loop); confirm, else use notifyAll().
@Override public void shutdown() { threadPoolExecutor.shutdown(); synchronized (tasks) {
    // Notify tasks which checks to see if the ThreadPoolExecutor is shutdown and exits cleanly.
    tasks.notify(); }
    // Re-throw any errors during shutdown of the launchTasks thread.
    try { launchTasks.get(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } catch (ExecutionException e) { throw new RuntimeException(e.getCause()); } }
// shutdownNow() must return all still-pending scheduled work (one-shot and periodic)
// without ever running it, and the service must then report shutdown/terminated.
@Test public void testShutdown() throws Exception { FastNanoClockAndSleeper fastNanoClockAndSleeper = new FastNanoClockAndSleeper(); UnboundedScheduledExecutorService executorService = new UnboundedScheduledExecutorService(fastNanoClockAndSleeper); Runnable runnable1 = Mockito.mock(Runnable.class); Runnable runnable2 = Mockito.mock(Runnable.class); Runnable runnable3 = Mockito.mock(Runnable.class); Callable<?> callable1 = Mockito.mock(Callable.class); Future<?> rFuture1 = executorService.schedule(runnable1, 10, SECONDS); Future<?> cFuture1 = executorService.schedule(callable1, 10, SECONDS); Future<?> rFuture2 = executorService.scheduleAtFixedRate(runnable2, 10, 10, SECONDS); Future<?> rFuture3 = executorService.scheduleWithFixedDelay(runnable3, 10, 10, SECONDS); assertThat( executorService.shutdownNow(), IsIterableContaining.hasItems( (Runnable) rFuture1, (Runnable) rFuture2, (Runnable) rFuture3, (Runnable) cFuture1)); verifyNoInteractions(runnable1, runnable2, runnable3, callable1); assertTrue(executorService.isShutdown()); assertTrue(executorService.awaitTermination(10, SECONDS)); assertTrue(executorService.isTerminated()); }
@Override public List<MetricFamilySamples> collect() { try { return exporter.export("Prometheus") .<List<MetricFamilySamples>>map(optional -> Collections.singletonList((GaugeMetricFamily) optional.getRawMetricFamilyObject())).orElse(Collections.emptyList()); // CHECKSTYLE:OFF } catch (final Exception ex) { // CHECKSTYLE:ON log.warn("Collect metrics error: {}", ex.getMessage()); } return Collections.emptyList(); }
// An exporter that returns Optional.empty() must yield an empty collection, not null.
@Test void assertCollectWithAbsentMetricsExporter() { MetricsExporter exporter = mock(MetricsExporter.class); when(exporter.export("Prometheus")).thenReturn(Optional.empty()); assertTrue(new PrometheusMetricsExporter(exporter).collect().isEmpty()); }
/**
 * Executes the registry endpoint call, starting from the endpoint provider's
 * initial route under the registry's "/v2/" API base.
 *
 * @return the value produced by the endpoint provider for the response
 * @throws IOException on I/O failure while talking to the registry
 * @throws RegistryException on registry-level errors
 */
T call() throws IOException, RegistryException {
  final String base = "https://" + registryEndpointRequestProperties.getServerUrl() + "/v2/";
  return call(registryEndpointProvider.getApiRoute(base));
}
// A zero HTTP timeout system property must be passed through to the request unchanged.
@Test public void testHttpTimeout_0accepted() throws IOException, RegistryException { ArgumentCaptor<Request> requestCaptor = ArgumentCaptor.forClass(Request.class); Mockito.when(mockHttpClient.call(Mockito.any(), Mockito.any(), requestCaptor.capture())) .thenReturn(mockResponse); System.setProperty(JibSystemProperties.HTTP_TIMEOUT, "0"); endpointCaller.call(); Assert.assertEquals(0, new RequestWrapper(requestCaptor.getValue()).getHttpTimeout()); }
/**
 * Validates an external issue report in either the current format (rules + issues)
 * or the deprecated format (issues only, which triggers a deprecation warning).
 *
 * @throws IllegalStateException when the report carries no issues at all
 */
public void validate(ExternalIssueReport report, Path reportPath) {
  if (report.issues == null) {
    throw new IllegalStateException(String.format("Failed to parse report '%s': invalid report detected.", reportPath));
  }
  if (report.rules != null) {
    // Current (Clean Code Taxonomy) format: rules are validated first so issues
    // can be checked against the declared rule ids.
    Set<String> ruleIds = validateRules(report.rules, reportPath);
    validateIssuesCctFormat(report.issues, ruleIds, reportPath);
  } else {
    String documentationLink = documentationLinkGenerator.getDocumentationLink(DOCUMENTATION_SUFFIX);
    LOGGER.warn("External issues were imported with a deprecated format which will be removed soon. "
        + "Please switch to the newest format to fully benefit from Clean Code: {}", documentationLink);
    validateIssuesDeprecatedFormat(report.issues, reportPath);
  }
}
// Both a null and an empty rule id must be rejected with the same error message.
@Test public void validate_whenMissingOrEmptyRuleIdField_shouldThrowException() throws IOException { String errorMessage = "Failed to parse report 'report-path': missing mandatory field 'id'."; ExternalIssueReport report = read(REPORTS_LOCATION); report.rules[0].id = null; assertThatThrownBy(() -> validator.validate(report, reportPath)) .isInstanceOf(IllegalStateException.class) .hasMessage(errorMessage); report.rules[0].id = ""; assertThatThrownBy(() -> validator.validate(report, reportPath)) .isInstanceOf(IllegalStateException.class) .hasMessage(errorMessage); }
// Routes a pull query: locates the hosts owning each key/partition, fails fast with a
// MaterializationException when any partition has no selectable host, strips filtered
// hosts, and hands the remaining locations to the coordinator executor which performs
// the routing rounds asynchronously. The returned future completes (or completes
// exceptionally) when all rounds finish.
public CompletableFuture<Void> handlePullQuery( final ServiceContext serviceContext, final PullPhysicalPlan pullPhysicalPlan, final ConfiguredStatement<Query> statement, final RoutingOptions routingOptions, final PullQueryWriteStream pullQueryQueue, final CompletableFuture<Void> shouldCancelRequests ) { final List<KsqlPartitionLocation> allLocations = pullPhysicalPlan.getMaterialization().locator() .locate( pullPhysicalPlan.getKeys(), routingOptions, routingFilterFactory, pullPhysicalPlan.getPlanType() == PullPhysicalPlanType.RANGE_SCAN ); final Map<Integer, List<Host>> emptyPartitions = allLocations.stream() .filter(loc -> loc.getNodes().stream().noneMatch(node -> node.getHost().isSelected())) .collect(Collectors.toMap( KsqlPartitionLocation::getPartition, loc -> loc.getNodes().stream().map(KsqlNode::getHost).collect(Collectors.toList()))); if (!emptyPartitions.isEmpty()) { final MaterializationException materializationException = new MaterializationException( "Unable to execute pull query. " + emptyPartitions.entrySet() .stream() .map(kv -> String.format( "Partition %s failed to find valid host. Hosts scanned: %s", kv.getKey(), kv.getValue())) .collect(Collectors.joining(", ", "[", "]"))); LOG.debug(materializationException.getMessage()); throw materializationException; } // at this point we should filter out the hosts that we should not route to final List<KsqlPartitionLocation> locations = allLocations .stream() .map(KsqlPartitionLocation::removeFilteredHosts) .collect(Collectors.toList()); final CompletableFuture<Void> completableFuture = new CompletableFuture<>(); coordinatorExecutorService.submit(() -> { try { executeRounds(serviceContext, pullPhysicalPlan, statement, routingOptions, locations, pullQueryQueue, shouldCancelRequests); completableFuture.complete(null); } catch (Throwable t) { completableFuture.completeExceptionally(t); } }); return completableFuture; }
// A forwarded node that streams back a header with a mismatching schema followed by an
// error row must fail the routing future; no rows may reach the queue, and the
// suppressed exception must describe the schema mismatch.
@Test public void forwardingError_invalidSchema() {
    // Given:
    locate(location5); when(ksqlClient.makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any(), any())) .thenAnswer(i -> { Map<String, ?> requestProperties = i.getArgument(3); WriteStream<List<StreamedRow>> rowConsumer = i.getArgument(4); assertThat(requestProperties.get(KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_PARTITIONS), is ("4")); rowConsumer.write( ImmutableList.of( StreamedRow.header(queryId, logicalSchema2).withSourceHost( new KsqlHostInfoEntity( node2.location().getHost(), node2.location().getPort())), StreamedRow.error(new RuntimeException("Row Error!"), 500))); return RestResponse.successful(200, 2); } );
    // When:
    CompletableFuture<Void> future = haRouting.handlePullQuery( serviceContext, pullPhysicalPlan, statement, routingOptions, pullQueryQueue, disconnect); final Exception e = assertThrows( ExecutionException.class, future::get );
    // Then:
    assertThat(pullQueryQueue.size(), is(0)); assertThat(Throwables.getRootCause(e).getSuppressed()[0].getMessage(), containsString("Schemas logicalSchema2 from host node2 differs from schema logicalSchema")); }
/**
 * Delegates authentication evaluation to the configured strategy.
 * A {@code null} context is silently ignored (treated as nothing to evaluate).
 */
public void evaluate(AuthenticationContext context) {
    if (context != null) {
        this.authenticationStrategy.evaluate(context);
    }
}
// With authentication disabled, evaluation of any context must be a no-op (no throw).
// NOTE(review): skipped on macOS via MixAll.isMac() — presumably an environment
// limitation of the test; confirm before relying on coverage there.
@Test public void evaluate5() { if (MixAll.isMac()) { return; } this.authConfig.setAuthenticationEnabled(false); this.evaluator = new AuthenticationEvaluator(authConfig); DefaultAuthenticationContext context = new DefaultAuthenticationContext(); context.setRpcCode("11"); context.setUsername("test"); context.setContent("test".getBytes(StandardCharsets.UTF_8)); context.setSignature("test"); this.evaluator.evaluate(context); }
// Console command handler for: executeOnKey <echo-string> <key>
// Delegates to doExecute with onKey=true, onMember=false (flag meanings inferred
// from the call site — TODO confirm against doExecute's signature).
private void executeOnKey(String[] args) {
    // executeOnKey <echo-string> <key>
    doExecute(true, false, args);
}
// Each executeOnKey command must run the echo task and print its message argument.
@Test public void executeOnKey() { ClientConsoleApp consoleApp = new ClientConsoleApp(hazelcastFactory.newHazelcastClient(new ClientConfig()), printWriter); for (int i = 0; i < 100; i++) { consoleApp.handleCommand(String.format("executeOnKey message%d key%d", i, i)); assertTextInSystemOut("message" + i); } }
// Admin endpoint: returns publishing info for the given user (provider + login name).
// Requires an admin user; ErrorResultException (e.g. not-admin, not-found) is mapped
// to the corresponding error response instead of propagating.
@GetMapping( path = "/admin/publisher/{provider}/{loginName}", produces = MediaType.APPLICATION_JSON_VALUE ) public ResponseEntity<UserPublishInfoJson> getUserPublishInfo(@PathVariable String provider, @PathVariable String loginName) { try { admins.checkAdminUser(); var userPublishInfo = admins.getUserPublishInfo(provider, loginName); return ResponseEntity.ok(userPublishInfo); } catch (ErrorResultException exc) { return exc.toResponseEntity(UserPublishInfoJson.class); } }
// Admin publisher lookup must return the user's login name, active token count, and
// the extensions published with their tokens, serialized as the expected JSON.
@Test public void testGetUserPublishInfo() throws Exception { mockAdminUser(); var versions = mockExtension(1, 0, 0); var user = new UserData(); user.setLoginName("test"); user.setProvider("github"); var token = new PersonalAccessToken(); token.setUser(user); token.setActive(true); versions.forEach(v -> v.setPublishedWith(token)); Mockito.when(repositories.findUserByLoginName("github", "test")).thenReturn(user); Mockito.when(repositories.countActiveAccessTokens(user)).thenReturn(1L); Mockito.when(repositories.findLatestVersions(user)).thenReturn(versions); mockMvc.perform(get("/admin/publisher/{provider}/{loginName}", "github", "test") .with(user("admin_user").authorities(new SimpleGrantedAuthority(("ROLE_ADMIN")))) .with(csrf().asHeader())) .andExpect(status().isOk()) .andExpect(content().json(publishInfoJson(upi -> { upi.user = new UserJson(); upi.user.loginName = "test"; upi.activeAccessTokenNum = 1; var ext1 = new ExtensionJson(); ext1.namespace = "foobar"; ext1.name = "baz"; ext1.version = "1.0.0"; upi.extensions = Arrays.asList(ext1); }))); }
@Override public int size() { checkState(!destroyed, destroyedMessage); // TODO: Maintain a separate counter for tracking live elements in map. return Maps.filterValues(items, MapValue::isAlive).size(); }
// size() must count distinct live keys: overwrites don't grow it, removals shrink it,
// and removing an already-removed key is a no-op.
@Test public void testSize() throws Exception { expectPeerMessage(clusterCommunicator); assertEquals(0, ecMap.size()); ecMap.put(KEY1, VALUE1); assertEquals(1, ecMap.size()); ecMap.put(KEY1, VALUE2); assertEquals(1, ecMap.size()); ecMap.put(KEY2, VALUE2); assertEquals(2, ecMap.size()); for (int i = 0; i < 10; i++) { ecMap.put("" + i, "" + i); } assertEquals(12, ecMap.size()); ecMap.remove(KEY1); assertEquals(11, ecMap.size()); ecMap.remove(KEY1); assertEquals(11, ecMap.size()); }
// Enqueues an upload task for asynchronous persistence. Fails the task immediately if
// the store already saw an error. Otherwise blocks (under `lock`) until the throttle
// has capacity, seizes capacity for the task's size, marks the store unavailable when
// capacity is exhausted, and schedules the upload. InterruptedException re-interrupts
// the thread, fails the task, and is rethrown wrapped in IOException; any other
// failure fails the task and propagates.
@Override public void upload(UploadTask uploadTask) throws IOException { Throwable error = getErrorSafe(); if (error != null) { LOG.debug("don't persist {} changesets, already failed", uploadTask.changeSets.size()); uploadTask.fail(error); return; } LOG.debug("persist {} changeSets", uploadTask.changeSets.size()); try { long size = uploadTask.getSize(); synchronized (lock) { while (!uploadThrottle.hasCapacity()) { lock.wait(); } uploadThrottle.seizeCapacity(size); if (!uploadThrottle.hasCapacity()) { availabilityHelper.resetUnavailable(); } scheduledBytesCounter += size; scheduled.add(wrapWithSizeUpdate(uploadTask, size)); scheduleUploadIfNeeded(); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); uploadTask.fail(e); throw new IOException(e); } catch (Exception e) { uploadTask.fail(e); throw e; } }
// With zero scheduling delay and zero size threshold, every upload must be persisted
// immediately and cumulatively observable via the probe.
@Test void testNoDelayAndThreshold() throws Exception { withStore( 0, 0, MAX_BYTES_IN_FLIGHT, (store, probe) -> { List<StateChangeSet> changes1 = getChanges(4); upload(store, changes1); assertSaved(probe, changes1); List<StateChangeSet> changes2 = getChanges(4); upload(store, changes2); assertSaved(probe, changes1, changes2); }); }
// Builds the log4j Properties for the Elasticsearch process: root logger config with
// the cluster node name (when clustered), internal level ERROR, optional console/JSON
// output, and the ES-specific root level. The log pattern is derived from the root
// logger config.
public Properties createProperties(Props props, File logDir) { Log4JPropertiesBuilder log4JPropertiesBuilder = new Log4JPropertiesBuilder(props); RootLoggerConfig config = newRootLoggerConfigBuilder() .setNodeNameField(getNodeNameWhenCluster(props)) .setProcessId(ProcessId.ELASTICSEARCH) .build(); String logPattern = log4JPropertiesBuilder.buildLogPattern(config); return log4JPropertiesBuilder.internalLogLevel(Level.ERROR) .rootLoggerConfig(config) .logPattern(logPattern) .enableAllLogsToConsole(isAllLogsToConsoleEnabled(props)) .jsonOutput(isJsonOutput(props)) .logDir(logDir) .logLevelConfig( LogLevelConfig.newBuilder(log4JPropertiesBuilder.getRootLoggerName()) .rootLevelFor(ProcessId.ELASTICSEARCH) .build()) .build(); }
// When clustering is enabled, the node name must appear in the ES file appender's
// log pattern.
@Test public void createProperties_adds_nodename_if_cluster_property_is_set() throws IOException { File logDir = temporaryFolder.newFolder(); Properties properties = underTest.createProperties( newProps( "sonar.cluster.enabled", "true", "sonar.cluster.node.name", "my-node"), logDir); assertThat(properties.getProperty("appender.file_es.layout.pattern")).isEqualTo("%d{yyyy.MM.dd HH:mm:ss} %-5level my-node es[][%logger{1.}] %msg%n"); }
// Returns the fully merged parameter map for a step: the params manager merges
// defaults, workflow-level values, and the step definition's own params, using the
// step runtime resolved from the step's type.
public Map<String, Parameter> getAllParams( Step stepDefinition, WorkflowSummary workflowSummary, StepRuntimeSummary runtimeSummary) { return paramsManager.generateMergedStepParams( workflowSummary, stepDefinition, getStepRuntime(stepDefinition.getType()), runtimeSummary); }
// Merged params must contain both the step's own params and the configured defaults.
@Test public void testGetAllParams() { when(defaultParamManager.getDefaultStepParams()) .thenReturn( twoItemMap( "default1", ParamDefinition.buildParamDefinition("default1", "d1"), "default2", ParamDefinition.buildParamDefinition("default2", "d2"))); TypedStep testStep = new TypedStep(); testStep.setId("step1"); testStep.setParams( twoItemMap( "foo", ParamDefinition.buildParamDefinition("foo", "bar"), "test-param", ParamDefinition.buildParamDefinition("test-param", "hello"))); testStep.setType(StepType.NOOP); testStep.setId("step1"); Map<String, Parameter> params = runtimeManager.getAllParams(testStep, workflowSummary, runtimeSummary); assertTrue(params.size() >= 4); assertEquals("bar", params.get("foo").getValue()); assertEquals("hello", params.get("test-param").getValue()); assertEquals("d1", params.get("default1").getValue()); assertEquals("d2", params.get("default2").getValue()); }
/**
 * Serializes a non-negative {@link BigInteger} into a fixed-width big-endian byte
 * array, left-padded with zeros. The leading 0x00 byte that
 * {@link BigInteger#toByteArray()} may emit purely to carry the sign is stripped
 * before copying.
 *
 * @param b        value to convert; must be zero or positive
 * @param numBytes width of the result; must be positive and large enough for {@code b}
 * @return big-endian representation of {@code b}, exactly {@code numBytes} long
 * @throws IllegalArgumentException if {@code b} is negative, {@code numBytes} is not
 *                                  positive, or the magnitude does not fit
 */
public static byte[] bigIntegerToBytes(BigInteger b, int numBytes) {
    if (b.signum() < 0) {
        throw new IllegalArgumentException("b must be positive or zero: " + b);
    }
    if (numBytes <= 0) {
        throw new IllegalArgumentException("numBytes must be positive: " + numBytes);
    }
    byte[] raw = b.toByteArray();
    // toByteArray() prepends a single zero byte when the top magnitude bit is set.
    int start = raw[0] == 0 ? 1 : 0;
    int magnitudeLength = raw.length - start;
    if (magnitudeLength > numBytes) {
        throw new IllegalArgumentException("The given number does not fit in " + numBytes);
    }
    byte[] result = new byte[numBytes];
    System.arraycopy(raw, start, result, numBytes - magnitudeLength, magnitudeLength);
    return result;
}
// 128 needs a sign byte in two's complement, but its magnitude alone fits in one byte;
// conversion must succeed and yield 0x80.
@Test public void bigIntegerToBytes_singleByteSignDoesNotFit() { BigInteger b = BigInteger.valueOf(0b1000_0000);
    // 128 (2-compl does not fit in one byte)
    byte[] expected = new byte[]{-128};
    // -128 == 1000_0000 (compl-2)
    byte[] actual = ByteUtils.bigIntegerToBytes(b, 1); assertArrayEquals(expected, actual); }
// Signals the watcher loop to stop by setting the shutdown flag; the loop is expected
// to poll this flag (asynchronous stop — the method does not join the thread).
public void shutdown() { log.info("Stopping file watcher from watching for changes: " + file); shutdown = true; }
// shutdown() returns immediately; the watcher thread must die eventually afterwards.
@Test public void shouldShutdownAsync() throws Exception { watcher = new FileWatcher(filePath, callback); watcher.start();
    // When:
    watcher.shutdown();
    // Then:
    assertThatEventually(watcher::isAlive, is(false)); }
// Entry point for streamed query requests: verifies the server is configured, records
// request activity, parses the statement, waits for the command queue to catch up to
// the request's command sequence number, then delegates to handleStatement.
public EndpointResponse streamQuery( final KsqlSecurityContext securityContext, final KsqlRequest request, final CompletableFuture<Void> connectionClosedFuture, final Optional<Boolean> isInternalRequest, final MetricsCallbackHolder metricsCallbackHolder, final Context context ) { throwIfNotConfigured(); activenessRegistrar.updateLastRequestTime(); final PreparedStatement<?> statement = parseStatement(request); CommandStoreUtil.httpWaitForCommandSequenceNumber( commandQueue, request, commandQueueCatchupTimeout); return handleStatement(securityContext, request, statement, connectionClosedFuture, isInternalRequest, metricsCallbackHolder, context); }
// End-to-end streaming test: a producer thread feeds rows through a synchronous queue
// into a transient query; the endpoint's StreamingOutput is read back line by line and
// each JSON row must match the row written. Afterwards the Kafka Streams instance must
// be started, closed, and cleaned up, and any exception raised on a helper thread is
// rethrown to fail the test.
@Test public void shouldStreamRowsCorrectly() throws Throwable { final int NUM_ROWS = 5; final AtomicReference<Throwable> threadException = new AtomicReference<>(null); final Thread.UncaughtExceptionHandler threadExceptionHandler = (thread, exception) -> threadException.compareAndSet(null, exception); final String queryString = "SELECT * FROM test_stream;"; final SynchronousQueue<KeyValueMetadata<List<?>, GenericRow>> rowQueue = new SynchronousQueue<>(); final LinkedList<GenericRow> writtenRows = new LinkedList<>(); final Thread rowQueuePopulatorThread = new Thread(() -> { try { for (int i = 0; i != NUM_ROWS; i++) { final GenericRow value = genericRow(i); synchronized (writtenRows) { writtenRows.add(value); } rowQueue.put(new KeyValueMetadata<>(KeyValue.keyValue(null, value))); } } catch (final InterruptedException exception) {
        // This should happen during the test, so it's fine
    } }, "Row Queue Populator"); rowQueuePopulatorThread.setUncaughtExceptionHandler(threadExceptionHandler); rowQueuePopulatorThread.start(); final KafkaStreams mockKafkaStreams = mock(KafkaStreams.class); when(mockStatementParser.<Query>parseSingleStatement(queryString)) .thenReturn(query); final Map<String, Object> requestStreamsProperties = Collections.emptyMap(); final KafkaStreamsBuilder kafkaStreamsBuilder = mock(KafkaStreamsBuilder.class); when(kafkaStreamsBuilder.build(any(), any())).thenReturn(mockKafkaStreams); MutableBoolean closed = new MutableBoolean(false); when(mockKafkaStreams.close(any(java.time.Duration.class))).thenAnswer(i -> { closed.setValue(true); return true; }); when(mockKafkaStreams.state()).thenAnswer(i -> closed.getValue() ? State.NOT_RUNNING : State.RUNNING); final TransientQueryMetadata transientQueryMetadata = new TransientQueryMetadata( queryString, SOME_SCHEMA, Collections.emptySet(), "", new TestRowQueue(rowQueue), queryId, "appId", mock(Topology.class), kafkaStreamsBuilder, Collections.emptyMap(), Collections.emptyMap(), closeTimeout, 10, ResultType.STREAM, 0L, 0L, listener, loggerFactory ); transientQueryMetadata.initialize(); when(queryMetadataHolder.getPushQueryMetadata()) .thenReturn(Optional.of(transientQueryMetadata)); final EndpointResponse response = testResource.streamQuery( securityContext, new KsqlRequest(queryString, requestStreamsProperties, Collections.emptyMap(), null), new CompletableFuture<>(), Optional.empty(), new MetricsCallbackHolder(), context ); final PipedOutputStream responseOutputStream = new EOFPipedOutputStream(); final PipedInputStream responseInputStream = new PipedInputStream(responseOutputStream, 1); final StreamingOutput responseStream = (StreamingOutput) response.getEntity(); final Thread queryWriterThread = new Thread(() -> { try { responseStream.write(responseOutputStream); } catch (final EOFException exception) {
        // It's fine
    } catch (final IOException exception) { throw new RuntimeException(exception); } }, "Query Writer"); queryWriterThread.setUncaughtExceptionHandler(threadExceptionHandler); queryWriterThread.start(); final Scanner responseScanner = new Scanner(responseInputStream, "UTF-8"); final ObjectMapper objectMapper = ApiJsonMapper.INSTANCE.get(); for (int i = 0; i != NUM_ROWS; i++) { if (!responseScanner.hasNextLine()) { throw new Exception("Response input stream failed to have expected line available"); } final String responseLine = responseScanner.nextLine(); String jsonLine = StringUtils.stripStart(responseLine, "["); jsonLine = StringUtils.stripEnd(jsonLine, ","); jsonLine = StringUtils.stripEnd(jsonLine, "]"); if (jsonLine.isEmpty()) { i--; continue; } if (i == 0) {
        // Header:
        assertThat(jsonLine, is("{\"header\":{\"queryId\":\"queryId\",\"schema\":\"`f1` INTEGER\"}}")); continue; } final GenericRow expectedRow; synchronized (writtenRows) { expectedRow = writtenRows.poll(); } final DataRow testRow = objectMapper .readValue(jsonLine, StreamedRow.class) .getRow() .get(); assertThat(testRow.getColumns(), is(expectedRow.values())); } responseOutputStream.close(); queryWriterThread.join(); rowQueuePopulatorThread.interrupt(); rowQueuePopulatorThread.join();
    // Definitely want to make sure that the Kafka Streams instance has been closed and cleaned up
    verify(mockKafkaStreams).start();
    // called on init and when setting uncaught exception handler manually
    verify(mockKafkaStreams, times(2)).setUncaughtExceptionHandler(any(StreamsUncaughtExceptionHandler.class)); verify(mockKafkaStreams).cleanUp(); verify(mockKafkaStreams).close(Duration.ofMillis(closeTimeout));
    // If one of the other threads has somehow managed to throw an exception without breaking things up until this
    // point, we throw that exception now in the main thread and cause the test to fail
    final Throwable exception = threadException.get(); if (exception != null) { throw exception; } }
// Looks up the binary-protocol value codec registered for the given column type.
// Throws IllegalArgumentException (via Preconditions) for unregistered types rather
// than returning null.
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) { Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType); return BINARY_PROTOCOL_VALUES.get(binaryColumnType); }
// The TIME column type must map to the TIME binary protocol value implementation.
@Test void assertGetBinaryProtocolValueWithMySQLTypeTime() { assertThat(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.TIME), instanceOf(MySQLTimeBinaryProtocolValue.class)); }
// Rebuilds the rules that expose a MutableDataNodeRuleAttribute: removes all such
// rules, asks the first one's attribute to reload against the current storage-unit
// data sources and remaining rules, then swaps the reloaded set into the rule
// metadata in place. Synchronized because the rule list is mutated non-atomically.
public synchronized void reloadRules() { Collection<ShardingSphereRule> toBeReloadedRules = ruleMetaData.getRules().stream() .filter(each -> each.getAttributes().findAttribute(MutableDataNodeRuleAttribute.class).isPresent()).collect(Collectors.toList()); RuleConfiguration ruleConfig = toBeReloadedRules.stream().map(ShardingSphereRule::getConfiguration).findFirst().orElse(null); Collection<ShardingSphereRule> rules = new LinkedList<>(ruleMetaData.getRules()); toBeReloadedRules.stream().findFirst().ifPresent(optional -> { rules.removeAll(toBeReloadedRules); Map<String, DataSource> dataSources = resourceMetaData.getStorageUnits().entrySet().stream() .collect(Collectors.toMap(Entry::getKey, entry -> entry.getValue().getDataSource(), (oldValue, currentValue) -> oldValue, LinkedHashMap::new)); rules.add(optional.getAttributes().getAttribute(MutableDataNodeRuleAttribute.class).reloadRule(ruleConfig, name, dataSources, rules)); }); ruleMetaData.getRules().clear(); ruleMetaData.getRules().addAll(rules); }
// Reloading with one mutable-data-node rule and one plain rule must keep the total
// rule count at two (the mutable rule is replaced, not duplicated or dropped).
@Test void assertReloadRules() { Collection<ShardingSphereRule> rules = new LinkedList<>(); ShardingSphereRule rule0 = mock(ShardingSphereRule.class); when(rule0.getConfiguration()).thenReturn(mock(RuleConfiguration.class)); when(rule0.getAttributes()).thenReturn(new RuleAttributes(mock(MutableDataNodeRuleAttribute.class))); rules.add(rule0); ShardingSphereRule rule1 = mock(ShardingSphereRule.class); when(rule1.getConfiguration()).thenReturn(mock(RuleConfiguration.class)); when(rule1.getAttributes()).thenReturn(new RuleAttributes()); rules.add(rule1); RuleMetaData ruleMetaData = new RuleMetaData(rules); ResourceMetaData resourceMetaData = new ResourceMetaData(Collections.singletonMap("ds", new MockedDataSource())); ShardingSphereDatabase database = new ShardingSphereDatabase("foo_db", mock(DatabaseType.class), resourceMetaData, ruleMetaData, Collections.emptyMap()); database.reloadRules(); assertThat(database.getRuleMetaData().getRules().size(), is(2)); }
// SQL <> operator for CHAR(x) values; the engine resolves it via the annotations.
// CHAR comparison semantics (e.g. trailing-space padding) are handled before this
// point — here the stored Slices are compared bytewise via equals.
@LiteralParameters("x") @ScalarOperator(NOT_EQUAL) @SqlType(StandardTypes.BOOLEAN) @SqlNullable public static Boolean notEqual(@SqlType("char(x)") Slice left, @SqlType("char(x)") Slice right) { return !left.equals(right); }
// CHAR inequality must ignore differences that are only trailing padding (different
// declared lengths of the same content compare equal), while differing content
// compares unequal in both directions.
@Test public void testNotEqual() { assertFunction("cast('foo' as char(3)) <> cast('foo' as char(5))", BOOLEAN, false); assertFunction("cast('foo' as char(3)) <> cast('foo' as char(3))", BOOLEAN, false); assertFunction("cast('foo' as char(3)) <> cast('bar' as char(3))", BOOLEAN, true); assertFunction("cast('bar' as char(3)) <> cast('foo' as char(3))", BOOLEAN, true); assertFunction("cast('bar' as char(5)) <> 'bar'", BOOLEAN, false); assertFunction("cast('bar' as char(5)) <> 'bar '", BOOLEAN, false); assertFunction("cast('a' as char(2)) <> cast('a ' as char(2))", BOOLEAN, false); assertFunction("cast('a ' as char(2)) <> cast('a' as char(2))", BOOLEAN, false); assertFunction("cast('a' as char(3)) <> cast('a' as char(2))", BOOLEAN, false); assertFunction("cast('' as char(3)) <> cast('' as char(2))", BOOLEAN, false); assertFunction("cast('' as char(2)) <> cast('' as char(2))", BOOLEAN, false); }
// Atomically increments and returns the previous value, masked to keep the result
// non-negative — presumably MASK clears the sign bit so the counter wraps into the
// positive range on overflow (TODO confirm MASK's value in the class).
public final int getAndIncrement() { return atom.getAndIncrement() & MASK; }
// A fresh counter must return 0 from its first getAndIncrement (post-increment style).
@Test public void testGetAndIncrement() { PositiveAtomicCounter counter = new PositiveAtomicCounter(); assertThat(counter.getAndIncrement()).isEqualTo(0); }
/**
 * Converts a Beam {@code Schema} into an equivalent Avro record schema.
 *
 * @param beamSchema the Beam schema to convert
 * @param name       record name; {@code null}/empty falls back to "topLevelRecord"
 * @param namespace  record namespace; {@code null} is treated as the empty namespace
 * @return an Avro record schema whose fields mirror the Beam schema's fields
 */
public static org.apache.avro.Schema toAvroSchema(
    Schema beamSchema, @Nullable String name, @Nullable String namespace) {
  final String schemaName = Strings.isNullOrEmpty(name) ? "topLevelRecord" : name;
  final String schemaNamespace = namespace == null ? "" : namespace;
  // Nested records are namespaced under "<namespace>.<name>" so that equally named
  // field records from different parents cannot collide.
  final String childNamespace =
      schemaNamespace.isEmpty() ? schemaName : schemaNamespace + "." + schemaName;
  final List<org.apache.avro.Schema.Field> avroFields = Lists.newArrayList();
  for (Field beamField : beamSchema.getFields()) {
    avroFields.add(toAvroField(beamField, childNamespace));
  }
  return org.apache.avro.Schema.createRecord(schemaName, null, schemaNamespace, false, avroFields);
}
// Round-trip fixture check: converting the reference Beam schema must produce the
// reference Avro schema exactly.
@Test public void testFromBeamSchema() { Schema beamSchema = getBeamSchema(); org.apache.avro.Schema avroSchema = AvroUtils.toAvroSchema(beamSchema); assertEquals(getAvroSchema(), avroSchema); }
// Serializes a metadata update to JSON in compact (non-pretty) form; delegates to the
// two-argument overload with pretty-printing disabled.
public static String toJson(MetadataUpdate metadataUpdate) { return toJson(metadataUpdate, false); }
// An AssignUUID update must serialize to the documented {"action":"assign-uuid",...}
// JSON shape.
@Test public void testAssignUUIDFromJson() { String uuid = "9510c070-5e6d-4b40-bf40-a8915bb76e5d"; String expected = "{\"action\":\"assign-uuid\",\"uuid\":\"9510c070-5e6d-4b40-bf40-a8915bb76e5d\"}"; MetadataUpdate actual = new MetadataUpdate.AssignUUID(uuid); assertThat(MetadataUpdateParser.toJson(actual)) .as("Assign UUID should convert to the correct JSON value") .isEqualTo(expected); }
// Builds the Derby paged query for config rows joined to their tags. The WHERE clause
// always filters on tenant_id and the tag IN-list; data_id / group_id / app_name /
// content clauses are appended only when the corresponding context parameter is
// non-blank. Placeholders are collected into paramList in the same order they appear
// in the SQL. Paging uses Derby's OFFSET ... FETCH NEXT syntax with the context's
// start row and page size.
@Override public MapperResult findConfigInfo4PageFetchRows(MapperContext context) { final String appName = (String) context.getWhereParameter(FieldConstant.APP_NAME); final String dataId = (String) context.getWhereParameter(FieldConstant.DATA_ID); final String group = (String) context.getWhereParameter(FieldConstant.GROUP_ID); final String content = (String) context.getWhereParameter(FieldConstant.CONTENT); final String tenantId = (String) context.getWhereParameter(FieldConstant.TENANT_ID); final String[] tagArr = (String[]) context.getWhereParameter(FieldConstant.TAG_ARR); List<Object> paramList = new ArrayList<>(); StringBuilder where = new StringBuilder(" WHERE "); final String baseSql = "SELECT a.id,a.data_id,a.group_id,a.tenant_id,a.app_name,a.content FROM config_info a LEFT JOIN " + "config_tags_relation b ON a.id=b.id"; where.append(" a.tenant_id=? "); paramList.add(tenantId); if (StringUtils.isNotBlank(dataId)) { where.append(" AND a.data_id=? "); paramList.add(dataId); } if (StringUtils.isNotBlank(group)) { where.append(" AND a.group_id=? "); paramList.add(group); } if (StringUtils.isNotBlank(appName)) { where.append(" AND a.app_name=? "); paramList.add(appName); } if (!StringUtils.isBlank(content)) { where.append(" AND a.content LIKE ? "); paramList.add(content); } where.append(" AND b.tag_name IN ("); for (int i = 0; i < tagArr.length; i++) { if (i != 0) { where.append(", "); } where.append('?'); paramList.add(tagArr[i]); } where.append(") "); String sql = baseSql + where + " OFFSET " + context.getStartRow() + " ROWS FETCH NEXT " + context.getPageSize() + " ROWS ONLY"; return new MapperResult(sql, paramList); }
// With only tenant and tags set, the generated SQL must contain just those clauses,
// and the parameter list must be tenantId followed by the tag values in order.
@Test void testFindConfigInfo4PageFetchRows() { MapperResult mapperResult = configInfoTagsRelationMapperByDerby.findConfigInfo4PageFetchRows(context); assertEquals(mapperResult.getSql(), "SELECT a.id,a.data_id,a.group_id,a.tenant_id,a.app_name,a.content FROM config_info a LEFT JOIN " + "config_tags_relation b ON a.id=b.id WHERE a.tenant_id=? AND b.tag_name IN (?, ?, ?, ?, ?) " + "OFFSET 0 ROWS FETCH NEXT 5 ROWS ONLY"); List<Object> list = CollectionUtils.list(tenantId); list.addAll(Arrays.asList(tagArr)); assertArrayEquals(mapperResult.getParamList().toArray(), list.toArray()); }
// Returns the driver's serial ports.
// NOTE(review): exposes the internal list directly — callers could mutate it; consider
// an unmodifiable view if external mutation is not intended (confirm callers first).
@Override public List<UsbSerialPort> getPorts() { return mPorts; }
// Simulates a standard CDC/ACM device (no IAD, control + data interfaces built from
// real digispark descriptors) entirely with mocks: opening the port must wire the
// bulk read/write endpoints correctly, and the default probe table must select the
// CdcAcmSerialDriver for this device.
@Test public void standardDevice() throws Exception { UsbDeviceConnection usbDeviceConnection = mock(UsbDeviceConnection.class); UsbDevice usbDevice = mock(UsbDevice.class); UsbInterface controlInterface = mock(UsbInterface.class); UsbInterface dataInterface = mock(UsbInterface.class); UsbEndpoint controlEndpoint = mock(UsbEndpoint.class); UsbEndpoint readEndpoint = mock(UsbEndpoint.class); UsbEndpoint writeEndpoint = mock(UsbEndpoint.class); /* * digispark - no IAD * UsbInterface[mId=0,mAlternateSetting=0,mName=null,mClass=2,mSubclass=2,mProtocol=1,mEndpoints=[ * UsbEndpoint[mAddress=131,mAttributes=3,mMaxPacketSize=8,mInterval=255]] * UsbInterface[mId=1,mAlternateSetting=0,mName=null,mClass=10,mSubclass=0,mProtocol=0,mEndpoints=[ * UsbEndpoint[mAddress=1,mAttributes=2,mMaxPacketSize=8,mInterval=0] * UsbEndpoint[mAddress=129,mAttributes=2,mMaxPacketSize=8,mInterval=0]] */ when(usbDeviceConnection.getRawDescriptors()).thenReturn(HexDump.hexStringToByteArray( "12 01 10 01 02 00 00 08 D0 16 7E 08 00 01 01 02 00 01\n" + "09 02 43 00 02 01 00 80 32\n" + "09 04 00 00 01 02 02 01 00\n" + "05 24 00 10 01\n" + "04 24 02 02\n" + "05 24 06 00 01\n" + "05 24 01 03 01\n" + "07 05 83 03 08 00 FF\n" + "09 04 01 00 02 0A 00 00 00\n" + "07 05 01 02 08 00 00\n" + "07 05 81 02 08 00 00")); when(usbDeviceConnection.claimInterface(controlInterface,true)).thenReturn(true); when(usbDeviceConnection.claimInterface(dataInterface,true)).thenReturn(true); when(usbDevice.getInterfaceCount()).thenReturn(2); when(usbDevice.getInterface(0)).thenReturn(controlInterface); when(usbDevice.getInterface(1)).thenReturn(dataInterface); when(controlInterface.getId()).thenReturn(0); when(controlInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_COMM); when(controlInterface.getInterfaceSubclass()).thenReturn(USB_SUBCLASS_ACM); when(controlInterface.getEndpointCount()).thenReturn(1); when(controlInterface.getEndpoint(0)).thenReturn(controlEndpoint); when(dataInterface.getId()).thenReturn(1); when(dataInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_CDC_DATA); when(dataInterface.getEndpointCount()).thenReturn(2); when(dataInterface.getEndpoint(0)).thenReturn(writeEndpoint); when(dataInterface.getEndpoint(1)).thenReturn(readEndpoint); when(controlEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_IN); when(controlEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_INT); when(readEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_IN); when(readEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_BULK); when(writeEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_OUT); when(writeEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_BULK); CdcAcmSerialDriver driver = new CdcAcmSerialDriver(usbDevice); CdcAcmSerialDriver.CdcAcmSerialPort port = (CdcAcmSerialDriver.CdcAcmSerialPort) driver.getPorts().get(0); port.mConnection = usbDeviceConnection; port.openInt(); assertEquals(readEndpoint, port.mReadEndpoint); assertEquals(writeEndpoint, port.mWriteEndpoint); ProbeTable probeTable = UsbSerialProber.getDefaultProbeTable(); Class<? extends UsbSerialDriver> probeDriver = probeTable.findDriver(usbDevice); assertEquals(driver.getClass(), probeDriver); }
// Pages notification templates by delegating directly to the mapper's filtered page
// query (filters carried in the request VO).
@Override public PageResult<NotifyTemplateDO> getNotifyTemplatePage(NotifyTemplatePageReqVO pageReqVO) { return notifyTemplateMapper.selectPage(pageReqVO); }
// Paging must match on name (like), code (like), status, and createTime range; four
// near-miss rows (each differing in exactly one field) must be filtered out.
@Test public void testGetNotifyTemplatePage() {
    // mock data
    NotifyTemplateDO dbNotifyTemplate = randomPojo(NotifyTemplateDO.class, o -> {
        // this row will be matched by the query below
        o.setName("芋头"); o.setCode("test_01"); o.setStatus(CommonStatusEnum.ENABLE.getStatus()); o.setCreateTime(buildTime(2022, 2, 3)); }); notifyTemplateMapper.insert(dbNotifyTemplate);
    // row whose name does not match
    notifyTemplateMapper.insert(cloneIgnoreId(dbNotifyTemplate, o -> o.setName("投")));
    // row whose code does not match
    notifyTemplateMapper.insert(cloneIgnoreId(dbNotifyTemplate, o -> o.setCode("test_02")));
    // row whose status does not match
    notifyTemplateMapper.insert(cloneIgnoreId(dbNotifyTemplate, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
    // row whose createTime does not match
    notifyTemplateMapper.insert(cloneIgnoreId(dbNotifyTemplate, o -> o.setCreateTime(buildTime(2022, 1, 5))));
    // prepare the query parameters
    NotifyTemplatePageReqVO reqVO = new NotifyTemplatePageReqVO(); reqVO.setName("芋"); reqVO.setCode("est_01"); reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus()); reqVO.setCreateTime(buildBetweenTime(2022, 2, 1, 2022, 2, 5));
    // invoke
    PageResult<NotifyTemplateDO> pageResult = notifyTemplateService.getNotifyTemplatePage(reqVO);
    // assert: only the matching row is returned
    assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbNotifyTemplate, pageResult.getList().get(0)); }
@Override
public void validate() throws TelegramApiValidationException {
    // Fail fast on a missing query id, then validate every result and the optional button.
    if (inlineQueryId.isEmpty()) {
        throw new TelegramApiValidationException("InlineQueryId can't be empty", this);
    }
    for (InlineQueryResult result : results) {
        result.validate();
    }
    if (button != null) {
        button.validate();
    }
}
@Test
void testSwitchPmParameterOnlyContainsAcceptedCharacters() {
    // An invalid start parameter ("*") must be rejected by validate().
    answerInlineQuery.setInlineQueryId("RANDOMEID");
    answerInlineQuery.setResults(new ArrayList<>());
    answerInlineQuery.setButton(InlineQueryResultsButton
            .builder()
            .text("Test Text")
            .startParameter("*")
            .build());
    // Bug fix: the original try/catch passed silently when validate() did NOT throw.
    // Track whether the expected exception was actually raised.
    boolean validationFailed = false;
    try {
        answerInlineQuery.validate();
    } catch (TelegramApiValidationException e) {
        validationFailed = true;
        assertEquals("SwitchPmParameter only allows A-Z, a-z, 0-9, _ and - characters", e.getMessage());
    }
    assertEquals(true, validationFailed);
}
/**
 * Computes the initial (forward) great-circle bearing from this point to {@code other}.
 *
 * @param other the destination point
 * @return bearing in degrees, normalized into [0, 360)
 */
public double bearingTo(final IGeoPoint other) {
    final double fromLat = Math.toRadians(this.mLatitude);
    final double fromLon = Math.toRadians(this.mLongitude);
    final double toLat = Math.toRadians(other.getLatitude());
    final double toLon = Math.toRadians(other.getLongitude());
    final double lonDelta = toLon - fromLon;
    // Standard forward-azimuth formula: atan2(y, x).
    final double y = Math.sin(lonDelta) * Math.cos(toLat);
    final double x = Math.cos(fromLat) * Math.sin(toLat)
            - Math.sin(fromLat) * Math.cos(toLat) * Math.cos(lonDelta);
    // atan2 yields (-180, 180]; shift into [0, 360).
    return (Math.toDegrees(Math.atan2(y, x)) + 360) % 360;
}
@Test
public void test_bearingTo_north_west() {
    // NOTE(review): (-10, -10) lies to the SOUTH-west of the origin; the expected value
    // 225° (180 + 45) matches south-west, but the test name and assertion message say
    // "north west" — confirm the intended direction.
    final GeoPoint target = new GeoPoint(0.0, 0.0);
    final GeoPoint other = new GeoPoint(-10.0, -10.0);
    assertEquals("north west", 180 + 45, Math.round(target.bearingTo(other)));
}
static ClientSettings createApiConnectionSettings(Configuration conf) {
    // Read the request timeout, falling back to the default; the floor of Duration.ZERO
    // means "unset/disabled" is represented as zero rather than a negative value.
    Duration apiCallTimeout = getDuration(conf, REQUEST_TIMEOUT, DEFAULT_REQUEST_TIMEOUT_DURATION,
        TimeUnit.MILLISECONDS, Duration.ZERO);
    // if the API call timeout is set, it must be at least the minimum duration
    if (apiCallTimeout.compareTo(Duration.ZERO) > 0) {
        apiCallTimeout = enforceMinimumDuration(REQUEST_TIMEOUT, apiCallTimeout, minimumOperationDuration);
    }
    return new ClientSettings(apiCallTimeout);
}
@Test
public void testCreateApiConnectionSettings() {
    // A configured "1h" timeout is well above any minimum, so it must survive unchanged.
    final Configuration conf = conf();
    conf.set(REQUEST_TIMEOUT, "1h");
    final AWSClientConfig.ClientSettings settings = createApiConnectionSettings(conf);
    Assertions.assertThat(settings.getApiCallTimeout())
        .describedAs("%s in %s", REQUEST_TIMEOUT, settings)
        .isEqualTo(Duration.ofHours(1));
}
@Override
public ProjectRepositories load(String projectKey, @Nullable String branchBase) {
    // Fetch the project repository over the WS client. A parsing (IO) failure is fatal;
    // other runtime failures degrade to an empty repository unless shouldThrow() says otherwise.
    GetRequest request = new GetRequest(getUrl(projectKey, branchBase));
    try (WsResponse response = wsClient.call(request)) {
        try (InputStream is = response.contentStream()) {
            return processStream(is);
        } catch (IOException e) {
            throw new IllegalStateException("Couldn't load project repository for " + projectKey, e);
        }
    } catch (RuntimeException e) {
        if (shouldThrow(e)) {
            throw e;
        }
        // Best-effort fallback: analysis can continue without the repository.
        LOG.debug("Project repository not available - continuing without it");
        return new SingleProjectRepository();
    }
}
@Test(expected = IllegalStateException.class)
public void parsingError() throws IOException {
    // An IOException while reading the response stream must surface as IllegalStateException.
    InputStream is = mock(InputStream.class);
    when(is.read(any(byte[].class), anyInt(), anyInt())).thenThrow(IOException.class);
    WsTestUtil.mockStream(wsClient, "/batch/project.protobuf?key=foo%3F", is);
    loader.load(PROJECT_KEY, null);
}
@Override
public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) {
    // Convenience overload: a null cursor means iteration starts from the beginning.
    return getSqlRecordIteratorBatch(value, descending, null);
}
@Test(expected = IllegalArgumentException.class)
public void getSqlRecordIteratorBatchCursorLeftExcludedRightExcluded() {
    // Per the test name: a cursor combined with both range ends excluded must be rejected.
    store.getSqlRecordIteratorBatch(0, false, 1, false, true, buildCursor(0));
}
@Override
public VersionedRecord<V> get(final K key) {
    // Adapt the underlying ValueAndTimestamp entry into the versioned-record type;
    // an absent key maps to null.
    final ValueAndTimestamp<V> stored = internal.get(key);
    if (stored == null) {
        return null;
    }
    return new VersionedRecord<>(stored.value(), stored.timestamp());
}
@Test
public void shouldThrowNullPointerOnGetWithTimestampIfKeyIsNull() {
    // A null key must be rejected with NullPointerException.
    assertThrows(NullPointerException.class, () -> store.get(null, TIMESTAMP));
}
@Override
public void delete(K key) {
    // Thin adapter: delegates removal to the wrapped map.
    map.delete(key);
}
@Test
public void testDelete() {
    // Deleting through the adapter must remove the entry from the underlying map.
    map.put(23, "value-23");
    assertTrue(map.containsKey(23));
    adapter.delete(23);
    assertFalse(map.containsKey(23));
}
@Override
public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
    // Issues one DescribeLogDirs request per broker; each broker gets its own future keyed by id.
    final Map<Integer, KafkaFutureImpl<Map<String, LogDirDescription>>> futures = new HashMap<>(brokers.size());
    final long now = time.milliseconds();
    for (final Integer brokerId : brokers) {
        KafkaFutureImpl<Map<String, LogDirDescription>> future = new KafkaFutureImpl<>();
        futures.put(brokerId, future);
        runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()),
            new ConstantNodeIdProvider(brokerId)) {
            @Override
            public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null));
            }
            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                Map<String, LogDirDescription> descriptions = logDirDescriptions(response);
                if (!descriptions.isEmpty()) {
                    future.complete(descriptions);
                } else {
                    // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None
                    Errors error = response.data().errorCode() == Errors.NONE.code()
                        ? Errors.CLUSTER_AUTHORIZATION_FAILED
                        : Errors.forCode(response.data().errorCode());
                    future.completeExceptionally(error.exception());
                }
            }
            @Override
            void handleFailure(Throwable throwable) {
                future.completeExceptionally(throwable);
            }
        }, now);
    }
    // Defensive copy so later mutation of the local map cannot affect the returned result.
    return new DescribeLogDirsResult(new HashMap<>(futures));
}
@Test
public void testDescribeLogDirsWithVolumeBytes() throws ExecutionException, InterruptedException {
    // Covers: successful response with volume stats, empty response (< v3, mapped to
    // ClusterAuthorizationException), and an explicit error code (>= v3).
    Set<Integer> brokers = singleton(0);
    String logDir = "/var/data/kafka";
    TopicPartition tp = new TopicPartition("topic", 12);
    long partitionSize = 1234567890;
    long offsetLag = 24;
    long totalBytes = 123L;
    long usableBytes = 456L;

    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponseFrom(
            prepareDescribeLogDirsResponse(Errors.NONE, logDir, tp, partitionSize, offsetLag, totalBytes, usableBytes),
            env.cluster().nodeById(0));

        DescribeLogDirsResult result = env.adminClient().describeLogDirs(brokers);

        Map<Integer, KafkaFuture<Map<String, LogDirDescription>>> descriptions = result.descriptions();
        assertEquals(brokers, descriptions.keySet());
        assertNotNull(descriptions.get(0));
        assertDescriptionContains(descriptions.get(0).get(), logDir, tp, partitionSize, offsetLag,
            OptionalLong.of(totalBytes), OptionalLong.of(usableBytes));

        Map<Integer, Map<String, LogDirDescription>> allDescriptions = result.allDescriptions().get();
        assertEquals(brokers, allDescriptions.keySet());
        assertDescriptionContains(allDescriptions.get(0), logDir, tp, partitionSize, offsetLag,
            OptionalLong.of(totalBytes), OptionalLong.of(usableBytes));

        // Empty results when not authorized with version < 3
        env.kafkaClient().prepareResponseFrom(
            prepareEmptyDescribeLogDirsResponse(Optional.empty()),
            env.cluster().nodeById(0));
        final DescribeLogDirsResult errorResult = env.adminClient().describeLogDirs(brokers);
        ExecutionException exception = assertThrows(ExecutionException.class, () -> errorResult.allDescriptions().get());
        assertInstanceOf(ClusterAuthorizationException.class, exception.getCause());

        // Empty results with an error with version >= 3
        env.kafkaClient().prepareResponseFrom(
            prepareEmptyDescribeLogDirsResponse(Optional.of(Errors.UNKNOWN_SERVER_ERROR)),
            env.cluster().nodeById(0));
        final DescribeLogDirsResult errorResult2 = env.adminClient().describeLogDirs(brokers);
        exception = assertThrows(ExecutionException.class, () -> errorResult2.allDescriptions().get());
        assertInstanceOf(UnknownServerException.class, exception.getCause());
    }
}
@Override
public String create(UserDto user) {
    // Derives the identifier by hashing the user's email; both the user and a
    // non-empty email are mandatory (empty email is normalized to null and rejected).
    UserDto userDto = requireNonNull(user, "User cannot be null");
    return hash(requireNonNull(emptyToNull(userDto.getEmail()), "Email cannot be null"));
}
@Test
public void fail_when_email_is_empty() {
    // An empty email is treated like a missing one and must be rejected.
    assertThatThrownBy(() -> underTest.create(UserTesting.newUserDto("john", "John", "")))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("Email cannot be null");
}
public static <InputT, OutputT> MapElements<InputT, OutputT> via(
    final InferableFunction<InputT, OutputT> fn) {
    // Input/output type descriptors are inferred from the function itself.
    return new MapElements<>(fn, fn.getInputTypeDescriptor(), fn.getOutputTypeDescriptor());
}
@Test
@Category(NeedsRunner.class)
public void testInferableFunctionOutputTypeDescriptor() throws Exception {
    // The output PCollection's type descriptor (and its coder) must match the
    // InferableFunction's inferred output type.
    PCollection<String> output =
        pipeline
            .apply(Create.of("hello"))
            .apply(
                MapElements.via(
                    new InferableFunction<String, String>() {
                        @Override
                        public String apply(String input) throws Exception {
                            return input;
                        }
                    }));
    assertThat(
        output.getTypeDescriptor(),
        equalTo((TypeDescriptor<String>) new TypeDescriptor<String>() {}));
    assertThat(
        pipeline.getCoderRegistry().getCoder(output.getTypeDescriptor()),
        equalTo(pipeline.getCoderRegistry().getCoder(new TypeDescriptor<String>() {})));

    // Make sure the pipeline runs too
    pipeline.run();
}
public static GenericRecord convertToAvro(Schema schema, Message message) {
    // Thin delegate to AvroSupport for protobuf -> Avro conversion.
    return AvroSupport.convert(schema, message);
}
@Test
public void noFieldsSet_wellKnownTypesAndTimestampsAsRecords() throws IOException {
    // A default (all fields unset) proto message must convert to the schema's default
    // Avro output, surviving an Avro serialize/deserialize round trip.
    Sample sample = Sample.newBuilder().build();
    Schema.Parser parser = new Schema.Parser();
    Schema convertedSchema = parser.parse(getClass().getClassLoader()
        .getResourceAsStream("schema-provider/proto/sample_schema_wrapped_and_timestamp_as_record.avsc"));
    GenericRecord actual = serializeAndDeserializeAvro(
        ProtoConversionUtil.convertToAvro(convertedSchema, sample), convertedSchema);
    Assertions.assertEquals(createDefaultOutput(convertedSchema), actual);
}
/**
 * Resolves the default schema name: the dialect's built-in default schema wins;
 * otherwise the lower-cased database name is used (null database -> null).
 */
public String getDefaultSchemaName(final String databaseName) {
    return dialectDatabaseMetaData.getDefaultSchema().orElseGet(() -> {
        if (databaseName == null) {
            return null;
        }
        return databaseName.toLowerCase();
    });
}
@Test
void assertGetDefaultSchemaNameWhenDatabaseTypeContainsDefaultSchema() {
    // When the dialect declares a default schema ("test"), the database name is ignored.
    assertThat(new DatabaseTypeRegistry(TypedSPILoader.getService(DatabaseType.class, "TRUNK")).getDefaultSchemaName("FOO"), is("test"));
}
@Override
public int getLength() {
    // Fixed serialized length, cached in `precomputed`.
    return precomputed.length;
}
@Test
void testSingleFieldSerializer() {
    // Runs the shared test suite against each field serializer in isolation
    // (a one-element serializer array with its matching test instances).
    TEST_FIELD_SERIALIZERS.forEach(
        t -> {
            @SuppressWarnings("unchecked")
            TypeSerializer<Object>[] fieldSerializers = new TypeSerializer[] {t.f0};
            List<Object>[] instances =
                Arrays.stream(t.f1)
                    .map(Arrays::asList)
                    .toArray((IntFunction<List<Object>[]>) List[]::new);
            runTests(t.f0.getLength(), fieldSerializers, instances);
        });
}
@Override
public boolean equals(Object o) {
    // Exact-class comparison (not instanceof): only another WatermarkStatus with the
    // same status value is equal.
    if (this == o) {
        return true;
    }
    if (o == null || o.getClass() != WatermarkStatus.class) {
        return false;
    }
    return ((WatermarkStatus) o).status == this.status;
}
@Test
void testEquals() {
    // Statuses constructed from the raw constants must equal the canonical singletons
    // and report the matching idle/active flags.
    WatermarkStatus idleStatus = new WatermarkStatus(WatermarkStatus.IDLE_STATUS);
    WatermarkStatus activeStatus = new WatermarkStatus(WatermarkStatus.ACTIVE_STATUS);

    assertThat(idleStatus).isEqualTo(WatermarkStatus.IDLE);
    assertThat(idleStatus.isIdle()).isTrue();
    assertThat(idleStatus.isActive()).isFalse();

    assertThat(activeStatus).isEqualTo(WatermarkStatus.ACTIVE);
    assertThat(activeStatus.isActive()).isTrue();
    assertThat(activeStatus.isIdle()).isFalse();
}
@Override
public boolean skip(final ServerWebExchange exchange) {
    // Only SOFA-typed requests are handled by this plugin; everything else is skipped.
    return skipExcept(exchange, RpcTypeEnum.SOFA);
}
@Test
public void testSkip() {
    // A request whose context is SOFA-typed must NOT be skipped.
    final ServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("localhost").build());
    ShenyuContext context = mock(ShenyuContext.class);
    when(context.getRpcType()).thenReturn(RpcTypeEnum.SOFA.getName());
    exchange.getAttributes().put(Constants.CONTEXT, context);
    exchange.getAttributes().put(Constants.META_DATA, metaData);
    final boolean result = sofaPlugin.skip(exchange);
    assertFalse(result);
}
public static FuryBuilder builder() {
    // Entry point of the fluent configuration API; each call returns a fresh builder.
    return new FuryBuilder();
}
@Test(dataProvider = "languageConfig")
public void testSerializationToBuffer(Language language) {
    // Round-trip between two independently built Fury instances through a shared buffer,
    // for every supported language mode.
    Fury fury1 = Fury.builder().withLanguage(language).requireClassRegistration(false).build();
    Fury fury2 = Fury.builder().withLanguage(language).requireClassRegistration(false).build();
    MemoryBuffer buffer = MemoryUtils.buffer(64);
    assertSerializationToBuffer(fury1, fury2, buffer);
}
/**
 * Creates a Hive table (managed or external), resolving the table location, building the
 * table metadata and registering it in the metastore. Best-effort cleanup is attempted on
 * failure.
 */
public boolean createTable(CreateTableStmt stmt, List<Column> partitionColumns) throws DdlException {
    String dbName = stmt.getDbName();
    String tableName = stmt.getTableName();
    Map<String, String> properties = stmt.getProperties() != null ? stmt.getProperties() : new HashMap<>();
    Path tablePath = null;
    boolean tableLocationExists = false;
    if (!stmt.isExternal()) {
        checkLocationProperties(properties);
        if (!Strings.isNullOrEmpty(properties.get(LOCATION_PROPERTY))) {
            // User-assigned location: must be empty if it already exists.
            String tableLocationWithUserAssign = properties.get(LOCATION_PROPERTY);
            tablePath = new Path(tableLocationWithUserAssign);
            if (pathExists(tablePath, hadoopConf)) {
                tableLocationExists = true;
                if (!isEmpty(tablePath, hadoopConf)) {
                    throw new StarRocksConnectorException("not support creating table under non-empty directory: %s",
                            tableLocationWithUserAssign);
                }
            }
        } else {
            tablePath = getDefaultLocation(dbName, tableName);
        }
    } else {
        // checkExternalLocationProperties(properties);
        // NOTE(review): if neither EXTERNAL_LOCATION_PROPERTY nor LOCATION_PROPERTY is set,
        // tablePath stays null while tableLocationExists is true; the failure-cleanup path
        // below calls tablePath.toString() and would NPE — confirm this combination is
        // rejected earlier.
        if (properties.containsKey(EXTERNAL_LOCATION_PROPERTY)) {
            tablePath = new Path(properties.get(EXTERNAL_LOCATION_PROPERTY));
        } else if (properties.containsKey(LOCATION_PROPERTY)) {
            tablePath = new Path(properties.get(LOCATION_PROPERTY));
        }
        tableLocationExists = true;
    }
    HiveStorageFormat.check(properties);

    // Partition column names: explicit list wins; otherwise taken from the partition desc.
    List<String> partitionColNames;
    if (partitionColumns.isEmpty()) {
        partitionColNames = stmt.getPartitionDesc() != null ?
                ((ListPartitionDesc) stmt.getPartitionDesc()).getPartitionColNames() : new ArrayList<>();
    } else {
        partitionColNames = partitionColumns.stream().map(Column::getName).collect(Collectors.toList());
    }
    // default is managed table
    HiveTable.HiveTableType tableType = HiveTable.HiveTableType.MANAGED_TABLE;
    if (stmt.isExternal()) {
        tableType = HiveTable.HiveTableType.EXTERNAL_TABLE;
    }
    // Data columns are the leading schema columns; the trailing ones are partition columns.
    HiveTable.Builder builder = HiveTable.builder()
            .setId(ConnectorTableId.CONNECTOR_ID_GENERATOR.getNextId().asInt())
            .setTableName(tableName)
            .setCatalogName(catalogName)
            .setResourceName(toResourceName(catalogName, "hive"))
            .setHiveDbName(dbName)
            .setHiveTableName(tableName)
            .setPartitionColumnNames(partitionColNames)
            .setDataColumnNames(stmt.getColumns().stream()
                    .map(Column::getName)
                    .collect(Collectors.toList()).subList(0, stmt.getColumns().size() - partitionColNames.size()))
            .setFullSchema(stmt.getColumns())
            .setTableLocation(tablePath == null ? null : tablePath.toString())
            .setProperties(stmt.getProperties())
            .setStorageFormat(HiveStorageFormat.get(properties.getOrDefault(FILE_FORMAT, "parquet")))
            .setCreateTime(System.currentTimeMillis())
            .setHiveTableType(tableType);
    Table table = builder.build();
    try {
        if (!tableLocationExists) {
            createDirectory(tablePath, hadoopConf);
        }
        metastore.createTable(dbName, table);
    } catch (Exception e) {
        LOG.error("Failed to create table {}.{}", dbName, tableName);
        boolean shouldDelete;
        try {
            if (tableExists(dbName, tableName)) {
                // NOTE(review): the second placeholder is given `table` (the object) where the
                // table name was likely intended — confirm.
                LOG.warn("Table {}.{} already exists. But some error occur such as accessing meta service timeout",
                        dbName, table, e);
                return true;
            }
            // Only delete a directory this call created and that is still empty.
            FileSystem fileSystem = FileSystem.get(URI.create(tablePath.toString()), hadoopConf);
            shouldDelete = !fileSystem.listLocatedStatus(tablePath).hasNext() && !tableLocationExists;
            if (shouldDelete) {
                fileSystem.delete(tablePath);
            }
        } catch (Exception e1) {
            LOG.error("Failed to delete table location {}", tablePath, e);
        }
        throw new DdlException(String.format("Failed to create table %s.%s. msg: %s", dbName, tableName, e.getMessage()));
    }
    return true;
}
@Test
public void testCreateTableForExternalWithoutLocation() throws DdlException {
    // An external table with no explicit location falls back to getDefaultLocation(),
    // stubbed here; creation must still succeed.
    new MockUp<HiveWriteUtils>() {
        @Mock
        public void createDirectory(Path path, Configuration conf) {
        }
    };
    HiveMetastoreOperations mockedHmsOps = new HiveMetastoreOperations(cachingHiveMetastore, true,
            new Configuration(), MetastoreType.HMS, "hive_catalog") {
        @Override
        public Path getDefaultLocation(String dbName, String tableName) {
            // NOTE(review): "mytable_locatino" looks like a typo for "mytable_location" —
            // harmless here since it is an arbitrary stub path, but confirm.
            return new Path("mytable_locatino");
        }
    };
    Map<String, String> properties = Maps.newHashMap();
    CreateTableStmt stmt = new CreateTableStmt(
            false,
            true,
            new TableName("hive_catalog", "hive_db", "hive_table"),
            Lists.newArrayList(
                    new ColumnDef("c1", TypeDef.create(PrimitiveType.INT)),
                    new ColumnDef("p1", TypeDef.create(PrimitiveType.INT))),
            "hive",
            null,
            new ListPartitionDesc(Lists.newArrayList("p1"), new ArrayList<>()),
            null,
            properties,
            new HashMap<>(),
            "my table comment");
    List<Column> columns = stmt.getColumnDefs().stream().map(def -> def.toColumn(null)).collect(Collectors.toList());
    stmt.setColumns(columns);
    Assert.assertTrue(mockedHmsOps.createTable(stmt));
}
public Credentials pair(final Host bookmark, final ConnectionCallback alert, final LoginCallback prompt,
                        final CancelCallback cancel, final String title, final String message) throws BackgroundException {
    // Convenience overload: pairs using the platform's default browser launcher.
    return this.pair(bookmark, alert, prompt, cancel, title, message, BrowserLauncherFactory.get());
}
@Test(expected = ConnectionCanceledException.class)
public void testLoginInterrupt() throws Exception {
    // The browser "opens" successfully but pairing never completes, so the configured
    // interrupt timeout (1000ms, polling every 100ms) must cancel the connection.
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new BrickProtocol())));
    final Profile profile = new ProfilePlistReader(factory).read(
        this.getClass().getResourceAsStream("/Brick.cyberduckprofile"));
    final Host host = new Host(profile, "mountainduck.files.com") {
        @Override
        public String getProperty(final String key) {
            switch(key) {
                case "brick.pairing.interval.ms":
                    return String.valueOf(100L);
                case "brick.pairing.interrupt.ms":
                    return String.valueOf(1000L);
            }
            return super.getProperty(key);
        }
    };
    final BrickSession session = new BrickSession(host, new DefaultX509TrustManager(), new DefaultX509KeyManager());
    session.pair(host, new DisabledConnectionCallback(), new DisabledLoginCallback(), new DisabledCancelCallback(), "t", "m",
        new BrowserLauncher() {
            @Override
            public boolean open(final String url) {
                return true;
            }
        });
}
public RewriteCommand(Logger console) {
    // The console logger is handed to the base command for progress/error output.
    super(console);
}
@Test
public void testRewriteCommand() throws IOException {
    // Rewriting a parquet file must exit with code 0 and produce the output file.
    File file = parquetFile();
    RewriteCommand command = new RewriteCommand(createLogger());
    command.inputs = Arrays.asList(file.getAbsolutePath());
    File output = new File(getTempFolder(), "converted.parquet");
    command.output = output.getAbsolutePath();
    command.setConf(new Configuration());
    Assert.assertEquals(0, command.run());
    Assert.assertTrue(output.exists());
}
/**
 * Decodes a protobuf payload into {@code destination} by invoking the code-generated
 * static decode method via reflection.
 *
 * @param payload     serialized protobuf bytes
 * @param destination row to populate (the generated method may return a new instance)
 * @return the populated row
 * @throws RuntimeException wrapping the underlying decoding failure
 */
@Nullable
@Override
public GenericRow decode(byte[] payload, GenericRow destination) {
    try {
        destination = (GenericRow) _decodeMethod.invoke(null, payload, destination);
    } catch (Exception e) {
        // Method.invoke wraps the real failure; unwrap one level so the actual decoding
        // error is the cause, and add context instead of a bare RuntimeException.
        Throwable cause = e.getCause() != null ? e.getCause() : e;
        throw new RuntimeException("Caught exception while decoding protobuf payload", cause);
    }
    return destination;
}
@Test(dataProvider = "unsetCases")
public void whenUnset(String fieldName, Object pinotVal) throws Exception {
    // Each unset proto field must decode to the expected per-field default Pinot value.
    Descriptors.FieldDescriptor fd = ComplexTypes.TestMessage.getDescriptor().findFieldByName(fieldName);
    ComplexTypes.TestMessage.Builder messageBuilder = ComplexTypes.TestMessage.newBuilder();
    GenericRow row = new GenericRow();
    ProtoBufCodeGenMessageDecoder messageDecoder = setupDecoder("complex_types.jar",
        "org.apache.pinot.plugin.inputformat.protobuf.ComplexTypes$TestMessage",
        getAllSourceFieldsForComplexType());
    messageDecoder.decode(messageBuilder.build().toByteArray(), row);
    Assert.assertEquals(row.getValue(fd.getName()), pinotVal);
}
/**
 * Enables or disables wire logging on a duplicate of this transport.
 * Enabling installs the default logging handler; disabling removes a previously
 * installed one. Disabling when no handler is set is a no-op returning {@code this}.
 */
public T wiretap(boolean enable) {
    if (enable) {
        T dup = duplicate();
        dup.configuration().loggingHandler = configuration().defaultLoggingHandler();
        return dup;
    }
    if (configuration().loggingHandler == null) {
        // Nothing to remove — avoid an unnecessary duplicate.
        @SuppressWarnings("unchecked")
        T self = (T) this;
        return self;
    }
    T dup = duplicate();
    dup.configuration().loggingHandler = null;
    return dup;
}
@Test
void testWiretap() {
    // Every wiretap overload must yield the expected log level and ByteBuf format.
    TestTransportConfig config = new TestTransportConfig(Collections.emptyMap());
    TestTransport transport = new TestTransport(config);

    doTestWiretap(transport.wiretap(true), LogLevel.DEBUG, ByteBufFormat.HEX_DUMP);
    doTestWiretap(transport.wiretap("category"), LogLevel.DEBUG, ByteBufFormat.HEX_DUMP);
    doTestWiretap(transport.wiretap("category", LogLevel.DEBUG), LogLevel.DEBUG, ByteBufFormat.HEX_DUMP);
    doTestWiretap(transport.wiretap("category", LogLevel.INFO), LogLevel.INFO, ByteBufFormat.HEX_DUMP);
    doTestWiretap(
        transport.wiretap("category", LogLevel.INFO, AdvancedByteBufFormat.HEX_DUMP),
        LogLevel.INFO, ByteBufFormat.HEX_DUMP);
    doTestWiretap(
        transport.wiretap("category", LogLevel.DEBUG, AdvancedByteBufFormat.SIMPLE),
        LogLevel.DEBUG, ByteBufFormat.SIMPLE);
    // TEXTUAL has no ByteBufFormat counterpart, hence the dedicated assertion helper.
    doTestWiretapForTextualLogger(
        transport.wiretap("category", LogLevel.DEBUG, AdvancedByteBufFormat.TEXTUAL), LogLevel.DEBUG);
    doTestWiretapForTextualLogger(
        transport.wiretap("category", LogLevel.DEBUG, AdvancedByteBufFormat.TEXTUAL, Charset.defaultCharset()),
        LogLevel.DEBUG);
}
public static EvictionConfig newEvictionConfig(Integer maxSize, MaxSizePolicy maxSizePolicy,
                                               EvictionPolicy evictionPolicy, boolean isNearCache, boolean isIMap,
                                               String comparatorClassName, EvictionPolicyComparator<?, ?, ?> comparator) {
    // Resolve effective values first (IMap has different defaults than other structures).
    int finalSize = maxSize(maxSize, isIMap);
    MaxSizePolicy finalMaxSizePolicy = maxSizePolicy(maxSizePolicy, isIMap);
    EvictionPolicy finalEvictionPolicy = evictionPolicy(evictionPolicy, isIMap);
    try {
        doEvictionConfigChecks(finalMaxSizePolicy, finalEvictionPolicy, comparatorClassName, comparator,
            isIMap, isNearCache);
    } catch (IllegalArgumentException e) {
        // Surface validation failures as configuration errors, not programming errors.
        throw new InvalidConfigurationException(e.getMessage());
    }
    EvictionConfig evictionConfig = new EvictionConfig()
            .setSize(finalSize)
            .setMaxSizePolicy(finalMaxSizePolicy)
            .setEvictionPolicy(finalEvictionPolicy);
    // Comparator class name and comparator instance are both optional.
    if (comparatorClassName != null) {
        evictionConfig.setComparatorClassName(comparatorClassName);
    }
    if (comparator != null) {
        evictionConfig.setComparator(comparator);
    }
    return evictionConfig;
}
@Test
public void should_create_eviction_config_with_comparator_class_name() {
    // A supplied comparator class name must be carried through to the resulting config.
    EvictionConfig evictionConfig = ConfigFactory.newEvictionConfig(42, MaxSizePolicy.PER_NODE, EvictionPolicy.LRU,
        false, false, SOME_COMPARATOR_CLASS_NAME, null);
    assertThat(evictionConfig.getComparatorClassName()).isEqualTo(SOME_COMPARATOR_CLASS_NAME);
}
public static String decode(String url) throws UtilException {
    // Convenience overload: decodes using UTF-8.
    return decode(url, CharsetUtil.UTF_8);
}
@Test
public void encodeTest() {
    // encode and encodeQuery must percent-encode spaces and non-ASCII characters,
    // and decode must round-trip the encoded form back to the original.
    String body = "366466 - 副本.jpg";
    String encode = URLUtil.encode(body);
    assertEquals("366466%20-%20%E5%89%AF%E6%9C%AC.jpg", encode);
    assertEquals(body, URLUtil.decode(encode));

    String encode2 = URLUtil.encodeQuery(body);
    assertEquals("366466%20-%20%E5%89%AF%E6%9C%AC.jpg", encode2);
}
@Override
protected void processRecord(RowData row) {
    // Guards the materialized result against concurrent snapshot/retrieval access.
    synchronized (resultLock) {
        boolean isInsertOp = row.getRowKind() == RowKind.INSERT || row.getRowKind() == RowKind.UPDATE_AFTER;
        // Always set the RowKind to INSERT, so that we can compare rows correctly (RowKind will
        // be ignored),
        row.setRowKind(RowKind.INSERT);
        // insert
        if (isInsertOp) {
            processInsert(row);
        }
        // delete
        else {
            processDelete(row);
        }
    }
}
@Test
void testSnapshot() {
    // Verifies that the materialized result shrinks correctly as retractions
    // (UPDATE_BEFORE) and updates (UPDATE_AFTER) are processed, page by page.
    final ResolvedSchema schema =
        ResolvedSchema.physical(
            new String[] {"f0", "f1"},
            new DataType[] {DataTypes.STRING(), DataTypes.INT()});

    @SuppressWarnings({"unchecked", "rawtypes"})
    final DataStructureConverter<RowData, Row> rowConverter =
        (DataStructureConverter) DataStructureConverters.getConverter(schema.toPhysicalRowDataType());

    try (TestMaterializedCollectStreamResult result =
            new TestMaterializedCollectStreamResult(
                CliClientTestUtils.createTestClient(schema),
                Integer.MAX_VALUE,
                createInternalBinaryRowDataConverter(schema.toPhysicalRowDataType()))) {
        result.isRetrieving = true;

        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "B", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "C", 2));

        assertThat(result.snapshot(1)).isEqualTo(TypedResult.payload(4));

        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(2), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(3), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("C", 2)), result.retrievePage(4), rowConverter);

        // Retracting one duplicate "A" leaves three rows.
        result.processRecord(Row.ofKind(RowKind.UPDATE_BEFORE, "A", 1));

        assertThat(result.snapshot(1)).isEqualTo(TypedResult.payload(3));

        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(2), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("C", 2)), result.retrievePage(3), rowConverter);

        // Retract "C" and the last "A", then add "D": two rows remain.
        result.processRecord(Row.ofKind(RowKind.UPDATE_BEFORE, "C", 2));
        result.processRecord(Row.ofKind(RowKind.UPDATE_BEFORE, "A", 1));
        result.processRecord(Row.ofKind(RowKind.UPDATE_AFTER, "D", 1));

        assertThat(result.snapshot(1)).isEqualTo(TypedResult.payload(2));

        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("D", 1)), result.retrievePage(2), rowConverter);
    }
}
public VlanId ingressVlan() {
    // Missing field maps to VLAN NONE; a present-but-unparsable value maps to null.
    if (!object.has(INGRESS_VLAN)) {
        return VlanId.NONE;
    }
    try {
        return VlanId.vlanId(object.path(INGRESS_VLAN).asText());
    } catch (IllegalArgumentException e) {
        // NOTE(review): invalid values yield null rather than an exception — confirm callers expect this.
        return null;
    }
}
@Test
public void ingressVlan() throws Exception {
    // A valid configured ingress VLAN must parse to the expected VlanId.
    VlanId ingressVlan = config.ingressVlan();
    assertNotNull("ingressVlan should not be null", ingressVlan);
    assertThat(ingressVlan, is(INGRESS_VLAN_1));
}
@Override
public void marshal(Exchange exchange, Object graph, OutputStream stream) throws Exception {
    // Retrieve the message body as input stream
    InputStream is = exchange.getContext().getTypeConverter().mandatoryConvertTo(InputStream.class, graph);
    // and convert that to XML
    Document document = exchange.getContext().getTypeConverter().convertTo(Document.class, exchange, is);

    // RSA key-transport algorithms and an explicit recipient key alias both imply
    // asymmetric encryption; otherwise fall back to symmetric.
    if (null != keyCipherAlgorithm
        && (keyCipherAlgorithm.equals(XMLCipher.RSA_v1dot5) || keyCipherAlgorithm.equals(XMLCipher.RSA_OAEP)
            || keyCipherAlgorithm.equals(XMLCipher.RSA_OAEP_11))) {
        encryptAsymmetric(exchange, document, stream);
    } else if (null != recipientKeyAlias) {
        encryptAsymmetric(exchange, document, stream);
    } else {
        encryptSymmetric(exchange, document, stream);
    }
}
@Test
public void testPartialPayloadXMLElementEncryptionWithKeyAndAlgorithm() throws Exception {
    // 128-bit pass code matching AES-128; only the XPath-selected element is encrypted.
    final byte[] bits128 = {
        (byte) 0x08, (byte) 0x09, (byte) 0x0A, (byte) 0x0B,
        (byte) 0x0C, (byte) 0x0D, (byte) 0x0E, (byte) 0x0F,
        (byte) 0x10, (byte) 0x11, (byte) 0x12, (byte) 0x13,
        (byte) 0x14, (byte) 0x15, (byte) 0x16, (byte) 0x17 };
    final String passCode = new String(bits128);
    context.addRoutes(new RouteBuilder() {
        public void configure() {
            from("direct:start")
                .marshal().xmlSecurity("//cheesesites/netherlands", false, passCode, XMLCipher.AES_128)
                .to("mock:encrypted");
        }
    });
    xmlsecTestHelper.testEncryption(context);
}
public JSONObject set(String key, Object value) throws JSONException {
    // Convenience overload: delegates with no filter and the boolean flag disabled —
    // see the four-argument overload for the flag's exact semantics.
    return set(key, value, null, false);
}
@Test
public void toBeanTest6() {
    // A nested JSON object must map onto the nested bean property ("result").
    final JSONObject json = JSONUtil.createObj()
        .set("targetUrl", "http://test.com")
        .set("success", "true")
        .set("result", JSONUtil.createObj()
            .set("token", "tokenTest")
            .set("userId", "测试用户1"));
    final TokenAuthWarp2 bean = json.toBean(TokenAuthWarp2.class);
    assertEquals("http://test.com", bean.getTargetUrl());
    assertEquals("true", bean.getSuccess());
    final TokenAuthResponse result = bean.getResult();
    assertNotNull(result);
    assertEquals("tokenTest", result.getToken());
    assertEquals("测试用户1", result.getUserId());
}
/**
 * Checks that {@code object} is not an instance of {@code type}.
 *
 * @param type         the forbidden type, must not be null
 * @param object       the object to test; null is never an instance of anything
 * @param errorMessage message of the exception thrown when the check fails
 * @return {@code object} when the check passes
 * @throws IllegalArgumentException if {@code object} is an instance of {@code type}
 */
public static <E> E checkNotInstanceOf(Class<?> type, E object, String errorMessage) {
    // Parameterized Class<?> instead of the raw Class type; source- and binary-compatible.
    isNotNull(type, "type");
    if (type.isInstance(object)) {
        throw new IllegalArgumentException(errorMessage);
    }
    return object;
}
@Test
public void test_checkNotInstanceOf() {
    // BigInteger is not an Integer, so the value must be returned unchanged.
    BigInteger value = checkNotInstanceOf(Integer.class, BigInteger.ONE, "argumentName");
    assertEquals("Returned value should be equal to BigInteger.ONE", BigInteger.ONE, value);
}
public synchronized void joinElection(byte[] data)
    throws HadoopIllegalArgumentException {
    if (data == null) {
        throw new HadoopIllegalArgumentException("data cannot be null");
    }

    // Re-entry is a no-op while already participating.
    if (wantToBeInElection) {
        LOG.info("Already in election. Not re-connecting.");
        return;
    }
    // Defensive copy so later mutation of the caller's array cannot affect the election data.
    appData = new byte[data.length];
    System.arraycopy(data, 0, appData, 0, data.length);

    if (LOG.isDebugEnabled()) {
        LOG.debug("Attempting active election for " + this);
    }
    joinElectionInternal();
}
@Test(expected = HadoopIllegalArgumentException.class)
public void testJoinElectionException() {
    // Null app data must be rejected before any election state changes.
    elector.joinElection(null);
}