focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Udf(description = "When transforming an array, " + "the function provided must have two arguments. " + "The two arguments for each function are in order: " + "the key and then the value. " + "The transformed array is returned." ) public <T, R> List<R> transformArray( @UdfParameter(description = "The array") final List<T> array, @UdfParameter(description = "The lambda function") final Function<T, R> function ) {
    // SQL-style null propagation: a missing array or missing lambda yields null.
    if (array == null) {
        return null;
    }
    if (function == null) {
        return null;
    }
    // Apply the lambda element-by-element, preserving input order.
    return array.stream()
            .map(function::apply)
            .collect(Collectors.toList());
}
// Verifies transformArray with several lambda shapes: empty input always maps to an
// empty list, and each helper lambda (function1..function4, defined elsewhere in this
// test class) is applied element-by-element in order.
@Test public void shouldReturnTransformedArray() {
    // function1: int -> int (maps -5,-2,0 to 0,3,5 — presumably value+5; confirm against helper)
    assertThat(udf.transformArray(Collections.emptyList(), function1()), is(Collections.emptyList()));
    assertThat(udf.transformArray(Arrays.asList(-5, -2, 0), function1()), is(Arrays.asList(0, 3, 5)));
    // function2: int -> parity label
    assertThat(udf.transformArray(Collections.emptyList(), function2()), is(Collections.emptyList()));
    assertThat(udf.transformArray(Arrays.asList(-5, -2, 0), function2()), is(Arrays.asList("odd", "even", "even")));
    // function3: string -> greeting
    assertThat(udf.transformArray(Collections.emptyList(), function3()), is(Collections.emptyList()));
    assertThat(udf.transformArray(Arrays.asList("steven", "leah"), function3()), is(Arrays.asList("hello steven", "hello leah")));
    // function4: list -> size (nested-collection input)
    assertThat(udf.transformArray(Collections.emptyList(), function4()), is(Collections.emptyList()));
    assertThat(udf.transformArray(Arrays.asList(Arrays.asList(5, 4 ,3), Collections.emptyList()), function4()), is(Arrays.asList(3, 0)));
}
/**
 * Fills in the static getter method of the generated model class template.
 * Locates the GET_MODEL method in the template, finds the TO_RETURN variable's
 * builder-chain initializer, and rewrites its arguments (file name, model name,
 * mining function, target field) from the compilation DTO, then populates the
 * various "getCreated*" helper methods of the template.
 *
 * @throws KiePMMLException if the template method body, the TO_RETURN variable,
 *                          or its initializer is missing.
 */
public static void initStaticGetter(final CompilationDTO<? extends Model> compilationDTO, final ClassOrInterfaceDeclaration modelTemplate) {
    // The template must contain exactly the GET_MODEL method with a body.
    final MethodDeclaration staticGetterMethod = modelTemplate.getMethodsByName(GET_MODEL).get(0);
    final BlockStmt staticGetterBody = staticGetterMethod.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, staticGetterMethod)));
    // TO_RETURN is the local variable whose initializer is the builder call chain.
    final VariableDeclarator variableDeclarator = getVariableDeclarator(staticGetterBody, TO_RETURN).orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, TO_RETURN, staticGetterBody)));
    final MethodCallExpr initializer = variableDeclarator.getInitializer()
            .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, TO_RETURN, staticGetterBody)))
            .asMethodCallExpr();
    // "builder(...)" is the innermost call of the chain; its args get replaced below.
    final MethodCallExpr builder = getChainedMethodCallExprFrom("builder", initializer);
    final String fileName = compilationDTO.getFileName();
    final String name = compilationDTO.getModelName();
    // Mining function is emitted as a qualified enum reference, or a null literal when absent.
    final Expression miningFunctionExpression;
    if (compilationDTO.getMINING_FUNCTION() != null) {
        MINING_FUNCTION miningFunction = compilationDTO.getMINING_FUNCTION();
        miningFunctionExpression = new NameExpr(miningFunction.getClass().getName() + "." + miningFunction.name());
    } else {
        miningFunctionExpression = new NullLiteralExpr();
    }
    builder.setArgument(0, new StringLiteralExpr(fileName));
    builder.setArgument(1, new StringLiteralExpr(name));
    builder.setArgument(2, miningFunctionExpression);
    // Target field is optional; emit a null literal when not configured.
    String targetFieldName = compilationDTO.getTargetFieldName();
    final Expression targetFieldExpression;
    if (targetFieldName != null) {
        targetFieldExpression = new StringLiteralExpr(targetFieldName);
    } else {
        targetFieldExpression = new NullLiteralExpr();
    }
    getChainedMethodCallExprFrom("withTargetField", initializer).setArgument(0, targetFieldExpression);
    // populateGetCreatedMiningFieldsMethod(modelTemplate, compilationDTO.getKieMiningFields());
    populateGetCreatedOutputFieldsMethod(modelTemplate, compilationDTO.getKieOutputFields());
    populateGetCreatedKiePMMLMiningFieldsMethod(modelTemplate, compilationDTO.getMiningSchema().getMiningFields() , compilationDTO.getFields());
    // Output and target sections are optional in the PMML model.
    if (compilationDTO.getOutput() != null) {
        populateGetCreatedKiePMMLOutputFieldsMethod(modelTemplate, compilationDTO.getOutput().getOutputFields());
    }
    if (compilationDTO.getKieTargetFields() != null) {
        populateGetCreatedKiePMMLTargetsMethod(modelTemplate, compilationDTO.getKieTargetFields());
    }
    populateGetCreatedTransformationDictionaryMethod(modelTemplate, compilationDTO.getTransformationDictionary());
    populateGetCreatedLocalTransformationsMethod(modelTemplate, compilationDTO.getLocalTransformations());
}
// Golden-file test: runs initStaticGetter against the template fixture and compares
// the mutated method, both as text and as an AST node, with TEST_04_SOURCE.
@Test void initStaticGetter() throws IOException {
    final CompilationDTO compilationDTO = CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmmlModel, model, new PMMLCompilationContextMock(), SOURCE_BASE);
    org.kie.pmml.compiler.commons.codegenfactories.KiePMMLModelFactoryUtils.initStaticGetter(compilationDTO, classOrInterfaceDeclaration);
    // Expected output is parsed from the checked-in source fixture.
    String text = getFileContent(TEST_04_SOURCE);
    MethodDeclaration expected = JavaParserUtils.parseMethod(text);
    // Both the rendered text and the structural AST comparison must match.
    assertThat(staticGetterMethod.toString()).isEqualTo(expected.toString());
    assertThat(JavaParserUtils.equalsNode(expected, staticGetterMethod)).isTrue();
}
/**
 * Returns the shared {@code Gson} instance held by the nested
 * {@code SingletonHolder} class (lazy, thread-safe class-initialization idiom —
 * assuming SingletonHolder is a static nested holder; confirm in the full file).
 */
public static Gson instance() { return SingletonHolder.INSTANCE; }
// Security guard test: serializing an AESCipherProvider must be refused outright
// so key material can never leak into JSON output.
@Test void rejectsSerializationOfAESCipherProvider() {
    final AESCipherProvider acp = new AESCipherProvider(new TempSystemEnvironment());
    try {
        final IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> Serialization.instance().toJson(acp));
        assertEquals(format("Refusing to serialize a %s instance and leak security details!", AESCipherProvider.class.getName()), e.getMessage());
    } finally {
        // Constructing the provider caches a key; always clean it up.
        acp.removeCachedKey();
    }
}
/**
 * Handles an AUTH_LIST_ACL request: queries the authorization metadata manager with
 * the subject/resource filters from the request header and returns the matching ACLs
 * as a JSON body. Blocks on the async lookup via join() so the response is complete
 * before returning.
 */
private RemotingCommand listAcl(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException {
    final RemotingCommand response = RemotingCommand.createResponseCommand(null);
    ListAclsRequestHeader requestHeader = request.decodeCommandCustomHeader(ListAclsRequestHeader.class);
    this.brokerController.getAuthorizationMetadataManager()
        .listAcl(requestHeader.getSubjectFilter(), requestHeader.getResourceFilter())
        .thenAccept(acls -> {
            response.setCode(ResponseCode.SUCCESS);
            // An empty result still succeeds — just with no body.
            if (CollectionUtils.isNotEmpty(acls)) {
                List<AclInfo> aclInfos = AclConverter.convertAcls(acls);
                String body = JSON.toJSONString(aclInfos);
                response.setBody(body.getBytes(StandardCharsets.UTF_8));
            }
        })
        .exceptionally(ex -> {
            LOGGER.error("list acl error, subjectFilter:{}, resourceFilter:{}", requestHeader.getSubjectFilter(), requestHeader.getResourceFilter(), ex);
            // handleAuthException mutates/derives the error response; its return value
            // only satisfies the exceptionally signature here.
            return handleAuthException(response, ex);
        })
        .join(); // intentionally synchronous: the caller expects a finished response
    return response;
}
// Round-trips one ACL through the admin processor: mocks the metadata manager to
// return a single Acl, issues AUTH_LIST_ACL, and checks the JSON body fields.
@Test public void testListAcl() throws RemotingCommandException {
    Acl aclInfo = Acl.of(User.of("abc"), Arrays.asList(Resource.of("Topic:*")), Arrays.asList(Action.PUB), Environment.of("192.168.0.1"), Decision.ALLOW);
    when(authorizationMetadataManager.listAcl(any(), any())).thenReturn(CompletableFuture.completedFuture(Arrays.asList(aclInfo)));
    ListAclsRequestHeader listAclRequestHeader = new ListAclsRequestHeader();
    listAclRequestHeader.setSubjectFilter("User:abc");
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.AUTH_LIST_ACL, listAclRequestHeader);
    request.setVersion(441);
    request.addExtField("AccessKey", "rocketmq");
    request.makeCustomHeaderToNet();
    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
    // Deserialize the body back and verify every projected field of the first entry.
    List<AclInfo> aclInfoData = JSON.parseArray(new String(response.getBody()), AclInfo.class);
    assertThat(aclInfoData.get(0).getSubject()).isEqualTo("User:abc");
    assertThat(aclInfoData.get(0).getPolicies().get(0).getEntries().get(0).getResource()).isEqualTo("Topic:*");
    assertThat(aclInfoData.get(0).getPolicies().get(0).getEntries().get(0).getActions()).containsAll(Arrays.asList(Action.PUB.getName()));
    assertThat(aclInfoData.get(0).getPolicies().get(0).getEntries().get(0).getSourceIps()).containsAll(Arrays.asList("192.168.0.1"));
    assertThat(aclInfoData.get(0).getPolicies().get(0).getEntries().get(0).getDecision()).isEqualTo("Allow");
}
/**
 * Deserializes the entity stream into an instance of {@code type}.
 * Prefers a pre-registered reader supplier for the type; otherwise builds a
 * reader on demand from the shared mapper. The reader's string cache is always
 * cleared afterwards, even on failure.
 */
public <T> T readValue(Class<T> type, InputStream entityStream) throws IOException {
    // Look up a cached reader supplier; fall back to creating a fresh reader.
    final ObjectReader baseReader = Optional.ofNullable(objectReaderByClass.get(type))
            .map(Supplier::get)
            .orElseGet(() -> mapper.readerFor(type));
    final ObjectReader reader = DeserializerStringCache.init(baseReader);
    try {
        return reader.readValue(entityStream);
    } finally {
        // Release the globally-scoped string cache attached to this reader.
        DeserializerStringCache.clear(reader, CacheScope.GLOBAL_SCOPE);
    }
}
// Cross-codec round trip: encode an Application with EntityBodyConverter (JSON),
// then decode the bytes with the codec under test and compare for semantic equality.
@Test public void testApplicationXStreamEncodeJacksonDecode() throws Exception {
    Application original = APPLICATION_1;
    // Encode
    ByteArrayOutputStream captureStream = new ByteArrayOutputStream();
    new EntityBodyConverter().write(original, captureStream, MediaType.APPLICATION_JSON_TYPE);
    byte[] encoded = captureStream.toByteArray();
    // Decode
    InputStream source = new ByteArrayInputStream(encoded);
    Application decoded = codec.readValue(Application.class, source);
    // Uses the domain comparator rather than equals() — field-level comparison.
    assertTrue(EurekaEntityComparators.equal(decoded, original));
}
/**
 * Returns whether two expressions are semantically equivalent over the given
 * struct. Both sides are normalized (NOTs rewritten away) and bound to the
 * struct before comparison.
 */
public static boolean equivalent(
    Expression left, Expression right, Types.StructType struct, boolean caseSensitive) {
  // Normalize and bind each side so the comparison sees canonical forms.
  final Expression boundLeft = Binder.bind(struct, Expressions.rewriteNot(left), caseSensitive);
  final Expression boundRight = Binder.bind(struct, Expressions.rewriteNot(right), caseSensitive);
  return boundLeft.isEquivalentTo(boundRight);
}
// Every expression kind must be equivalent to itself, and (cross-check) no two
// distinct expressions in this sample set should compare as equivalent.
@Test public void testIdenticalExpressionIsEquivalent() {
    Expression[] exprs = new Expression[] { Expressions.isNull("data"), Expressions.notNull("data"), Expressions.isNaN("measurement"), Expressions.notNaN("measurement"), Expressions.lessThan("id", 5), Expressions.lessThanOrEqual("id", 5), Expressions.greaterThan("id", 5), Expressions.greaterThanOrEqual("id", 5), Expressions.equal("id", 5), Expressions.notEqual("id", 5), Expressions.in("id", 5, 6), Expressions.notIn("id", 5, 6), Expressions.startsWith("data", "aaa"), Expressions.notStartsWith("data", "aaa"), Expressions.alwaysTrue(), Expressions.alwaysFalse(), Expressions.and(Expressions.lessThan("id", 5), Expressions.notNull("data")), Expressions.or(Expressions.lessThan("id", 5), Expressions.notNull("data")), };
    for (Expression expr : exprs) {
        // Reflexivity: expr ≡ expr.
        assertThat(ExpressionUtil.equivalent(expr, expr, STRUCT, true))
            .as("Should accept identical expression: " + expr)
            .isTrue();
        // Distinctness: no pair of different sample expressions is equivalent
        // (identity comparison is intentional — same array element is skipped).
        for (Expression other : exprs) {
            if (expr != other) {
                assertThat(ExpressionUtil.equivalent(expr, other, STRUCT, true)).isFalse();
            }
        }
    }
}
@Udf(description = "Returns the sign of an INT value, denoted by 1, 0 or -1.")
public Integer sign(
    @UdfParameter( value = "value", description = "The value to get the sign of." ) final Integer value
) {
    // SQL null semantics: null input yields null output.
    if (value == null) {
        return null;
    }
    return Integer.signum(value);
}
// Positive inputs map to +1 across numeric types.
// NOTE(review): sign(1L) and sign(1.5) imply Long/Double overloads of sign()
// exist elsewhere in the UDF class — confirm against the full source.
@Test public void shouldHandlePositive() {
    assertThat(udf.sign(1), is(1));
    assertThat(udf.sign(1L), is(1));
    assertThat(udf.sign(1.5), is(1));
}
/**
 * Serializes a map into a JSON object string.
 * Supported value types: nested Map (recursed), Object[] and Collection
 * (rendered as JSON arrays via stringArrayFromObjectArray), int[] (rendered
 * compactly), JsonCapableObject (recursed on its jsonMap()), and everything
 * else via String.valueOf.
 *
 * @param jsonData the map to serialize; entry order follows the map's iterator
 * @return the JSON string, or "{}" if any exception occurs during serialization
 */
public static String jsonFromMap(Map<String, Object> jsonData) {
    try {
        JsonDocument json = new JsonDocument();
        json.startGroup();
        // Iterate entries directly instead of keySet()+get(key) — avoids a
        // second lookup per key and behaves identically for ordered maps.
        for (Map.Entry<String, Object> entry : jsonData.entrySet()) {
            String key = entry.getKey();
            Object data = entry.getValue();
            if (data instanceof Map) {
                /* it's a nested map, so we'll recursively add the JSON of this map to the current JSON */
                json.addValue(key, jsonFromMap((Map<String, Object>) data));
            } else if (data instanceof Object[]) {
                /* it's an object array, so we'll iterate the elements and put them all in here */
                json.addValue(key, "[" + stringArrayFromObjectArray((Object[]) data) + "]");
            } else if (data instanceof Collection) {
                /* it's a collection, so we'll iterate the elements and put them all in here */
                json.addValue(key, "[" + stringArrayFromObjectArray(((Collection) data).toArray()) + "]");
            } else if (data instanceof int[]) {
                /* it's an int array, so we'll get the string representation */
                String intArray = Arrays.toString((int[]) data);
                /* remove whitespace */
                intArray = intArray.replaceAll(" ", "");
                json.addValue(key, intArray);
            } else if (data instanceof JsonCapableObject) {
                json.addValue(key, jsonFromMap(((JsonCapableObject) data).jsonMap()));
            } else {
                /* all other objects we assume we are to just put the string value in */
                json.addValue(key, String.valueOf(data));
            }
        }
        json.endGroup();
        logger.debug("created json from map => {}", json);
        return json.toString();
    } catch (Exception e) {
        // Deliberate best-effort: serialization failures degrade to an empty object.
        logger.error("Could not create JSON from Map. ", e);
        return "{}";
    }
}
// End-to-end check of jsonFromMap: flat values, int arrays, nested maps and
// String arrays. LinkedHashMap is used so the expected string's key order is stable.
@Test void testMapAndList() {
    Map<String, Object> jsonData = new LinkedHashMap<String, Object>();
    jsonData.put("myKey", "myValue");
    int[] numbers = {1, 2, 3, 4};
    jsonData.put("myNumbers", numbers);
    Map<String, Object> jsonData2 = new LinkedHashMap<String, Object>();
    jsonData2.put("myNestedKey", "myNestedValue");
    jsonData2.put("myNestedKey2", "myNestedValue2");
    String[] values = {"one", "two", "three", "four"};
    jsonData2.put("myStringNumbers", values);
    jsonData.put("myNestedData", jsonData2);
    String json = JsonUtility.jsonFromMap(jsonData);
    // Expected: int array compact, nested object inline, string array quoted.
    String expected = "{\"myKey\":\"myValue\",\"myNumbers\":[1,2,3,4],\"myNestedData\":{\"myNestedKey\":\"myNestedValue\",\"myNestedKey2\":\"myNestedValue2\",\"myStringNumbers\":[\"one\",\"two\",\"three\",\"four\"]}}";
    assertEquals(expected, json);
}
/**
 * Writes the source buffer's remaining bytes into the channel's upload buffer,
 * updating the running MD5 digest, and flushing each time the upload buffer
 * fills. Returns the total number of bytes consumed from sourceBuffer.
 *
 * @throws ClosedChannelException if the channel has been closed
 */
@Override public int write(ByteBuffer sourceBuffer) throws IOException {
    if (!isOpen()) {
        throw new ClosedChannelException();
    }
    int totalBytesWritten = 0;
    while (sourceBuffer.hasRemaining()) {
        int position = sourceBuffer.position();
        // Copy at most what fits in the upload buffer this round.
        int bytesWritten = Math.min(sourceBuffer.remaining(), uploadBuffer.remaining());
        totalBytesWritten += bytesWritten;
        if (sourceBuffer.hasArray()) {
            // If the underlying array is accessible, direct access is the most efficient approach.
            int start = sourceBuffer.arrayOffset() + position;
            uploadBuffer.put(sourceBuffer.array(), start, bytesWritten);
            md5.update(sourceBuffer.array(), start, bytesWritten);
        } else {
            // Otherwise, use a readonly copy with an appropriate mark to read the current range of the
            // buffer twice.
            ByteBuffer copyBuffer = sourceBuffer.asReadOnlyBuffer();
            // mark() remembers `position`; limit bounds the copy to this round's range.
            copyBuffer.mark().limit(position + bytesWritten);
            uploadBuffer.put(copyBuffer);
            // reset() rewinds to the mark so the digest reads the same range again.
            copyBuffer.reset();
            md5.update(copyBuffer);
        }
        sourceBuffer.position(position + bytesWritten); // move position forward by the bytes written
        // If more source remains, the upload buffer must be full (bytesWritten was
        // capped by its capacity), so either disjunct implies "buffer is full": flush.
        if (!uploadBuffer.hasRemaining() || sourceBuffer.hasRemaining()) {
            flush();
        }
    }
    return totalBytesWritten;
}
// Exercises the write path under several S3 configurations (plain, SSE algorithm,
// SSE customer key, SSE KMS params); combining multiple SSE options must fail.
@Test public void write() throws IOException {
    writeFromConfig(s3Config("s3"), false);
    writeFromConfig(s3Config("s3"), true);
    writeFromConfig(s3ConfigWithSSEAlgorithm("s3"), false);
    writeFromConfig(s3ConfigWithSSECustomerKey("s3"), false);
    writeFromConfig(s3ConfigWithSSEAwsKeyManagementParams("s3"), false);
    // Mutually-exclusive SSE settings: expected to throw (JUnit ExpectedException rule).
    expected.expect(IllegalArgumentException.class);
    writeFromConfig(s3ConfigWithMultipleSSEOptions("s3"), false);
}
/**
 * Completely replaces the ACL entries of each scope (access/default) that
 * appears in the ACL spec, keeping entries of untouched scopes from the
 * existing ACL. Mask entries are tracked separately so masks can be
 * recalculated afterwards.
 *
 * @param existingAcl current entries, assumed sorted per HDFS ACL invariants
 * @param inAclSpec   replacement spec; validated before use
 * @return the new, validated ACL entry list
 * @throws AclException if the spec is invalid or the resulting ACL fails validation
 */
public static List<AclEntry> replaceAclEntries(List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException {
    ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
    ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
    // Replacement is done separately for each scope: access and default.
    EnumMap<AclEntryScope, AclEntry> providedMask = Maps.newEnumMap(AclEntryScope.class);
    EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
    EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
    // Every scope mentioned in the spec is fully replaced; masks are held aside.
    for (AclEntry aclSpecEntry: aclSpec) {
        scopeDirty.add(aclSpecEntry.getScope());
        if (aclSpecEntry.getType() == MASK) {
            providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
            maskDirty.add(aclSpecEntry.getScope());
        } else {
            aclBuilder.add(aclSpecEntry);
        }
    }
    // Copy existing entries if the scope was not replaced.
    for (AclEntry existingEntry: existingAcl) {
        if (!scopeDirty.contains(existingEntry.getScope())) {
            if (existingEntry.getType() == MASK) {
                providedMask.put(existingEntry.getScope(), existingEntry);
            } else {
                aclBuilder.add(existingEntry);
            }
        }
    }
    // Fill in required default entries, recompute masks, then validate.
    copyDefaultsIfNeeded(aclBuilder);
    calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
    return buildAndValidateAcl(aclBuilder);
}
// The spec replaces the whole access scope but omits the required OTHER entry,
// so validation must reject it with AclException.
@Test(expected=AclException.class)
public void testReplaceAclEntriesMissingOther() throws AclException {
    List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
        .add(aclEntry(ACCESS, USER, ALL))
        .add(aclEntry(ACCESS, GROUP, READ))
        .add(aclEntry(ACCESS, OTHER, NONE))
        .build();
    // Note: no (ACCESS, OTHER, ...) entry — access scope is dirty, so the
    // existing OTHER entry is discarded and nothing replaces it.
    List<AclEntry> aclSpec = Lists.newArrayList(
        aclEntry(ACCESS, USER, ALL),
        aclEntry(ACCESS, USER, "bruce", READ_WRITE),
        aclEntry(ACCESS, GROUP, READ_EXECUTE),
        aclEntry(ACCESS, GROUP, "sales", ALL),
        aclEntry(ACCESS, MASK, ALL));
    replaceAclEntries(existing, aclSpec);
}
/**
 * Returns whether {@code timeStr} parses as a period with PERIOD_FORMATTER.
 * Any failure — including a null input (NPE inside the parser) — yields false;
 * the broad catch is intentional validation-by-parse.
 */
public static boolean isPeriodValid(String timeStr) {
    boolean parseable;
    try {
        PERIOD_FORMATTER.parsePeriod(timeStr);
        parseable = true;
    } catch (Exception e) {
        parseable = false;
    }
    return parseable;
}
// Valid period strings (including negative periods and — notably — the empty
// string) are accepted; null is rejected.
@Test public void testIsPeriodValid() {
    Assert.assertTrue(TimeUtils.isPeriodValid("2d"));
    // Empty string parses as a zero period and is therefore considered valid.
    Assert.assertTrue(TimeUtils.isPeriodValid(""));
    Assert.assertTrue(TimeUtils.isPeriodValid("1m"));
    Assert.assertTrue(TimeUtils.isPeriodValid("2h"));
    Assert.assertTrue(TimeUtils.isPeriodValid("-2h"));
    Assert.assertTrue(TimeUtils.isPeriodValid("-2m"));
    Assert.assertTrue(TimeUtils.isPeriodValid("-4d"));
    // Null falls into the catch-all inside isPeriodValid and returns false.
    Assert.assertFalse(TimeUtils.isPeriodValid(null));
}
/**
 * Creates or updates a Rule Chain for the current tenant. Tenant id is forced
 * from the authenticated user; permission is checked before delegating to the
 * rule-chain service.
 */
@ApiOperation(value = "Create Or Update Rule Chain (saveRuleChain)", notes = "Create or update the Rule Chain. When creating Rule Chain, platform generates Rule Chain Id as " + UUID_WIKI_LINK + "The newly created Rule Chain Id will be present in the response. " + "Specify existing Rule Chain id to update the rule chain. " + "Referencing non-existing rule chain Id will cause 'Not Found' error." + "\n\n" + RULE_CHAIN_DESCRIPTION + "Remove 'id', 'tenantId' from the request body example (below) to create new Rule Chain entity." + TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('TENANT_ADMIN')")
@RequestMapping(value = "/ruleChain", method = RequestMethod.POST)
@ResponseBody
public RuleChain saveRuleChain(
    @Parameter(description = "A JSON value representing the rule chain.") @RequestBody RuleChain ruleChain) throws Exception {
    // Never trust a tenant id from the request body — always use the caller's.
    ruleChain.setTenantId(getCurrentUser().getTenantId());
    // Authorization/validation: create vs. update is decided by presence of an id.
    checkEntity(ruleChain.getId(), ruleChain, Resource.RULE_CHAIN);
    return tbRuleChainService.save(ruleChain, getCurrentUser());
}
// Create-then-update round trip through the REST controller, verifying generated
// id/createdTime on create, name persistence on update, and edge/audit notification
// expectations for both ADDED and UPDATED actions.
@Test public void testSaveRuleChain() throws Exception {
    RuleChain ruleChain = new RuleChain();
    ruleChain.setName("RuleChain");
    Mockito.reset(tbClusterService, auditLogService);
    // Create
    RuleChain savedRuleChain = doPost("/api/ruleChain", ruleChain, RuleChain.class);
    Assert.assertNotNull(savedRuleChain);
    Assert.assertNotNull(savedRuleChain.getId());
    Assert.assertTrue(savedRuleChain.getCreatedTime() > 0);
    Assert.assertEquals(ruleChain.getName(), savedRuleChain.getName());
    testNotifyEntityOneTimeMsgToEdgeServiceNever(savedRuleChain, savedRuleChain.getId(), savedRuleChain.getId(), savedTenant.getId(), tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(), ActionType.ADDED);
    // Update
    savedRuleChain.setName("New RuleChain");
    savedRuleChain = doPost("/api/ruleChain", savedRuleChain, RuleChain.class);
    RuleChain foundRuleChain = doGet("/api/ruleChain/" + savedRuleChain.getId().getId().toString(), RuleChain.class);
    Assert.assertEquals(savedRuleChain.getName(), foundRuleChain.getName());
    testNotifyEntityOneTimeMsgToEdgeServiceNever(savedRuleChain, savedRuleChain.getId(), savedRuleChain.getId(), savedTenant.getId(), tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(), ActionType.UPDATED);
}
/**
 * Adds {@code value} at the tail end of the list and returns the new node.
 * New nodes become the tail; their {@code next} points toward the previous
 * tail, so iteration from tail yields newest-first order.
 */
public ListNode2<T> enqueue(T value) {
    final ListNode2<T> node = new ListNode2<T>(value);
    if (size == 0) {
        // First element is both head and (below) tail.
        head = node;
    } else {
        // Link the new node in front of the current tail.
        node.next = tail;
        tail.prev = node;
    }
    size++;
    tail = node;
    return node;
}
// Enqueue grows the list from the tail; toArray() yields newest-first order,
// so first() is the most recent element and last() the oldest.
@Test public void testEnqueue() {
    DoublyLinkedList<Integer> list = new DoublyLinkedList<Integer>();
    list.enqueue(1);
    assertFalse(list.isEmpty());
    assertEquals(1, list.size());
    assertArrayEquals(new Integer[]{1}, list.toArray());
    list.enqueue(2);
    assertFalse(list.isEmpty());
    assertEquals(2, list.size());
    assertArrayEquals(new Integer[]{2, 1}, list.toArray());
    list.enqueue(3);
    assertFalse(list.isEmpty());
    assertEquals(3, list.size());
    assertArrayEquals(new Integer[]{3, 2, 1}, list.toArray());
    assertEquals(new Integer(3), list.first());
    assertEquals(new Integer(1), list.last());
}
/**
 * Decodes a raw Palo Alto syslog message into a Graylog Message.
 * Parses the syslog header (with the configured or default-UTC time zone),
 * then applies the template parser matching the PAN log type
 * (THREAT/SYSTEM/TRAFFIC) to populate typed fields.
 *
 * @return the decoded message, or null if the syslog header could not be parsed
 */
@Nullable @Override public Message decode(@Nonnull RawMessage rawMessage) {
    String s = new String(rawMessage.getPayload(), StandardCharsets.UTF_8);
    LOG.trace("Received raw message: {}", s);
    String timezoneID = configuration.getString(CK_TIMEZONE);
    // previously existing PA inputs after updating will not have a Time Zone configured, default to UTC
    DateTimeZone timezone = timezoneID != null ? DateTimeZone.forID(timezoneID) : DateTimeZone.UTC;
    LOG.trace("Configured time zone: {}", timezone);
    PaloAltoMessageBase p = parser.parse(s, timezone);
    // Return when error occurs parsing syslog header.
    if (p == null) {
        return null;
    }
    Message message = messageFactory.createMessage(p.payload(), p.source(), p.timestamp());
    // Each PAN log type has its own field template; unknown types keep only the base fields.
    switch (p.panType()) {
        case "THREAT":
            final PaloAltoTypeParser parserThreat = new PaloAltoTypeParser(templates.getThreatMessageTemplate());
            message.addFields(parserThreat.parseFields(p.fields(), timezone));
            break;
        case "SYSTEM":
            final PaloAltoTypeParser parserSystem = new PaloAltoTypeParser(templates.getSystemMessageTemplate());
            message.addFields(parserSystem.parseFields(p.fields(), timezone));
            break;
        case "TRAFFIC":
            final PaloAltoTypeParser parserTraffic = new PaloAltoTypeParser(templates.getTrafficMessageTemplate());
            message.addFields(parserTraffic.parseFields(p.fields(), timezone));
            break;
        default:
            LOG.error("Unsupported PAN type [{}]. Not adding any parsed fields.", p.panType());
    }
    LOG.trace("Successfully processed [{}] message with [{}] fields.", p.panType(), message.getFieldCount());
    return message;
}
// Decodes a THREAT fixture and checks the extracted type field.
// NOTE(review): the inline comment says "System message" but the fixture is a
// THREAT message — comment appears stale. Also assumes decode() does not
// return null for this fixture (otherwise getField would NPE).
@Test public void syslogValuesTest() {
    // Test System message results
    PaloAltoCodec codec = new PaloAltoCodec(Configuration.EMPTY_CONFIGURATION, messageFactory);
    Message message = codec.decode(new RawMessage(SYSLOG_THREAT_MESSAGE_NO_HOST_DOUBLE_SPACE_DATE.getBytes(StandardCharsets.UTF_8)));
    assertEquals("THREAT", message.getField("type"));
}
/**
 * Adds {@code increment} to the current value, marks this instance as set,
 * and returns {@code this} for fluent chaining.
 */
public DoubleValue increment(double increment) {
    set = true;
    value += increment;
    return this;
}
// Chained increments accumulate: 10.6 + 95.4 == 106.0.
@Test public void multiples_calls_to_increment_double_increment_the_value() {
    DoubleValue variationValue = new DoubleValue()
        .increment(10.6)
        .increment(95.4);
    verifySetVariationValue(variationValue, 106);
}
/**
 * Sets the metadata-report timeout (milliseconds, presumably — confirm against
 * MetadataReportConfig) and returns the builder for chaining.
 */
public MetadataReportBuilder timeout(Integer timeout) {
    this.timeout = timeout;
    return getThis();
}
// The timeout set on the builder must survive into the built config object.
@Test void timeout() {
    MetadataReportBuilder builder = new MetadataReportBuilder();
    builder.timeout(1000);
    Assertions.assertEquals(1000, builder.build().getTimeout());
}
/** Static factory: returns a fresh {@code Builder} for constructing this request. */
public static Builder builder() { return new Builder(); }
// JSON serde round-trip for UpdateNamespacePropertiesRequest, covering all field
// combinations: both removals+updates, updates only, removals only, and empty.
// Also checks builder convenience methods (update/remove) against the bulk forms.
@Test public void testRoundTripSerDe() throws JsonProcessingException {
    // Full request
    String fullJson = "{\"removals\":[\"foo\",\"bar\"],\"updates\":{\"owner\":\"Hank\"}}";
    assertRoundTripSerializesEquallyFrom(
        fullJson,
        UpdateNamespacePropertiesRequest.builder().updateAll(UPDATES).removeAll(REMOVALS).build());
    // Only updates
    String emptyRemoval = "{\"removals\":[],\"updates\":{\"owner\":\"Hank\"}}";
    assertRoundTripSerializesEquallyFrom(
        emptyRemoval,
        UpdateNamespacePropertiesRequest.builder()
            .updateAll(UPDATES)
            .removeAll(EMPTY_REMOVALS)
            .build());
    assertRoundTripSerializesEquallyFrom(
        emptyRemoval,
        UpdateNamespacePropertiesRequest.builder().update("owner", "Hank").build());
    // Only removals
    String emptyUpdates = "{\"removals\":[\"foo\",\"bar\"],\"updates\":{}}";
    assertRoundTripSerializesEquallyFrom(
        emptyUpdates,
        UpdateNamespacePropertiesRequest.builder()
            .removeAll(REMOVALS)
            .updateAll(EMPTY_UPDATES)
            .build());
    assertRoundTripSerializesEquallyFrom(
        emptyUpdates,
        UpdateNamespacePropertiesRequest.builder().remove("foo").remove("bar").build());
    // All empty
    String jsonAllFieldsEmpty = "{\"removals\":[],\"updates\":{}}";
    assertRoundTripSerializesEquallyFrom(
        jsonAllFieldsEmpty, UpdateNamespacePropertiesRequest.builder().build());
}
@Override public void onExit(Context context, ResourceWrapper rw, int acquireCount, Object... args) { Entry curEntry = context.getCurEntry(); if (curEntry == null) { return; } for (MetricExtension m : MetricExtensionProvider.getMetricExtensions()) { if (curEntry.getBlockError() != null) { continue; } String resource = rw.getName(); Throwable ex = curEntry.getError(); long completeTime = curEntry.getCompleteTimestamp(); if (completeTime <= 0) { completeTime = TimeUtil.currentTimeMillis(); } long rt = completeTime - curEntry.getCreateTimestamp(); if (m instanceof AdvancedMetricExtension) { // Since 1.8.0 (as a temporary workaround for compatibility) ((AdvancedMetricExtension) m).onComplete(rw, rt, acquireCount, args); if (ex != null) { ((AdvancedMetricExtension) m).onError(rw, ex, acquireCount, args); } } else { m.addRt(resource, rt, args); m.addSuccess(resource, acquireCount, args); m.decreaseThreadNum(resource, args); if (null != ex) { m.addException(resource, acquireCount, ex); } } } }
// Drives MetricExitCallback.onExit with a mocked entry/context and a fake
// AdvancedMetricExtension, with TimeUtil mocked so the expected rt (deltaMs)
// is deterministic. Verifies rt accumulation, completion count, and
// concurrency decrement on the extension.
@Test public void advancedExtensionOnExit() {
    try (MockedStatic<TimeUtil> mocked = super.mockTimeUtil()) {
        FakeAdvancedMetricExtension extension = new FakeAdvancedMetricExtension();
        MetricExtensionProvider.addMetricExtension(extension);
        MetricExitCallback exitCallback = new MetricExitCallback();
        StringResourceWrapper resourceWrapper = new StringResourceWrapper("resource", EntryType.OUT);
        int count = 2;
        Object[] args = {"args1", "args2"};
        // Seed the fake extension with prior totals so deltas are observable.
        long prevRt = 20;
        extension.rt = prevRt;
        extension.complete = 6;
        extension.concurrency = 10;
        Context context = mock(Context.class);
        Entry entry = mock(Entry.class);
        // Mock current time
        long curMillis = System.currentTimeMillis();
        setCurrentMillis(mocked, curMillis);
        int deltaMs = 100;
        // No error, entry created deltaMs ago => onExit should report rt == deltaMs.
        when(entry.getError()).thenReturn(null);
        when(entry.getCreateTimestamp()).thenReturn(curMillis - deltaMs);
        when(context.getCurEntry()).thenReturn(entry);
        exitCallback.onExit(context, resourceWrapper, count, args);
        Assert.assertEquals(prevRt + deltaMs, extension.rt);
        Assert.assertEquals(extension.complete, 6 + count);
        Assert.assertEquals(extension.concurrency, 10 - 1);
    }
}
/**
 * Serializes the full data model (clients, grants, white/blacklisted sites,
 * authentication holders, access/refresh tokens, system scopes) as a JSON
 * object keyed by the format version, each section as a JSON array, followed
 * by extension data from the first extension supporting this version.
 * Section order is fixed and part of the export format.
 */
@Override public void exportData(JsonWriter writer) throws IOException {
    // version tag at the root
    writer.name(THIS_VERSION);
    writer.beginObject();
    // clients list
    writer.name(CLIENTS);
    writer.beginArray();
    writeClients(writer);
    writer.endArray();
    writer.name(GRANTS);
    writer.beginArray();
    writeGrants(writer);
    writer.endArray();
    writer.name(WHITELISTEDSITES);
    writer.beginArray();
    writeWhitelistedSites(writer);
    writer.endArray();
    writer.name(BLACKLISTEDSITES);
    writer.beginArray();
    writeBlacklistedSites(writer);
    writer.endArray();
    writer.name(AUTHENTICATIONHOLDERS);
    writer.beginArray();
    writeAuthenticationHolders(writer);
    writer.endArray();
    writer.name(ACCESSTOKENS);
    writer.beginArray();
    writeAccessTokens(writer);
    writer.endArray();
    writer.name(REFRESHTOKENS);
    writer.beginArray();
    writeRefreshTokens(writer);
    writer.endArray();
    writer.name(SYSTEMSCOPES);
    writer.beginArray();
    writeSystemScopes(writer);
    writer.endArray();
    // Only the FIRST extension that supports this version contributes data.
    for (MITREidDataServiceExtension extension : extensions) {
        if (extension.supportsVersion(THIS_VERSION)) {
            extension.exportExtensionData(writer);
            break;
        }
    }
    writer.endObject(); // end mitreid-connect-1.3
}
// Exports two fully-populated clients (all other repositories mocked empty),
// parses the resulting JSON, verifies the envelope structure (version root,
// all section arrays present), then matches each serialized client field-by-field
// against its source entity — including the optional codeChallengeMethod, which
// only client2 sets.
@Test public void testExportClients() throws IOException {
    ClientDetailsEntity client1 = new ClientDetailsEntity();
    client1.setId(1L);
    client1.setAccessTokenValiditySeconds(3600);
    client1.setClientId("client1");
    client1.setClientSecret("clientsecret1");
    client1.setRedirectUris(ImmutableSet.of("http://foo.com/"));
    client1.setScope(ImmutableSet.of("foo", "bar", "baz", "dolphin"));
    client1.setGrantTypes(ImmutableSet.of("implicit", "authorization_code", "urn:ietf:params:oauth:grant_type:redelegate", "refresh_token"));
    client1.setAllowIntrospection(true);
    ClientDetailsEntity client2 = new ClientDetailsEntity();
    client2.setId(2L);
    client2.setAccessTokenValiditySeconds(3600);
    client2.setClientId("client2");
    client2.setClientSecret("clientsecret2");
    client2.setRedirectUris(ImmutableSet.of("http://bar.baz.com/"));
    client2.setScope(ImmutableSet.of("foo", "dolphin", "electric-wombat"));
    client2.setGrantTypes(ImmutableSet.of("client_credentials", "urn:ietf:params:oauth:grant_type:redelegate"));
    client2.setAllowIntrospection(false);
    client2.setCodeChallengeMethod(PKCEAlgorithm.S256);
    Set<ClientDetailsEntity> allClients = ImmutableSet.of(client1, client2);
    // Only the client repository returns data; every other section exports empty.
    Mockito.when(clientRepository.getAllClients()).thenReturn(allClients);
    Mockito.when(approvedSiteRepository.getAll()).thenReturn(new HashSet<ApprovedSite>());
    Mockito.when(wlSiteRepository.getAll()).thenReturn(new HashSet<WhitelistedSite>());
    Mockito.when(blSiteRepository.getAll()).thenReturn(new HashSet<BlacklistedSite>());
    Mockito.when(authHolderRepository.getAll()).thenReturn(new ArrayList<AuthenticationHolderEntity>());
    Mockito.when(tokenRepository.getAllAccessTokens()).thenReturn(new HashSet<OAuth2AccessTokenEntity>());
    Mockito.when(tokenRepository.getAllRefreshTokens()).thenReturn(new HashSet<OAuth2RefreshTokenEntity>());
    Mockito.when(sysScopeRepository.getAll()).thenReturn(new HashSet<SystemScope>());
    // do the data export
    StringWriter stringWriter = new StringWriter();
    JsonWriter writer = new JsonWriter(stringWriter);
    // exportData writes name/value pairs, so the enclosing object is managed here.
    writer.beginObject();
    dataService.exportData(writer);
    writer.endObject();
    writer.close();
    // parse the output as a JSON object for testing
    JsonElement elem = new JsonParser().parse(stringWriter.toString());
    JsonObject root = elem.getAsJsonObject();
    // make sure the root is there
    assertThat(root.has(MITREidDataService.MITREID_CONNECT_1_3), is(true));
    JsonObject config = root.get(MITREidDataService.MITREID_CONNECT_1_3).getAsJsonObject();
    // make sure all the root elements are there
    assertThat(config.has(MITREidDataService.CLIENTS), is(true));
    assertThat(config.has(MITREidDataService.GRANTS), is(true));
    assertThat(config.has(MITREidDataService.WHITELISTEDSITES), is(true));
    assertThat(config.has(MITREidDataService.BLACKLISTEDSITES), is(true));
    assertThat(config.has(MITREidDataService.REFRESHTOKENS), is(true));
    assertThat(config.has(MITREidDataService.ACCESSTOKENS), is(true));
    assertThat(config.has(MITREidDataService.SYSTEMSCOPES), is(true));
    assertThat(config.has(MITREidDataService.AUTHENTICATIONHOLDERS), is(true));
    // make sure the root elements are all arrays
    assertThat(config.get(MITREidDataService.CLIENTS).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.GRANTS).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.WHITELISTEDSITES).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.BLACKLISTEDSITES).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.REFRESHTOKENS).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.ACCESSTOKENS).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.SYSTEMSCOPES).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.AUTHENTICATIONHOLDERS).isJsonArray(), is(true));
    // check our client list (this test)
    JsonArray clients = config.get(MITREidDataService.CLIENTS).getAsJsonArray();
    assertThat(clients.size(), is(2));
    // check for both of our clients in turn
    Set<ClientDetailsEntity> checked = new HashSet<>();
    for (JsonElement e : clients) {
        assertThat(e.isJsonObject(), is(true));
        JsonObject client = e.getAsJsonObject();
        // Match the serialized client back to its source entity by clientId.
        ClientDetailsEntity compare = null;
        if (client.get("clientId").getAsString().equals(client1.getClientId())) {
            compare = client1;
        } else if (client.get("clientId").getAsString().equals(client2.getClientId())) {
            compare = client2;
        }
        if (compare == null) {
            fail("Could not find matching clientId: " + client.get("clientId").getAsString());
        } else {
            assertThat(client.get("clientId").getAsString(), equalTo(compare.getClientId()));
            assertThat(client.get("secret").getAsString(), equalTo(compare.getClientSecret()));
            assertThat(client.get("accessTokenValiditySeconds").getAsInt(), equalTo(compare.getAccessTokenValiditySeconds()));
            assertThat(client.get("allowIntrospection").getAsBoolean(), equalTo(compare.isAllowIntrospection()));
            assertThat(jsonArrayToStringSet(client.get("redirectUris").getAsJsonArray()), equalTo(compare.getRedirectUris()));
            assertThat(jsonArrayToStringSet(client.get("scope").getAsJsonArray()), equalTo(compare.getScope()));
            assertThat(jsonArrayToStringSet(client.get("grantTypes").getAsJsonArray()), equalTo(compare.getGrantTypes()));
            // codeChallengeMethod is optional and may be serialized as JSON null.
            assertThat((client.has("codeChallengeMethod") && !client.get("codeChallengeMethod").isJsonNull()) ? PKCEAlgorithm.parse(client.get("codeChallengeMethod").getAsString()) : null, equalTo(compare.getCodeChallengeMethod()));
            checked.add(compare);
        }
    }
    // make sure all of our clients were found
    assertThat(checked.containsAll(allClients), is(true));
}
/**
 * Generates Java codecs for the whole IR: shared package-info, type stubs,
 * the message-header stub, and one decoder plus one encoder per message.
 *
 * @throws IOException if writing any generated source fails.
 */
public void generate() throws IOException
{
    packageNameByTypes.clear();

    generatePackageInfo();
    generateTypeStubs();
    generateMessageHeaderStub();

    for (final List<Token> tokens : ir.messages())
    {
        final Token msgToken = tokens.get(0);
        final List<Token> messageBody = getMessageBody(tokens);
        final boolean hasVarData = -1 != findSignal(messageBody, Signal.BEGIN_VAR_DATA);

        // The collect* calls consume the body sequentially: fields first,
        // then repeating groups, then var-data, threading the index 'i' through.
        int i = 0;
        final List<Token> fields = new ArrayList<>();
        i = collectFields(messageBody, i, fields);
        final List<Token> groups = new ArrayList<>();
        i = collectGroups(messageBody, i, groups);
        final List<Token> varData = new ArrayList<>();
        collectVarData(messageBody, i, varData);

        // Decoder: precedence model is keyed by the nested "#CodecStates" class name.
        final String decoderClassName = formatClassName(decoderName(msgToken.name()));
        final String decoderStateClassName = decoderClassName + "#CodecStates";
        final FieldPrecedenceModel decoderPrecedenceModel = precedenceChecks.createDecoderModel(
            decoderStateClassName, tokens);
        generateDecoder(decoderClassName, msgToken, fields, groups, varData, hasVarData, decoderPrecedenceModel);

        // Encoder: mirrors the decoder generation above.
        final String encoderClassName = formatClassName(encoderName(msgToken.name()));
        final String encoderStateClassName = encoderClassName + "#CodecStates";
        final FieldPrecedenceModel encoderPrecedenceModel = precedenceChecks.createEncoderModel(
            encoderStateClassName, tokens);
        generateEncoder(encoderClassName, msgToken, fields, groups, varData, hasVarData, encoderPrecedenceModel);
    }
}
// Verifies a generated bit-set choice (cruiseControl) round-trips through the
// reflectively-compiled encoder/decoder pair: false before set, true after.
@Test
void shouldGenerateBitSetCodecs() throws Exception
{
    final UnsafeBuffer buffer = new UnsafeBuffer(new byte[4096]);

    generator().generate();

    final Object encoder = wrap(buffer, compileCarEncoder().getConstructor().newInstance());
    final Object decoder = getCarDecoder(buffer, encoder);

    final Object extrasEncoder = getExtras(encoder);
    final Object extrasDecoder = getExtras(decoder);

    // Bit is clear by default, visible through the decoder over the same buffer.
    assertFalse(getCruiseControl(extrasDecoder));
    setCruiseControl(extrasEncoder, true);
    assertTrue(getCruiseControl(extrasDecoder));
}
/**
 * {@inheritDoc}
 *
 * Delegates to {@link #listInternal} with default listing options.
 */
@Override
public UfsStatus[] listStatus(String path) throws IOException {
    return listInternal(path, ListOptions.defaults());
}
// With DescendantType.NONE an async listing of an object-store path must return
// exactly one item describing the listed path itself ("root"), not its children.
@Test
public void testListObjectStorageDescendantTypeNone() throws Throwable {
    // Mock UFS with two files under "root"; getStatus/listStatus are stubbed in-line.
    mObjectUFS = new MockObjectUnderFileSystem(new AlluxioURI("/"),
        UnderFileSystemConfiguration.defaults(CONF)) {
      final UfsStatus mF1Status = new UfsFileStatus("f1", "", 0L, 0L, "", "", (short) 0777, 0L);
      final UfsStatus mF2Status = new UfsFileStatus("f2", "", 1L, 0L, "", "", (short) 0777, 0L);

      @Override
      public UfsStatus getStatus(String path) throws IOException {
        if (path.equals("root/f1")) {
          return mF1Status;
        } else if (path.equals("root/f2")) {
          return mF2Status;
        }
        throw new FileNotFoundException();
      }

      @Override
      public UfsStatus[] listStatus(String path) throws IOException {
        // Accept both with and without the trailing slash.
        if (path.equals("root") || path.equals("root/")) {
          return new UfsStatus[] {mF1Status, mF2Status};
        }
        return new UfsStatus[0];
      }

      @Override
      public UfsStatus[] listStatus(String path, ListOptions options) throws IOException {
        return listStatus(path);
      }

      @Override
      protected ObjectPermissions getPermissions() {
        return new ObjectPermissions("foo", "bar", (short) 0777);
      }
    };

    UfsLoadResult result = UnderFileSystemTestUtil.performListingAsyncAndGetResult(
        mObjectUFS, "root", DescendantType.NONE);
    // Only the path itself is reported when descendants are excluded.
    assertEquals(1, result.getItemsCount());
    UfsStatus status = result.getItems().collect(Collectors.toList()).get(0);
    assertEquals("root", status.getName());
}
/**
 * Polls SQS once via the polling task and hands the received messages to batch processing.
 *
 * @return the number of exchanges processed in this batch.
 */
@Override
protected int poll() throws Exception {
    // must reset for each poll
    shutdownRunningTask = null;
    pendingExchanges = 0;

    List<software.amazon.awssdk.services.sqs.model.Message> messages = pollingTask.call();

    // okay we have some response from aws so lets mark the consumer as ready
    forceConsumerAsReady();

    Queue<Exchange> exchanges = createExchanges(messages);
    return processBatch(CastUtils.cast(exchanges));
}
// Ten queued messages should be fetched by one poll using a single receive
// request sized for 10, without creating any queues.
@Test
void shouldRequest10MessagesWithSingleReceiveRequest() throws Exception {
    // given
    var expectedMessages = IntStream.range(0, 10).mapToObj(Integer::toString).toList();
    expectedMessages.stream().map(this::message).forEach(sqsClientMock::addMessage);

    try (var tested = createConsumer(10)) {
        // when
        var polledMessagesCount = tested.poll();

        // then
        assertThat(polledMessagesCount).isEqualTo(10);
        assertThat(receiveMessageBodies()).isEqualTo(expectedMessages);
        assertThat(sqsClientMock.getReceiveRequests()).containsExactlyInAnyOrder(expectedReceiveRequest(10));
        assertThat(sqsClientMock.getQueues()).isEmpty();
    }
}
static Header[] extractHeaders(AirborneEvent record) { //Kafka Header -VALUES- String ifrVfrStatus = record.pairedIfrVfrStatus().toString(); String scoreAsString = Double.toString(record.score()); String epochTimeMs = Long.toString(record.time().toEpochMilli()); Headers headers = new RecordHeaders() .add("ifrVfrStatus", ifrVfrStatus.getBytes()) //IFR-IFR, IFR-VFR, or VFR-VFR .add("date", record.eventDate().getBytes()) //e.g. 2020-03-27, YYYY-MM-DD .add("time", record.eventTimeOfDay().getBytes()) //e.g. HH:mm:ss.SSS .add("callsign_0", record.callsign(0).getBytes()) .add("callsign_1", record.callsign(1).getBytes()) .add("eventScore", scoreAsString.getBytes()) //notice, this double is actually stored as a byte[] that encodes a String .add("epochTimeMs", epochTimeMs.getBytes()) .add("conflictAngle", record.conflictAngle().toString().getBytes()) //e.g. CROSSING, SAME, or OPPOSITE .add("schemaVer", record.schemaVersion().getBytes()); return headers.toArray(); }
// The headers extracted from a parsed event must include a "schemaVer" header
// whose bytes currently encode "3".
@Test
public void kafkaHeaderContainCorrectSchemaNumber() throws Exception {
    AirborneEvent event = AirborneEvent.parse(
        new FileReader(getResourceFile("scaryTrackOutput.json"))
    );

    Header[] headers = extractHeaders(event);

    Predicate<Header> isSchemaVerHeader = header -> header.key().equals("schemaVer");

    boolean thereIsASchemaVersionHeader = Stream.of(headers).anyMatch(isSchemaVerHeader);
    assertThat(thereIsASchemaVersionHeader, is(true));

    byte[] schemaBytes = Stream.of(headers).filter(isSchemaVerHeader)
        .findFirst()
        .get()
        .value();

    //Eventually, this schema number will change, when that happens we'll update this test
    assertThat(schemaBytes, is("3".getBytes()));
}
/**
 * Builds the metered window store, wrapping the inner store with optional
 * logging and caching layers. Caching and duplicate retention are mutually
 * exclusive: when the supplier retains duplicates, caching is switched off
 * (with a warning) before the store is assembled.
 */
@Override
public WindowStore<K, V> build() {
    final boolean cachingConflictsWithDuplicates = storeSupplier.retainDuplicates() && enableCaching;
    if (cachingConflictsWithDuplicates) {
        log.warn("Disabling caching for {} since store was configured to retain duplicates",
            storeSupplier.name());
        enableCaching = false;
    }

    return new MeteredWindowStore<>(
        maybeWrapCaching(maybeWrapLogging(storeSupplier.get())),
        storeSupplier.windowSize(),
        storeSupplier.metricsScope(),
        time,
        keySerde,
        valueSerde);
}
// With a duplicate-retaining supplier, build() must flip enableCaching back to
// false even though caching was explicitly requested on the builder.
@SuppressWarnings("unchecked")
@Test
public void shouldDisableCachingWithRetainDuplicates() {
    supplier = Stores.persistentWindowStore("name", Duration.ofMillis(10L), Duration.ofMillis(10L), true);
    final StoreBuilder<WindowStore<String, String>> builder = new WindowStoreBuilder<>(
        supplier,
        Serdes.String(),
        Serdes.String(),
        new MockTime()
    ).withCachingEnabled();

    builder.build();

    assertFalse(((AbstractStoreBuilder<String, String, WindowStore<String, String>>) builder).enableCaching);
}
/**
 * Reads a JMS message property and returns it only when it is a String;
 * any other type — or any error raised by the provider — yields null.
 * Fatal throwables are re-thrown via propagateIfFatal; the rest are logged.
 */
@Nullable
static String getPropertyIfString(Message message, String name) {
    try {
        Object value = message.getObjectProperty(name);
        return value instanceof String ? (String) value : null;
    } catch (Throwable t) {
        propagateIfFatal(t);
        log(t, "error getting property {0} from message {1}", name, message);
        return null;
    }
}
// A property set as a String must be returned verbatim.
@Test
void getPropertyIfString() throws Exception {
    message.setStringProperty("b3", "1");

    assertThat(MessageProperties.getPropertyIfString(message, "b3"))
        .isEqualTo("1");
}
/**
 * Removes an entry's contribution from the Merkle tree: folds the removed
 * value's hash out of the leaf covering the key's hash, then recomputes the
 * branch hashes up to the root.
 */
@Override
public void updateRemove(Object key, Object removedValue) {
    final int leafOrder = MerkleTreeUtil.getLeafOrderForHash(key.hashCode(), leafLevel);
    final int updatedLeafHash = MerkleTreeUtil.removeHash(getNodeHash(leafOrder), removedValue.hashCode());

    setNodeHash(leafOrder, updatedLeafHash);
    updateBranch(leafOrder);
}
// After removing entry 2, the leaf's hash must equal the fold of only the
// remaining values 1 and 3.
@Test
public void testUpdateRemove() {
    MerkleTree merkleTree = new ArrayMerkleTree(3);
    merkleTree.updateAdd(1, 1);
    merkleTree.updateAdd(2, 2);
    merkleTree.updateAdd(3, 3);

    merkleTree.updateRemove(2, 2);

    int expectedHash = 0;
    expectedHash = MerkleTreeUtil.addHash(expectedHash, 1);
    expectedHash = MerkleTreeUtil.addHash(expectedHash, 3);

    int nodeHash = merkleTree.getNodeHash(5);

    assertEquals(expectedHash, nodeHash);
}
/**
 * Creates an unconfigured {@code Write} transform; callers configure it via
 * its builder-style {@code with*} methods before applying it.
 */
public static <K, V> Write<K, V> write() {
    return new AutoValue_CdapIO_Write.Builder<K, V>().build();
}
// Passing a null locks directory to the Write transform must be rejected.
@Test
public void testWriteObjectCreationFailsIfLockDirIsNull() {
    assertThrows(
        IllegalArgumentException.class,
        () -> CdapIO.<String, String>write().withLocksDirPath(null));
}
/**
 * Replaces successive {@code {}} placeholders in {@code source} with the string
 * value of each parameter, in order (SLF4J-style formatting). Substitution stops
 * as soon as no placeholder remains; surplus parameters are ignored and surplus
 * placeholders are left intact.
 *
 * <p>Fix: the previous implementation used {@code String.replaceFirst}, which
 * treats its second argument as a regex <em>replacement string</em>, so parameter
 * values containing {@code $} or {@code \} were corrupted or caused an
 * {@link IllegalArgumentException}. Index-based splicing substitutes values
 * literally.
 *
 * @param source template possibly containing {@code {}} placeholders
 * @param parameters values substituted for the placeholders; {@code null} renders as "null"
 * @return the formatted string
 */
public static String format(String source, Object... parameters) {
    String current = source;
    for (Object parameter : parameters) {
        int placeholder = current.indexOf("{}");
        if (placeholder < 0) {
            // No placeholder left: remaining parameters are ignored.
            return current;
        }
        // String concatenation applies String.valueOf semantics, matching the
        // old behavior for null and non-String parameters.
        current = current.substring(0, placeholder)
                + parameter
                + current.substring(placeholder + 2);
    }
    return current;
}
// A single parameter fills the single placeholder; the literal tail is untouched.
@Test
public void testFormat1() {
    String fmt = "Some string {} 2 3";
    assertEquals("Some string 1 2 3", format(fmt, 1));
}
/**
 * Returns the cached currency conversion table for an authenticated device.
 * Fails with {@code UNAVAILABLE} when no conversion data has been loaded yet.
 * BigDecimal rates are rendered as strings for the protobuf response.
 */
@Override
public Mono<GetCurrencyConversionsResponse> getCurrencyConversions(final GetCurrencyConversionsRequest request) {
    // Authorization gate: throws if the caller is not an authenticated device.
    AuthenticationUtil.requireAuthenticatedDevice();

    final CurrencyConversionEntityList currencyConversionEntityList = currencyManager
        .getCurrencyConversions()
        .orElseThrow(Status.UNAVAILABLE::asRuntimeException);

    final List<GetCurrencyConversionsResponse.CurrencyConversionEntity> currencyConversionEntities =
        currencyConversionEntityList
            .getCurrencies()
            .stream()
            .map(cce -> GetCurrencyConversionsResponse.CurrencyConversionEntity.newBuilder()
                .setBase(cce.getBase())
                .putAllConversions(transformBigDecimalsToStrings(cce.getConversions()))
                .build())
            .toList();

    return Mono.just(GetCurrencyConversionsResponse.newBuilder()
        .addAllCurrencies(currencyConversionEntities).setTimestamp(currencyConversionEntityList.getTimestamp())
        .build());
}
// The gRPC response must mirror the manager's conversion list: same timestamp,
// same currency count, and BigDecimal rates rendered as strings.
@Test
void testGetCurrencyConversions() {
    final long timestamp = System.currentTimeMillis();

    when(currencyManager.getCurrencyConversions()).thenReturn(Optional.of(
        new CurrencyConversionEntityList(List.of(
            new CurrencyConversionEntity("FOO", Map.of(
                "USD", new BigDecimal("2.35"),
                "EUR", new BigDecimal("1.89")
            )),
            new CurrencyConversionEntity("BAR", Map.of(
                "USD", new BigDecimal("1.50"),
                "EUR", new BigDecimal("0.98")
            ))
        ), timestamp)));

    final GetCurrencyConversionsResponse currencyConversions = authenticatedServiceStub().getCurrencyConversions(
        GetCurrencyConversionsRequest.newBuilder().build());

    assertEquals(timestamp, currencyConversions.getTimestamp());
    assertEquals(2, currencyConversions.getCurrenciesCount());
    assertEquals("FOO", currencyConversions.getCurrencies(0).getBase());
    assertEquals("2.35", currencyConversions.getCurrencies(0).getConversionsMap().get("USD"));
}
/**
 * Returns the Iceberg {@code Catalog} backing the named Spark catalog.
 *
 * @param spark the active Spark session
 * @param catalogName name of a Spark catalog registered with the session
 * @return the underlying Iceberg catalog
 * @throws IllegalArgumentException if the Spark catalog does not expose an Iceberg catalog
 */
public static Catalog loadIcebergCatalog(SparkSession spark, String catalogName) {
    final CatalogPlugin plugin = spark.sessionState().catalogManager().catalog(catalogName);

    // Message is built eagerly, matching checkArgument's eager-argument semantics.
    final String errorMessage = String.format(
        "Cannot load Iceberg catalog from catalog %s because it does not contain an Iceberg Catalog. "
            + "Actual Class: %s",
        catalogName, plugin.getClass().getName());
    Preconditions.checkArgument(plugin instanceof HasIcebergCatalog, errorMessage);

    return ((HasIcebergCatalog) plugin).icebergCatalog();
}
// Registering a SparkCatalog of type "hive" must let loadIcebergCatalog
// surface the wrapped Iceberg catalog (a CachingCatalog here).
@Test
public void testLoadIcebergCatalog() throws Exception {
    spark.conf().set("spark.sql.catalog.test_cat", SparkCatalog.class.getName());
    spark.conf().set("spark.sql.catalog.test_cat.type", "hive");

    Catalog catalog = Spark3Util.loadIcebergCatalog(spark, "test_cat");
    assertThat(catalog)
        .as("Should retrieve underlying catalog class")
        .isInstanceOf(CachingCatalog.class);
}
/**
 * Locks the cluster: validates state and the lock algorithm, then acquires the
 * global cluster lock (3s timeout) and applies the configured lock strategy.
 * If the lock cannot be acquired within the timeout the statement is a no-op.
 */
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public void executeUpdate(final LockClusterStatement sqlStatement, final ContextManager contextManager) {
    checkState(contextManager);
    checkAlgorithm(sqlStatement);
    LockContext lockContext = contextManager.getComputeNodeInstanceContext().getLockContext();
    GlobalLockDefinition lockDefinition = new GlobalLockDefinition(GlobalLockNames.CLUSTER_LOCK.getLockName());
    if (lockContext.tryLock(lockDefinition, 3000L)) {
        try {
            // Re-check under the lock: cluster state may have changed while waiting.
            checkState(contextManager);
            TypedSPILoader.getService(ClusterLockStrategy.class, sqlStatement.getLockStrategy().getName()).lock();
        } finally {
            lockContext.unlock(lockDefinition);
        }
    }
}
// An UNAVAILABLE cluster state must make executeUpdate fail the pre-lock state check.
@Test
void assertExecuteUpdateWithLockedCluster() {
    ContextManager contextManager = mock(ContextManager.class, RETURNS_DEEP_STUBS);
    when(contextManager.getStateContext().getClusterState()).thenReturn(ClusterState.UNAVAILABLE);
    assertThrows(LockedClusterException.class,
        () -> executor.executeUpdate(new LockClusterStatement(new AlgorithmSegment("FOO", new Properties())), contextManager));
}
/**
 * Resolves parameter references inside {@code resolvable}. If the object is
 * itself a {@code ParamScope}, its scope is layered over this resolver first,
 * and that combined resolver is used for all leaf and node resolution.
 */
public <T> void resolve(T resolvable) {
    ParamResolver effectiveResolver = this;
    if (ParamScope.class.isAssignableFrom(resolvable.getClass())) {
        effectiveResolver = ((ParamScope) resolvable).applyOver(effectiveResolver);
    }

    resolveStringLeaves(resolvable, effectiveResolver);
    resolveNonStringLeaves(resolvable, effectiveResolver);
    resolveNodes(resolvable, effectiveResolver);
}
// Top-level attributes using #{...} params are substituted, while ${...}
// label-template tokens are left for later expansion.
@Test
public void shouldResolveTopLevelAttribute() {
    PipelineConfig pipelineConfig = PipelineConfigMother.createPipelineConfig("cruise", "dev", "ant");
    pipelineConfig.setLabelTemplate("2.1-${COUNT}");
    setField(pipelineConfig, PipelineConfig.LOCK_BEHAVIOR, "#{partial}Finished");

    new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "pavan"),
        param("partial", "unlockWhen"), param("COUNT", "quux"))), fieldCache).resolve(pipelineConfig);

    // ${COUNT} is not a param reference, so the label template is untouched.
    assertThat(pipelineConfig.getLabelTemplate(), is("2.1-${COUNT}"));
    assertThat(pipelineConfig.explicitLock(), is(true));
}
/** Converts the given annotations into a DataMap representation (including member values). */
public static DataMap getAnnotationsMap(Annotation[] as) {
    return annotationsToData(as, true);
}
// An annotation with all ten scalar members overridden must map to a DataMap
// whose values use the expected wire types (e.g. byte -> ByteString, enum/class -> String).
@Test(description = "Non-empty annotation, scalar members, overridden values: data map with annotation + members")
public void succeedsOnSupportedScalarMembersWithOverriddenValues() {
    @SupportedScalarMembers(
        annotationMember = @Key(name = "id", type = String.class),
        booleanMember = !SupportedScalarMembers.DEFAULT_BOOLEAN_MEMBER,
        byteMember = SupportedScalarMembers.DEFAULT_BYTE_MEMBER + 1,
        classMember = Test.class,
        doubleMember = SupportedScalarMembers.DEFAULT_DOUBLE_MEMBER + 0.5f,
        enumMember = TestEnum.GAMMA,
        floatMember = SupportedScalarMembers.DEFAULT_FLOAT_MEMBER - 0.5f,
        intMember = SupportedScalarMembers.DEFAULT_INT_MEMBER - 1,
        longMember = SupportedScalarMembers.DEFAULT_LONG_MEMBER + 1,
        stringMember = SupportedScalarMembers.DEFAULT_STRING_MEMBER + "s"
    )
    class LocalClass { }

    final Annotation[] annotations = LocalClass.class.getAnnotations();

    final DataMap actual = ResourceModelAnnotation.getAnnotationsMap(annotations);
    Assert.assertNotNull(actual);
    Assert.assertTrue(actual.get(SUPPORTED_SCALAR_MEMBERS) instanceof DataMap);
    final DataMap dataMap = ((DataMap) actual.get(SUPPORTED_SCALAR_MEMBERS));
    Assert.assertEquals(dataMap.size(), 10);
    Assert.assertEquals(dataMap.get("annotationMember").getClass(), DataMap.class); // from AnnotationEntry#data
    Assert.assertEquals(dataMap.get("booleanMember").getClass(), Boolean.class);
    Assert.assertEquals(dataMap.get("byteMember").getClass(), ByteString.class); // byte string
    Assert.assertEquals(dataMap.get("classMember").getClass(), String.class); // canonical class name
    Assert.assertEquals(dataMap.get("doubleMember").getClass(), Double.class);
    Assert.assertEquals(dataMap.get("enumMember").getClass(), String.class); // enum name
    Assert.assertEquals(dataMap.get("floatMember").getClass(), Float.class);
    Assert.assertEquals(dataMap.get("intMember").getClass(), Integer.class);
    Assert.assertEquals(dataMap.get("longMember").getClass(), Long.class);
    Assert.assertEquals(dataMap.get("stringMember").getClass(), String.class);
}
/**
 * Starts leader election for this component by registering the (non-null)
 * contender with the parent service under this election's component id.
 */
@Override
public void startLeaderElection(LeaderContender contender) throws Exception {
    Preconditions.checkNotNull(contender);
    parentService.register(componentId, contender);
}
// startLeaderElection must forward the component id and the exact contender
// instance to the parent service's register call.
@Test
void testContenderRegistration() throws Exception {
    final AtomicReference<String> componentIdRef = new AtomicReference<>();
    final AtomicReference<LeaderContender> contenderRef = new AtomicReference<>();
    final DefaultLeaderElection.ParentService parentService =
        TestingAbstractLeaderElectionService.newBuilder()
            .setRegisterConsumer(
                (actualComponentId, actualContender) -> {
                    componentIdRef.set(actualComponentId);
                    contenderRef.set(actualContender);
                })
            .build();
    try (final DefaultLeaderElection testInstance =
            new DefaultLeaderElection(parentService, DEFAULT_TEST_COMPONENT_ID)) {
        final LeaderContender contender = TestingGenericLeaderContender.newBuilder().build();

        testInstance.startLeaderElection(contender);

        assertThat(componentIdRef).hasValue(DEFAULT_TEST_COMPONENT_ID);
        assertThat(contenderRef.get()).isSameAs(contender);
    }
}
/**
 * Moves a note blob by copy-then-remove: copies the source blob to the
 * destination path, then removes the original. Any failure during the copy
 * (including a missing source blob; storage.get presumably returns null for a
 * nonexistent blob, surfacing as an NPE here — confirm GCS client semantics)
 * is wrapped in an IOException.
 */
@Override
public void move(String noteId, String notePath, String newNotePath,
    AuthenticationInfo subject) throws IOException {
    Preconditions.checkArgument(StringUtils.isNotEmpty(noteId));
    BlobId sourceBlobId = makeBlobId(noteId, notePath);
    BlobId destinationBlobId = makeBlobId(noteId, newNotePath);
    try {
        storage.get(sourceBlobId).copyTo(destinationBlobId);
    } catch (Exception se) {
        throw new IOException("Could not copy from " + sourceBlobId.toString()
            + " to " + destinationBlobId.toString() + ": " + se.getMessage(), se);
    }
    // Copy succeeded: delete the original to complete the move.
    remove(noteId, notePath, subject);
}
// Moving a folder that does not exist in the bucket must fail with IOException.
@Test
void testMoveFolder_nonexistent() throws Exception {
    zConf.setProperty(ConfVars.ZEPPELIN_NOTEBOOK_GCS_STORAGE_DIR.getVarName(), DEFAULT_URL);
    this.notebookRepo = new GCSNotebookRepo(zConf, noteParser, storage);
    assertThrows(IOException.class, () -> {
        notebookRepo.move("/name", "/name_new", AUTH_INFO);
    });
}
/**
 * Wakes up the fetcher; when {@code taskOnly} is true only the running task
 * is interrupted.
 */
void wakeUp(boolean taskOnly) {
    // Synchronize to make sure the wake up only works for the current invocation of runOnce().
    lock.lock();
    try {
        wakeUpUnsafe(taskOnly);
    } finally {
        lock.unlock();
    }
}
// Repeatedly wakes up a blocking split fetcher while draining its queue and
// verifies every record is still delivered exactly once despite the wake-ups.
@Test
public void testWakeup() throws InterruptedException {
    final int numSplits = 3;
    final int numRecordsPerSplit = 10_000;
    final int wakeupRecordsInterval = 10;
    final int numTotalRecords = numRecordsPerSplit * numSplits;

    // Queue capacity 1 keeps the fetcher frequently blocked on put.
    FutureCompletingBlockingQueue<RecordsWithSplitIds<int[]>> elementQueue =
        new FutureCompletingBlockingQueue<>(1);
    SplitFetcher<int[], MockSourceSplit> fetcher =
        new SplitFetcher<>(
            0,
            elementQueue,
            MockSplitReader.newBuilder()
                .setNumRecordsPerSplitPerFetch(2)
                .setBlockingFetch(true)
                .build(),
            ExceptionUtils::rethrow,
            () -> {},
            (ignore) -> {},
            false);

    // Prepare the splits.
    List<MockSourceSplit> splits = new ArrayList<>();
    for (int i = 0; i < numSplits; i++) {
        splits.add(new MockSourceSplit(i, 0, numRecordsPerSplit));
        int base = i * numRecordsPerSplit;
        for (int j = base; j < base + numRecordsPerSplit; j++) {
            splits.get(splits.size() - 1).addRecord(j);
        }
    }

    // Add splits to the fetcher.
    fetcher.addSplits(splits);

    // A thread drives the fetcher.
    Thread fetcherThread = new Thread(fetcher, "FetcherThread");

    SortedSet<Integer> recordsRead = Collections.synchronizedSortedSet(new TreeSet<>());

    // A thread waking up the split fetcher frequently.
    AtomicInteger wakeupTimes = new AtomicInteger(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    Thread wakeUpCaller =
        new Thread("Wakeup Caller") {
            @Override
            public void run() {
                int lastWakeup = 0;
                while (recordsRead.size() < numTotalRecords && !stop.get()) {
                    int numRecordsRead = recordsRead.size();
                    if (numRecordsRead >= lastWakeup + wakeupRecordsInterval) {
                        fetcher.wakeUp(false);
                        wakeupTimes.incrementAndGet();
                        lastWakeup = numRecordsRead;
                    }
                }
            }
        };

    try {
        fetcherThread.start();
        wakeUpCaller.start();

        // Drain all batches; adding to the set asserts no record is delivered twice.
        while (recordsRead.size() < numSplits * numRecordsPerSplit) {
            final RecordsWithSplitIds<int[]> nextBatch = elementQueue.take();
            while (nextBatch.nextSplit() != null) {
                int[] arr;
                while ((arr = nextBatch.nextRecordFromSplit()) != null) {
                    assertThat(recordsRead.add(arr[0])).isTrue();
                }
            }
        }

        assertThat(recordsRead).hasSize(numTotalRecords);
        assertThat(recordsRead.first()).isEqualTo(0);
        assertThat(recordsRead.last()).isEqualTo(numTotalRecords - 1);
        assertThat(wakeupTimes.get()).isGreaterThan(0);
    } finally {
        stop.set(true);
        fetcher.shutdown();
        fetcherThread.join();
        wakeUpCaller.join();
    }
}
/**
 * Sends any outstanding acknowledgements as part of consumer close: for each
 * share-session node, merges caller-supplied acknowledgements with those
 * piggybacked from earlier fetches and enqueues a blocking CLOSE-type
 * acknowledge request. Marks this manager as closing.
 *
 * @param acknowledgementsMap caller-supplied acknowledgements keyed by partition
 * @param deadlineMs absolute deadline for the close acknowledgements
 * @return the shared close future, completed (or failed) once close handling finishes
 */
public CompletableFuture<Void> acknowledgeOnClose(final Map<TopicIdPartition, Acknowledgements> acknowledgementsMap,
                                                  final long deadlineMs) {
    final Cluster cluster = metadata.fetch();
    final AtomicInteger resultCount = new AtomicInteger();
    final ResultHandler resultHandler = new ResultHandler(resultCount, Optional.empty());

    closing = true;

    sessionHandlers.forEach((nodeId, sessionHandler) -> {
        Node node = cluster.nodeById(nodeId);
        if (node != null) {
            Map<TopicIdPartition, Acknowledgements> acknowledgementsMapForNode = new HashMap<>();
            for (TopicIdPartition tip : sessionHandler.sessionPartitions()) {
                Acknowledgements acknowledgements = acknowledgementsMap.getOrDefault(tip, Acknowledgements.empty());

                // Fold in acknowledgements accumulated from fetches, consuming them via remove().
                if (fetchAcknowledgementsMap.get(tip) != null) {
                    acknowledgements.merge(fetchAcknowledgementsMap.remove(tip));
                }

                // Null check is defensive: getOrDefault with a non-null default presumably
                // cannot yield null here.
                if (acknowledgements != null && !acknowledgements.isEmpty()) {
                    acknowledgementsMapForNode.put(tip, acknowledgements);

                    metricsManager.recordAcknowledgementSent(acknowledgements.size());
                    log.debug("Added closing acknowledge request for partition {} to node {}", tip.topicPartition(), node.id());
                    resultCount.incrementAndGet();
                }
            }

            acknowledgeRequestStates.putIfAbsent(nodeId, new Pair<>(null, null));

            // Ensure there is no commitSync()/close() request already present as they are blocking calls
            // and only one request can be active at a time.
            if (acknowledgeRequestStates.get(nodeId).getSyncRequest() != null && !acknowledgeRequestStates.get(nodeId).getSyncRequest().isEmpty()) {
                log.error("Attempt to call close() when there is an existing sync request for node {}-{}", node.id(), acknowledgeRequestStates.get(nodeId).getSyncRequest());
                closeFuture.completeExceptionally(
                    new IllegalStateException("Attempt to call close() when there is an existing sync request for node : " + node.id()));
            } else {
                // There can only be one commitSync()/close() happening at a time. So per node, there will be one acknowledge request state.
                acknowledgeRequestStates.get(nodeId).setSyncRequest(new AcknowledgeRequestState(logContext,
                    ShareConsumeRequestManager.class.getSimpleName() + ":3",
                    deadlineMs,
                    retryBackoffMs,
                    retryBackoffMaxMs,
                    sessionHandler,
                    nodeId,
                    acknowledgementsMapForNode,
                    this::handleShareAcknowledgeCloseSuccess,
                    this::handleShareAcknowledgeCloseFailure,
                    resultHandler,
                    AcknowledgeRequestType.CLOSE
                ));
            }
        }
    });

    resultHandler.completeIfEmpty();
    return closeFuture;
}
// Acknowledgements piggybacked on fetch plus those passed to acknowledgeOnClose
// must all go out in the single final ShareAcknowledge request.
@Test
public void testAcknowledgeOnClose() {
    buildRequestManager();
    assignFromSubscribed(Collections.singleton(tp0));

    // normal fetch
    assertEquals(1, sendFetches());
    assertFalse(shareConsumeRequestManager.hasCompletedFetches());

    client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(shareConsumeRequestManager.hasCompletedFetches());

    Acknowledgements acknowledgements = Acknowledgements.empty();
    acknowledgements.add(1L, AcknowledgeType.ACCEPT);

    // Piggyback acknowledgements
    shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements));

    // Remaining acknowledgements sent with close().
    Acknowledgements acknowledgements2 = Acknowledgements.empty();
    acknowledgements2.add(2L, AcknowledgeType.ACCEPT);
    acknowledgements2.add(3L, AcknowledgeType.REJECT);

    shareConsumeRequestManager.acknowledgeOnClose(Collections.singletonMap(tip0, acknowledgements2),
        calculateDeadlineMs(time.timer(100)));

    assertEquals(1, shareConsumeRequestManager.sendAcknowledgements());

    client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.NONE));
    networkClientDelegate.poll(time.timer(0));

    assertEquals(1, completedAcknowledgements.size());

    Acknowledgements mergedAcks = acknowledgements.merge(acknowledgements2);
    mergedAcks.setAcknowledgeErrorCode(Errors.NONE);
    // Verifying that all 3 offsets were acknowledged as part of the final ShareAcknowledge on close.
    assertEquals(mergedAcks.getAcknowledgementsTypeMap(), completedAcknowledgements.get(0).get(tip0).getAcknowledgementsTypeMap());
    assertTrue(shareConsumeRequestManager.hasCompletedFetches());
}
/**
 * Returns the duplications recorded for the given component; caller must
 * close the iterator. Lazily initializes the underlying report reader first.
 */
@Override
public CloseableIterator<ScannerReport.Duplication> readComponentDuplications(int componentRef) {
    ensureInitialized();
    return delegate.readComponentDuplications(componentRef);
}
// A written duplication must be read back as the sole element of the iterator.
@Test
public void verify_readComponentDuplications_returns_Issues() {
    writer.writeComponentDuplications(COMPONENT_REF, of(DUPLICATION));

    try (CloseableIterator<ScannerReport.Duplication> res = underTest.readComponentDuplications(COMPONENT_REF)) {
        assertThat(res.next()).isEqualTo(DUPLICATION);
        assertThat(res.hasNext()).isFalse();
    }
}
/**
 * Verifies an ES256-signed JWS against the given JWK set.
 * Returns false when the key set is empty or the 'kid' header matches no key
 * (fail closed); throws for a missing key set, a non-ES256 algorithm, or a
 * verifier construction/verification error.
 */
@SuppressWarnings("java:S2583")
public static boolean verify(@NonNull JWKSet jwks, @NonNull JWSObject jws) {

    // Defensive runtime check despite @NonNull — hence the S2583 suppression.
    if (jwks == null) {
        throw new IllegalArgumentException("no JWKS provided to verify JWS");
    }

    if (jwks.getKeys() == null || jwks.getKeys().isEmpty()) {
        return false;
    }

    var header = jws.getHeader();
    if (!JWSAlgorithm.ES256.equals(header.getAlgorithm())) {
        throw new UnsupportedOperationException(
            "only supports ES256, found: " + header.getAlgorithm());
    }

    // Key selection is by the JWS 'kid' header; unknown key ids fail closed.
    var key = jwks.getKeyByKeyId(header.getKeyID());
    if (key == null) {
        return false;
    }

    try {
        var processor = new DefaultJWSVerifierFactory();
        var verifier = processor.createJWSVerifier(jws.getHeader(), key.toECKey().toPublicKey());
        return jws.verify(verifier);
    } catch (JOSEException e) {
        throw FederationExceptions.badSignature(e);
    }
}
// A JWS whose signature bytes have been corrupted must fail verification (return false).
@Test
void verifyGarbageSignature() throws ParseException {
    var jws = toJws(ECKEY, "test").serialize();
    jws = garbageSignature(jws);
    var in = JWSObject.parse(jws);

    // when & then
    assertFalse(JwsVerifier.verify(JWKS, in));
}
/**
 * Performs the job on this worker thread: marks the thread occupied, loads the
 * job's MDC context, runs the job, and classifies any failure. The finally
 * block always releases the thread and clears the MDC.
 */
@Override
public void run() {
    try {
        backgroundJobServer.getJobSteward().notifyThreadOccupied();
        MDCMapper.loadMDCContextFromJob(job);
        performJob();
    } catch (Exception e) {
        if (isJobDeletedWhileProcessing(e)) {
            // nothing to do anymore as Job is deleted
            return;
        } else if (isJobServerStopped(e)) {
            updateJobStateToFailedAndRunJobFilters("Job processing was stopped as background job server has stopped", e);
            // Restore the interrupt flag for the thread pool.
            Thread.currentThread().interrupt();
        } else if (isJobNotFoundException(e)) {
            updateJobStateToFailedAndRunJobFilters("Job method not found", e);
        } else {
            updateJobStateToFailedAndRunJobFilters("An exception occurred during the performance of the job", e);
        }
    } finally {
        backgroundJobServer.getJobSteward().notifyThreadIdle();
        MDC.clear();
    }
}
// When a job is deleted during processing, the resulting illegal DELETED->FAILED
// transition must be swallowed and logged at INFO, never as an error.
@Test
void onFailureAfterDeleteTheIllegalJobStateChangeIsCatchedAndLogged() throws Exception {
    Job job = anEnqueuedJob().build();
    mockBackgroundJobRunner(job, jobFromStorage -> {
        jobFromStorage.delete("for testing");
        jobFromStorage.delete("to throw exception that will bring it to failed state");
    });
    BackgroundJobPerformer backgroundJobPerformer = new BackgroundJobPerformer(backgroundJobServer, job);
    final ListAppender<ILoggingEvent> logger = LoggerAssert.initFor(backgroundJobPerformer);

    backgroundJobPerformer.run();

    assertThat(logAllStateChangesFilter.getStateChanges(job)).containsExactly("ENQUEUED->PROCESSING");
    assertThat(logAllStateChangesFilter.onProcessingIsCalled(job)).isTrue();
    assertThat(logAllStateChangesFilter.onProcessingSucceededIsCalled(job)).isFalse();
    assertThat(logger)
        .hasNoErrorLogMessages()
        .hasInfoMessage("Job processing failed but it was already deleted - ignoring illegal state change from DELETED to FAILED");
}
/**
 * Extracts the entry named by {@code retrieved} (a jar: URL) into a MemoryFile.
 * Opens a zip FileSystem on the enclosing jar, resolves the entry path, and
 * wraps it. NOTE(review): the MemoryFile is constructed after the FileSystem
 * is closed — presumably MemoryFile does not read through the Path lazily;
 * confirm its constructor semantics.
 */
static File getFileFromJar(URL retrieved) throws URISyntaxException, IOException {
    logger.debug("getFileFromJar {}", retrieved);
    String fileName = retrieved.getFile();
    if (fileName.contains("/")) {
        // Keep the leading '/'; zip-filesystem paths accept it as absolute.
        fileName = fileName.substring(fileName.lastIndexOf('/'));
    }
    // Truncate the URL to "...jar!/" so it addresses the jar itself.
    String jarPath = retrieved.toString();
    jarPath = jarPath.substring(0, jarPath.lastIndexOf("!/") + 2);
    URI uri = new URI(jarPath);
    Map<String, ?> env = new HashMap<>();
    Path filePath;
    try (FileSystem fs = FileSystems.newFileSystem(uri, env)) {
        filePath = fs.getPath(fileName);
    }
    File toReturn = new MemoryFile(filePath);
    logger.debug(TO_RETURN_TEMPLATE, toReturn);
    logger.debug(TO_RETURN_GETABSOLUTEPATH_TEMPLATE, toReturn.getAbsolutePath());
    return toReturn;
}
// A jar: URL must resolve to a readable MemoryFile instance.
@Test
void getFileFromJar() throws URISyntaxException, IOException {
    URL jarUrl = getJarUrl();
    assertThat(jarUrl).isNotNull();
    File retrieved = MemoryFileUtils.getFileFromJar(jarUrl);
    assertThat(retrieved).isNotNull();
    assertThat(retrieved).isInstanceOf(MemoryFile.class);
    assertThat(retrieved).canRead();
}
/**
 * Builds a Camel Exchange from a Debezium SourceRecord: the record's payload
 * becomes the message body and its envelope fields (operation, before-state,
 * timestamp, source metadata, DDL) become message headers. When a consumer is
 * supplied its exchange factory is used; otherwise the endpoint's.
 */
public Exchange createDbzExchange(DebeziumConsumer consumer, final SourceRecord sourceRecord) {
    final Exchange exchange;
    if (consumer != null) {
        exchange = consumer.createExchange(false);
    } else {
        exchange = super.createExchange();
    }
    final Message message = exchange.getIn();

    final Schema valueSchema = sourceRecord.valueSchema();
    final Object value = sourceRecord.value();

    // extract values from SourceRecord
    final Map<String, Object> sourceMetadata = extractSourceMetadataValueFromValueStruct(valueSchema, value);
    final Object operation = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.OPERATION);
    final Object before = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.BEFORE);
    final Object body = extractBodyValueFromValueStruct(valueSchema, value);
    final Object timestamp = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.TIMESTAMP);
    final Object ddl = extractValueFromValueStruct(valueSchema, value, HistoryRecord.Fields.DDL_STATEMENTS);

    // set message headers
    message.setHeader(DebeziumConstants.HEADER_IDENTIFIER, sourceRecord.topic());
    message.setHeader(DebeziumConstants.HEADER_KEY, sourceRecord.key());
    message.setHeader(DebeziumConstants.HEADER_SOURCE_METADATA, sourceMetadata);
    message.setHeader(DebeziumConstants.HEADER_OPERATION, operation);
    message.setHeader(DebeziumConstants.HEADER_BEFORE, before);
    message.setHeader(DebeziumConstants.HEADER_TIMESTAMP, timestamp);
    message.setHeader(DebeziumConstants.HEADER_DDL_SQL, ddl);
    message.setHeader(Exchange.MESSAGE_TIMESTAMP, timestamp);

    message.setBody(body);

    return exchange;
}
// A non-Struct source record must still produce an exchange: topic header set,
// envelope headers absent, and the raw value used as the body.
@Test
void testIfCreatesExchangeFromSourceRecordOtherThanStruct() {
    final SourceRecord sourceRecord = createStringRecord();

    final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord);
    final Message inMessage = exchange.getIn();

    assertNotNull(exchange);

    // assert headers
    assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER));
    assertNull(inMessage.getHeader(DebeziumConstants.HEADER_OPERATION));

    // assert value
    final String value = (String) inMessage.getBody();
    assertEquals(sourceRecord.value(), value);
}
/**
 * Removes the outbound handler half from the pipeline context.
 * Fails via checkAdded() if this combined handler was never added.
 */
public final void removeOutboundHandler() {
    checkAdded();
    outboundCtx.remove();
}
// Removing the outbound handler before the combined handler was added to a
// pipeline must fail with IllegalStateException.
@Test
public void testOutboundRemoveBeforeAdded() {
    final CombinedChannelDuplexHandler<ChannelInboundHandler, ChannelOutboundHandler> handler =
        new CombinedChannelDuplexHandler<ChannelInboundHandler, ChannelOutboundHandler>(
            new ChannelInboundHandlerAdapter(), new ChannelOutboundHandlerAdapter());
    assertThrows(IllegalStateException.class, new Executable() {
        @Override
        public void execute() {
            handler.removeOutboundHandler();
        }
    });
}
/**
 * Checks whether data written with {@code writer} can be decoded with
 * {@code reader}, returning the result paired with both schemas and a
 * human-readable message describing the (in)compatibility.
 */
public static SchemaPairCompatibility checkReaderWriterCompatibility(final Schema reader, final Schema writer) {
    final SchemaCompatibilityResult compatibility =
        new ReaderWriterCompatibilityChecker().getCompatibility(reader, writer);

    final String message;
    switch (compatibility.getCompatibility()) {
        case INCOMPATIBLE: {
            message = String.format(
                "Data encoded using writer schema:%n%s%n"
                    + "will or may fail to decode using reader schema:%n%s%n",
                writer.toString(true), reader.toString(true));
            break;
        }
        case COMPATIBLE: {
            message = READER_WRITER_COMPATIBLE_MESSAGE;
            break;
        }
        default:
            // Defensive: the enum should only have the two values handled above.
            throw new AvroRuntimeException("Unknown compatibility: " + compatibility);
    }

    return new SchemaPairCompatibility(compatibility, reader, writer, message);
}
// A reader union missing one of the writer union's branches is incompatible,
// and the incompatibility is located at the missing branch ("/2").
@Test
void unionReaderWriterSubsetIncompatibility() {
    final Schema unionWriter = Schema.createUnion(list(INT_SCHEMA, STRING_SCHEMA, LONG_SCHEMA));
    final Schema unionReader = Schema.createUnion(list(INT_SCHEMA, STRING_SCHEMA));

    final SchemaPairCompatibility result = checkReaderWriterCompatibility(unionReader, unionWriter);
    assertEquals(SchemaCompatibilityType.INCOMPATIBLE, result.getType());
    assertEquals("/2", result.getResult().getIncompatibilities().get(0).getLocation());
}
// Top-level CLI command entry point: intentionally a no-op — the actual work
// is performed by subcommands.
@Override
public void run() {
    // top-level command, do nothing
}
// Submitting a jar that bundles Hazelcast classes must log a warning advising
// a 'provided'-scope dependency. The warning is captured via a mocked Log4j
// appender attached to MainClassNameFinder's logger.
@Test
public void test_submit_job_with_hazelcast_classes() throws IOException {
    Logger logger = (Logger) LogManager.getLogger(MainClassNameFinder.class);
    Appender appender = mock(Appender.class);
    when(appender.getName()).thenReturn("Mock Appender");
    when(appender.isStarted()).thenReturn(true);
    logger.addAppender(appender);

    // Redirect std streams so the CLI run stays quiet; restored in finally.
    PrintStream oldErr = System.err;
    PrintStream oldOut = System.out;
    System.setErr(new PrintStream(err));
    System.setOut(new PrintStream(out));

    // Copy the fixture jar to a temp file so it can be submitted by path.
    Path testJarFile = Files.createTempFile("testjob-with-hazelcast-codebase-", ".jar");
    try (InputStream inputStream = HazelcastCommandLineTest.class.getResourceAsStream("testjob-with-hazelcast-codebase.jar")) {
        assert inputStream != null;
        Files.copy(inputStream, testJarFile, StandardCopyOption.REPLACE_EXISTING);
    }
    try {
        run("submit", testJarFile.toString());

        ArgumentCaptor<LogEvent> logEventCaptor = ArgumentCaptor.forClass(LogEvent.class);
        verify(appender).append(logEventCaptor.capture());
        LogEvent logEvent = logEventCaptor.getValue();
        String actual = logEvent.getMessage().toString();

        String pathToClass = Paths.get("com", "hazelcast", "jet", "testjob", "HazelcastBootstrap.class").toString();
        assertThat(actual).contains("WARNING: Hazelcast code detected in the jar: "
            + pathToClass
            + ". Hazelcast dependency should be set with the 'provided' scope or equivalent.");
    } finally {
        // Always restore streams and delete the temp jar, even on failure.
        System.setErr(oldErr);
        System.setOut(oldOut);
        IOUtil.deleteQuietly(testJarFile.toFile());
    }
}
// Static factory: starts a CoGroup join configured with the given key clause.
public static Impl join(By clause) {
    return new Impl(new JoinArguments(clause));
}
// Full outer join across three PCollections keyed on (user, country), with
// optional participation on every side: expects the cross-product of matching
// rows, plus one row per unmatched input with nulls in the other slots.
@Test
@Category(NeedsRunner.class)
public void testFullOuterJoin() {
    List<Row> pc1Rows = Lists.newArrayList(
        Row.withSchema(CG_SCHEMA_1).addValues("user1", 1, "us").build(),
        Row.withSchema(CG_SCHEMA_1).addValues("user1", 2, "us").build(),
        Row.withSchema(CG_SCHEMA_1).addValues("user1", 3, "il").build(),
        Row.withSchema(CG_SCHEMA_1).addValues("user1", 4, "il").build(),
        Row.withSchema(CG_SCHEMA_1).addValues("user2", 5, "fr").build(),
        Row.withSchema(CG_SCHEMA_1).addValues("user2", 6, "fr").build(),
        Row.withSchema(CG_SCHEMA_1).addValues("user2", 7, "ar").build(),
        Row.withSchema(CG_SCHEMA_1).addValues("user2", 8, "ar").build(),
        // Unmatched in pc2/pc3 — should appear with nulls in the other slots.
        Row.withSchema(CG_SCHEMA_1).addValues("user3", 7, "ar").build());
    List<Row> pc2Rows = Lists.newArrayList(
        Row.withSchema(CG_SCHEMA_2).addValues("user1", 9, "us").build(),
        Row.withSchema(CG_SCHEMA_2).addValues("user1", 10, "us").build(),
        Row.withSchema(CG_SCHEMA_2).addValues("user1", 11, "il").build(),
        Row.withSchema(CG_SCHEMA_2).addValues("user1", 12, "il").build(),
        Row.withSchema(CG_SCHEMA_2).addValues("user2", 13, "fr").build(),
        Row.withSchema(CG_SCHEMA_2).addValues("user2", 14, "fr").build(),
        Row.withSchema(CG_SCHEMA_2).addValues("user2", 15, "ar").build(),
        Row.withSchema(CG_SCHEMA_2).addValues("user2", 16, "ar").build(),
        // Unmatched in pc1/pc3.
        Row.withSchema(CG_SCHEMA_2).addValues("user2", 16, "es").build());
    List<Row> pc3Rows = Lists.newArrayList(
        Row.withSchema(CG_SCHEMA_3).addValues("user1", 17, "us").build(),
        Row.withSchema(CG_SCHEMA_3).addValues("user1", 18, "us").build(),
        Row.withSchema(CG_SCHEMA_3).addValues("user1", 19, "il").build(),
        Row.withSchema(CG_SCHEMA_3).addValues("user1", 20, "il").build(),
        Row.withSchema(CG_SCHEMA_3).addValues("user2", 21, "fr").build(),
        Row.withSchema(CG_SCHEMA_3).addValues("user2", 22, "fr").build(),
        Row.withSchema(CG_SCHEMA_3).addValues("user2", 23, "ar").build(),
        Row.withSchema(CG_SCHEMA_3).addValues("user2", 24, "ar").build(),
        // Unmatched in pc1/pc2.
        Row.withSchema(CG_SCHEMA_3).addValues("user27", 24, "se").build());

    PCollection<Row> pc1 = pipeline.apply("Create1", Create.of(pc1Rows)).setRowSchema(CG_SCHEMA_1);
    PCollection<Row> pc2 = pipeline.apply("Create2", Create.of(pc2Rows)).setRowSchema(CG_SCHEMA_2);
    PCollection<Row> pc3 = pipeline.apply("Create3", Create.of(pc3Rows)).setRowSchema(CG_SCHEMA_3);

    // Full outer join, so any field might be null.
    Schema expectedSchema = Schema.builder()
        .addNullableField("pc1", FieldType.row(CG_SCHEMA_1))
        .addNullableField("pc2", FieldType.row(CG_SCHEMA_2))
        .addNullableField("pc3", FieldType.row(CG_SCHEMA_3))
        .build();

    PCollection<Row> joined = PCollectionTuple.of("pc1", pc1, "pc2", pc2, "pc3", pc3)
        .apply(
            "CoGroup",
            CoGroup.join("pc1", By.fieldNames("user", "country").withOptionalParticipation())
                .join("pc2", By.fieldNames("user2", "country2").withOptionalParticipation())
                .join("pc3", By.fieldNames("user3", "country3").withOptionalParticipation())
                .crossProductJoin());
    assertEquals(expectedSchema, joined.getSchema());

    List<Row> expectedJoinedRows = JoinTestUtils.innerJoin(
        pc1Rows,
        pc2Rows,
        pc3Rows,
        new String[] {"user", "country"},
        new String[] {"user2", "country2"},
        new String[] {"user3", "country3"},
        expectedSchema);

    // Manually add the outer-join rows to the list of expected results.
    expectedJoinedRows.add(
        Row.withSchema(expectedSchema)
            .addValues(Row.withSchema(CG_SCHEMA_1).addValues("user3", 7, "ar").build(), null, null)
            .build());
    expectedJoinedRows.add(
        Row.withSchema(expectedSchema)
            .addValues(null, Row.withSchema(CG_SCHEMA_2).addValues("user2", 16, "es").build(), null)
            .build());
    expectedJoinedRows.add(
        Row.withSchema(expectedSchema)
            .addValues(
                null, null, Row.withSchema(CG_SCHEMA_3).addValues("user27", 24, "se").build())
            .build());

    PAssert.that(joined).containsInAnyOrder(expectedJoinedRows);
    pipeline.run();
}
@SuppressWarnings("unchecked") public static Class<? extends UuidGenerator> parseUuidGenerator(String cucumberUuidGenerator) { Class<?> uuidGeneratorClass; try { uuidGeneratorClass = Class.forName(cucumberUuidGenerator); } catch (ClassNotFoundException e) { throw new IllegalArgumentException( String.format("Could not load UUID generator class for '%s'", cucumberUuidGenerator), e); } if (!UuidGenerator.class.isAssignableFrom(uuidGeneratorClass)) { throw new IllegalArgumentException(String.format("UUID generator class '%s' was not a subclass of '%s'", uuidGeneratorClass, UuidGenerator.class)); } return (Class<? extends UuidGenerator>) uuidGeneratorClass; }
// Loading a non-existent class name must fail with a descriptive
// IllegalArgumentException.
@Test
void parseUuidGenerator_not_a_class() {
    // When
    IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
        () -> UuidGeneratorParser.parseUuidGenerator("java.lang.NonExistingClassName"));

    // Then
    assertThat(exception.getMessage(), Matchers.containsString("Could not load UUID generator class"));
}
// Authorizes a request by visiting the PolicyRules of the user's roles and
// their dependencies. Userspace-scoped requests from a non-owner short-circuit
// to a single denial visit. Visiting stops early once the visitor signals a
// decision (stopVisiting flag + takeUntil); rule-visiting errors are routed
// into the visitor and swallowed so the resolved visitor is still returned.
@Override
public Mono<AuthorizingVisitor> visitRules(Authentication authentication, RequestInfo requestInfo) {
    var roleNames = AuthorityUtils.authoritiesToRoles(authentication.getAuthorities());
    var record = new AttributesRecord(authentication, requestInfo);
    var visitor = new AuthorizingVisitor(record);

    // If the request is an userspace scoped request,
    // then we should check whether the user is the owner of the userspace.
    if (StringUtils.isNotBlank(requestInfo.getUserspace())) {
        if (!authentication.getName().equals(requestInfo.getUserspace())) {
            // Not the owner: record a null visit and return immediately.
            return Mono.fromSupplier(() -> {
                visitor.visit(null, null, null);
                return visitor;
            });
        }
    }

    var stopVisiting = new AtomicBoolean(false);
    return roleService.listDependenciesFlux(roleNames)
        .filter(role -> !CollectionUtils.isEmpty(role.getRules()))
        .doOnNext(role -> {
            if (stopVisiting.get()) {
                return;
            }
            String roleName = role.getMetadata().getName();
            var rules = role.getRules();
            var source = roleBindingDescriber(roleName, authentication.getName());
            for (var rule : rules) {
                if (!visitor.visit(source, rule, null)) {
                    // Visitor asked to stop — no need to examine further roles.
                    stopVisiting.set(true);
                    return;
                }
            }
        })
        .takeUntil(item -> stopVisiting.get())
        .onErrorResume(t -> visitor.visit(null, null, t), t -> {
            log.error("Error occurred when visiting rules", t);
            //Do nothing here
            return Mono.empty();
        })
        .then(Mono.just(visitor));
}
// Exercises visitRules against a table of request/expectation cases for a user
// holding only the "ruleReadPost" role; every case must resolve to the
// expected allow/deny result and role dependencies are looked up once per case.
@Test
void visitRules() {
    when(roleService.listDependenciesFlux(Set.of("ruleReadPost")))
        .thenReturn(Flux.just(mockRole()));
    var fakeUser = new User("admin", "123456", createAuthorityList("ruleReadPost"));
    var authentication =
        authenticated(fakeUser, fakeUser.getPassword(), fakeUser.getAuthorities());
    var cases = getRequestResolveCases();
    cases.forEach(requestResolveCase -> {
        var httpMethod = HttpMethod.valueOf(requestResolveCase.method);
        var request = method(httpMethod, requestResolveCase.url).build();
        var requestInfo = RequestInfoFactory.INSTANCE.newRequestInfo(request);
        StepVerifier.create(ruleResolver.visitRules(authentication, requestInfo))
            .assertNext(
                visitor -> assertEquals(requestResolveCase.expected, visitor.isAllowed()))
            .verifyComplete();
    });
    verify(roleService, times(cases.size())).listDependenciesFlux(Set.of("ruleReadPost"));
}
// Wraps this plot in a new Canvas spanning its bounds; when the plot has a
// name, it is propagated as the canvas title.
public Canvas canvas() {
    final Canvas result = new Canvas(getLowerBound(), getUpperBound());
    result.add(this);
    if (name != null) {
        result.setTitle(name);
    }
    return result;
}
// Smoke test: render a heatmap canvas and overlay contour lines of the same
// data, then open a window (visual check, no assertions).
@Test
public void testContour() throws Exception {
    System.out.println("Contour");
    var canvas = Heatmap.of(Z, 256).canvas();
    canvas.add(Contour.of(Z));
    canvas.window();
}
// Parses a single ARIA CSV record into a Point, retaining the parsed hit as
// the point's raw data (velocity is not available from the CSV, hence null).
public static Point<AriaCsvHit> parsePointFromAriaCsv(String rawCsvText) {
    final AriaCsvHit hit = AriaCsvHit.from(rawCsvText);
    final Position position = new Position(hit.time(), hit.latLong(), hit.altitude());
    return new Point<>(position, null, hit.linkId(), hit);
}
// Trailing extra CSV fields must not break parsing, and must remain reachable
// through the raw-data token accessor.
@Test
public void exampleParsing_withExtraData() {
    String rawCsv = "PRIMARY_PARTITION,SECONDARY_PARTITION,2018-03-24T14:41:09.371Z,vehicleIdNumber,42.9525,-83.7056,2700,EXTRA_FIELD_A,EXTRA_FIELD_B";

    Point<AriaCsvHit> pt = AriaCsvHits.parsePointFromAriaCsv(rawCsv);

    assertThat(pt.time(), is(Instant.parse("2018-03-24T14:41:09.371Z")));
    assertThat(pt.trackId(), is("vehicleIdNumber"));
    assertThat(pt.latitude(), is(42.9525));
    assertThat(pt.longitude(), is(-83.7056));
    assertThat(pt.altitude(), is(Distance.ofFeet(2700)));
    assertThat(pt.velocity(), nullValue());
    assertThat("The entire rawCsv text is accessible from the parsed point",
        pt.rawData().rawCsvText(), is(rawCsv));
    assertThat(pt.rawData().token(0), is("PRIMARY_PARTITION"));
    assertThat(pt.rawData().token(1), is("SECONDARY_PARTITION"));
    assertThat(pt.rawData().token(7), is("EXTRA_FIELD_A"));
    assertThat(pt.rawData().token(8), is("EXTRA_FIELD_B"));
}
// Discovery pipeline: resolve the request's selectors into descriptors, filter
// by the package predicate, then prune empty branches from the descriptor tree.
void resolveSelectors(EngineDiscoveryRequest request, CucumberEngineDescriptor engineDescriptor) {
    Predicate<String> packageFilter = buildPackageFilter(request);
    resolve(request, engineDescriptor, packageFilter);
    filter(engineDescriptor, packageFilter);
    pruneTree(engineDescriptor);
}
// A URI selector pointing inside a jar ("jar:file:...!/single.feature") must
// resolve to exactly one feature descriptor.
@Test
void resolveRequestWithUniqueIdSelectorFromJarUri() {
    String root = Paths.get("").toAbsolutePath().toUri().getSchemeSpecificPart();
    URI uri = URI.create("jar:file:" + root + "/src/test/resources/feature.jar!/single.feature");
    DiscoverySelector resource = selectUri(uri);
    EngineDiscoveryRequest discoveryRequest = new SelectorRequest(resource);
    resolver.resolveSelectors(discoveryRequest, testDescriptor);
    assertEquals(1, testDescriptor.getChildren().size());
}
// Resolves attributes for an S3 path:
// - root: empty attributes
// - bucket: region only
// - multipart upload placeholder: size of the pending upload, else not found
// - object: HEAD (optionally by version id); a 405 response marks a delete
//   marker; for versioned objects the latest version is determined to flag
//   duplicates; for directories a missing placeholder falls back to a
//   common-prefix probe.
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if(containerService.isContainer(file)) {
        final PathAttributes attributes = new PathAttributes();
        if(log.isDebugEnabled()) {
            log.debug(String.format("Read location for bucket %s", file));
        }
        attributes.setRegion(new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(file).getIdentifier());
        return attributes;
    }
    if(file.getType().contains(Path.Type.upload)) {
        // Pending multipart upload placeholder — report its current size.
        final Write.Append append = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl).append(file, new TransferStatus());
        if(append.append) {
            return new PathAttributes().withSize(append.offset);
        }
        throw new NotfoundException(file.getAbsolute());
    }
    try {
        PathAttributes attr;
        final Path bucket = containerService.getContainer(file);
        try {
            attr = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getVersionedObjectDetails(
                file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
        }
        catch(ServiceException e) {
            switch(e.getResponseCode()) {
                case 405:
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Mark file %s as delete marker", file));
                    }
                    // Only DELETE method is allowed for delete markers
                    attr = new PathAttributes();
                    attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
                    attr.setDuplicate(true);
                    return attr;
            }
            throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
        }
        if(StringUtils.isNotBlank(attr.getVersionId())) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Determine if %s is latest version for %s", attr.getVersionId(), file));
            }
            // Determine if latest version
            try {
                final String latest = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getObjectDetails(
                    bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))).getVersionId();
                if(null != latest) {
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Found later version %s for %s", latest, file));
                    }
                    // Duplicate if not latest version
                    attr.setDuplicate(!latest.equals(attr.getVersionId()));
                }
            }
            catch(ServiceException e) {
                final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
                if(failure instanceof NotfoundException) {
                    // HEAD without version id failed with not-found — treat
                    // this version as not the latest.
                    attr.setDuplicate(true);
                }
                else {
                    throw failure;
                }
            }
        }
        return attr;
    }
    catch(NotfoundException e) {
        if(file.isDirectory()) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Search for common prefix %s", file));
            }
            // File may be marked as placeholder but no placeholder file exists. Check for common prefix returned.
            try {
                new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
            }
            catch(ListCanceledException l) {
                // Found common prefix
                return PathAttributes.EMPTY;
            }
            catch(NotfoundException n) {
                throw e;
            }
            // Found common prefix
            return PathAttributes.EMPTY;
        }
        throw e;
    }
}
// With no cached region, finding attributes of a random virtual-host-style
// path must surface NotfoundException.
@Test(expected = NotfoundException.class)
public void testDetermineRegionVirtualHostStyle() throws Exception {
    final S3AttributesFinderFeature f = new S3AttributesFinderFeature(virtualhost, new S3AccessControlListFeature(virtualhost));
    final Path file = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    // No region is cached and must be determined although HEAD request will not allow S3 to return correct region to use in AWS4 signature
    f.find(new Path(file.getName(), EnumSet.of(Path.Type.file)));
}
// Computes the group's new target assignment:
// 1) assembles member subscription specs from current members plus pending
//    updates/removals (a new static member inherits the departed holder's
//    assignment via its instance id),
// 2) invokes the partition assignor,
// 3) emits one record per member whose assignment changed, followed by the
//    target-assignment epoch bump record.
public TargetAssignmentResult build() throws PartitionAssignorException {
    Map<String, MemberSubscriptionAndAssignmentImpl> memberSpecs = new HashMap<>();

    // Prepare the member spec for all members.
    members.forEach((memberId, member) -> memberSpecs.put(memberId, createMemberSubscriptionAndAssignment(
        member,
        targetAssignment.getOrDefault(memberId, Assignment.EMPTY),
        topicsImage
    )));

    // Update the member spec if updated or deleted members.
    updatedMembers.forEach((memberId, updatedMemberOrNull) -> {
        if (updatedMemberOrNull == null) {
            memberSpecs.remove(memberId);
        } else {
            Assignment assignment = targetAssignment.getOrDefault(memberId, Assignment.EMPTY);

            // A new static member joins and needs to replace an existing departed one.
            if (updatedMemberOrNull.instanceId() != null) {
                String previousMemberId = staticMembers.get(updatedMemberOrNull.instanceId());
                if (previousMemberId != null && !previousMemberId.equals(memberId)) {
                    assignment = targetAssignment.getOrDefault(previousMemberId, Assignment.EMPTY);
                }
            }

            memberSpecs.put(memberId, createMemberSubscriptionAndAssignment(
                updatedMemberOrNull,
                assignment,
                topicsImage
            ));
        }
    });

    // Prepare the topic metadata.
    Map<Uuid, TopicMetadata> topicMetadataMap = new HashMap<>();
    subscriptionMetadata.forEach((topicName, topicMetadata) ->
        topicMetadataMap.put(
            topicMetadata.id(),
            topicMetadata
        )
    );

    // Compute the assignment.
    GroupAssignment newGroupAssignment = assignor.assign(
        new GroupSpecImpl(
            Collections.unmodifiableMap(memberSpecs),
            subscriptionType,
            invertedTargetAssignment
        ),
        new SubscribedTopicDescriberImpl(topicMetadataMap)
    );

    // Compute delta from previous to new target assignment and create the
    // relevant records.
    List<CoordinatorRecord> records = new ArrayList<>();

    for (String memberId : memberSpecs.keySet()) {
        Assignment oldMemberAssignment = targetAssignment.get(memberId);
        Assignment newMemberAssignment = newMemberAssignment(newGroupAssignment, memberId);

        if (!newMemberAssignment.equals(oldMemberAssignment)) {
            // If the member had no assignment or had a different assignment, we
            // create a record for the new assignment.
            records.add(targetAssignmentRecordBuilder.build(
                groupId,
                memberId,
                newMemberAssignment.partitions()
            ));
        }
    }

    // Bump the target assignment epoch.
    records.add(targetAssignmentEpochRecordBuilder.build(groupId, groupEpoch));

    return new TargetAssignmentResult(records, newGroupAssignment.members());
}
// member-3 updates its subscription (adds "foo", gains instance and rack ids);
// new target assignments must be recorded for all three members, with the
// epoch bump record appended last.
@Test
public void testUpdateMember() {
    TargetAssignmentBuilderTestContext context = new TargetAssignmentBuilderTestContext(
        "my-group",
        20
    );

    Uuid fooTopicId = context.addTopicMetadata("foo", 6, Collections.emptyMap());
    Uuid barTopicId = context.addTopicMetadata("bar", 6, Collections.emptyMap());

    context.addGroupMember("member-1", Arrays.asList("foo", "bar", "zar"), mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2, 3),
        mkTopicAssignment(barTopicId, 1, 2)
    ));

    context.addGroupMember("member-2", Arrays.asList("foo", "bar", "zar"), mkAssignment(
        mkTopicAssignment(fooTopicId, 4, 5, 6),
        mkTopicAssignment(barTopicId, 3, 4)
    ));

    context.addGroupMember("member-3", Arrays.asList("bar", "zar"), mkAssignment(
        mkTopicAssignment(barTopicId, 5, 6)
    ));

    // member-3 now also subscribes to "foo" and declares instance/rack ids.
    context.updateMemberSubscription(
        "member-3",
        Arrays.asList("foo", "bar", "zar"),
        Optional.of("instance-id-3"),
        Optional.of("rack-0")
    );

    context.prepareMemberAssignment("member-1", mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2),
        mkTopicAssignment(barTopicId, 1, 2)
    ));

    context.prepareMemberAssignment("member-2", mkAssignment(
        mkTopicAssignment(fooTopicId, 3, 4),
        mkTopicAssignment(barTopicId, 3, 4)
    ));

    context.prepareMemberAssignment("member-3", mkAssignment(
        mkTopicAssignment(fooTopicId, 5, 6),
        mkTopicAssignment(barTopicId, 5, 6)
    ));

    TargetAssignmentBuilder.TargetAssignmentResult result = context.build();

    // Three member assignment records plus the epoch record.
    assertEquals(4, result.records().size());

    assertUnorderedListEquals(Arrays.asList(
        newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment(
            mkTopicAssignment(fooTopicId, 1, 2),
            mkTopicAssignment(barTopicId, 1, 2)
        )),
        newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment(
            mkTopicAssignment(fooTopicId, 3, 4),
            mkTopicAssignment(barTopicId, 3, 4)
        )),
        newConsumerGroupTargetAssignmentRecord("my-group", "member-3", mkAssignment(
            mkTopicAssignment(fooTopicId, 5, 6),
            mkTopicAssignment(barTopicId, 5, 6)
        ))
    ), result.records().subList(0, 3));

    assertEquals(newConsumerGroupTargetAssignmentEpochRecord(
        "my-group",
        20
    ), result.records().get(3));

    Map<String, MemberAssignment> expectedAssignment = new HashMap<>();
    expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2),
        mkTopicAssignment(barTopicId, 1, 2)
    )));
    expectedAssignment.put("member-2", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 3, 4),
        mkTopicAssignment(barTopicId, 3, 4)
    )));
    expectedAssignment.put("member-3", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 5, 6),
        mkTopicAssignment(barTopicId, 5, 6)
    )));

    assertEquals(expectedAssignment, result.targetAssignment());
}
// Convenience overload: formats the expression with no escaping or
// reserved-word quoting (the FormatOptions predicate always returns false).
public static String formatExpression(final Expression expression) {
    return formatExpression(expression, FormatOptions.of(s -> false));
}
// A LongLiteral must format as its plain decimal text.
@Test
public void shouldFormatLongLiteral() {
    assertThat(ExpressionFormatter.formatExpression(new LongLiteral(1)), equalTo("1"));
}
// Returns the serializable converter from T to a Beam Row.
public SerializableFunction<T, Row> getToRowFunction() {
    return toRowFunction;
}
// A repeated-primitive proto message must convert to the expected Row via the
// schema provider's to-row function.
@Test
public void testRepeatedProtoToRow() throws InvalidProtocolBufferException {
    ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(RepeatPrimitive.getDescriptor());
    SerializableFunction<DynamicMessage, Row> toRow = schemaProvider.getToRowFunction();
    assertEquals(REPEATED_ROW, toRow.apply(toDynamic(REPEATED_PROTO)));
}
// Inserts a clone of the given FactMapping at the requested index and returns
// the inserted clone; the original mapping is left untouched.
public FactMapping addFactMapping(int index, FactMapping toClone) {
    final FactMapping cloned = toClone.cloneFactMapping();
    factMappings.add(index, cloned);
    return cloned;
}
// Adding the same (factIdentifier, expressionIdentifier) pair twice must fail
// with IllegalArgumentException.
@Test
public void addFactMapping_byFactIdentifierAndExpressionIdentifier_fail() {
    modelDescriptor.addFactMapping(factIdentifier, expressionIdentifier);
    assertThatIllegalArgumentException().isThrownBy(() -> modelDescriptor.addFactMapping(factIdentifier, expressionIdentifier));
}
// Per-tick idle/threshold checks: timers reset (and no notifications fire)
// when the player is not logged in or has recent mouse/keyboard activity;
// otherwise each configured notification whose condition is met is sent.
@Subscribe
public void onGameTick(GameTick event) {
    final Player local = client.getLocalPlayer();
    final Duration waitDuration = Duration.ofMillis(config.getIdleNotificationDelay());
    lastCombatCountdown = Math.max(lastCombatCountdown - 1, 0);

    if (client.getGameState() != GameState.LOGGED_IN
        || local == null
        // If user has clicked in the last second then they're not idle so don't send idle notification
        || System.currentTimeMillis() - client.getMouseLastPressedMillis() < 1000
        || client.getKeyboardIdleTicks() < 10) {
        resetTimers();
        return;
    }

    if (checkIdleLogout()) {
        notifier.notify(config.logoutIdle(), "You are about to log out from idling too long!");
    }

    if (check6hrLogout()) {
        notifier.notify("You are about to log out from being online for 6 hours!");
    }

    if (checkAnimationIdle(waitDuration, local)) {
        notifier.notify(config.animationIdle(), "You are now idle!");
    }

    if (checkMovementIdle(waitDuration, local)) {
        notifier.notify(config.movementIdle(), "You have stopped moving!");
    }

    if (checkInteractionIdle(waitDuration, local)) {
        // Message depends on whether the last interaction was combat.
        if (lastInteractWasCombat) {
            notifier.notify(config.interactionIdle(), "You are now out of combat!");
        } else {
            notifier.notify(config.interactionIdle(), "You are now idle!");
        }
    }

    if (checkLowHitpoints()) {
        notifier.notify(config.getHitpointsNotification(), "You have low hitpoints!");
    }

    if (checkLowPrayer()) {
        notifier.notify(config.getPrayerNotification(), "You have low prayer!");
    }

    if (checkLowEnergy()) {
        notifier.notify(config.getLowEnergyNotification(), "You have low run energy!");
    }

    if (checkHighEnergy()) {
        notifier.notify(config.getHighEnergyNotification(), "You have restored run energy!");
    }

    if (checkLowOxygen()) {
        notifier.notify(config.getOxygenNotification(), "You have low oxygen!");
    }

    if (checkFullSpecEnergy()) {
        notifier.notify(config.getSpecNotification(), "You have restored spec energy!");
    }
}
// Crossing the configured spec threshold from below (40% -> 50%) must trigger
// the restored-spec-energy notification; the first tick only records the
// baseline and must not notify.
@Test
public void testSpecRegen() {
    when(config.getSpecNotification()).thenReturn(Notification.ON);
    when(config.getSpecEnergyThreshold()).thenReturn(50);
    when(client.getVarpValue(eq(VarPlayer.SPECIAL_ATTACK_PERCENT))).thenReturn(400); // 40%
    plugin.onGameTick(new GameTick()); // once to set lastSpecEnergy to 400
    verify(notifier, never()).notify(any());

    when(client.getVarpValue(eq(VarPlayer.SPECIAL_ATTACK_PERCENT))).thenReturn(500); // 50%
    plugin.onGameTick(new GameTick());
    verify(notifier).notify(Notification.ON, "You have restored spec energy!");
}
// Masks sensitive literals in a KSQL statement. Falls back to regex-based
// masking when parsing fails for any reason — including StackOverflowError
// from deeply nested input, hence the unusual catch of an Error type.
public static String getMaskedStatement(final String query) {
    try {
        final ParseTree tree = DefaultKsqlParser.getParseTree(query);
        return new Visitor().visit(tree);
    } catch (final Exception | StackOverflowError e) {
        return fallbackMasking(query);
    }
}
// An unparseable INSERT (malformed values list) must still be masked via the
// regex fallback, replacing each value with '[value]'.
@Test
public void shouldMaskFallbackInsertStatement() {
    // Given
    final String query = "--this is a comment. \n"
        + "INSERT INTO foo (KEY_COL, COL_A) VALUES"
        + "(\"key\", 0.125, '{something}', 1, C, 2.3E);";

    // When
    final String maskedQuery = QueryMask.getMaskedStatement(query);

    // Then
    final String expected = "--this is a comment. \n"
        + "INSERT INTO foo (KEY_COL, COL_A) VALUES"
        + "('[value]','[value]','[value]','[value]','[value]','[value]');";
    assertThat(maskedQuery, is(expected));
}
// Parses a URI into a Host, applying CLI overrides: optional region, default
// path derived from the parsed remote path (parent directory when the path is
// a file), and optional UDT transfer type.
public Host parse(final String uri) throws HostParserException {
    final Host host = new HostParser(factory).get(uri);
    if(input.hasOption(TerminalOptionsBuilder.Params.region.name())) {
        host.setRegion(input.getOptionValue(TerminalOptionsBuilder.Params.region.name()));
    }
    final Path directory = new CommandLinePathParser(input, factory).parse(uri);
    if(directory.isDirectory()) {
        host.setDefaultPath(directory.getAbsolute());
    }
    else {
        host.setDefaultPath(directory.getParent().getAbsolute());
    }
    if(input.hasOption(TerminalOptionsBuilder.Params.udt.name())) {
        host.setTransfer(Host.TransferType.udt);
    }
    if(log.isDebugEnabled()) {
        log.debug(String.format("Parsed %s as %s", uri, host));
    }
    return host;
}
// Parsing a "rackspace://" URI must resolve the registered Swift-based
// profile and yield a Host equal to the manually constructed reference.
@Test
public void testProfile() throws Exception {
    final CommandLineParser parser = new PosixParser();
    final CommandLine input = parser.parse(new Options(), new String[]{});
    final ProtocolFactory factory = new ProtocolFactory(new LinkedHashSet<>(Collections.singleton(new SwiftProtocol())));
    factory.register(new ProfilePlistReader(factory).read(this.getClass().getResourceAsStream("/Rackspace US.cyberduckprofile")));
    assertEquals(0, new Host(factory.forName("rackspace"), "identity.api.rackspacecloud.com", 443, "/cdn.cyberduck.ch", new Credentials("u", null))
        .compareTo(new CommandLineUriParser(input, factory).parse("rackspace://u@cdn.cyberduck.ch/")));
}
// Splits a pod template into (pod without the main container, main container).
// A template without a spec gets an empty one; a missing main container is
// replaced by an empty container (with an informational log).
public static FlinkPod loadPodFromTemplateFile(
        FlinkKubeClient kubeClient, File podTemplateFile, String mainContainerName) {
    final KubernetesPod pod = kubeClient.loadPodFromTemplateFile(podTemplateFile);
    final List<Container> otherContainers = new ArrayList<>();
    Container mainContainer = null;

    if (null != pod.getInternalResource().getSpec()) {
        for (Container container : pod.getInternalResource().getSpec().getContainers()) {
            if (mainContainerName.equals(container.getName())) {
                mainContainer = container;
            } else {
                otherContainers.add(container);
            }
        }
        // Keep only the non-main containers on the template pod.
        pod.getInternalResource().getSpec().setContainers(otherContainers);
    } else {
        // Set an empty spec for pod template
        pod.getInternalResource().setSpec(new PodSpecBuilder().build());
    }

    if (mainContainer == null) {
        LOG.info(
                "Could not find main container {} in pod template, using empty one to initialize.",
                mainContainerName);
        mainContainer = new ContainerBuilder().build();
    }

    return new FlinkPod(pod.getInternalResource(), mainContainer);
}
// The init container declared in the pod template must survive the main
// container extraction untouched.
@Test
void testLoadPodFromTemplateAndCheckInitContainer() {
    final FlinkPod flinkPod =
        KubernetesUtils.loadPodFromTemplateFile(
            flinkKubeClient,
            KubernetesPodTemplateTestUtils.getPodTemplateFile(),
            KubernetesPodTemplateTestUtils.TESTING_MAIN_CONTAINER_NAME);
    assertThat(flinkPod.getPodWithoutMainContainer().getSpec().getInitContainers()).hasSize(1);
    assertThat(flinkPod.getPodWithoutMainContainer().getSpec().getInitContainers().get(0))
        .isEqualTo(KubernetesPodTemplateTestUtils.createInitContainer());
}
// Appends an aborted-transaction entry to the index file. Entries must be
// appended with strictly increasing lastOffset; otherwise an
// IllegalArgumentException is thrown before any state changes.
public void append(AbortedTxn abortedTxn) throws IOException {
    lastOffset.ifPresent(offset -> {
        if (offset >= abortedTxn.lastOffset())
            throw new IllegalArgumentException("The last offset of appended transactions must increase sequentially, but "
                + abortedTxn.lastOffset() + " is not greater than current last offset " + offset + " of index " + file.getAbsolutePath());
    });
    lastOffset = OptionalLong.of(abortedTxn.lastOffset());
    // duplicate() so the shared buffer's position is not disturbed.
    Utils.writeFully(channel(), abortedTxn.buffer.duplicate());
}
// Appending an aborted txn whose last offset is not greater than the current
// one must be rejected.
@Test
public void testLastOffsetCannotDecrease() throws IOException {
    index.append(new AbortedTxn(1L, 5, 15, 13));
    assertThrows(IllegalArgumentException.class, () -> index.append(new AbortedTxn(0L, 0, 10, 11)));
}
// Builds a human-readable failure message for a controller event, varying by:
// whether the event ran at all (deltaUs present), whether the internal
// exception maps to an external one, whether the failure causes a failover
// (and then whether this node was the active controller), and whether the
// exception message should be surfaced (non-faults only).
public String failureMessage(
    int epoch,
    OptionalLong deltaUs,
    boolean isActiveController,
    long lastCommittedOffset
) {
    StringBuilder bld = new StringBuilder();
    if (deltaUs.isPresent()) {
        bld.append("event failed with ");
    } else {
        bld.append("event unable to start processing because of ");
    }
    bld.append(internalException.getClass().getSimpleName());
    if (externalException.isPresent()) {
        bld.append(" (treated as ").
            append(externalException.get().getClass().getSimpleName()).append(")");
    }
    if (causesFailover()) {
        bld.append(" at epoch ").append(epoch);
    }
    if (deltaUs.isPresent()) {
        bld.append(" in ").append(deltaUs.getAsLong()).append(" microseconds");
    }
    if (causesFailover()) {
        if (isActiveController) {
            bld.append(". Renouncing leadership and reverting to the last committed offset ");
            bld.append(lastCommittedOffset);
        } else {
            bld.append(". The controller is already in standby mode");
        }
    }
    bld.append(".");
    if (!isFault && internalException.getMessage() != null) {
        bld.append(" Exception message: ");
        bld.append(internalException.getMessage());
    }
    return bld.toString();
}
// When this node is not the active controller, the failure message must note
// standby mode instead of renouncing leadership.
@Test
public void testNullPointerExceptionFailureMessageWhenInactive() {
    assertEquals("event failed with NullPointerException (treated as UnknownServerException) "
        + "at epoch 123 in 40 microseconds. The controller is already in standby mode.",
        NULL_POINTER.failureMessage(123, OptionalLong.of(40L), false, 456L));
}
// Reports availability and backlog of cached buffers (refreshing the cache
// first). Available when the consumer has credit, or when the next cached
// entry is an event; backlog counts only data buffers, not events.
@Override
public AvailabilityWithBacklog getAvailabilityAndBacklog(boolean isCreditAvailable) {
    synchronized (lock) {
        try {
            cacheBuffer();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        if (cachedBuffers.isEmpty()) {
            return new AvailabilityWithBacklog(false, 0);
        }
        return new AvailabilityWithBacklog(
            isCreditAvailable || cachedBuffers.peek().f0.buffer().getDataType().isEvent(),
            (int) cachedBuffers.stream()
                .filter(x -> x.f0.buffer().getDataType().isBuffer())
                .count());
    }
}
// Availability/backlog bookkeeping of the combined view:
// with data cached it is available only when credit is granted; after draining
// all but the final entry the backlog is zero and the view reports available
// regardless of credit.
@Test
void testGetAvailabilityAndBacklog() throws IOException {
    view0.notifyDataAvailable();
    view1.notifyDataAvailable();

    ResultSubpartitionView.AvailabilityWithBacklog availabilityAndBacklog1 =
        view.getAvailabilityAndBacklog(false);
    assertThat(availabilityAndBacklog1.getBacklog()).isPositive();
    assertThat(availabilityAndBacklog1.isAvailable()).isFalse();
    ResultSubpartitionView.AvailabilityWithBacklog availabilityAndBacklog2 =
        view.getAvailabilityAndBacklog(true);
    assertThat(availabilityAndBacklog2.getBacklog()).isPositive();
    assertThat(availabilityAndBacklog2.isAvailable()).isTrue();

    for (int i = 1; i < buffers0.size() + buffers1.size(); i++) {
        view.getNextBuffer();
    }

    ResultSubpartitionView.AvailabilityWithBacklog availabilityAndBacklog3 =
        view.getAvailabilityAndBacklog(false);
    assertThat(availabilityAndBacklog3.getBacklog()).isZero();
    assertThat(availabilityAndBacklog3.isAvailable()).isTrue();
    ResultSubpartitionView.AvailabilityWithBacklog availabilityAndBacklog4 =
        view.getAvailabilityAndBacklog(true);
    assertThat(availabilityAndBacklog4.getBacklog()).isZero();
    assertThat(availabilityAndBacklog4.isAvailable()).isTrue();
}
// Canonical link identifier between two element/port endpoints. When element A
// is a HostId the link is an edge link: the host side carries no port in the
// id and the type becomes HOST_DEVICE instead of DEVICE_DEVICE.
private UiLinkId(ElementId a, PortNumber pa, ElementId b, PortNumber pb) {
    elementA = a;
    portA = pa;
    elementB = b;
    portB = pb;
    regionA = null;
    regionB = null;

    boolean isEdgeLink = (a instanceof HostId);
    // NOTE: for edgelinks, hosts are always element A
    idA = isEdgeLink ? a.toString() : a + ID_PORT_DELIMITER + pa;
    idB = b + ID_PORT_DELIMITER + pb;
    idStr = idA + CP_DELIMITER + idB;

    type = isEdgeLink ? Type.HOST_DEVICE : Type.DEVICE_DEVICE;
}
// Links between the same devices but via different ports must yield distinct
// identifiers.
@Test
public void sameDevsDiffPorts() {
    title("sameDevsDiffPorts");
    UiLinkId one = UiLinkId.uiLinkId(LINK_X1_TO_Y2);
    UiLinkId other = UiLinkId.uiLinkId(LINK_X1_TO_Y3);
    print("link one: %s", one);
    print("link other: %s", other);
    assertNotEquals("equiv?", one, other);
}
// Null-object getter: deliberately reports no return type.
@Override
Class<?> getReturnType() {
    return null;
}
// The null-object getter must report a null return type.
@Test
public void test_getReturnType() {
    Class<?> returnType = NullMultiValueGetter.NULL_MULTIVALUE_GETTER.getReturnType();
    assertNull(returnType);
}
// Reflectively invokes the config class's generated
// getChangesRequiringRestart(T) on 'from', comparing against 'to'.
// Both instances must be of the exact same config class.
public static ChangesRequiringRestart getChangesRequiringRestart(ConfigInstance from, ConfigInstance to) {
    Class<?> clazz = from.getClass();
    if (!clazz.equals(to.getClass())) {
        // Comparing different config classes is a programming error.
        throw new IllegalArgumentException(String.format("%s != %s", clazz, to.getClass()));
    }
    try {
        Method m = clazz.getDeclaredMethod("getChangesRequiringRestart", clazz);
        // The generated method is not public; make it callable.
        m.setAccessible(true);
        return (ChangesRequiringRestart) m.invoke(from, to);
    } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) {
        throw new RuntimeException(e);
    }
}
// Passing two instances of different config classes must be rejected.
@Test
void requireThatGetChangesRequiringRestartValidatesParameterTypes() {
    assertThrows(IllegalArgumentException.class, () -> {
        ReflectionUtil.getChangesRequiringRestart(new RestartConfig(), new NonRestartConfig());
    });
}
/**
 * Compares two version strings, short-circuiting on string equality before
 * falling back to semantic comparison of the parsed versions.
 *
 * @return 0 if equal, negative if {@code firstVersion} is older, positive if newer
 */
public static int compareVersions(String firstVersion, String secondVersion) {
    return firstVersion.equals(secondVersion)
        ? 0
        : getVersionFromStr(firstVersion).compareTo(getVersionFromStr(secondVersion));
}
/** Version ordering: equal versions compare 0; suffixes like -SNAPSHOT/.dev do not break numeric ordering. */
@Test public void testVersionComparison() throws Exception { assertTrue(TransformUpgrader.compareVersions("2.53.0", "2.53.0") == 0); assertTrue(TransformUpgrader.compareVersions("2.53.0", "2.55.0") < 0); assertTrue(TransformUpgrader.compareVersions("2.53.0", "2.55.0-SNAPSHOT") < 0); assertTrue(TransformUpgrader.compareVersions("2.53.0", "2.55.0.dev") < 0); assertTrue(TransformUpgrader.compareVersions("2.55.0", "2.53.0") > 0); assertTrue(TransformUpgrader.compareVersions("2.55.0-SNAPSHOT", "2.53.0") > 0); assertTrue(TransformUpgrader.compareVersions("2.55.0.dev", "2.53.0") > 0); }
/**
 * Returns the insert-values token for the statement, reusing and re-processing a
 * previously generated token when one exists; otherwise generates a fresh one.
 */
@Override
public InsertValuesToken generateSQLToken(final InsertStatementContext insertStatementContext) {
    Optional<SQLToken> previous = findPreviousSQLToken(InsertValuesToken.class);
    if (!previous.isPresent()) {
        return generateNewSQLToken(insertStatementContext);
    }
    InsertValuesToken result = (InsertValuesToken) previous.get();
    processPreviousSQLToken(insertStatementContext, result);
    return result;
}
/** With previous SQL tokens present, the generator reuses them and renders the expected placeholder tuple. */
@Test void assertGenerateSQLTokenFromPreviousSQLTokens() { generator.setDatabaseName("db-001"); generator.setPreviousSQLTokens(EncryptGeneratorFixtureBuilder.getPreviousSQLTokens()); generator.setDatabaseName("db_schema"); assertThat(generator.generateSQLToken(EncryptGeneratorFixtureBuilder.createInsertStatementContext(Arrays.asList(1, "Tom", 0, "123456"))).toString(), is("(?, ?, ?, ?, ?, ?)")); }
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
/** A null argument must still resolve to the non-vararg candidate. */
@Test
public void shouldFindNonVarargWithNullValues() {
    // Given:
    givenFunctions(
        function(EXPECTED, -1, STRING)
    );

    // When:
    final KsqlScalarFunction fun = udfIndex.getFunction(Collections.singletonList(null));

    // Then:
    assertThat(fun.name(), equalTo(EXPECTED));
}
/**
 * Tears down every manager, collecting all teardown exceptions; if any were
 * thrown, a single summary exception carrying them as suppressed is raised.
 */
static void removeAllFromManagers(Iterable<DoFnLifecycleManager> managers) throws Exception {
    Collection<Exception> suppressed = new ArrayList<>();
    for (DoFnLifecycleManager manager : managers) {
        suppressed.addAll(manager.removeAll());
    }
    if (suppressed.isEmpty()) {
        return;
    }
    Exception overall = new Exception("Exceptions thrown while tearing down DoFns");
    for (Exception e : suppressed) {
        overall.addSuppressed(e);
    }
    throw overall;
}
/** Tearing down managers whose DoFns raise nothing completes without exception. */
@Test public void whenManagersSucceedSucceeds() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); DoFnLifecycleManager first = DoFnLifecycleManager.of(new EmptyFn(), options); DoFnLifecycleManager second = DoFnLifecycleManager.of(new EmptyFn(), options); DoFnLifecycleManager third = DoFnLifecycleManager.of(new EmptyFn(), options); first.get(); second.get(); third.get(); DoFnLifecycleManagers.removeAllFromManagers(ImmutableList.of(first, second, third)); }
// Combines the EC and PQ one-time-prekey counts for the authenticated device
// (for the requested identity, ACI by default) into a single PreKeyCount response.
@GET @Produces(MediaType.APPLICATION_JSON) @Operation(summary = "Get prekey count", description = "Gets the number of one-time prekeys uploaded for this device and still available") @ApiResponse(responseCode = "200", description = "Body contains the number of available one-time prekeys for the device.", useReturnTypeSchema = true) @ApiResponse(responseCode = "401", description = "Account authentication check failed.") public CompletableFuture<PreKeyCount> getStatus(@ReadOnly @Auth final AuthenticatedDevice auth, @QueryParam("identity") @DefaultValue("aci") final IdentityType identityType) { final CompletableFuture<Integer> ecCountFuture = keysManager.getEcCount(auth.getAccount().getIdentifier(identityType), auth.getAuthenticatedDevice().getId()); final CompletableFuture<Integer> pqCountFuture = keysManager.getPqCount(auth.getAccount().getIdentifier(identityType), auth.getAuthenticatedDevice().getId()); return ecCountFuture.thenCombine(pqCountFuture, PreKeyCount::new); }
/** Digests that are not exactly 32 bytes (31 and 33 tried here) must be rejected with HTTP 422. */
@Test void checkKeysIncorrectDigestLength() { try (final Response response = resources.getJerseyTest() .target("/v2/keys/check") .request() .header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD)) .post(Entity.entity(new CheckKeysRequest(IdentityType.ACI, new byte[31]), MediaType.APPLICATION_JSON_TYPE))) { assertEquals(422, response.getStatus()); } try (final Response response = resources.getJerseyTest() .target("/v2/keys/check") .request() .header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD)) .post(Entity.entity(new CheckKeysRequest(IdentityType.ACI, new byte[33]), MediaType.APPLICATION_JSON_TYPE))) { assertEquals(422, response.getStatus()); } }
// Shuts down herder services; the order (stores, worker, executor, then policy) appears
// deliberate — do not reorder without checking shutdown dependencies.
protected void stopServices() { this.statusBackingStore.stop(); this.configBackingStore.stop(); this.worker.stop(); this.connectorExecutor.shutdown(); Utils.closeQuietly(this.connectorClientConfigOverridePolicy, "connector client config override policy"); }
/** stopServices must close the connector client config override policy. */
@Test public void testConnectorClientConfigOverridePolicyClose() { SampleConnectorClientConfigOverridePolicy noneConnectorClientConfigOverridePolicy = new SampleConnectorClientConfigOverridePolicy(); AbstractHerder herder = testHerder(noneConnectorClientConfigOverridePolicy); herder.stopServices(); assertTrue(noneConnectorClientConfigOverridePolicy.isClosed()); }
// Number of differing broker config entries detected by this diff.
protected int getDiffSize() { return brokerConfigDiff.size(); }
/** Controller-only config changes must not count toward the broker config diff (size 0). */
@Test public void testChangedKRaftControllerConfig() { List<ConfigEntry> desiredControllerConfig = singletonList(new ConfigEntry("controller.quorum.election.timeout.ms", "5000")); List<ConfigEntry> currentControllerConfig = singletonList(new ConfigEntry("controller.quorum.election.timeout.ms", "1000")); KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(currentControllerConfig), getDesiredConfiguration(desiredControllerConfig), kafkaVersion, nodeRef); assertThat(kcd.getDiffSize(), is(0)); }
// Returns all menus, delegating straight to the mapper's unfiltered select.
@Override public List<MenuDO> getMenuList() { return menuMapper.selectList(); }
/** Querying by a single id returns exactly that menu. */
@Test
public void testGetMenuList_ids() {
    // mock data
    MenuDO menu100 = randomPojo(MenuDO.class);
    menuMapper.insert(menu100);
    MenuDO menu101 = randomPojo(MenuDO.class);
    menuMapper.insert(menu101);
    // prepare parameters
    Collection<Long> ids = Collections.singleton(menu100.getId());
    // invoke
    List<MenuDO> list = menuService.getMenuList(ids);
    // assert
    assertEquals(1, list.size());
    assertPojoEquals(menu100, list.get(0));
}
/**
 * Searches groups matching the request. The total count is always computed;
 * page 0 is a count-only call that returns no rows. Each returned group is
 * enriched with its managed flag and whether it is the default group.
 */
public SearchResults<GroupInformation> search(DbSession dbSession, GroupSearchRequest groupSearchRequest) {
    GroupDto defaultGroup = defaultGroupFinder.findDefaultGroup(dbSession);
    GroupQuery query = toGroupQuery(groupSearchRequest);
    int total = dbClient.groupDao().countByQuery(dbSession, query);
    if (groupSearchRequest.page() == 0) {
        // Count-only request: skip fetching any rows.
        return new SearchResults<>(List.of(), total);
    }
    List<GroupDto> groups = dbClient.groupDao().selectByQuery(dbSession, query, groupSearchRequest.page(), groupSearchRequest.pageSize());
    Map<String, Boolean> managedByUuid =
        managedInstanceService.getGroupUuidToManaged(dbSession, new HashSet<>(extractGroupUuids(groups)));
    List<GroupInformation> results = groups.stream()
        .map(group -> toGroupInformation(group, defaultGroup.getUuid(), managedByUuid))
        .toList();
    return new SearchResults<>(results, total);
}
/** Multiple matches are returned with managed/default flags set correctly, total from countByQuery, and an uppercased LIKE search text. */
@Test public void search_whenSeveralGroupFound_returnsThem() { GroupDto groupDto1 = mockGroupDto("1"); GroupDto groupDto2 = mockGroupDto("2"); GroupDto defaultGroup = mockDefaultGroup(); when(dbClient.groupDao().selectByQuery(eq(dbSession), queryCaptor.capture(), eq(5), eq(24))) .thenReturn(List.of(groupDto1, groupDto2, defaultGroup)); Map<String, Boolean> groupUuidToManaged = Map.of( groupDto1.getUuid(), false, groupDto2.getUuid(), true, defaultGroup.getUuid(), false); when(managedInstanceService.getGroupUuidToManaged(dbSession, groupUuidToManaged.keySet())).thenReturn(groupUuidToManaged); when(dbClient.groupDao().countByQuery(eq(dbSession), any())).thenReturn(300); SearchResults<GroupInformation> searchResults = groupService.search(dbSession, new GroupSearchRequest("query", null, 5, 24)); assertThat(searchResults.total()).isEqualTo(300); Map<String, GroupInformation> uuidToGroupInformation = searchResults.searchResults().stream() .collect(Collectors.toMap(groupInformation -> groupInformation.groupDto().getUuid(), identity())); assertGroupInformation(uuidToGroupInformation, groupDto1, false, false); assertGroupInformation(uuidToGroupInformation, groupDto2, true, false); assertGroupInformation(uuidToGroupInformation, defaultGroup, false, true); assertThat(queryCaptor.getValue().getSearchText()).isEqualTo("%QUERY%"); assertThat(queryCaptor.getValue().getIsManagedSqlClause()).isNull(); }
// Reads a QR-code image from the stream and decodes it, flushing the image
// resources in finally even when reading or decoding fails.
public static String decode(InputStream qrCodeInputStream) { BufferedImage image = null; try{ image = ImgUtil.read(qrCodeInputStream); return decode(image); } finally { ImgUtil.flush(image); } }
/** Manual (disabled) decode of a local barcode image; depends on a file on the developer's disk. */
@Test
@Disabled
public void decodeTest2() {
    // barcode
    final String decode = QrCodeUtil.decode(FileUtil.file("d:/test/90.png"));
    //Console.log(decode);
}
@Override public void checkTopicAccess( final KsqlSecurityContext securityContext, final String topicName, final AclOperation operation ) { final Set<AclOperation> authorizedOperations = securityContext.getServiceContext() .getTopicClient().describeTopic(topicName).authorizedOperations(); // Kakfa 2.2 or lower do not support authorizedOperations(). In case of running on a // unsupported broker version, then the authorizeOperation will be null. if (authorizedOperations != null && !authorizedOperations.contains(operation)) { // This error message is similar to what Kafka throws when it cannot access the topic // due to an authorization error. I used this message to keep a consistent message. throw new KsqlTopicAuthorizationException(operation, Collections.singleton(topicName)); } }
/** Null authorizedOperations (pre-2.3 brokers) must not fail the access check. */
@Test
public void shouldAllowIfAuthorizedOperationsIsNull() {
    // Checks compatibility with unsupported Kafka authorization checks
    // Given:
    givenTopicPermissions(TOPIC_1, null);

    // When/Then:
    accessValidator.checkTopicAccess(securityContext, TOPIC_NAME_1, AclOperation.READ);
}
/**
 * Looks up all values for a query-string parameter.
 *
 * @return the values as an array, or {@code null} when the key is absent
 */
protected String[] getQueryParamValues(MultiValuedTreeMap<String, String> qs, String key, boolean isCaseSensitive) {
    List<String> values = getQueryParamValuesAsList(qs, key, isCaseSensitive);
    return values == null ? null : values.toArray(new String[0]);
}
/** Case-insensitive lookup must find the parameter regardless of key casing. */
@Test void queryParamValues_getQueryParamValues_caseInsensitive() { AwsProxyHttpServletRequest request = new AwsProxyHttpServletRequest(new AwsProxyRequest(), mockContext, null); MultiValuedTreeMap<String, String> map = new MultiValuedTreeMap<>(); map.add("test", "test"); map.add("test", "test2"); String[] result1 = request.getQueryParamValues(map, "test", false); assertArrayEquals(new String[]{"test", "test2"}, result1); String[] result2 = request.getQueryParamValues(map, "TEST", false); assertArrayEquals(new String[]{"test", "test2"}, result2); }
/**
 * Returns the value stored under {@code key} when it is a String;
 * otherwise (absent or non-String) returns {@code _default}.
 */
public String getString(String key, String _default) {
    Object value = map.get(key);
    if (value instanceof String) {
        return (String) value;
    }
    return _default;
}
/** PMap keys are case-sensitive: "Foo" does not match "foo". */
@Test public void keyCannotHaveAnyCasing() { PMap subject = new PMap("foo=valueA|bar=valueB"); assertEquals("valueA", subject.getString("foo", "")); assertEquals("", subject.getString("Foo", "")); }
// Synchronized pass-through to the wrapped partition group's buffered count.
@Override synchronized int numBuffered() { return wrapped.numBuffered(); }
/** numBuffered delegates to the wrapped group exactly once. */
@Test public void testNumBuffered() { final int numBuffered = 1; when(wrapped.numBuffered()).thenReturn(numBuffered); final int result = synchronizedPartitionGroup.numBuffered(); assertEquals(numBuffered, result); verify(wrapped, times(1)).numBuffered(); }
/**
 * Maps a numeric version code to its {@code Version} enum constant.
 * Codes at or beyond the known range are clamped to the highest known
 * version (forward compatibility with newer peers).
 */
public static Version value2Version(int value) {
    // Cache values() once: each call to Enum.values() allocates a fresh array,
    // and the original invoked it up to three times per call.
    Version[] versions = Version.values();
    return versions[Math.min(value, versions.length - 1)];
}
/** Out-of-range version codes clamp to HIGHER_VERSION. */
@Test public void testValue2Version_HigherVersion() throws Exception { assertThat(MQVersion.value2Version(Integer.MAX_VALUE)).isEqualTo(MQVersion.Version.HIGHER_VERSION); }
// DSL shortcut: a GET setting matched by exact resource id (id is validated non-blank).
public static RestSettingBuilder get(final String id) { return get(eq(checkId(id))); }
/** GET-by-id settings guarded by a Content-Type header match only requests carrying that header; others 404. */
@Test public void should_get_resource_by_id_with_request_config() throws Exception { Plain resource1 = new Plain(); resource1.code = 1; resource1.message = "hello"; Plain resource2 = new Plain(); resource2.code = 2; resource2.message = "world"; server.resource("targets", get("1").request(eq(header(HttpHeaders.CONTENT_TYPE), "application/json")).response(json(resource1)), get("2").request(eq(header(HttpHeaders.CONTENT_TYPE), "application/json")).response(json(resource2)) ); running(server, () -> { ClassicHttpResponse response = helper.getResponseWithHeader(remoteUrl("/targets/1"), of(HttpHeaders.CONTENT_TYPE, "application/json")); Plain response1 = asPlain(response); assertThat(response1.code, is(1)); assertThat(response1.message, is("hello")); ClassicHttpResponse otherResponse = helper.getResponseWithHeader(remoteUrl("/targets/2"), of(HttpHeaders.CONTENT_TYPE, "application/json")); Plain response2 = asPlain(otherResponse); assertThat(response2.code, is(2)); assertThat(response2.message, is("world")); HttpResponse notFoundResponse = helper.getResponse(remoteUrl("/targets/1")); assertThat(notFoundResponse.getCode(), is(404)); }); }
/**
 * Handles a gRPC disconnect: flags the service as disconnected and marks every
 * cached instance registration and subscription as unregistered so they are
 * redone on reconnect.
 */
@Override
public void onDisConnect(Connection connection) {
    connected = false;
    LogUtils.NAMING_LOGGER.warn("Grpc connection disconnect, mark to redo");
    synchronized (registeredInstances) {
        registeredInstances.values().forEach(each -> each.setRegistered(false));
    }
    synchronized (subscribes) {
        subscribes.values().forEach(each -> each.setRegistered(false));
    }
    LogUtils.NAMING_LOGGER.warn("mark to redo completed");
}
/** After a disconnect, previously registered instance/subscriber redo data reappears as pending. */
@Test void testOnDisConnect() { redoService.onConnected(new TestConnection(new RpcClient.ServerInfo())); redoService.cacheInstanceForRedo(SERVICE, GROUP, new Instance()); redoService.instanceRegistered(SERVICE, GROUP); redoService.cacheSubscriberForRedo(SERVICE, GROUP, CLUSTER); redoService.subscriberRegistered(SERVICE, GROUP, CLUSTER); assertTrue(redoService.isConnected()); assertTrue(redoService.findInstanceRedoData().isEmpty()); assertTrue(redoService.findSubscriberRedoData().isEmpty()); redoService.onDisConnect(new TestConnection(new RpcClient.ServerInfo())); assertFalse(redoService.isConnected()); assertFalse(redoService.findInstanceRedoData().isEmpty()); assertFalse(redoService.findSubscriberRedoData().isEmpty()); }
// Contract: subclasses produce the voice-instruction value for the given remaining
// distance, or (per the test below) null when no instruction applies at that distance.
public abstract VoiceInstructionValue getConfigForDistance( double distance, String turnDescription, String thenVoiceInstruction);
/** Fixed-distance metric config fires at/above 2000 m with a German instruction, and returns null below. */
@Test public void germanFixedDistanceInitialVICMetricTest() { FixedDistanceVoiceInstructionConfig configMetric = new FixedDistanceVoiceInstructionConfig(IN_HIGHER_DISTANCE_PLURAL.metric, trMap, Locale.GERMAN, 2000, 2); compareVoiceInstructionValues( 2000, "In 2 Kilometern abbiegen", configMetric.getConfigForDistance(2100, "abbiegen", " dann") ); compareVoiceInstructionValues( 2000, "In 2 Kilometern abbiegen", configMetric.getConfigForDistance(2000, "abbiegen", " dann") ); assertNull(configMetric.getConfigForDistance(1999, "abbiegen", " dann")); }
// Factory for an AsList view transform with default settings (no tag, random access disabled).
public static <T> AsList<T> asList() { return new AsList<>(null, false); }
/** A list side input with random access supports size(), get(i), and iteration from a DoFn. */
@Test @Category(ValidatesRunner.class) public void testListWithRandomAccessSideInput() { final PCollectionView<List<Integer>> view = pipeline .apply("CreateSideInput", Create.of(11, 13, 17, 23)) .apply(View.<Integer>asList().withRandomAccess()); PCollection<Integer> output = pipeline .apply("CreateMainInput", Create.of(29, 31)) .apply( "OutputSideInputs", ParDo.of( new DoFn<Integer, Integer>() { @ProcessElement public void processElement(ProcessContext c) { checkArgument(c.sideInput(view).size() == 4); checkArgument( c.sideInput(view).get(0).equals(c.sideInput(view).get(0))); for (Integer i : c.sideInput(view)) { c.output(i); } } }) .withSideInputs(view)); PAssert.that(output).containsInAnyOrder(11, 13, 17, 23, 11, 13, 17, 23); pipeline.run(); }
/**
 * Wraps a listener with tracing. Null input and listeners that are already
 * traced are returned unchanged.
 */
static MessageListener create(MessageListener delegate, JmsTracing jmsTracing) {
    if (delegate == null || delegate instanceof TracingMessageListener) {
        return delegate; // nothing to wrap (null stays null)
    }
    return new TracingMessageListener(delegate, jmsTracing, true);
}
/** A null delegate yields a null listener rather than a wrapper. */
@Test void null_listener_if_delegate_is_null() { assertThat(TracingMessageListener.create(null, jmsTracing)) .isNull(); }
/**
 * Routes a connection-state event to the handler matching the circuit
 * breaker's current state (open vs. closed).
 */
@Override
public synchronized void stateChanged(CuratorFramework client, ConnectionState newState) {
    if (!circuitBreaker.isOpen()) {
        handleClosedStateChange(newState);
    } else {
        handleOpenStateChange(newState);
    }
}
/** While open, repeated LOST events are suppressed; RECONNECTED is forwarded and closes the circuit on the next retry. */
@Test
public void testResetsAfterReconnect() throws Exception {
    RecordingListener recordingListener = new RecordingListener();
    TestRetryPolicy retryPolicy = new TestRetryPolicy();
    CircuitBreakingConnectionStateListener listener = new CircuitBreakingConnectionStateListener(dummyClient, recordingListener, retryPolicy, service);
    synchronized (listener) // don't let retry policy run while we're pushing state changes
    {
        listener.stateChanged(dummyClient, ConnectionState.LOST);
        listener.stateChanged(dummyClient, ConnectionState.LOST); // second LOST ignored
    }
    assertEquals(timing.takeFromQueue(recordingListener.stateChanges), ConnectionState.LOST);
    assertTrue(recordingListener.stateChanges.isEmpty());
    listener.stateChanged(dummyClient, ConnectionState.RECONNECTED); // causes circuit to close on next retry
    assertEquals(timing.takeFromQueue(recordingListener.stateChanges), ConnectionState.RECONNECTED);
}
/**
 * Executes a PostgreSQL Describe message: 'S' describes a prepared statement,
 * 'P' describes a portal; any other target type is rejected.
 */
@Override
public Collection<DatabasePacket> execute() throws SQLException {
    char describeType = packet.getType();
    switch (describeType) {
        case 'S':
            return describePreparedStatement();
        case 'P':
            return Collections.singleton(portalContext.get(packet.getName()).describe());
        default:
            throw new UnsupportedSQLOperationException("Unsupported describe type: " + describeType);
    }
}
/** Describing an INSERT ... RETURNING prepared statement yields a 3-parameter description (int4 + 2 varchar OIDs) followed by a row description packet. */
@Test void assertDescribePreparedStatementInsertWithReturningClause() throws SQLException { when(packet.getType()).thenReturn('S'); final String statementId = "S_2"; when(packet.getName()).thenReturn(statementId); String sql = "insert into t_order (k, c, pad) values (?, ?, ?) " + "returning id, id alias_id, 'anonymous', 'OK' literal_string, 1 literal_int, 4294967296 literal_bigint, 1.1 literal_numeric, t_order.*, t_order, t_order alias_t_order"; SQLStatement sqlStatement = SQL_PARSER_ENGINE.parse(sql, false); List<PostgreSQLColumnType> parameterTypes = new ArrayList<>(sqlStatement.getParameterCount()); for (int i = 0; i < sqlStatement.getParameterCount(); i++) { parameterTypes.add(PostgreSQLColumnType.UNSPECIFIED); } SQLStatementContext sqlStatementContext = mock(InsertStatementContext.class); when(sqlStatementContext.getSqlStatement()).thenReturn(sqlStatement); ContextManager contextManager = mockContextManager(); when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager); List<Integer> parameterIndexes = IntStream.range(0, sqlStatement.getParameterCount()).boxed().collect(Collectors.toList()); connectionSession.getServerPreparedStatementRegistry().addPreparedStatement(statementId, new PostgreSQLServerPreparedStatement(sql, sqlStatementContext, new HintValueContext(), parameterTypes, parameterIndexes)); Collection<DatabasePacket> actualPackets = executor.execute(); assertThat(actualPackets.size(), is(2)); Iterator<DatabasePacket> actualPacketsIterator = actualPackets.iterator(); PostgreSQLParameterDescriptionPacket actualParameterDescription = (PostgreSQLParameterDescriptionPacket) actualPacketsIterator.next(); PostgreSQLPacketPayload mockPayload = mock(PostgreSQLPacketPayload.class); actualParameterDescription.write(mockPayload); verify(mockPayload).writeInt2(3); verify(mockPayload).writeInt4(23); verify(mockPayload, times(2)).writeInt4(18); DatabasePacket actualRowDescriptionPacket = actualPacketsIterator.next();
assertThat(actualRowDescriptionPacket, is(instanceOf(PostgreSQLRowDescriptionPacket.class))); assertRowDescriptions((PostgreSQLRowDescriptionPacket) actualRowDescriptionPacket); }
// Pipeline: query the controller's version, list the pods, detect the from/to
// versions from them, then prepare the resulting version change.
public Future<KafkaVersionChange> reconcile() { return getVersionFromController() .compose(i -> getPods()) .compose(this::detectToAndFromVersions) .compose(i -> prepareVersionChange()); }
/** With uniform versions and a custom metadataVersion, reconcile is a no-op except for carrying the custom metadata version through. */
@Test public void testNoopWithCustomMetadataVersion(VertxTestContext context) { String kafkaVersion = VERSIONS.defaultVersion().version(); String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); Kafka kafka = new KafkaBuilder(mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion)) .editSpec() .editKafka() .withMetadataVersion("3.5-IV2") .endKafka() .endSpec() .build(); VersionChangeCreator vcc = mockVersionChangeCreator( kafka, mockNewCluster( null, mockSps(kafkaVersion), mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) ) ); Checkpoint async = context.checkpoint(); vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { assertThat(c.from(), is(VERSIONS.defaultVersion())); assertThat(c.to(), is(VERSIONS.defaultVersion())); assertThat(c.interBrokerProtocolVersion(), nullValue()); assertThat(c.logMessageFormatVersion(), nullValue()); assertThat(c.metadataVersion(), is("3.5-IV2")); async.flag(); }))); }
/**
 * Renders the microsecond component (0-999) of the event's timestamp as a
 * three-digit, zero-padded string (e.g. 7 -> "007", 456 -> "456").
 */
@Override
public String convert(ILoggingEvent event) {
    // nanos -> micros, then keep only the sub-millisecond part.
    int micros = (event.getNanoseconds() / 1000) % 1000;
    // Zero-pad to exactly three digits without a branch ladder;
    // micros is always 0..999, so digits has length 1..3.
    String digits = Integer.toString(micros);
    return "000".substring(digits.length()) + digits;
}
/** 123,456,789 ns -> microsecond component "456". */
@Test public void smoke() { LoggingEvent le = new LoggingEvent(); Instant instant = Instant.parse("2011-12-03T10:15:30Z"); instant = instant.plusNanos(123_456_789); le.setInstant(instant); String result = mc.convert(le); assertEquals("456", result); }
// Base-class default: quick recursive listing is not supported; subclasses opt in.
@Restricted(NoExternalUse.class) public boolean supportsQuickRecursiveListing() { return false; }
/** The abstract base's default answer is false. */
@Test public void testSupportsQuickRecursiveListing_AbstractBase() { VirtualFile root = new VirtualFileMinimalImplementation(); assertFalse(root.supportsQuickRecursiveListing()); }
/**
 * Derives a Pinot schema from an Avro schema, applying complex-type handling
 * (unnesting, delimiter-joined names, collection-to-JSON policy) to every
 * top-level Avro field.
 */
public static Schema getPinotSchemaFromAvroSchemaWithComplexTypeHandling(org.apache.avro.Schema avroSchema,
    @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit, List<String> fieldsToUnnest,
    String delimiter, ComplexTypeConfig.CollectionNotUnnestedToJson collectionNotUnnestedToJson) {
  Schema result = new Schema();
  for (Field avroField : avroSchema.getFields()) {
    extractSchemaWithComplexTypeHandling(avroField.schema(), fieldsToUnnest, delimiter, avroField.name(), result,
        fieldTypeMap, timeUnit, collectionNotUnnestedToJson);
  }
  return result;
}
/** Exercises complex-type schema inference across four configurations: no unnesting, unnesting "entries", a custom delimiter, and collection-to-JSON ALL. */
@Test
public void testGetPinotSchemaFromAvroSchemaWithComplexType() throws IOException {
    // do not unnest collect
    org.apache.avro.Schema avroSchema = new org.apache.avro.Schema.Parser().parse(ClassLoader.getSystemResourceAsStream(AVRO_NESTED_SCHEMA));
    Map<String, FieldSpec.FieldType> fieldSpecMap = new ImmutableMap.Builder<String, FieldSpec.FieldType>().put("d1", FieldType.DIMENSION) .put("hoursSinceEpoch", FieldType.DATE_TIME).put("m1", FieldType.METRIC).build();
    Schema inferredPinotSchema = AvroUtils.getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS, new ArrayList<>(), ".", ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE);
    Schema expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT) .addSingleValueDimension("tuple.streetaddress", DataType.STRING) .addSingleValueDimension("tuple.city", DataType.STRING).addSingleValueDimension("entries", DataType.STRING) .addMultiValueDimension("d2", DataType.INT) .addDateTime("hoursSinceEpoch", DataType.LONG, "1:HOURS:EPOCH", "1:HOURS").build();
    assertEquals(inferredPinotSchema, expectedSchema);
    // unnest collection entries
    inferredPinotSchema = AvroUtils.getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS, Lists.newArrayList("entries"), ".", ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE);
    expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT) .addSingleValueDimension("tuple.streetaddress", DataType.STRING) .addSingleValueDimension("tuple.city", DataType.STRING).addSingleValueDimension("entries.id", DataType.LONG) .addSingleValueDimension("entries.description", DataType.STRING).addMultiValueDimension("d2", DataType.INT) .addDateTime("hoursSinceEpoch", DataType.LONG, "1:HOURS:EPOCH", "1:HOURS").build();
    assertEquals(inferredPinotSchema, expectedSchema);
    // change delimiter
    inferredPinotSchema = AvroUtils.getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS, Lists.newArrayList(), "_", ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE);
    expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT) .addSingleValueDimension("tuple_streetaddress", DataType.STRING) .addSingleValueDimension("tuple_city", DataType.STRING).addSingleValueDimension("entries", DataType.STRING) .addMultiValueDimension("d2", DataType.INT) .addDateTime("hoursSinceEpoch", DataType.LONG, "1:HOURS:EPOCH", "1:HOURS").build();
    assertEquals(inferredPinotSchema, expectedSchema);
    // change the handling of collection-to-json option, d2 will become string
    inferredPinotSchema = AvroUtils.getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS, Lists.newArrayList("entries"), ".", ComplexTypeConfig.CollectionNotUnnestedToJson.ALL);
    expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT) .addSingleValueDimension("tuple.streetaddress", DataType.STRING) .addSingleValueDimension("tuple.city", DataType.STRING).addSingleValueDimension("entries.id", DataType.LONG) .addSingleValueDimension("entries.description", DataType.STRING) .addSingleValueDimension("d2", DataType.STRING) .addDateTime("hoursSinceEpoch", DataType.LONG, "1:HOURS:EPOCH", "1:HOURS").build();
    assertEquals(inferredPinotSchema, expectedSchema);
}
/**
 * Parses a feature identifier (URI, classpath reference, or file path) into a URI.
 * The order of checks is significant: bare "classpath:" legacy form first, then
 * non-standard path separators, then Windows drive paths, then probable URIs,
 * finally a plain file path.
 *
 * @throws IllegalArgumentException if the identifier is empty
 */
public static URI parse(String featureIdentifier) {
    requireNonNull(featureIdentifier, "featureIdentifier may not be null");
    if (featureIdentifier.isEmpty()) {
        throw new IllegalArgumentException("featureIdentifier may not be empty");
    }
    // Legacy from the Cucumber Eclipse plugin
    // Older versions of Cucumber allowed it.
    if (CLASSPATH_SCHEME_PREFIX.equals(featureIdentifier)) {
        return rootPackageUri();
    }
    if (nonStandardPathSeparatorInUse(featureIdentifier)) {
        String standardized = replaceNonStandardPathSeparator(featureIdentifier);
        return parseAssumeFileScheme(standardized);
    }
    if (isWindowsOS() && pathContainsWindowsDrivePattern(featureIdentifier)) {
        return parseAssumeFileScheme(featureIdentifier);
    }
    if (probablyURI(featureIdentifier)) {
        return parseProbableURI(featureIdentifier);
    }
    return parseAssumeFileScheme(featureIdentifier);
}
/** On Windows, a forward-slash drive path parses to a file: URI with a leading slash. */
@Test @EnabledOnOs(WINDOWS) void can_parse_windows_file_path_with_standard_file_separator() { URI uri = FeaturePath.parse("C:/path/to/file.feature"); assertAll( () -> assertThat(uri.getScheme(), is("file")), () -> assertThat(uri.getSchemeSpecificPart(), is("/C:/path/to/file.feature"))); }
// True when the table name matches any configured exclude pattern.
public boolean matchExcludeTable(@NotNull String tableName) { return matchTable(tableName, this.getExclude()); }
/** Exclude matching supports exact names and regex patterns like test[a|b]. */
@Test void matchExcludeTableTest() { StrategyConfig.Builder strategyConfigBuilder = GeneratorBuilder.strategyConfigBuilder(); strategyConfigBuilder.addExclude("system", "user_1", "test[a|b]"); StrategyConfig strategyConfig = strategyConfigBuilder.build(); Assertions.assertTrue(strategyConfig.matchExcludeTable("system")); Assertions.assertFalse(strategyConfig.matchExcludeTable("test_exclude")); Assertions.assertTrue(strategyConfig.matchExcludeTable("testa")); Assertions.assertTrue(strategyConfig.matchExcludeTable("testb")); Assertions.assertFalse(strategyConfig.matchExcludeTable("testc")); }
// Updates a DIY page after validating that it exists and its name is unique
// within the template.
@Override
public void updateDiyPage(DiyPageUpdateReqVO updateReqVO) {
    // validate existence
    validateDiyPageExists(updateReqVO.getId());
    // validate name uniqueness
    validateNameUnique(updateReqVO.getId(), updateReqVO.getTemplateId(), updateReqVO.getName());
    // update
    DiyPageDO updateObj = DiyPageConvert.INSTANCE.convert(updateReqVO);
    diyPageMapper.updateById(updateObj);
}
/** Updating an existing DIY page persists all fields of the request. */
@Test
public void testUpdateDiyPage_success() {
    // mock data
    DiyPageDO dbDiyPage = randomPojo(DiyPageDO.class);
    diyPageMapper.insert(dbDiyPage);// @Sql: insert an existing record first
    // prepare parameters
    DiyPageUpdateReqVO reqVO = randomPojo(DiyPageUpdateReqVO.class, o -> {
        o.setId(dbDiyPage.getId()); // set the ID being updated
    });
    // invoke
    diyPageService.updateDiyPage(reqVO);
    // verify the update is correct
    DiyPageDO diyPage = diyPageMapper.selectById(reqVO.getId()); // fetch the latest record
    assertPojoEquals(reqVO, diyPage);
}
// Hash over the same fields used by equals (image, digests, tags, pushed flag).
@Override public int hashCode() { return Objects.hash(targetImage, imageDigest, imageId, tags, imagePushed); }
/** Containers differing only in tags are unequal, with (likely) different hashes. */
@Test public void testEquality_differentTags() { JibContainer container1 = new JibContainer(targetImage1, digest1, digest1, tags1, true); JibContainer container2 = new JibContainer(targetImage1, digest1, digest1, tags2, true); Assert.assertNotEquals(container1, container2); Assert.assertNotEquals(container1.hashCode(), container2.hashCode()); }
// Immutable snapshot of the per-PCollection field-access map gathered by the visitor.
ImmutableMap<PCollection<?>, FieldAccessDescriptor> getPCollectionFieldAccess() { return ImmutableMap.copyOf(pCollectionFieldAccess); }
/** A DoFn with unknown field access forces all-fields access on its input PCollection. */
@Test public void testFieldAccessUnknownMainInput() { Pipeline p = Pipeline.create(); FieldAccessVisitor fieldAccessVisitor = new FieldAccessVisitor(); Schema schema = Schema.of(Field.of("field1", FieldType.STRING), Field.of("field2", FieldType.STRING)); PCollection<Row> source = p.apply(Create.of(Row.withSchema(schema).addValues("foo", "bar").build())) .setRowSchema(schema); source.apply(ParDo.of(new UnknownDoFn())).setRowSchema(schema); p.traverseTopologically(fieldAccessVisitor); assertTrue(fieldAccessVisitor.getPCollectionFieldAccess().get(source).getAllFields()); }
/**
 * Sets the serialization version after validating it does not exceed the
 * maximum supported by this build.
 *
 * @throws IllegalArgumentException if the version exceeds the supported maximum
 * @return this builder, for chaining
 */
@Override
public SerializationServiceBuilder setVersion(byte version) {
    byte supportedMax = BuildInfoProvider.getBuildInfo().getSerializationVersion();
    if (version > supportedMax) {
        throw new IllegalArgumentException(
                "Configured serialization version is higher than the max supported version: " + supportedMax);
    }
    this.version = version;
    return this;
}
/** A version above the supported maximum is rejected. */
@Test(expected = IllegalArgumentException.class) public void test_exceptionThrown_whenVersionGreaterThanMax() { getSerializationServiceBuilder().setVersion(Byte.MAX_VALUE); }
/**
 * Decodes a WAL message from the buffer: reads the remaining bytes as UTF-8
 * text, parses it with or without transaction context depending on
 * {@code decodeWithTX}, and stamps the resulting event with its LSN.
 */
@Override
public AbstractWALEvent decode(final ByteBuffer data, final BaseLogSequenceNumber logSequenceNumber) {
    byte[] buffer = new byte[data.remaining()];
    data.get(buffer);
    String text = new String(buffer, StandardCharsets.UTF_8);
    AbstractWALEvent event = decodeWithTX ? decodeDataWithTX(text) : decodeDataIgnoreTX(text);
    event.setLogSequenceNumber(logSequenceNumber);
    return event;
}
/** A tinyint column value of 255 decodes to the integer 255 in the write-row event. */
@Test void assertDecodeWitTinyint() { MppTableData tableData = new MppTableData(); tableData.setTableName("public.test"); tableData.setOpType("INSERT"); tableData.setColumnsName(new String[]{"data"}); tableData.setColumnsType(new String[]{"tinyint"}); tableData.setColumnsVal(new String[]{"255"}); ByteBuffer data = ByteBuffer.wrap(JsonUtils.toJsonString(tableData).getBytes()); WriteRowEvent actual = (WriteRowEvent) new MppdbDecodingPlugin(null, false, false).decode(data, logSequenceNumber); Object byteaObj = actual.getAfterRow().get(0); assertThat(byteaObj, is(255)); }
// Jackson factory: builds a WindowInfo from its JSON fields; validation (if any)
// happens in the WindowInfo constructor.
@JsonCreator public static WindowInfo of( @JsonProperty(value = "type", required = true) final WindowType type, @JsonProperty(value = "size") final Optional<Duration> size, @JsonProperty(value = "emitStrategy") final Optional<OutputRefinement> emitStrategy) { return new WindowInfo(type, size, emitStrategy); }
/** A negative window size is rejected during construction. */
@Test(expected = IllegalArgumentException.class) public void shouldThrowIfSizeNegative() { WindowInfo.of(TUMBLING, Optional.of(Duration.ofSeconds(-1)), Optional.empty()); }