focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Implements RENAMENX (rename only if the target key does not exist) for a Redis
 * Cluster, handling the case where source and target keys hash to different slots.
 * <p>
 * Same slot: delegates to the native RENAMENX. Different slots: emulated with
 * EXISTS + DUMP on the source, RESTORE on the target (carrying the TTL over),
 * then DEL of the source on success.
 *
 * @param commands stream of rename commands; key and new key must be non-null
 * @return a {@code BooleanResponse} per command: {@code true} if renamed,
 *         {@code false} if the target key already existed
 */
@Override
public Flux<ReactiveRedisConnection.BooleanResponse<RenameCommand>> renameNX(Publisher<RenameCommand> commands) {
    return execute(commands, command -> {
        Assert.notNull(command.getKey(), "Key must not be null!");
        Assert.notNull(command.getNewKey(), "New name must not be null!");
        byte[] keyBuf = toByteArray(command.getKey());
        byte[] newKeyBuf = toByteArray(command.getNewKey());
        // Same hash slot: the server-side RENAMENX works natively across these keys
        if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) {
            return super.renameNX(commands);
        }
        // Cross-slot: emulate RENAMENX with EXISTS + DUMP + RESTORE + DEL
        return exists(command.getNewKey())
                .zipWith(read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf))
                // proceed only when the target is absent AND the source actually exists (dump non-null)
                .filter(newKeyExistsAndDump -> !newKeyExistsAndDump.getT1() && Objects.nonNull(newKeyExistsAndDump.getT2()))
                .map(Tuple2::getT2)
                .zipWhen(value -> pTtl(command.getKey())
                        .filter(Objects::nonNull)
                        // negative TTL means "no expiry"; RESTORE expects 0 in that case
                        .map(ttl -> Math.max(0, ttl))
                        .switchIfEmpty(Mono.just(0L))
                )
                .flatMap(valueAndTtl -> write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1())
                        .then(Mono.just(true)))
                // filter dropped the chain -> target existed (or source missing): report false
                .switchIfEmpty(Mono.just(false))
                // NOTE(review): del() result is not subscribed/awaited here - fire-and-forget; confirm intended
                .doOnSuccess(didRename -> {
                    if (didRename) {
                        del(command.getKey());
                    }
                })
                .map(didRename -> new BooleanResponse<>(command, didRename));
    });
}
/**
 * Verifies renameNX semantics across slots: the first rename succeeds (value and
 * TTL carried over to the new key), the second fails because the target exists.
 */
@Test
public void testRenameNX() {
    connection.stringCommands().set(originalKey, value).block();
    if (hasTtl) {
        connection.keyCommands().expire(originalKey, Duration.ofSeconds(1000)).block();
    }
    // Pick a new key mapped to the (possibly different) target slot
    Integer originalSlot = getSlotForKey(originalKey);
    newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot));
    Boolean result = connection.keyCommands().renameNX(originalKey, newKey).block();
    assertThat(result).isTrue();
    assertThat(connection.stringCommands().get(newKey).block()).isEqualTo(value);
    if (hasTtl) {
        assertThat(connection.keyCommands().ttl(newKey).block()).isGreaterThan(0);
    } else {
        // -1 is Redis's "key exists but has no associated expire"
        assertThat(connection.keyCommands().ttl(newKey).block()).isEqualTo(-1);
    }
    // Re-create the source; rename must now fail since newKey already exists
    connection.stringCommands().set(originalKey, value).block();
    result = connection.keyCommands().renameNX(originalKey, newKey).block();
    assertThat(result).isFalse();
}
private List<Token> parseTopic(String topic) throws ParseException { if (topic.length() == 0) { throw new ParseException("Bad format of topic, topic MUST be at least 1 character [MQTT-4.7.3-1] and " + "this was empty", 0); } List<Token> res = new ArrayList<>(); String[] splitted = topic.split("/"); if (splitted.length == 0) { res.add(Token.EMPTY); } if (topic.endsWith("/")) { // Add a fictious space String[] newSplitted = new String[splitted.length + 1]; System.arraycopy(splitted, 0, newSplitted, 0, splitted.length); newSplitted[splitted.length] = ""; splitted = newSplitted; } for (int i = 0; i < splitted.length; i++) { String s = splitted[i]; if (s.isEmpty()) { // if (i != 0) { // throw new ParseException("Bad format of topic, expetec topic name between // separators", i); // } res.add(Token.EMPTY); } else if (s.equals("#")) { // check that multi is the last symbol if (i != splitted.length - 1) { throw new ParseException( "Bad format of topic, the multi symbol (#) has to be the last one after a separator", i); } res.add(Token.MULTI); } else if (s.contains("#")) { throw new ParseException("Bad format of topic, invalid subtopic name: " + s, i); } else if (s.equals("+")) { res.add(Token.SINGLE); } else if (s.contains("+")) { throw new ParseException("Bad format of topic, invalid subtopic name: " + s, i); } else { res.add(new Token(s)); } } return res; }
/** Checks tokenization of a plain topic, a leading-separator topic, and the separator-only topic. */
@Test
public void testParseTopic() {
    assertThat(new Topic("finance/stock/ibm")).containsToken("finance", "stock", "ibm");
    // A leading '/' yields an empty first level
    assertThat(new Topic("/finance/stock/ibm")).containsToken(Token.EMPTY, "finance", "stock", "ibm");
    // "/" alone is parsed as two empty levels
    assertThat(new Topic("/")).containsToken(Token.EMPTY, Token.EMPTY);
}
/**
 * Sets the client identifier for the consumer being built.
 *
 * @param client the client identifier
 * @return this builder, for call chaining
 */
public ConsumerBuilder client(String client) {
    this.client = client;
    return getThis();
}
/** The client id set on the builder must be carried into the built consumer. */
@Test
void client() {
    ConsumerBuilder builder = ConsumerBuilder.newBuilder();
    builder.client("client");
    Assertions.assertEquals("client", builder.build().getClient());
}
/**
 * Updates creation and modification timestamps of a file or directory on the
 * SMB share. Only timestamps present in {@code status} are set; the others are
 * left untouched ({@code FileBasicInformation.DONT_SET}).
 *
 * @param file   the remote file or directory
 * @param status carries the (optional) created and modified epoch-millis values
 * @throws BackgroundException if the SMB operation fails
 */
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
    final SMBSession.DiskShareWrapper share = session.openShare(file);
    try {
        final String key = new SMBPathContainerService(session).getKey(file);
        if(file.isDirectory()) {
            // Directories must be opened with FILE_DIRECTORY_FILE and the directory attribute
            try (final Directory entry = share.get().openDirectory(key,
                    Collections.singleton(AccessMask.FILE_WRITE_ATTRIBUTES),
                    Collections.singleton(FileAttributes.FILE_ATTRIBUTE_DIRECTORY),
                    Collections.singleton(SMB2ShareAccess.FILE_SHARE_READ),
                    SMB2CreateDisposition.FILE_OPEN,
                    Collections.singleton(SMB2CreateOptions.FILE_DIRECTORY_FILE))) {
                entry.setFileInformation(toBasicInformation(status, FileAttributes.FILE_ATTRIBUTE_DIRECTORY));
            }
        }
        else {
            try (final File entry = share.get().openFile(key,
                    Collections.singleton(AccessMask.FILE_WRITE_ATTRIBUTES),
                    Collections.singleton(FileAttributes.FILE_ATTRIBUTE_NORMAL),
                    Collections.singleton(SMB2ShareAccess.FILE_SHARE_READ),
                    SMB2CreateDisposition.FILE_OPEN,
                    Collections.singleton(SMB2CreateOptions.FILE_NON_DIRECTORY_FILE))) {
                entry.setFileInformation(toBasicInformation(status, FileAttributes.FILE_ATTRIBUTE_NORMAL));
            }
        }
    }
    catch(SMBRuntimeException e) {
        throw new SMBExceptionMappingService().map("Cannot change timestamp of {0}", e, file);
    }
    finally {
        session.releaseShare(share);
    }
}

/**
 * Builds the basic-information record applied to the open SMB entry: created
 * and modified times from {@code status} (or DONT_SET when absent), all other
 * times untouched.
 */
private static FileBasicInformation toBasicInformation(final TransferStatus status, final FileAttributes attributes) {
    return new FileBasicInformation(
            status.getCreated() != null ? FileTime.ofEpochMillis(status.getCreated()) : FileBasicInformation.DONT_SET,
            FileBasicInformation.DONT_SET,
            status.getModified() != null ? FileTime.ofEpochMillis(status.getModified()) : FileBasicInformation.DONT_SET,
            FileBasicInformation.DONT_SET,
            attributes.getValue());
}
/**
 * Writes a file, moves its modification timestamp forward by 2 seconds via the
 * timestamp feature, and verifies the new timestamp and unchanged size are
 * reported back by the attributes finder.
 */
@Test
public void testTimestampFile() throws Exception {
    final TransferStatus status = new TransferStatus();
    final Path home = new DefaultHomeFinderService(session).find();
    final Path test = new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final int length = 100;
    final byte[] content = RandomUtils.nextBytes(length);
    status.setLength(content.length);
    status.setModified(System.currentTimeMillis());
    final Write writer = new SMBWriteFeature(session);
    status.setChecksum(writer.checksum(test, status).compute(new ByteArrayInputStream(content), status));
    final OutputStream out = writer.write(test, status, new DisabledConnectionCallback());
    assertNotNull(out);
    new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
    // make sure timestamps are different
    final PathAttributes attributes = new SMBAttributesFinderFeature(session).find(test);
    assertNotEquals(-1L, attributes.getModificationDate());
    long oldTime = attributes.getModificationDate();
    status.setModified(oldTime + 2000);
    new SMBTimestampFeature(session).setTimestamp(test, status);
    PathAttributes newAttributes = new SMBAttributesFinderFeature(session).find(test);
    assertEquals(status.getModified().longValue(), newAttributes.getModificationDate());
    assertEquals(content.length, newAttributes.getSize());
    new SMBDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Extracts lookup values from a JSON HTTP response body.
 * <p>
 * The single JSONPath must resolve to a string, number or boolean; the optional
 * multi JSONPath may resolve to a map (used directly), a list (exposed both as a
 * string list and, for backwards compatibility, as a singleton multi value), or
 * any other value (wrapped as a singleton). Any parse or type error yields
 * {@code null} rather than propagating.
 *
 * @param singleJsonPath path for the mandatory single value
 * @param multiJsonPath  optional path for the multi value; may be null
 * @param body           the raw JSON response stream
 * @return the populated result, or {@code null} on parse failure or missing single value
 */
@VisibleForTesting
static LookupResult parseBody(JsonPath singleJsonPath, @Nullable JsonPath multiJsonPath, InputStream body) {
    try {
        final DocumentContext documentContext = JsonPath.parse(body);
        // results are cached indefinitely by default
        LookupResult.Builder builder = LookupResult.builder().cacheTTL(Long.MAX_VALUE);
        if (multiJsonPath != null) {
            try {
                final Object multiValue = documentContext.read(multiJsonPath);
                if (multiValue instanceof Map) {
                    //noinspection unchecked
                    builder = builder.multiValue((Map<Object, Object>) multiValue);
                } else if (multiValue instanceof List) {
                    //noinspection unchecked
                    final List<String> stringList = ((List<Object>) multiValue).stream().map(Object::toString).collect(Collectors.toList());
                    builder = builder.stringListValue(stringList);
                    // for backwards compatibility
                    builder = builder.multiSingleton(multiValue);
                } else {
                    builder = builder.multiSingleton(multiValue);
                }
            } catch (PathNotFoundException e) {
                // multi value is optional: log and continue with the single value only
                LOG.warn("Couldn't read multi JSONPath from response - skipping multi value ({})", e.getMessage());
            }
        }
        try {
            final Object singleValue = documentContext.read(singleJsonPath);
            if (singleValue instanceof CharSequence) {
                return builder.single((CharSequence) singleValue).build();
            } else if (singleValue instanceof Number) {
                return builder.single((Number) singleValue).build();
            } else if (singleValue instanceof Boolean) {
                return builder.single((Boolean) singleValue).build();
            } else {
                // caught by the generic Exception handler below and turned into null
                throw new IllegalArgumentException("Single value data type cannot be: " + singleValue.getClass().getCanonicalName());
            }
        } catch (PathNotFoundException e) {
            LOG.warn("Couldn't read single JSONPath from response - returning empty result ({})", e.getMessage());
            return null;
        }
    } catch (InvalidJsonException e) {
        LOG.error("Couldn't parse JSON response", e);
        return null;
    } catch (ClassCastException e) {
        LOG.error("Couldn't assign value type", e);
        return null;
    } catch (Exception e) {
        LOG.error("Unexpected error parsing JSON response", e);
        return null;
    }
}
/** A list at the multi path is exposed both as a {"value": [...]} map and as a string list. */
@Test
public void parseBodyWithListMultiValue() throws Exception {
    final JsonPath singlePath = JsonPath.compile("$.hello");
    final JsonPath multiPath = JsonPath.compile("$.list");
    final LookupResult result = HTTPJSONPathDataAdapter.parseBody(singlePath, multiPath, body);
    assertThat(result.isEmpty()).isFalse();
    assertThat(result.hasError()).isFalse();
    assertThat(result.singleValue()).isEqualTo("world");
    assertThat(result.multiValue()).isNotNull();
    assertThat(result.multiValue()).isInstanceOf(Map.class);
    assertThat(result.multiValue()).containsKey("value");
    //noinspection ConstantConditions
    assertThat(result.multiValue().get("value")).isInstanceOf(Collection.class);
    //noinspection unchecked,ConstantConditions
    assertThat((Collection) result.multiValue().get("value")).containsOnly("a", "b", "c");
    assertThat(result.stringListValue()).containsOnly("a", "b", "c");
}
/**
 * HTTP GET entry point of the HttpFS server: dispatches on the {@code op} query
 * parameter to the matching filesystem read operation and renders the result as
 * JSON (or an octet stream for OPEN).
 * <p>
 * In write-only access mode only GETFILESTATUS and LISTSTATUS are permitted;
 * every other operation is rejected with 403.
 *
 * @param path    the filesystem path (made absolute below)
 * @param uriInfo request URI, used to build redirect locations
 * @param op      the requested operation
 * @param params  typed accessor for the remaining query parameters
 * @param request the raw servlet request (for audit logging)
 * @return the HTTP response for the operation
 * @throws IOException               on an invalid operation or I/O failure
 * @throws FileSystemAccessException if the filesystem access layer fails
 */
@GET @Path("{path:.*}") @Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path, @Context UriInfo uriInfo,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params, @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
  if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
      (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
      accessMode == AccessMode.WRITEONLY) {
    return Response.status(Response.Status.FORBIDDEN).build();
  }
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  // record operation and caller host for log correlation
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
  case OPEN: {
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    if (noRedirect) {
      // client asked for the data-node location instead of a redirect/stream
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      //Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      UserGroupInformation ugi = UserGroupInformation
          .createProxyUser(user.getShortUserName(),
              UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        // preserve the interrupt status for callers up the stack
        LOG.warn("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
          new Object[] { path, offset, len });
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
          .build();
    }
    break;
  }
  case GETFILESTATUS: {
    FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS: {
    String filter = params.get(FilterParam.NAME, FilterParam.class);
    FSOperations.FSListStatus command = new FSOperations.FSListStatus(path, filter);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETHOMEDIRECTORY: {
    enforceRootPath(op.value(), path);
    FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("Home Directory for [{}]", user);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case INSTRUMENTATION: {
    // admin-only server metrics snapshot
    enforceRootPath(op.value(), path);
    Groups groups = HttpFSServerWebApp.get().get(Groups.class);
    Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
    if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
      throw new AccessControlException(
          "User not in HttpFSServer admin group");
    }
    Instrumentation instrumentation =
        HttpFSServerWebApp.get().get(Instrumentation.class);
    Map snapshot = instrumentation.getSnapshot();
    response = Response.ok(snapshot).build();
    break;
  }
  case GETCONTENTSUMMARY: {
    FSOperations.FSContentSummary command =
        new FSOperations.FSContentSummary(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Content summary for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETQUOTAUSAGE: {
    FSOperations.FSQuotaUsage command = new FSOperations.FSQuotaUsage(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Quota Usage for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILECHECKSUM: {
    FSOperations.FSFileChecksum command =
        new FSOperations.FSFileChecksum(path);
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    AUDIT_LOG.info("[{}]", path);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      Map json = fsExecute(user, command);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    }
    break;
  }
  case GETFILEBLOCKLOCATIONS: {
    // unspecified or non-positive offset/len fall back to whole-file defaults
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocations command =
        new FSOperations.FSFileBlockLocations(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("BlockLocations", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETACLSTATUS: {
    FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("ACL status for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETXATTRS: {
    List<String> xattrNames =
        params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
    XAttrCodec encoding =
        params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
    FSOperations.FSGetXAttrs command =
        new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttrs for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTXATTRS: {
    FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttr names for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS_BATCH: {
    // paged listing: startAfter is the opaque continuation token
    String startAfter = params.get(
        HttpFSParametersProvider.StartAfterParam.NAME,
        HttpFSParametersProvider.StartAfterParam.class);
    byte[] token = HttpFSUtils.EMPTY_BYTES;
    if (startAfter != null) {
      token = startAfter.getBytes(StandardCharsets.UTF_8);
    }
    FSOperations.FSListStatusBatch command = new FSOperations
        .FSListStatusBatch(path, token);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] token [{}]", path, token);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOT: {
    FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETALLSTORAGEPOLICY: {
    FSOperations.FSGetAllStoragePolicies command =
        new FSOperations.FSGetAllStoragePolicies();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTORAGEPOLICY: {
    FSOperations.FSGetStoragePolicy command =
        new FSOperations.FSGetStoragePolicy(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFF: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    FSOperations.FSGetSnapshotDiff command =
        new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
            snapshotName);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFFLISTING: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    String snapshotDiffStartPath = params
        .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
            HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
    Integer snapshotDiffIndex =
        params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
            HttpFSParametersProvider.SnapshotDiffIndexParam.class);
    FSOperations.FSGetSnapshotDiffListing command =
        new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
            snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTTABLEDIRECTORYLIST: {
    FSOperations.FSGetSnapshottableDirListing command =
        new FSOperations.FSGetSnapshottableDirListing();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTLIST: {
    FSOperations.FSGetSnapshotListing command =
        new FSOperations.FSGetSnapshotListing(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSERVERDEFAULTS: {
    FSOperations.FSGetServerDefaults command =
        new FSOperations.FSGetServerDefaults();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case CHECKACCESS: {
    String mode = params.get(FsActionParam.NAME, FsActionParam.class);
    FsActionParam fsparam = new FsActionParam(mode);
    FSOperations.FSAccess command = new FSOperations.FSAccess(path,
        FsAction.getFsAction(fsparam.value()));
    fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok().build();
    break;
  }
  case GETECPOLICY: {
    FSOperations.FSGetErasureCodingPolicy command =
        new FSOperations.FSGetErasureCodingPolicy(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECPOLICIES: {
    FSOperations.FSGetErasureCodingPolicies command =
        new FSOperations.FSGetErasureCodingPolicies();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECCODECS: {
    FSOperations.FSGetErasureCodingCodecs command =
        new FSOperations.FSGetErasureCodingCodecs();
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GET_BLOCK_LOCATIONS: {
    // legacy variant of GETFILEBLOCKLOCATIONS, kept for older clients
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocationsLegacy command =
        new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILELINKSTATUS: {
    FSOperations.FSFileLinkStatus command =
        new FSOperations.FSFileLinkStatus(path);
    @SuppressWarnings("rawtypes")
    Map js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTATUS: {
    FSOperations.FSStatus command = new FSOperations.FSStatus(path);
    @SuppressWarnings("rawtypes")
    Map js = fsExecute(user, command);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOTS: {
    Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
    FSOperations.FSGetTrashRoots command =
        new FSOperations.FSGetTrashRoots(allUsers);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("allUsers [{}]", allUsers);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  default: {
    throw new IOException(
        MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
  }
  }
  return response;
}
/**
 * End-to-end xattr lifecycle over HttpFS: set two extended attributes, verify
 * both, then remove them one at a time until none remain.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testXAttrs() throws Exception {
    final String name1 = "user.a1";
    final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
    final String name2 = "user.a2";
    final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
    final String dir = "/xattrTest";
    final String path = dir + "/file";
    createHttpFSServer(false, false);
    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
    fs.mkdirs(new Path(dir));
    createWithHttp(path, null);
    // A fresh file has no xattrs
    String statusJson = getStatus(path, "GETXATTRS");
    Map<String, byte[]> xAttrs = getXAttrs(statusJson);
    Assert.assertEquals(0, xAttrs.size());
    // Set two xattrs
    putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
    putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
    statusJson = getStatus(path, "GETXATTRS");
    xAttrs = getXAttrs(statusJson);
    Assert.assertEquals(2, xAttrs.size());
    Assert.assertArrayEquals(value1, xAttrs.get(name1));
    Assert.assertArrayEquals(value2, xAttrs.get(name2));
    // Remove one xattr
    putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
    statusJson = getStatus(path, "GETXATTRS");
    xAttrs = getXAttrs(statusJson);
    Assert.assertEquals(1, xAttrs.size());
    Assert.assertArrayEquals(value2, xAttrs.get(name2));
    // Remove another xattr, then there is no xattr
    putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
    statusJson = getStatus(path, "GETXATTRS");
    xAttrs = getXAttrs(statusJson);
    Assert.assertEquals(0, xAttrs.size());
}
/**
 * Offers an element to the thread's stripe of the buffer table.
 * <p>
 * The thread id is mixed into a hash selecting a stripe; on a missing table,
 * missing stripe, or contended/failed offer the slow path
 * {@code expandOrRetry} takes over (possibly growing the table).
 *
 * @param e the element to add
 * @return the buffer status code (e.g. {@code Buffer.SUCCESS}/{@code FULL}/{@code FAILED})
 */
@Override
public int offer(E e) {
    @SuppressWarnings("deprecation")
    long z = mix64(Thread.currentThread().getId());
    // force the probe increment to be odd so rehash probing can reach all slots
    int increment = ((int) (z >>> 32)) | 1;
    int h = (int) z;
    int mask;
    int result;
    Buffer<E> buffer;
    boolean uncontended = true;
    Buffer<E>[] buffers = table;
    // Fast path: the stripe for this thread exists and the offer did not fail;
    // otherwise fall into the slow path that initializes/expands/retries.
    if ((buffers == null)
        || ((mask = buffers.length - 1) < 0)
        || ((buffer = buffers[h & mask]) == null)
        || !(uncontended = ((result = buffer.offer(e)) != Buffer.FAILED))) {
      return expandOrRetry(e, h, increment, uncontended);
    }
    return result;
}
/**
 * Hammers the striped buffer from NCPU threads and checks each offer returns a
 * legal status and the stripe table never grows beyond its maximum size.
 */
@Test(dataProvider = "buffers")
@SuppressWarnings("ThreadPriorityCheck")
public void produce(FakeBuffer<Integer> buffer) {
    ConcurrentTestHarness.timeTasks(NCPU, () -> {
        for (int i = 0; i < 10; i++) {
            assertThat(buffer.offer(ELEMENT)).isAnyOf(Buffer.SUCCESS, Buffer.FULL, Buffer.FAILED);
            // encourage interleaving between producer threads
            Thread.yield();
        }
    });
    assertThat(buffer.table.length).isAtMost(MAXIMUM_TABLE_SIZE);
}
@Override public Long createSocialClient(SocialClientSaveReqVO createReqVO) { // 校验重复 validateSocialClientUnique(null, createReqVO.getUserType(), createReqVO.getSocialType()); // 插入 SocialClientDO client = BeanUtils.toBean(createReqVO, SocialClientDO.class); socialClientMapper.insert(client); return client.getId(); }
/** Creating a social client with valid data persists it and returns its generated id. */
@Test
public void testCreateSocialClient_success() {
    // Prepare the request parameters
    SocialClientSaveReqVO reqVO = randomPojo(SocialClientSaveReqVO.class,
            o -> o.setSocialType(randomEle(SocialTypeEnum.values()).getType())
                    .setUserType(randomEle(UserTypeEnum.values()).getValue())
                    .setStatus(randomCommonStatus()))
            .setId(null); // prevent the id from being pre-assigned
    // Invoke
    Long socialClientId = socialClientService.createSocialClient(reqVO);
    // Assert
    assertNotNull(socialClientId);
    // Verify the persisted record's attributes match the request (except id)
    SocialClientDO socialClient = socialClientMapper.selectById(socialClientId);
    assertPojoEquals(reqVO, socialClient, "id");
}
/**
 * Creates a {@link KeyId} from the given raw bytes.
 * <p>
 * The input array is defensively copied, so later mutation of the caller's
 * array cannot affect the returned id.
 *
 * @param keyIdBytes the raw id bytes; must not be null
 * @return a new {@code KeyId} wrapping a copy of {@code keyIdBytes}
 */
public static KeyId ofBytes(byte[] keyIdBytes) {
    Objects.requireNonNull(keyIdBytes);
    final byte[] copy = keyIdBytes.clone();
    return new KeyId(copy);
}
/** Byte arrays longer than the maximum UTF-8 key length must be rejected. */
@Test
void too_big_key_id_throws() {
    byte[] tooBigIdBytes = new byte[KeyId.MAX_KEY_ID_UTF8_LENGTH + 1];
    Arrays.fill(tooBigIdBytes, (byte)'A');
    // NOTE(review): the length check presumably lives in the KeyId constructor - confirm
    assertThrows(IllegalArgumentException.class, () -> KeyId.ofBytes(tooBigIdBytes));
}
/**
 * Starts all worker subsystems and flips the running flag first so subsystems
 * observing it see the worker as live.
 * <p>
 * NOTE(review): the start order below looks deliberate (config before harness,
 * reporting after) - confirm before reordering.
 */
@SuppressWarnings("FutureReturnValueIgnored")
public void start() {
    running.set(true);
    configFetcher.start();
    memoryMonitor.start();
    streamingWorkerHarness.start();
    sampler.start();
    workerStatusReporter.start();
    activeWorkRefresher.start();
}
/**
 * Runs a pipeline fragment with an AssignWindows step over two timestamped
 * inputs and verifies each commit carries the expected fixed-window metadata.
 */
@Test
public void testAssignWindows() throws Exception {
    Duration gapDuration = Duration.standardSeconds(1);
    CloudObject spec = CloudObject.forClassName("AssignWindowsDoFn");
    SdkComponents sdkComponents = SdkComponents.create();
    sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
    // Serialize the fixed-windows strategy into the instruction's user fn spec
    addString(
        spec,
        PropertyNames.SERIALIZED_FN,
        StringUtils.byteArrayToJsonString(
            WindowingStrategyTranslation.toMessageProto(
                    WindowingStrategy.of(FixedWindows.of(gapDuration)), sdkComponents)
                .toByteArray()));
    ParallelInstruction addWindowsInstruction =
        new ParallelInstruction()
            .setSystemName("AssignWindows")
            .setName("AssignWindows")
            .setOriginalName("AssignWindowsOriginal")
            .setParDo(
                new ParDoInstruction()
                    .setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0))
                    .setNumOutputs(1)
                    .setUserFn(spec))
            .setOutputs(
                Collections.singletonList(
                    new InstructionOutput()
                        .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                        .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                        .setName("output")
                        .setCodec(
                            CloudObjects.asCloudObject(
                                WindowedValue.getFullCoder(
                                    StringUtf8Coder.of(), IntervalWindow.getCoder()),
                                /* sdkComponents= */ null))));
    // source -> assign windows -> sink
    List<ParallelInstruction> instructions =
        Arrays.asList(
            makeSourceInstruction(StringUtf8Coder.of()),
            addWindowsInstruction,
            makeSinkInstruction(StringUtf8Coder.of(), 1));
    // Two elements, one per expected window
    int timestamp1 = 0;
    int timestamp2 = 1000000;
    server
        .whenGetWorkCalled()
        .thenReturn(makeInput(timestamp1, timestamp1))
        .thenReturn(makeInput(timestamp2, timestamp2));
    StreamingDataflowWorker worker =
        makeWorker(defaultWorkerParams().setInstructions(instructions).build());
    worker.start();
    Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(2);
    assertThat(
        removeDynamicFields(result.get((long) timestamp1)),
        equalTo(
            setMessagesMetadata(
                    PaneInfo.NO_FIRING,
                    intervalWindowBytes(WINDOW_AT_ZERO),
                    makeExpectedOutput(timestamp1, timestamp1))
                .build()));
    assertThat(
        removeDynamicFields(result.get((long) timestamp2)),
        equalTo(
            setMessagesMetadata(
                    PaneInfo.NO_FIRING,
                    intervalWindowBytes(WINDOW_AT_ONE_SECOND),
                    makeExpectedOutput(timestamp2, timestamp2))
                .build()));
}
/**
 * Resolves a property name to its config item by namespace.
 * <p>
 * Request-scoped properties win first; then KSQL properties that are not
 * streams-prefixed; everything else is treated as a streams config.
 *
 * @param propertyName the fully qualified property name
 * @param strict       whether unknown streams configs are rejected
 * @return the resolved config item, if any
 */
@Override
public Optional<ConfigItem> resolve(final String propertyName, final boolean strict) {
    if (propertyName.startsWith(KSQL_REQUEST_CONFIG_PROPERTY_PREFIX)) {
        return resolveRequestConfig(propertyName);
    }
    final boolean ksqlScoped = propertyName.startsWith(KSQL_CONFIG_PROPERTY_PREFIX)
        && !propertyName.startsWith(KSQL_STREAMS_PREFIX);
    if (ksqlScoped) {
        return resolveKsqlConfig(propertyName);
    }
    return resolveStreamsConfig(propertyName, strict);
}
/** Producer-prefixed producer configs must resolve against the producer config definition. */
@Test
public void shouldResolveProducerPrefixedProducerConfig() {
    assertThat(resolver.resolve(
        StreamsConfig.PRODUCER_PREFIX + ProducerConfig.BUFFER_MEMORY_CONFIG, true),
        is(resolvedItem(ProducerConfig.BUFFER_MEMORY_CONFIG, PRODUCER_CONFIG_DEF)));
}
/**
 * Splits paragraph text into interpreter name, local properties and script body.
 * E.g. {@code "%spark.pyspark(pool=p) sc.version"} yields interpreter
 * {@code "spark.pyspark"}, properties {@code {pool=p}} and script
 * {@code "sc.version"}. Text without a "%intp" prefix is all script.
 *
 * @param text the raw paragraph text
 * @return the parsed result (interpreter, script with leading whitespace removed, properties)
 */
public static ParseResult parse(String text) {
    Map<String, String> localProperties = new HashMap<>();
    String intpText = "";
    String scriptText = null;
    Matcher matcher = REPL_PATTERN.matcher(text);
    if (matcher.find()) {
        String headingSpace = matcher.group(1);
        intpText = matcher.group(2);
        // +1 skips the '%' that introduces the interpreter directive
        int startPos = headingSpace.length() + intpText.length() + 1;
        if (startPos < text.length() && text.charAt(startPos) == '(') {
            // consume the "(key=value,...)" block and advance past it
            startPos = parseLocalProperties(text, startPos, localProperties);
        }
        scriptText = text.substring(startPos);
    } else {
        // no interpreter directive: the whole text is the script
        intpText = "";
        scriptText = text;
    }
    return new ParseResult(intpText, removeLeadingWhiteSpaces(scriptText), localProperties);
}
/** "%intp(key=value) script" parses into interpreter, one property, and the script text. */
@Test
void testParagraphTextLocalPropertiesAndText() {
    ParagraphTextParser.ParseResult parseResult =
        ParagraphTextParser.parse("%spark.pyspark(pool=pool_1) sc.version");
    assertEquals("spark.pyspark", parseResult.getIntpText());
    assertEquals(1, parseResult.getLocalProperties().size());
    assertEquals("pool_1", parseResult.getLocalProperties().get("pool"));
    assertEquals("sc.version", parseResult.getScriptText());
}
/**
 * Computes the largest zoom level at which the bounding box fits on a screen
 * of the given pixel dimensions.
 *
 * @param pBoundingBox  the geographic box to fit
 * @param pScreenWidth  available width in pixels
 * @param pScreenHeight available height in pixels
 * @return the fitting zoom, or {@code Double.MIN_VALUE} when neither axis
 *         yields a valid zoom
 */
public double getBoundingBoxZoom(final BoundingBox pBoundingBox, final int pScreenWidth, final int pScreenHeight) {
    final double zoomForWidth =
            getLongitudeZoom(pBoundingBox.getLonEast(), pBoundingBox.getLonWest(), pScreenWidth);
    final double zoomForHeight =
            getLatitudeZoom(pBoundingBox.getLatNorth(), pBoundingBox.getLatSouth(), pScreenHeight);
    // Double.MIN_VALUE is the sentinel for "no valid zoom on this axis"
    if (zoomForWidth == Double.MIN_VALUE) {
        return zoomForHeight;
    }
    if (zoomForHeight == Double.MIN_VALUE) {
        return zoomForWidth;
    }
    // The box fits only at the more zoomed-out (smaller) of the two values
    return Math.min(zoomForHeight, zoomForWidth);
}
/**
 * Property test: for random boxes, the zoom returned by getBoundingBoxZoom
 * projects the box into pixel dimensions that fit the screen, and the sentinel
 * is only returned for degenerate boxes.
 */
@Test
public void testGetBoundingBoxZoom() {
    final int tileSize = 256;
    final int screenWidth = tileSize * 2;
    final int screenHeight = screenWidth * 2;
    TileSystem.setTileSize(tileSize);
    final int iterations = 2000;
    for (int i = 0; i < iterations; i++) {
        final double north = getRandomLatitude();
        final double south = getRandomLatitude();
        final double east = getRandomLongitude();
        final double west = getRandomLongitude();
        final BoundingBox boundingBox = new BoundingBox(north, east, south, west);
        final double zoom = tileSystem.getBoundingBoxZoom(boundingBox, screenWidth, screenHeight);
        if (zoom == Double.MIN_VALUE) {
            // sentinel only allowed for inverted or zero-width boxes
            Assert.assertTrue(north <= south || east == west);
            continue;
        }
        // Project the box corners at the returned zoom and check the pixel size fits
        final double mapSize = TileSystem.MapSize(zoom);
        final long left = tileSystem.getMercatorXFromLongitude(west, mapSize, true);
        final long top = tileSystem.getMercatorYFromLatitude(north, mapSize, true);
        final long right = tileSystem.getMercatorXFromLongitude(east, mapSize, true);
        final long bottom = tileSystem.getMercatorYFromLatitude(south, mapSize, true);
        long width = right - left;
        if (east < west) {
            // box crosses the antimeridian: unwrap the width
            width += mapSize;
        }
        final long height = bottom - top;
        checkSize(width, height, screenWidth, screenHeight);
    }
}
/**
 * Evaluates the given arithmetic expression and returns its numeric result.
 *
 * @param expression expression to evaluate
 * @return computed value
 */
public static double conversion(String expression) {
    final Calculator calculator = new Calculator();
    return calculator.calculate(expression);
}
// A single literal with no operators must evaluate to itself.
@Test
public void conversationTest3() {
    final double conversion = Calculator.conversion("1");
    assertEquals(1, conversion, 0);
}
/**
 * Encodes the given string as UTF-8 bytes.
 * Uses the JDK's {@code StandardCharsets.UTF_8} constant instead of a
 * third-party charset holder — behavior is identical, with one less dependency.
 *
 * @param input string to encode; must not be {@code null}
 * @return UTF-8 encoded bytes of {@code input}
 * @throws NullPointerException if {@code input} is {@code null}
 */
public static byte[] getBytesUtf8(String input) {
    return input.getBytes(java.nio.charset.StandardCharsets.UTF_8);
}
// "Hi!" must encode to its ASCII byte values under UTF-8.
@Test
public void getBytesUtf8() {
    assertThat(Tools.getBytesUtf8("Hi!"), is(equalTo(new byte[] {0x48, 0x69, 0x21})));
}
/**
 * Copies the current state of the dialog controls into the supplied
 * {@link DatabaseMeta}: name, type, access, connection-specific fields, extra
 * options, advanced flags, clustering/partitioning and connection pooling.
 * Controls may be null (not shown for every database type), so each one is
 * guarded before being read.
 */
protected void getInfo( DatabaseMeta meta ) {
  getControls();

  if ( this.databaseMeta != null && this.databaseMeta != meta ) {
    // Carry variables over from the dialog's own meta into the target meta.
    meta.initializeVariablesFrom( this.databaseMeta );
  }

  // Let's not remove any (default) options or attributes
  // We just need to display the correct ones for the database type below...
  //
  // In fact, let's just clear the database port...
  //
  // TODO: what about the port number?

  // Name:
  meta.setName( connectionNameBox.getValue() );

  // Display Name: (PDI-12292)
  meta.setDisplayName( connectionNameBox.getValue() );

  // Connection type:
  Object connection = connectionBox.getSelectedItem();
  if ( connection != null ) {
    meta.setDatabaseType( (String) connection );
  }

  // Access type:
  Object access = accessBox.getSelectedItem();
  if ( access != null ) {
    meta.setAccessType( DatabaseMeta.getAccessType( (String) access ) );
  }

  getConnectionSpecificInfo( meta );

  // Port number:
  if ( portNumberBox != null ) {
    meta.setDBPort( portNumberBox.getValue() );
  }

  // Option parameters:
  if ( optionsParameterTree != null ) {
    Object[][] values = optionsParameterTree.getValues();
    for ( int i = 0; i < values.length; i++ ) {
      String parameter = (String) values[ i ][ 0 ];
      String value = (String) values[ i ][ 1 ];
      if ( value == null ) {
        value = "";
      }
      String dbType = meta.getPluginId();

      // Only if parameter are supplied, we will add to the map...
      if ( ( parameter != null ) && ( parameter.trim().length() > 0 ) ) {
        if ( value.trim().length() <= 0 ) {
          // Blank value means "option present without value".
          value = DatabaseMeta.EMPTY_OPTIONS_STRING;
        }
        meta.addExtraOption( dbType, parameter, value );
      }
    }
  }

  // Advanced panel settings:
  if ( supportBooleanDataType != null ) {
    meta.setSupportsBooleanDataType( supportBooleanDataType.isChecked() );
  }
  if ( supportTimestampDataType != null ) {
    meta.setSupportsTimestampDataType( supportTimestampDataType.isChecked() );
  }
  if ( quoteIdentifiersCheck != null ) {
    meta.setQuoteAllFields( quoteIdentifiersCheck.isChecked() );
  }
  if ( lowerCaseIdentifiersCheck != null ) {
    meta.setForcingIdentifiersToLowerCase( lowerCaseIdentifiersCheck.isChecked() );
  }
  if ( upperCaseIdentifiersCheck != null ) {
    meta.setForcingIdentifiersToUpperCase( upperCaseIdentifiersCheck.isChecked() );
  }
  if ( preserveReservedCaseCheck != null ) {
    meta.setPreserveReservedCase( preserveReservedCaseCheck.isChecked() );
  }
  // Oracle-only flag: only applied when the underlying interface is Oracle.
  if ( strictBigNumberInterpretaion != null && meta.getDatabaseInterface() instanceof OracleDatabaseMeta ) {
    ( (OracleDatabaseMeta) meta.getDatabaseInterface() )
      .setStrictBigNumberInterpretation( strictBigNumberInterpretaion.isChecked() );
  }
  if ( preferredSchemaName != null ) {
    meta.setPreferredSchemaName( preferredSchemaName.getValue() );
  }
  if ( sqlBox != null ) {
    meta.setConnectSQL( sqlBox.getValue() );
  }

  // Cluster panel settings
  if ( clusteringCheck != null ) {
    meta.setPartitioned( clusteringCheck.isChecked() );
  }

  if ( ( clusterParameterTree != null ) && ( meta.isPartitioned() ) ) {
    Object[][] values = clusterParameterTree.getValues();
    List<PartitionDatabaseMeta> pdms = new ArrayList<PartitionDatabaseMeta>();
    for ( int i = 0; i < values.length; i++ ) {
      String partitionId = (String) values[ i ][ 0 ];

      // Rows without a partition id are skipped entirely.
      if ( ( partitionId == null ) || ( partitionId.trim().length() <= 0 ) ) {
        continue;
      }

      String hostname = (String) values[ i ][ 1 ];
      String port = (String) values[ i ][ 2 ];
      String dbName = (String) values[ i ][ 3 ];
      String username = (String) values[ i ][ 4 ];
      String password = (String) values[ i ][ 5 ];
      PartitionDatabaseMeta pdm = new PartitionDatabaseMeta( partitionId, hostname, port, dbName );
      pdm.setUsername( username );
      pdm.setPassword( password );
      pdms.add( pdm );
    }
    PartitionDatabaseMeta[] pdmArray = new PartitionDatabaseMeta[ pdms.size() ];
    meta.setPartitioningInformation( pdms.toArray( pdmArray ) );
  }

  if ( poolingCheck != null ) {
    meta.setUsingConnectionPool( poolingCheck.isChecked() );
  }

  if ( meta.isUsingConnectionPool() ) {
    if ( poolSizeBox != null ) {
      try {
        meta.setInitialPoolSizeString( poolSizeBox.getValue() );
      } catch ( NumberFormatException e ) {
        // TODO log exception and move on ...
      }
    }

    if ( maxPoolSizeBox != null ) {
      try {
        meta.setMaximumPoolSizeString( maxPoolSizeBox.getValue() );
      } catch ( NumberFormatException e ) {
        // TODO log exception and move on ...
      }
    }

    if ( poolParameterTree != null ) {
      Object[][] values = poolParameterTree.getValues();
      Properties properties = new Properties();
      for ( int i = 0; i < values.length; i++ ) {
        boolean isChecked = false;
        // First column may be a Boolean or its String form depending on the widget.
        if ( values[ i ][ 0 ] instanceof Boolean ) {
          isChecked = ( (Boolean) values[ i ][ 0 ] ).booleanValue();
        } else {
          isChecked = Boolean.valueOf( (String) values[ i ][ 0 ] );
        }

        if ( !isChecked ) {
          continue;
        }

        // Only add parameters with both a name and a value.
        String parameter = (String) values[ i ][ 1 ];
        String value = (String) values[ i ][ 2 ];
        if ( ( parameter != null ) && ( parameter.trim().length() > 0 ) && ( value != null ) && ( value.trim().length() > 0 ) ) {
          properties.setProperty( parameter, value );
        }
      }
      meta.setConnectionPoolingProperties( properties );
    }
  }
}
// NOTE(review): this test body is empty and asserts nothing — it only proves
// the test harness can run. TODO: implement assertions for getInfo or remove.
@Test
public void testGetInfo() throws Exception {
}
// Asynchronously determines whether the broker with the given pod id can be
// restarted; the actual availability check is delegated to canRollBroker.
Future<Boolean> canRoll(int podId) {
    LOGGER.debugCr(reconciliation, "Determining whether broker {} can be rolled", podId);
    return canRollBroker(descriptions, podId);
}
// With min.insync.replicas=1 and all three replicas in the ISR of every
// partition, taking any single broker down cannot drop an ISR below the
// minimum — so every broker must report as rollable.
@Test
public void testAboveMinIsr(VertxTestContext context) {
    KSB ksb = new KSB()
        .addNewTopic("A", false)
            .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
            .addNewPartition(0)
                .replicaOn(0, 1, 2)
                .leader(0)
                .isr(0, 1, 2)
            .endPartition()
        .endTopic()
        .addNewTopic("B", false)
            .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
            .addNewPartition(0)
                .replicaOn(0, 1, 2)
                .leader(1)
                .isr(0, 1, 2)
            .endPartition()
        .endTopic()
        // Broker 3 hosts no partitions at all.
        .addBroker(3);

    KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac());
    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
            assertTrue(canRoll, "broker " + brokerId + " should be rollable, being minisr = 1 and having two brokers in its isr");
            a.flag();
        })));
    }
}
// Creates a multicast-discovery-based registry for the given URL.
@Override
public Registry createRegistry(URL url) {
    return new MulticastRegistry(url, applicationModel);
}
// The factory must produce a non-null, immediately available registry for a
// valid multicast URL.
@Test
void shouldCreateRegistry() {
    Registry registry = new MulticastRegistryFactory().createRegistry(URL.valueOf("multicast://239.255.255.255/"));
    assertThat(registry, not(nullValue()));
    assertThat(registry.isAvailable(), is(true));
}
/**
 * Returns an iterator over the set, reset to the beginning.
 * When allocation avoidance is enabled, a single iterator instance is lazily
 * created, cached, and reused on every call; otherwise a fresh instance is
 * allocated each time the cache is empty.
 */
public IntIterator iterator() {
    IntIterator result = this.iterator;
    if (result == null) {
        result = new IntIterator();
        if (shouldAvoidAllocation) {
            // Cache for reuse on subsequent calls.
            this.iterator = result;
        }
    }
    return result.reset();
}
// An iterator over an empty set must report no next element.
@Test
void iteratorHasNoElements() {
    assertFalse(testSet.iterator().hasNext());
}
/**
 * Narrows the agent's identities to the one whose comment matches the
 * configured private key path, when public key authentication is selected.
 * If authentication is not key-based, or no identity comment matches, the
 * original collection is returned unchanged.
 *
 * @param credentials login credentials, possibly referencing a key file
 * @param identities  identities offered by the SSH agent
 * @return a singleton list with the matching identity, or all identities
 */
protected Collection<Identity> filter(final Credentials credentials, final Collection<Identity> identities) {
    if (!credentials.isPublicKeyAuthentication()) {
        return identities;
    }
    final Local selected = credentials.getIdentity();
    for (Identity identity : identities) {
        final byte[] comment = identity.getComment();
        if (comment == null) {
            // Identities without a comment cannot be matched against the key path.
            continue;
        }
        final String candidate = new String(comment, StandardCharsets.UTF_8);
        if (selected.getAbsolute().equals(candidate)) {
            if (log.isDebugEnabled()) {
                log.debug(String.format("Matching identity %s found", candidate));
            }
            return Collections.singletonList(identity);
        }
    }
    return identities;
}
// When no agent identity comment matches the selected key path, filter must
// return the full identity collection unchanged.
@Test
public void filterIdentitiesNoMatch() {
    final SFTPAgentAuthentication authentication = new SFTPAgentAuthentication(new SSHClient(), new OpenSSHAgentAuthenticator(new AgentProxy(null)));
    final Credentials credentials = new Credentials("user").withIdentity(new Local("mykey") {
        @Override
        public boolean exists() {
            return true;
        }
    });
    final List<Identity> identities = new ArrayList<>();
    final Identity nomatch = mock(Identity.class);
    when(nomatch.getComment()).thenReturn(StringUtils.getBytes("comment1", StandardCharsets.UTF_8));
    identities.add(nomatch);
    identities.add(nomatch);
    final Collection<Identity> filtered = authentication.filter(credentials, identities);
    assertEquals(2, filtered.size());
}
// Entry point for the CLI command: creates the web service client, runs the
// command, and always tears down both the YARN client and the web service
// client afterwards.
@Override
public int run(String[] args) throws Exception {
    try {
        webServiceClient = WebServiceClient.getWebServiceClient().createClient();
        return runCommand(args);
    } finally {
        if (yarnClient != null) {
            yarnClient.close();
        }
        if (webServiceClient != null) {
            webServiceClient.destroy();
        }
    }
}
// End-to-end check that the logs CLI can read aggregated logs stored inside a
// HAR archive: copies a fixture .har into the remote log dir, runs the CLI for
// the application, and asserts the log contents of both containers appear.
@Test (timeout = 15000)
public void testFetchApplictionLogsHar() throws Exception {
    String remoteLogRootDir = "target/logs/";
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir);
    conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
    FileSystem fs = FileSystem.get(conf);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // Fixture archive bundled on the test classpath.
    URL harUrl = ClassLoader.getSystemClassLoader().getResource("application_1440536969523_0001.har");
    assertNotNull(harUrl);

    Path path = new Path(remoteLogRootDir + ugi.getShortUserName()
        + "/bucket-logs-tfile/0001/application_1440536969523_0001");
    if (fs.exists(path)) {
        fs.delete(path, true);
    }
    assertTrue(fs.mkdirs(path));
    Path harPath = new Path(path, "application_1440536969523_0001.har");
    fs.copyFromLocalFile(false, new Path(harUrl.toURI()), harPath);
    assertTrue(fs.exists(harPath));

    YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED, ugi.getShortUserName());
    LogsCLI cli = new LogsCLIForTest(mockYarnClient);
    cli.setConf(conf);
    int exitCode = cli.run(new String[]{"-applicationId", "application_1440536969523_0001"});
    assertTrue(exitCode == 0);
    String out = sysOutStream.toString();
    // Container 1 logs.
    assertTrue(out.contains("container_1440536969523_0001_01_000001 on host1_1111"));
    assertTrue(out.contains("Hello stderr"));
    assertTrue(out.contains("Hello stdout"));
    assertTrue(out.contains("Hello syslog"));
    // Container 2 logs.
    assertTrue(out.contains("container_1440536969523_0001_01_000002 on host2_2222"));
    assertTrue(out.contains("Goodbye stderr"));
    assertTrue(out.contains("Goodbye stdout"));
    assertTrue(out.contains("Goodbye syslog"));
    sysOutStream.reset();
    fs.delete(new Path(remoteLogRootDir), true);
}
// Builds metrics backed by either a count-based or a time-based sliding
// window. For the count-based window the minimum number of calls is capped at
// the window size (a smaller window cannot observe more calls than it holds).
private CircuitBreakerMetrics(int slidingWindowSize,
                              CircuitBreakerConfig.SlidingWindowType slidingWindowType,
                              CircuitBreakerConfig circuitBreakerConfig,
                              Clock clock) {
    if (slidingWindowType == CircuitBreakerConfig.SlidingWindowType.COUNT_BASED) {
        this.metrics = new FixedSizeSlidingWindowMetrics(slidingWindowSize);
        this.minimumNumberOfCalls = Math.min(circuitBreakerConfig.getMinimumNumberOfCalls(), slidingWindowSize);
    } else {
        this.metrics = new SlidingTimeWindowMetrics(slidingWindowSize, clock);
        this.minimumNumberOfCalls = circuitBreakerConfig.getMinimumNumberOfCalls();
    }
    this.failureRateThreshold = circuitBreakerConfig.getFailureRateThreshold();
    this.slowCallRateThreshold = circuitBreakerConfig.getSlowCallRateThreshold();
    this.slowCallDurationThresholdInNanos = circuitBreakerConfig.getSlowCallDurationThreshold().toNanos();
    this.numberOfNotPermittedCalls = new LongAdder();
}
// Exercises a count-based sliding window of size 10: below the minimum call
// count the failure rate must stay -1; once the window is full the rate must
// reflect the last 10 recorded calls, and results must cross/clear thresholds
// accordingly.
@Test
public void testCircuitBreakerMetrics() {
    CircuitBreakerConfig circuitBreakerConfig = CircuitBreakerConfig.custom()
        .slidingWindow(10, 10, CircuitBreakerConfig.SlidingWindowType.COUNT_BASED)
        .build();
    CircuitBreakerMetrics circuitBreakerMetrics = CircuitBreakerMetrics
        .forClosed(circuitBreakerConfig, MockClock.at(2019, 1, 1, 12, 0, 0, ZoneId.of("UTC")));
    circuitBreakerMetrics.onSuccess(0, TimeUnit.NANOSECONDS);
    circuitBreakerMetrics.onSuccess(0, TimeUnit.NANOSECONDS);
    circuitBreakerMetrics.onError(0, TimeUnit.NANOSECONDS);
    Result result = circuitBreakerMetrics.onError(0, TimeUnit.NANOSECONDS);
    circuitBreakerMetrics.onCallNotPermitted();
    circuitBreakerMetrics.onCallNotPermitted();
    assertThat(circuitBreakerMetrics.getNumberOfBufferedCalls()).isEqualTo(4);
    assertThat(circuitBreakerMetrics.getNumberOfFailedCalls()).isEqualTo(2);
    assertThat(circuitBreakerMetrics.getNumberOfSuccessfulCalls()).isEqualTo(2);
    assertThat(circuitBreakerMetrics.getNumberOfNotPermittedCalls()).isEqualTo(2);
    // The failure rate must be -1, because the number of measured calls is below the buffer size of 10
    assertThat(circuitBreakerMetrics.getFailureRate()).isEqualTo(-1);
    assertThat(result).isEqualTo(CircuitBreakerMetrics.Result.BELOW_MINIMUM_CALLS_THRESHOLD);
    circuitBreakerMetrics.onError(0, TimeUnit.NANOSECONDS);
    circuitBreakerMetrics.onError(0, TimeUnit.NANOSECONDS);
    circuitBreakerMetrics.onError(0, TimeUnit.NANOSECONDS);
    circuitBreakerMetrics.onError(0, TimeUnit.NANOSECONDS);
    circuitBreakerMetrics.onSuccess(0, TimeUnit.NANOSECONDS);
    circuitBreakerMetrics.onSuccess(0, TimeUnit.NANOSECONDS);
    circuitBreakerMetrics.onSuccess(0, TimeUnit.NANOSECONDS);
    result = circuitBreakerMetrics.onSuccess(0, TimeUnit.NANOSECONDS);
    // 12 calls have been recorded, but only 10 are stored in the sliding window. 4 successes and 6 failures.
    // The failure rate must be 60%, because the number of measured calls is above the minimum number of measured calls.
    assertThat(circuitBreakerMetrics.getNumberOfBufferedCalls()).isEqualTo(10);
    assertThat(circuitBreakerMetrics.getNumberOfFailedCalls()).isEqualTo(6);
    assertThat(circuitBreakerMetrics.getNumberOfSuccessfulCalls()).isEqualTo(4);
    assertThat(circuitBreakerMetrics.getFailureRate()).isEqualTo(60);
    assertThat(Result.hasExceededThresholds(result)).isTrue();
    circuitBreakerMetrics.onSuccess(0, TimeUnit.NANOSECONDS);
    circuitBreakerMetrics.onSuccess(0, TimeUnit.NANOSECONDS);
    result = circuitBreakerMetrics.onSuccess(0, TimeUnit.NANOSECONDS);
    // Three more successes push failures out of the window: 3 failures / 10 calls = 30%.
    assertThat(result).isEqualTo(CircuitBreakerMetrics.Result.BELOW_THRESHOLDS);
    assertThat(circuitBreakerMetrics.getFailureRate()).isEqualTo(30);
}
// Registers the SupportedSearchVersionFilter for a resource when either the
// resource method or the resource class carries @SupportedSearchVersion or
// the repeatable container @SupportedSearchVersions.
@Override
public void configure(ResourceInfo resourceInfo, FeatureContext context) {
    final Method resourceMethod = resourceInfo.getResourceMethod();
    final Class resourceClass = resourceInfo.getResourceClass();
    if ((resourceMethod != null && (resourceMethod.isAnnotationPresent(SupportedSearchVersion.class)
            || resourceMethod.isAnnotationPresent(SupportedSearchVersions.class)))
        || (resourceClass != null && (resourceClass.isAnnotationPresent(SupportedSearchVersion.class)
            || resourceClass.isAnnotationPresent(SupportedSearchVersions.class)))) {
        context.register(SupportedSearchVersionFilter.class);
    }
}
// Without the annotation on either the method or the class, the filter must
// not be registered.
@Test
public void configureDoesNotRegisterResponseFilterIfAnnotationIsAbsent() throws Exception {
    final Method method = TestResourceWithOutAnnotation.class.getMethod("methodWithoutAnnotation");
    final Class clazz = TestResourceWithOutAnnotation.class;
    when(resourceInfo.getResourceMethod()).thenReturn(method);
    when(resourceInfo.getResourceClass()).thenReturn(clazz);
    supportedSearchVersionDynamicFeature.configure(resourceInfo, featureContext);
    verify(featureContext, never()).register(SupportedSearchVersionFilter.class);
}
/**
 * Asks each delegate factory in turn and returns the first project creator
 * any of them produces, or an empty Optional when none does.
 */
@Override
public Optional<DevOpsProjectCreator> getDevOpsProjectCreator(DbSession dbSession, Map<String, String> characteristics) {
    return delegates.stream()
        .map(delegate -> delegate.getDevOpsProjectCreator(dbSession, characteristics))
        .filter(Optional::isPresent)
        .map(Optional::get)
        .findFirst();
}
// With no delegate factories configured, the delegating factory must return
// an empty Optional.
@Test
public void getDevOpsProjectDescriptor_whenNoDelegates_shouldReturnEmptyOptional() {
    DelegatingDevOpsProjectCreatorFactory noDelegates = new DelegatingDevOpsProjectCreatorFactory(emptySet());
    Optional<DevOpsProjectCreator> devOpsProjectCreator = noDelegates.getDevOpsProjectCreator(DB_SESSION, CHARACTERISTICS);
    assertThat(devOpsProjectCreator).isEmpty();
}
/**
 * Resolves the logic column name whose cipher column matches the given name
 * (case-insensitively).
 *
 * @param cipherColumnName cipher column name to look up
 * @return logic column name
 * @throws EncryptLogicColumnNotFoundException if no column maps to the cipher column
 */
public String getLogicColumnByCipherColumn(final String cipherColumnName) {
    return columns.values().stream()
        .filter(each -> each.getCipher().getName().equalsIgnoreCase(cipherColumnName))
        .map(EncryptColumn::getName)
        .findFirst()
        .orElseThrow(() -> new EncryptLogicColumnNotFoundException(cipherColumnName));
}
// Looking up a cipher column that does not exist must raise
// EncryptLogicColumnNotFoundException.
@Test
void assertGetLogicColumnByCipherColumnWhenNotFind() {
    assertThrows(EncryptLogicColumnNotFoundException.class, () -> encryptTable.getLogicColumnByCipherColumn("invalidColumn"));
}
/**
 * Computes the group's new target assignment and produces the records needed
 * to persist it: one assignment record per member whose assignment changed,
 * plus a final record bumping the target assignment epoch.
 *
 * @return the records and the new per-member assignments
 * @throws PartitionAssignorException if the assignor fails
 */
public TargetAssignmentResult build() throws PartitionAssignorException {
    Map<String, MemberSubscriptionAndAssignmentImpl> memberSpecs = new HashMap<>();

    // Prepare the member spec for all members.
    members.forEach((memberId, member) -> memberSpecs.put(memberId, createMemberSubscriptionAndAssignment(
        member,
        targetAssignment.getOrDefault(memberId, Assignment.EMPTY),
        topicsImage
    )));

    // Update the member spec if updated or deleted members.
    updatedMembers.forEach((memberId, updatedMemberOrNull) -> {
        if (updatedMemberOrNull == null) {
            // A null update means the member left the group.
            memberSpecs.remove(memberId);
        } else {
            Assignment assignment = targetAssignment.getOrDefault(memberId, Assignment.EMPTY);

            // A new static member joins and needs to replace an existing departed one.
            if (updatedMemberOrNull.instanceId() != null) {
                String previousMemberId = staticMembers.get(updatedMemberOrNull.instanceId());
                if (previousMemberId != null && !previousMemberId.equals(memberId)) {
                    // Inherit the departed member's assignment for continuity.
                    assignment = targetAssignment.getOrDefault(previousMemberId, Assignment.EMPTY);
                }
            }

            memberSpecs.put(memberId, createMemberSubscriptionAndAssignment(
                updatedMemberOrNull,
                assignment,
                topicsImage
            ));
        }
    });

    // Prepare the topic metadata.
    Map<Uuid, TopicMetadata> topicMetadataMap = new HashMap<>();
    subscriptionMetadata.forEach((topicName, topicMetadata) ->
        topicMetadataMap.put(topicMetadata.id(), topicMetadata)
    );

    // Compute the assignment.
    GroupAssignment newGroupAssignment = assignor.assign(
        new GroupSpecImpl(
            Collections.unmodifiableMap(memberSpecs),
            subscriptionType,
            invertedTargetAssignment
        ),
        new SubscribedTopicDescriberImpl(topicMetadataMap)
    );

    // Compute delta from previous to new target assignment and create the
    // relevant records.
    List<CoordinatorRecord> records = new ArrayList<>();

    for (String memberId : memberSpecs.keySet()) {
        Assignment oldMemberAssignment = targetAssignment.get(memberId);
        Assignment newMemberAssignment = newMemberAssignment(newGroupAssignment, memberId);

        if (!newMemberAssignment.equals(oldMemberAssignment)) {
            // If the member had no assignment or had a different assignment, we
            // create a record for the new assignment.
            records.add(targetAssignmentRecordBuilder.build(
                groupId,
                memberId,
                newMemberAssignment.partitions()
            ));
        }
    }

    // Bump the target assignment epoch.
    records.add(targetAssignmentEpochRecordBuilder.build(groupId, groupEpoch));

    return new TargetAssignmentResult(records, newGroupAssignment.members());
}
// Removing member-3 must redistribute its partitions between the two remaining
// members; the result contains one assignment record per surviving member plus
// the epoch-bump record, and no record for the deleted member.
@Test
public void testDeleteMember() {
    TargetAssignmentBuilderTestContext context = new TargetAssignmentBuilderTestContext(
        "my-group",
        20
    );

    Uuid fooTopicId = context.addTopicMetadata("foo", 6, Collections.emptyMap());
    Uuid barTopicId = context.addTopicMetadata("bar", 6, Collections.emptyMap());

    context.addGroupMember("member-1", Arrays.asList("foo", "bar", "zar"), mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2),
        mkTopicAssignment(barTopicId, 1, 2)
    ));
    context.addGroupMember("member-2", Arrays.asList("foo", "bar", "zar"), mkAssignment(
        mkTopicAssignment(fooTopicId, 3, 4),
        mkTopicAssignment(barTopicId, 3, 4)
    ));
    context.addGroupMember("member-3", Arrays.asList("foo", "bar", "zar"), mkAssignment(
        mkTopicAssignment(fooTopicId, 5, 6),
        mkTopicAssignment(barTopicId, 5, 6)
    ));

    context.removeMemberSubscription("member-3");

    context.prepareMemberAssignment("member-1", mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2, 3),
        mkTopicAssignment(barTopicId, 1, 2, 3)
    ));
    context.prepareMemberAssignment("member-2", mkAssignment(
        mkTopicAssignment(fooTopicId, 4, 5, 6),
        mkTopicAssignment(barTopicId, 4, 5, 6)
    ));

    TargetAssignmentBuilder.TargetAssignmentResult result = context.build();

    assertEquals(3, result.records().size());

    // Per-member assignment records can come in any order.
    assertUnorderedListEquals(Arrays.asList(
        newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment(
            mkTopicAssignment(fooTopicId, 1, 2, 3),
            mkTopicAssignment(barTopicId, 1, 2, 3)
        )),
        newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment(
            mkTopicAssignment(fooTopicId, 4, 5, 6),
            mkTopicAssignment(barTopicId, 4, 5, 6)
        ))
    ), result.records().subList(0, 2));

    // The epoch record must come last.
    assertEquals(newConsumerGroupTargetAssignmentEpochRecord(
        "my-group",
        20
    ), result.records().get(2));

    Map<String, MemberAssignment> expectedAssignment = new HashMap<>();
    expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2, 3),
        mkTopicAssignment(barTopicId, 1, 2, 3)
    )));
    expectedAssignment.put("member-2", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 4, 5, 6),
        mkTopicAssignment(barTopicId, 4, 5, 6)
    )));

    assertEquals(expectedAssignment, result.targetAssignment());
}
// Sets the executor used for asynchronous work; may be configured at most
// once — a second call fails with an IllegalStateException.
@CanIgnoreReturnValue
public Caffeine<K, V> executor(Executor executor) {
    requireState(this.executor == null, "executor was already set to %s", this.executor);
    this.executor = requireNonNull(executor);
    return this;
}
// The configured executor must be stored as-is and not prevent cache creation.
@Test
public void executor() {
    var builder = Caffeine.newBuilder().executor(directExecutor());
    assertThat(builder.getExecutor()).isSameInstanceAs(directExecutor());
    assertThat(builder.build()).isNotNull();
}
/**
 * Decides whether an issue on the given component passes the filter.
 * Non-file components, or a filter with no configured patterns at all, always
 * pass. Otherwise exclusion patterns veto first, then inclusion patterns decide.
 */
public boolean accept(DefaultIssue issue, Component component) {
    final boolean hasPatterns = !exclusionPatterns.isEmpty() || !inclusionPatterns.isEmpty();
    if (component.getType() != FILE || !hasPatterns) {
        return true;
    }
    // Exclusion wins over inclusion.
    return !isExclude(issue, component) && isInclude(issue, component);
}
// An inclusion pattern scoped to rule xoo:x1 and files matching File1* must
// reject that rule's issues elsewhere while accepting all other rules' issues.
@Test
public void include_some_rule_and_component() {
    IssueFilter underTest = newIssueFilter(newSettings(Collections.emptyList(), asList("xoo:x1", "**/xoo/File1*")));
    assertThat(underTest.accept(ISSUE_1, COMPONENT_1)).isTrue();
    assertThat(underTest.accept(ISSUE_1, COMPONENT_2)).isFalse();
    // Issues on other rule are accepted
    assertThat(underTest.accept(ISSUE_2, COMPONENT_1)).isTrue();
    assertThat(underTest.accept(ISSUE_2, COMPONENT_2)).isTrue();
}
// Replicated maps do not support the set operation, so its latency metric is
// deliberately unavailable.
@Override
public long getTotalSetLatency() {
    throw new UnsupportedOperationException("Set operation on replicated maps is not supported.");
}
// Querying the set latency on replicated-map stats must throw.
@Test(expected = UnsupportedOperationException.class)
public void testTotalSetLatency() {
    localReplicatedMapStats.getTotalSetLatency();
}
/**
 * Sends a single templated mail: validates the template, account and address,
 * records a send log, and — when the template is enabled — dispatches an
 * asynchronous MQ message to actually send the mail.
 *
 * @return the id of the created send log
 */
@Override
public Long sendSingleMail(String mail, Long userId, Integer userType, String templateCode, Map<String, Object> templateParams) {
    // Validate that the mail template exists.
    MailTemplateDO template = validateMailTemplate(templateCode);
    // Validate the mail account referenced by the template.
    MailAccountDO account = validateMailAccount(template.getAccountId());
    // Validate the recipient mail address.
    mail = validateMail(mail);
    validateTemplateParams(template, templateParams);
    // Create the send log. If the template is disabled, do not send; only record the log.
    Boolean isSend = CommonStatusEnum.ENABLE.getStatus().equals(template.getStatus());
    String title = mailTemplateService.formatMailTemplateContent(template.getTitle(), templateParams);
    String content = mailTemplateService.formatMailTemplateContent(template.getContent(), templateParams);
    Long sendLogId = mailLogService.createMailLog(userId, userType, mail, account, template, content, templateParams, isSend);
    // Publish an MQ message to perform the actual send asynchronously.
    if (isSend) {
        mailProducer.sendMailSendMessage(sendLogId, mail, account.getId(), template.getNickname(), title, content);
    }
    return sendLogId;
}
// When the template is enabled, sendSingleMail must create a send log and
// publish exactly one MQ message with the formatted title and content.
@Test
public void testSendSingleMail_successWhenMailTemplateEnable() {
    // Prepare arguments.
    String mail = randomEmail();
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    String templateCode = RandomUtils.randomString();
    Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
        .put("op", "login").build();
    // Mock MailTemplateService behavior.
    MailTemplateDO template = randomPojo(MailTemplateDO.class, o -> {
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setContent("验证码为{code}, 操作为{op}");
        o.setParams(Lists.newArrayList("code", "op"));
    });
    when(mailTemplateService.getMailTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
    String title = RandomUtils.randomString();
    when(mailTemplateService.formatMailTemplateContent(eq(template.getTitle()), eq(templateParams)))
        .thenReturn(title);
    String content = RandomUtils.randomString();
    when(mailTemplateService.formatMailTemplateContent(eq(template.getContent()), eq(templateParams)))
        .thenReturn(content);
    // Mock MailAccountService behavior.
    MailAccountDO account = randomPojo(MailAccountDO.class);
    when(mailAccountService.getMailAccountFromCache(eq(template.getAccountId()))).thenReturn(account);
    // Mock MailLogService behavior.
    Long mailLogId = randomLongId();
    when(mailLogService.createMailLog(eq(userId), eq(userType), eq(mail),
        eq(account), eq(template), eq(content), eq(templateParams), eq(true))).thenReturn(mailLogId);
    // Invoke.
    Long resultMailLogId = mailSendService.sendSingleMail(mail, userId, userType, templateCode, templateParams);
    // Assert the returned log id.
    assertEquals(mailLogId, resultMailLogId);
    // Verify the MQ message was published with the expected arguments.
    verify(mailProducer).sendMailSendMessage(eq(mailLogId), eq(mail),
        eq(account.getId()), eq(template.getNickname()), eq(title), eq(content));
}
/**
 * Strips XML comments ({@code <!-- ... -->}) from the given XML content.
 *
 * @param xmlContent XML text, may be {@code null}
 * @return the content without comments, or {@code null} when the input is {@code null}
 */
public static String cleanComment(String xmlContent) {
    return null == xmlContent ? null : xmlContent.replaceAll(COMMENT_REGEX, StrUtil.EMPTY);
}
// An embedded XML comment must be removed while the surrounding markup is kept intact.
@Test
public void cleanCommentTest() {
    final String xmlContent = "<info><title>hutool</title><!-- 这是注释 --><lang>java</lang></info>";
    final String ret = XmlUtil.cleanComment(xmlContent);
    assertEquals("<info><title>hutool</title><lang>java</lang></info>", ret);
}
// Produces the next binding of the pattern against the memo group expression,
// or null when all bindings are exhausted. Iteration state lives in
// groupExpressionIndex, which is advanced between attempts.
public OptExpression next() {
    // For logic scan to physical scan, we only need to match once
    if (isPatternWithoutChildren && groupExpressionIndex.get(0) > 0) {
        return null;
    }

    OptExpression expression;
    do {
        this.groupTraceKey = 0;

        // Match with the next groupExpression of the last group node
        int lastNode = this.groupExpressionIndex.size() - 1;
        int lastNodeIndex = this.groupExpressionIndex.get(lastNode);
        this.groupExpressionIndex.set(lastNode, lastNodeIndex + 1);

        expression = match(pattern, groupExpression);
        // Keep trying while the match failed and there is still index state to
        // advance (size == 1 means the root itself is exhausted).
    } while (expression == null && this.groupExpressionIndex.size() != 1);

    nextIdx++;
    return expression;
}
// A PATTERN_MULTI_LEAF between two PROJECT patterns must absorb all three
// middle SCAN children of the join in order; exactly one binding exists.
@Test
public void testBinderMulti3() {
    OptExpression expr1 = OptExpression.create(new MockOperator(OperatorType.LOGICAL_JOIN, 0),
        OptExpression.create(new MockOperator(OperatorType.LOGICAL_PROJECT, 1)),
        OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 2)),
        OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 3)),
        OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 4)),
        OptExpression.create(new MockOperator(OperatorType.LOGICAL_PROJECT, 5)));

    Memo memo = new Memo();
    GroupExpression ge = memo.init(expr1);

    Pattern pattern = Pattern.create(OperatorType.LOGICAL_JOIN)
        .addChildren(Pattern.create(OperatorType.LOGICAL_PROJECT))
        .addChildren(Pattern.create(OperatorType.PATTERN_MULTI_LEAF))
        .addChildren(Pattern.create(OperatorType.LOGICAL_PROJECT));

    Binder binder = new Binder(pattern, ge);
    OptExpression result;

    result = binder.next();
    assertEquals(OperatorType.LOGICAL_JOIN, result.getOp().getOpType());
    assertEquals(OperatorType.LOGICAL_PROJECT, result.inputAt(0).getOp().getOpType());
    assertEquals(1, ((MockOperator) result.inputAt(0).getOp()).getValue());
    assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(1).getOp().getOpType());
    assertEquals(2, ((MockOperator) result.inputAt(1).getOp()).getValue());
    assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(2).getOp().getOpType());
    assertEquals(3, ((MockOperator) result.inputAt(2).getOp()).getValue());
    assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(3).getOp().getOpType());
    assertEquals(4, ((MockOperator) result.inputAt(3).getOp()).getValue());
    assertEquals(OperatorType.LOGICAL_PROJECT, result.inputAt(4).getOp().getOpType());
    assertEquals(5, ((MockOperator) result.inputAt(4).getOp()).getValue());

    // No further bindings exist.
    assertNull(binder.next());
}
// Adds a reconciliation request to the controller queue unless an equal
// request is already pending; duplicates are counted in the metrics instead.
// A full queue is logged as a warning and the event is dropped.
public void enqueue(SimplifiedReconciliation reconciliation) {
    if (!queue.contains(reconciliation)) {
        LOGGER.debug("Enqueueing {} {} in namespace {}", reconciliation.kind, reconciliation.name, reconciliation.namespace);
        if (!queue.offer(reconciliation)) {
            LOGGER.warn("Failed to enqueue an event because the controller queue is full");
        }
    } else {
        metrics.alreadyEnqueuedReconciliationsCounter(reconciliation.namespace).increment(); // Increase the metrics counter
        LOGGER.debug("{} {} in namespace {} is already enqueued => ignoring", reconciliation.kind, reconciliation.name, reconciliation.namespace);
    }
}
// Enqueueing a reconciliation equal to one already queued (same kind,
// namespace, name — trigger differs) must be ignored and counted in the
// already-enqueued metric.
@Test
public void testEnqueueingEnqueued() {
    MeterRegistry metricsRegistry = new SimpleMeterRegistry();
    MetricsProvider metrics = new MicrometerMetricsProvider(metricsRegistry);
    ControllerQueue q = new ControllerQueue(10, new ControllerMetricsHolder("kind", Labels.EMPTY, metrics));
    SimplifiedReconciliation r1 = new SimplifiedReconciliation("kind", "my-namespace", "my-name", "watch");
    SimplifiedReconciliation r2 = new SimplifiedReconciliation("kind", "my-namespace", "my-name", "timer");
    SimplifiedReconciliation r3 = new SimplifiedReconciliation("kind", "my-namespace", "my-other-name", "watch");
    q.enqueue(r1);
    q.enqueue(r3);
    q.enqueue(r2);
    assertThat(q.queue.size(), is(2));
    assertThat(q.queue.contains(r1), is(true));
    assertThat(q.queue.contains(r3), is(true));

    // Test metric
    assertThat(metricsRegistry.get(ControllerMetricsHolder.METRICS_RECONCILIATIONS_ALREADY_ENQUEUED).tag("kind", "kind").tag("namespace", "my-namespace").counter().count(), is(1.0));
}
/**
 * Copies a file server-side on the SMB share using a remote copy (no data
 * travels through the client). The target is overwritten when the transfer
 * status reports it exists, created otherwise. The share is always released,
 * and SMB/IO failures are mapped to BackgroundException subtypes.
 */
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback prompt, final StreamListener listener) throws BackgroundException {
    final SMBSession.DiskShareWrapper share = session.openShare(source);
    try {
        // Both handles are opened in one try-with-resources so they are closed together.
        try (final File sourceFile = share.get().openFile(new SMBPathContainerService(session).getKey(source),
                new HashSet<>(Arrays.asList(AccessMask.FILE_READ_DATA, AccessMask.FILE_READ_ATTRIBUTES)),
                Collections.singleton(FileAttributes.FILE_ATTRIBUTE_NORMAL),
                Collections.singleton(SMB2ShareAccess.FILE_SHARE_READ),
                SMB2CreateDisposition.FILE_OPEN,
                Collections.singleton(SMB2CreateOptions.FILE_NON_DIRECTORY_FILE));
             final File targetFile = share.get().openFile(new SMBPathContainerService(session).getKey(target),
                Collections.singleton(AccessMask.MAXIMUM_ALLOWED),
                Collections.singleton(FileAttributes.FILE_ATTRIBUTE_NORMAL),
                Collections.singleton(SMB2ShareAccess.FILE_SHARE_READ),
                status.isExists() ? SMB2CreateDisposition.FILE_OVERWRITE : SMB2CreateDisposition.FILE_CREATE,
                Collections.singleton(SMB2CreateOptions.FILE_NON_DIRECTORY_FILE))) {
            // Server-side copy between the two open handles.
            sourceFile.remoteCopyTo(targetFile);
        }
        listener.sent(status.getLength());
    }
    catch(IOException e) {
        throw new SMBTransportExceptionMappingService().map("Cannot copy {0}", e, source);
    }
    catch(SMBRuntimeException e) {
        throw new SMBExceptionMappingService().map("Cannot copy {0}", e, source);
    }
    catch(BufferException e) {
        throw new BackgroundException(e);
    }
    finally {
        session.releaseShare(share);
    }
    return target;
}
// Copying onto a pre-existing target (status.exists(true)) must succeed and
// leave both source and target listed in their folders; cleans up afterwards.
@Test
public void testCopyToExistingFile() throws Exception {
    final Path home = new DefaultHomeFinderService(session).find();
    final Path sourceFolder = new SMBDirectoryFeature(session).mkdir(
        new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path destinationFolder = new SMBDirectoryFeature(session).mkdir(
        new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path file = new SMBTouchFeature(session).touch(new Path(sourceFolder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final Path copy = new Path(destinationFolder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    // Pre-create the target so the copy takes the overwrite branch.
    new SMBTouchFeature(session).touch(copy, new TransferStatus());
    new SMBCopyFeature(session).copy(file, copy, new TransferStatus().exists(true), new DisabledConnectionCallback(), new DisabledStreamListener());
    ListService list = new SMBListService(session);
    assertTrue(list.list(sourceFolder, new DisabledListProgressListener()).contains(file));
    assertTrue(list.list(destinationFolder, new DisabledListProgressListener()).contains(copy));
    new SMBDeleteFeature(session).delete(Arrays.asList(file, sourceFolder, copy, destinationFolder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Validates and persists a new Grok pattern, then broadcasts an update event to the cluster.
 *
 * @param pattern pattern to store; its name must not already exist
 * @return the saved pattern as returned by the datastore
 * @throws ValidationException if the pattern fails validation or the name is taken
 */
@Override
public GrokPattern save(GrokPattern pattern) throws ValidationException {
    try {
        if (!validate(pattern)) {
            throw new ValidationException("Invalid pattern " + pattern);
        }
    }
    catch (GrokException | PatternSyntaxException e) {
        // Validation itself blew up; surface the underlying message to the caller.
        throw new ValidationException("Invalid pattern " + pattern + "\n" + e.getMessage());
    }
    // NOTE(review): check-then-save is not atomic; two concurrent saves of the same
    // name could both pass this check — confirm a unique index backs this.
    if (loadByName(pattern.name()).isPresent()) {
        throw new ValidationException("Grok pattern " + pattern.name() + " already exists");
    }
    final WriteResult<GrokPattern, ObjectId> result = dbCollection.save(pattern);
    final GrokPattern savedGrokPattern = result.getSavedObject();
    // Notify other nodes only after the pattern is durably saved.
    clusterBus.post(GrokPatternsUpdatedEvent.create(ImmutableSet.of(savedGrokPattern.name())));
    return savedGrokPattern;
}
// Verifies that invalid patterns (broken regex, empty name, empty pattern) are rejected,
// nothing is written to the collection, and no cluster event is posted.
@Test public void saveInvalidGrokPattern() { assertThat(collection.countDocuments()).isEqualTo(0); assertThatThrownBy(() -> service.save(GrokPattern.create("Test", "%{"))) .isInstanceOf(ValidationException.class); assertThat(collection.countDocuments()).isEqualTo(0); assertThatThrownBy(() -> service.save(GrokPattern.create("", "[a-z]+"))) .isInstanceOf(ValidationException.class); assertThat(collection.countDocuments()).isEqualTo(0); assertThatThrownBy(() -> service.save(GrokPattern.create("Test", ""))) .isInstanceOf(IllegalArgumentException.class); assertThat(collection.countDocuments()).isEqualTo(0); verify(clusterEventBus, never()).post(any(GrokPatternsUpdatedEvent.class)); }
/**
 * SQL {@code =} operator for two INTEGER values.
 * Nullable boxed return matches the engine's operator calling convention.
 */
@ScalarOperator(EQUAL)
@SqlType(StandardTypes.BOOLEAN)
@SqlNullable
public static Boolean equal(@SqlType(StandardTypes.INTEGER) long left, @SqlType(StandardTypes.INTEGER) long right)
{
    // Explicit boxing; equivalent to the autoboxing the compiler would emit.
    return Boolean.valueOf(left == right);
}
// Verifies INTEGER equality for all four combinations of 37 and 17, table-driven.
@Test
public void testEqual() {
    final long[][] pairs = {{37, 37}, {37, 17}, {17, 37}, {17, 17}};
    for (final long[] pair : pairs) {
        assertFunction("INTEGER'" + pair[0] + "' = INTEGER'" + pair[1] + "'", BOOLEAN, pair[0] == pair[1]);
    }
}
/**
 * Returns the cell value at the given 1-based column index.
 *
 * @param columnIndex 1-based index in the range {@code [1, column count]}
 * @return the cell value at that index
 * @throws IllegalArgumentException if {@code columnIndex} is out of range
 */
public Object getCell(final int columnIndex) {
    // Inclusive upper bound reads more clearly than `< size + 1`; the message
    // includes the offending value so failures are diagnosable.
    Preconditions.checkArgument(columnIndex > 0 && columnIndex <= data.size(),
            "Column index `%s` out of range [1, %s].", columnIndex, data.size());
    return data.get(columnIndex - 1);
}
// Verifies that null constructor values are surfaced as empty strings by getCell.
@Test void assertGetCellWithNullValue() { LocalDataQueryResultRow actual = new LocalDataQueryResultRow(null, null); assertThat(actual.getCell(1), is("")); assertThat(actual.getCell(2), is("")); }
/**
 * Runs {@code actionToMeasure}, recording its latency on {@code sensor} only when the
 * sensor is enabled and has metrics attached; otherwise runs it without timing overhead.
 *
 * @param actionToMeasure action to execute
 * @param time            clock supplying nanosecond timestamps
 * @param sensor          sensor that may record the elapsed nanoseconds
 */
public static void maybeMeasureLatency(final Runnable actionToMeasure, final Time time, final Sensor sensor) {
    // Guard clause: skip timing entirely when nothing would be recorded.
    if (!sensor.shouldRecord() || !sensor.hasMetrics()) {
        actionToMeasure.run();
        return;
    }
    final long startNs = time.nanoseconds();
    try {
        actionToMeasure.run();
    }
    finally {
        // Record even if the action throws.
        sensor.record(time.nanoseconds() - startNs);
    }
}
// Verifies that when the sensor should record and has metrics, the elapsed time
// (endTime - startTime) is recorded; relies on the mock clock returning start then end.
@Test public void shouldMeasureLatency() { final long startTime = 6; final long endTime = 10; final Sensor sensor = mock(Sensor.class); when(sensor.shouldRecord()).thenReturn(true); when(sensor.hasMetrics()).thenReturn(true); doNothing().when(sensor).record(endTime - startTime); final Time time = mock(Time.class); when(time.nanoseconds()).thenReturn(startTime).thenReturn(endTime); StreamsMetricsImpl.maybeMeasureLatency(() -> { }, time, sensor); }
/**
 * Deprecated overload: delegates to the {@code Named} variant with an
 * auto-generated processor name.
 *
 * @param processorSupplier supplier of the processor to attach
 * @param stateStoreNames   names of state stores the processor uses
 */
@Override
@Deprecated
public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier,
                    final String... stateStoreNames) {
    final Named generatedName = Named.as(builder.newProcessorName(PROCESSOR_NAME));
    process(processorSupplier, generatedName, stateStoreNames);
}
// Verifies that passing a null processor supplier to process(..., Named) throws an NPE
// with the expected message.
@Test public void shouldNotAllowNullProcessSupplierOnProcessValuesWithNamed() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.process((ProcessorSupplier<? super String, ? super String, Void, Void>) null, Named.as("processor"))); assertThat(exception.getMessage(), equalTo("processorSupplier can't be null")); }
/**
 * Converts camel-case {@code src} to a delimiter-separated form using the default
 * {@code '_'} separator; delegates to {@link #toUnderline(String, char, boolean)}.
 *
 * @param src     camel-case input
 * @param isUpper whether the result should be upper-cased
 * @return the underscore-separated string
 */
public static String toUnderline(String src, boolean isUpper) { return toUnderline(src, '_', isUpper); }
// Verifies the custom-separator overload with '-' for both upper and lower casing.
@Test public void testToUnderline2() { String result = FieldUtils.toUnderline("ToUnderline", '-', true); Assert.assertEquals("TO-UNDERLINE", result); String result1 = FieldUtils.toUnderline("ToUnderline", '-', false); Assert.assertEquals("to-underline", result1); }
/**
 * Records timing for a statement that failed; delegates to the common {@code log}
 * path. The exception itself is intentionally not inspected — failures are timed
 * the same way as successes.
 */
@Override public void logException(StatementContext context, SQLException ex) { log(context); }
// Verifies that logException records the context's elapsed nanoseconds on the timer
// resolved via the statement-name strategy, even though an exception occurred.
@Test public void logsExceptionTime() { final MetricRegistry mockRegistry = mock(MetricRegistry.class); final StatementNameStrategy mockNameStrategy = mock(StatementNameStrategy.class); final InstrumentedSqlLogger logger = new InstrumentedSqlLogger(mockRegistry, mockNameStrategy); final StatementContext mockContext = mock(StatementContext.class); final Timer mockTimer = mock(Timer.class); final String statementName = "my-fake-name"; final long fakeElapsed = 1234L; when(mockNameStrategy.getStatementName(mockContext)).thenReturn(statementName); when(mockRegistry.timer(statementName)).thenReturn(mockTimer); when(mockContext.getElapsedTime(ChronoUnit.NANOS)).thenReturn(fakeElapsed); logger.logException(mockContext, new SQLException()); verify(mockTimer).update(fakeElapsed, TimeUnit.NANOSECONDS); }
/**
 * Writes a message into the ring buffer using a claim/commit protocol.
 * <p>
 * The record length is first published negated so concurrent readers see the record
 * as in-flight; after payload and type are written, the positive length is stored
 * with ordered semantics to commit the record.
 *
 * @param msgTypeId message type id (validated by {@code checkTypeId})
 * @param srcBuffer buffer holding the payload
 * @param offset    payload offset within {@code srcBuffer}
 * @param length    payload length in bytes (validated by {@code checkMsgLength})
 * @return {@code true} if written; {@code false} if capacity was insufficient
 */
public boolean write(final int msgTypeId, final DirectBuffer srcBuffer, final int offset, final int length) {
    checkTypeId(msgTypeId);
    checkMsgLength(length);
    final AtomicBuffer buffer = this.buffer;
    final int recordLength = length + HEADER_LENGTH;
    final int recordIndex = claimCapacity(buffer, recordLength);
    if (INSUFFICIENT_CAPACITY == recordIndex) {
        return false;
    }
    // Negative length marks the record as claimed but not yet committed.
    buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
    MemoryAccess.releaseFence();
    buffer.putBytes(encodedMsgOffset(recordIndex), srcBuffer, offset, length);
    buffer.putInt(typeOffset(recordIndex), msgTypeId);
    // Publishing the positive length commits the record for readers.
    buffer.putIntOrdered(lengthOffset(recordIndex), recordLength);
    return true;
}
// Verifies, via strict InOrder mock verification, that when the tail is near the buffer
// end a padding record is committed at the old tail and the message is written at index 0.
@Test void shouldInsertPaddingRecordPlusMessageOnBufferWrap() { final int length = 200; final int recordLength = length + HEADER_LENGTH; final int alignedRecordLength = align(recordLength, ALIGNMENT); final long tail = CAPACITY - HEADER_LENGTH; final long head = tail - (ALIGNMENT * 4); when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn(head); when(buffer.getLong(TAIL_COUNTER_INDEX)).thenReturn(tail); final UnsafeBuffer srcBuffer = new UnsafeBuffer(allocateDirect(1024)); final int srcIndex = 0; assertTrue(ringBuffer.write(MSG_TYPE_ID, srcBuffer, srcIndex, length)); final InOrder inOrder = inOrder(buffer); inOrder.verify(buffer).putLongOrdered(TAIL_COUNTER_INDEX, tail + alignedRecordLength + HEADER_LENGTH); inOrder.verify(buffer).putLong(0, 0L); inOrder.verify(buffer).putIntOrdered(lengthOffset((int)tail), -HEADER_LENGTH); inOrder.verify(buffer).putInt(typeOffset((int)tail), PADDING_MSG_TYPE_ID); inOrder.verify(buffer).putIntOrdered(lengthOffset((int)tail), HEADER_LENGTH); inOrder.verify(buffer).putLong(alignedRecordLength, 0L); inOrder.verify(buffer).putIntOrdered(lengthOffset(0), -recordLength); inOrder.verify(buffer).putBytes(encodedMsgOffset(0), srcBuffer, srcIndex, length); inOrder.verify(buffer).putInt(typeOffset(0), MSG_TYPE_ID); inOrder.verify(buffer).putIntOrdered(lengthOffset(0), recordLength); }
/**
 * Array convenience overload: wraps the array as a list and delegates to the
 * list-based {@code apply}.
 *
 * @param a input array
 * @return the selected elements as a list
 */
public <T> List<T> apply(T[] a) { return apply(Arrays.asList(a)); }
// Verifies a range with a negative lower bound clamps to the start across array,
// list and set inputs, yielding the first two elements.
@Test public void maxOnlyRange() { Range r = new Range(-1,2); assertEquals("[a, b]", toS(r.apply(array))); assertEquals("[a, b]", toS(r.apply(list))); assertEquals("[a, b]", toS(r.apply(set))); }
/**
 * Converts an account's badges to API {@link Badge} objects, localized via a resource
 * bundle, and appends badges that are enabled for all accounts.
 *
 * @param acceptableLanguages preferred locales for badge name/description lookup
 * @param accountBadges       badges on the account
 * @param isSelf              whether the viewer is the account owner (hidden badges
 *                            are only included for self)
 * @return localized badges; empty list when there is nothing to show
 */
@Override
public List<Badge> convert(
    final List<Locale> acceptableLanguages,
    final List<AccountBadge> accountBadges,
    final boolean isSelf) {
    if (accountBadges.isEmpty() && badgeIdsEnabledForAll.isEmpty()) {
        return List.of();
    }
    final Instant now = clock.instant();
    final ResourceBundle resourceBundle = headerControlledResourceBundleLookup.getResourceBundle(BASE_NAME, acceptableLanguages);
    // Keep only badges that are visible (or viewer is self), unexpired, and known.
    List<Badge> badges = accountBadges.stream()
        .filter(accountBadge -> (isSelf || accountBadge.isVisible())
            && now.isBefore(accountBadge.getExpiration())
            && knownBadges.containsKey(accountBadge.getId()))
        .map(accountBadge -> {
            BadgeConfiguration configuration = knownBadges.get(accountBadge.getId());
            return newBadge(
                isSelf,
                accountBadge.getId(),
                configuration.getCategory(),
                resourceBundle.getString(accountBadge.getId() + "_name"),
                resourceBundle.getString(accountBadge.getId() + "_description"),
                configuration.getSprites(),
                configuration.getSvg(),
                configuration.getSvgs(),
                accountBadge.getExpiration(),
                accountBadge.isVisible());
        })
        // Mutable collection so the enabled-for-all badges can be appended below.
        .collect(Collectors.toCollection(ArrayList::new));
    // Badges enabled for everyone are always visible and expire a day from now.
    badges.addAll(badgeIdsEnabledForAll.stream().filter(knownBadges::containsKey).map(id -> {
        BadgeConfiguration configuration = knownBadges.get(id);
        return newBadge(
            isSelf,
            id,
            configuration.getCategory(),
            resourceBundle.getString(id + "_name"),
            resourceBundle.getString(id + "_description"),
            configuration.getSprites(),
            configuration.getSvg(),
            configuration.getSvgs(),
            now.plus(Duration.ofDays(1)),
            true);
    }).collect(Collectors.toList()));
    return badges;
}
// Verifies the converter's custom ResourceBundle.Control: null-argument checks, the
// properties-only format list, and the fallback-locale chain ending at the system
// default locale (temporarily overridden so the test is independent of the host).
@Test void testCustomControl() { BadgesConfiguration badgesConfiguration = createBadges(1); ConfiguredProfileBadgeConverter badgeConverter = new ConfiguredProfileBadgeConverter(clock, badgesConfiguration, new HeaderControlledResourceBundleLookup(resourceBundleFactory)); Locale defaultLocale = Locale.getDefault(); Locale enGb = new Locale("en", "GB"); Locale en = new Locale("en"); Locale esUs = new Locale("es", "US"); ArgumentCaptor<Control> controlArgumentCaptor = setupResourceBundle(enGb); badgeConverter.convert(List.of(enGb, en, esUs), List.of(new AccountBadge(idFor(0), Instant.ofEpochSecond(43), true)), false); Control control = controlArgumentCaptor.getValue(); assertThatNullPointerException().isThrownBy(() -> control.getFormats(null)); assertThatNullPointerException().isThrownBy(() -> control.getFallbackLocale(null, enGb)); assertThatNullPointerException().isThrownBy( () -> control.getFallbackLocale(ConfiguredProfileBadgeConverter.BASE_NAME, null)); assertThat(control.getFormats(ConfiguredProfileBadgeConverter.BASE_NAME)).isNotNull().hasSize(1).containsOnly( Control.FORMAT_PROPERTIES.toArray(new String[0])); try { // temporarily override for purpose of ensuring this test doesn't change based on system default locale Locale.setDefault(new Locale("xx", "XX")); assertThat(control.getFallbackLocale(ConfiguredProfileBadgeConverter.BASE_NAME, enGb)).isEqualTo(en); assertThat(control.getFallbackLocale(ConfiguredProfileBadgeConverter.BASE_NAME, en)).isEqualTo(esUs); assertThat(control.getFallbackLocale(ConfiguredProfileBadgeConverter.BASE_NAME, esUs)).isEqualTo( Locale.getDefault()); assertThat(control.getFallbackLocale(ConfiguredProfileBadgeConverter.BASE_NAME, Locale.getDefault())).isNull(); // now test what happens if the system default locale is in the list // this should always terminate at the system default locale since the development defined bundle should get // returned at that point anyhow badgeConverter.convert(List.of(enGb, Locale.getDefault(), en, esUs), 
List.of(new AccountBadge(idFor(0), Instant.ofEpochSecond(43), true)), false); Control control2 = controlArgumentCaptor.getValue(); assertThat(control2.getFallbackLocale(ConfiguredProfileBadgeConverter.BASE_NAME, enGb)).isEqualTo( Locale.getDefault()); assertThat(control2.getFallbackLocale(ConfiguredProfileBadgeConverter.BASE_NAME, Locale.getDefault())).isNull(); } finally { Locale.setDefault(defaultLocale); } }
/**
 * Resolves the batch segment ingestion frequency for a table.
 * The ingestion config takes precedence; the legacy validation-config push
 * frequency is used as a fallback.
 *
 * @param tableConfig table configuration to inspect
 * @return the configured frequency, or {@code null} if set nowhere
 */
public static String getBatchSegmentIngestionFrequency(TableConfig tableConfig) {
    String frequency = null;
    if (tableConfig.getIngestionConfig() != null
        && tableConfig.getIngestionConfig().getBatchIngestionConfig() != null) {
        frequency = tableConfig.getIngestionConfig().getBatchIngestionConfig().getSegmentIngestionFrequency();
    }
    // Fall back to the deprecated location in the validation config.
    return frequency != null ? frequency : tableConfig.getValidationConfig().getSegmentPushFrequency();
}
// Verifies resolution precedence: ingestion config wins over segmentsConfig,
// segmentsConfig is the fallback, and null is returned when set nowhere.
@Test public void testGetPushFrequency() { // get from ingestion config, when not present in segmentsConfig IngestionConfig ingestionConfig = new IngestionConfig(); ingestionConfig.setBatchIngestionConfig(new BatchIngestionConfig(null, "APPEND", "HOURLY")); TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").setIngestionConfig(ingestionConfig).build(); Assert.assertEquals(IngestionConfigUtils.getBatchSegmentIngestionFrequency(tableConfig), "HOURLY"); // get from ingestion config, even if present in segmentsConfig SegmentsValidationAndRetentionConfig segmentsValidationAndRetentionConfig = new SegmentsValidationAndRetentionConfig(); segmentsValidationAndRetentionConfig.setSegmentPushFrequency("DAILY"); tableConfig.setValidationConfig(segmentsValidationAndRetentionConfig); Assert.assertEquals(IngestionConfigUtils.getBatchSegmentIngestionFrequency(tableConfig), "HOURLY"); // get from segmentsConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").build(); tableConfig.setValidationConfig(segmentsValidationAndRetentionConfig); Assert.assertEquals(IngestionConfigUtils.getBatchSegmentIngestionFrequency(tableConfig), "DAILY"); // present nowhere segmentsValidationAndRetentionConfig.setSegmentPushFrequency(null); Assert.assertNull(IngestionConfigUtils.getBatchSegmentIngestionFrequency(tableConfig)); }
/**
 * Creates a JMS connection using a copy of this factory's connection info,
 * so the caller cannot mutate the factory's shared state.
 *
 * @return a new connection
 * @throws JMSException if the connection cannot be established
 */
public Connection createConnection() throws JMSException { return createConnection(info.copy()); }
// Verifies the factory survives a Java serialization round trip and that a connection
// created from the deserialized factory retains the configured prefetch policy.
@Test(timeout = 60000) public void testSerializability() throws Exception { ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(mcf, new ConnectionManagerAdapter(), info); ByteArrayOutputStream bos = new ByteArrayOutputStream(); ObjectOutputStream oos = new ObjectOutputStream(bos); oos.writeObject(factory); oos.close(); byte[] byteArray = bos.toByteArray(); ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(byteArray)); ActiveMQConnectionFactory deserializedFactory = (ActiveMQConnectionFactory) ois.readObject(); ois.close(); Connection con = deserializedFactory.createConnection("defaultUser", "defaultPassword"); ActiveMQConnection connection = ((ActiveMQConnection) ((ManagedConnectionProxy) con).getManagedConnection().getPhysicalConnection()); assertEquals(100, connection.getPrefetchPolicy().getQueuePrefetch()); assertNotNull("Connection object returned by ActiveMQConnectionFactory.createConnection() is null", con); connection.close(); }
/**
 * Computes chunked checksums over {@code data} into {@code sums}, delegating to the
 * native implementation with verification disabled (empty file name, position 0,
 * verify=false).
 *
 * @param bytesPerSum  number of data bytes covered by each checksum
 * @param checksumType checksum algorithm id
 * @param sums         output array receiving the checksums
 * @param sumsOffset   offset into {@code sums}
 * @param data         input data
 * @param dataOffset   offset into {@code data}
 * @param dataLength   number of data bytes to process
 */
public static void calculateChunkedSumsByteArray(int bytesPerSum, int checksumType, byte[] sums, int sumsOffset, byte[] data, int dataOffset, int dataLength) { nativeComputeChunkedSumsByteArray(bytesPerSum, checksumType, sums, sumsOffset, data, dataOffset, dataLength, "", 0, false); }
// Verifies that computing checksums over valid data completes without throwing.
@Test public void testCalculateChunkedSumsByteArraySuccess() throws ChecksumException { allocateArrayByteBuffers(); fillDataAndValidChecksums(); NativeCrc32.calculateChunkedSumsByteArray(bytesPerChecksum, checksumType.id, checksums.array(), checksums.position(), data.array(), data.position(), data.remaining()); }
/**
 * Divides {@code v1} by {@code v2} using the default scale
 * ({@code DEFAULT_DIV_SCALE}); delegates to the scale-aware overload.
 *
 * @param v1 dividend
 * @param v2 divisor
 * @return the quotient rounded to the default scale
 */
public static double div(float v1, float v2) { return div(v1, v2, DEFAULT_DIV_SCALE); }
// Verifies integer-valued division of Number arguments yields the expected int value.
@Test public void divIntegerTest(){ assertEquals(1001013, NumberUtil.div(100101300, (Number) 100).intValue()); }
/**
 * Returns a comparator for {@link StructLike} rows of the given struct type.
 *
 * @param struct schema describing the struct's fields
 * @return a field-wise comparator for rows of that struct
 */
public static Comparator<StructLike> forType(Types.StructType struct) { return new StructLikeComparator(struct); }
// Verifies the integer-type comparator orders 0 before 1.
@Test public void testInt() { assertComparesCorrectly(Comparators.forType(Types.IntegerType.get()), 0, 1); }
/**
 * Fits a DENCLUE model with default tolerance and a minimum cluster size that
 * scales with the data set; delegates to the full overload.
 *
 * @param data  input observations
 * @param sigma smoothing parameter of the Gaussian kernel
 * @param m     number of density attractor seeds
 * @return the fitted model
 */
public static DENCLUE fit(double[][] data, double sigma, int m) {
    final int n = data.length;
    final double tolerance = 1E-2;
    // At least 10 points per cluster, growing with data size.
    final int minPoints = Math.max(10, n / 200);
    return fit(data, sigma, m, tolerance, minPoints);
}
// Verifies DENCLUE on the Gaussian mixture data set: seeded for repeatability, asserts
// exact rand-index scores, prints mutual-information metrics, and round-trips the model
// through serialization.
@Test public void testGaussianMixture() throws Exception { System.out.println("Gaussian Mixture"); double[][] x = GaussianMixture.x; int[] y = GaussianMixture.y; MathEx.setSeed(19650218); // to get repeatable results. DENCLUE model = DENCLUE.fit(x, 0.85, 100); System.out.println(model); double r = RandIndex.of(y, model.y); double r2 = AdjustedRandIndex.of(y, model.y); System.out.println("The number of clusters: " + model.k); System.out.format("Training rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2); assertEquals(0.6080, r, 1E-4); assertEquals(0.2460, r2, 1E-4); System.out.format("MI = %.2f%n", MutualInformation.of(y, model.y)); System.out.format("NMI.joint = %.2f%%%n", 100 * NormalizedMutualInformation.joint(y, model.y)); System.out.format("NMI.max = %.2f%%%n", 100 * NormalizedMutualInformation.max(y, model.y)); System.out.format("NMI.min = %.2f%%%n", 100 * NormalizedMutualInformation.min(y, model.y)); System.out.format("NMI.sum = %.2f%%%n", 100 * NormalizedMutualInformation.sum(y, model.y)); System.out.format("NMI.sqrt = %.2f%%%n", 100 * NormalizedMutualInformation.sqrt(y, model.y)); java.nio.file.Path temp = Write.object(model); Read.object(temp); }
/**
 * Returns the current version as a string, read from the version supplier.
 */
@Override public String getVersion() { return version.get().toString(); }
// Verifies getVersion returns the string form of the version supplied by sonarQubeVersion.
@Test public void test_getVersion() { Version version = Version.create(6, 1); when(sonarQubeVersion.get()).thenReturn(version); assertThat(underTest.getVersion()).isEqualTo(version.toString()); }
/**
 * Registers a state store with this state manager, validating its name and wiring
 * restore/commit callbacks. The store is recorded before changelog registration so
 * a later failure still closes it via the manager's own {@code close}.
 *
 * @param store                state store to register
 * @param stateRestoreCallback callback used to restore records into the store
 * @param commitCallback       callback invoked on commit
 * @throws IllegalArgumentException if the name collides with the checkpoint file
 *                                  or the store is already registered
 */
@Override
public void registerStore(final StateStore store,
                          final StateRestoreCallback stateRestoreCallback,
                          final CommitCallback commitCallback) {
    final String storeName = store.name();

    // TODO (KAFKA-12887): we should not trigger user's exception handler for illegal-argument but always
    // fail-crash; in this case we would not need to immediately close the state store before throwing
    if (CHECKPOINT_FILE_NAME.equals(storeName)) {
        store.close();
        throw new IllegalArgumentException(format("%sIllegal store name: %s, which collides with the pre-defined " +
            "checkpoint file name", logPrefix, storeName));
    }

    if (stores.containsKey(storeName)) {
        store.close();
        throw new IllegalArgumentException(format("%sStore %s has already been registered.", logPrefix, storeName));
    }

    if (stateRestoreCallback instanceof StateRestoreListener) {
        log.warn("The registered state restore callback is also implementing the state restore listener interface, " +
            "which is not expected and would be ignored");
    }

    // Logging-enabled stores get full changelog metadata; others only a commit callback.
    final StateStoreMetadata storeMetadata = isLoggingEnabled(storeName) ?
        new StateStoreMetadata(
            store,
            getStorePartition(storeName),
            stateRestoreCallback,
            commitCallback,
            converterForStore(store)) :
        new StateStoreMetadata(store, commitCallback);

    // register the store first, so that if later an exception is thrown then eventually while we call `close`
    // on the state manager this state store would be closed as well
    stores.put(storeName, storeMetadata);

    if (!stateUpdaterEnabled) {
        maybeRegisterStoreWithChangelogReader(storeName);
    }

    log.debug("Registered state store {} to its state manager", storeName);
}
// Verifies that a StreamsException thrown by a store's flush() is propagated unchanged
// (same instance) by the state manager's flush.
@Test public void shouldPreserveStreamsExceptionOnFlushIfStoreThrows() { final StreamsException exception = new StreamsException("KABOOM!"); final ProcessorStateManager stateManager = getStateManager(Task.TaskType.ACTIVE); final MockKeyValueStore stateStore = new MockKeyValueStore(persistentStoreName, true) { @Override public void flush() { throw exception; } }; stateManager.registerStore(stateStore, stateStore.stateRestoreCallback, null); final StreamsException thrown = assertThrows(StreamsException.class, stateManager::flush); assertEquals(exception, thrown); }
/**
 * Returns the oldest ancestor of the table's current snapshot, i.e. the last
 * snapshot visited when walking the current-ancestor chain.
 *
 * @param table table to inspect
 * @return the oldest ancestor, or {@code null} if there are no ancestors
 */
public static Snapshot oldestAncestor(Table table) {
    Snapshot oldest = null;
    // currentAncestors iterates from the current snapshot back in history,
    // so the final element seen is the oldest.
    for (Snapshot ancestor : currentAncestors(table)) {
        oldest = ancestor;
    }
    return oldest;
}
// Verifies oldestAncestor and its -Of/-After variants resolve to the expected snapshots.
@Test public void oldestAncestor() { Snapshot snapshot = SnapshotUtil.oldestAncestor(table); assertThat(snapshot.snapshotId()).isEqualTo(snapshotBaseId); snapshot = SnapshotUtil.oldestAncestorOf(table, snapshotMain2Id); assertThat(snapshot.snapshotId()).isEqualTo(snapshotBaseId); snapshot = SnapshotUtil.oldestAncestorAfter(table, snapshotBaseTimestamp + 1); assertThat(snapshot.snapshotId()).isEqualTo(snapshotMain1Id); }
/**
 * Converts this member's state into a {@code ShareGroupDescribeResponseData.Member}
 * suitable for a describe-group response.
 *
 * @param topicsImage topic metadata used to resolve assigned partitions
 * @return the describe-response representation of this member
 */
public ShareGroupDescribeResponseData.Member asShareGroupDescribeMember(
    TopicsImage topicsImage
) {
    // Resolve the assignment up front; topic ids are mapped to names via the image.
    final ShareGroupDescribeResponseData.Assignment describedAssignment =
        new ShareGroupDescribeResponseData.Assignment()
            .setTopicPartitions(topicPartitionsFromMap(assignedPartitions, topicsImage));
    // Defensive copy of the subscription list; null stays null.
    final ArrayList<String> subscriptions =
        subscribedTopicNames == null ? null : new ArrayList<>(subscribedTopicNames);
    return new ShareGroupDescribeResponseData.Member()
        .setMemberEpoch(memberEpoch)
        .setMemberId(memberId)
        .setAssignment(describedAssignment)
        .setClientHost(clientHost)
        .setClientId(clientId)
        .setRackId(rackId)
        .setSubscribedTopicNames(subscriptions);
}
// Verifies the full field-by-field mapping from ShareGroupMember to the describe-response
// Member, including topic-id-to-name resolution of the assignment.
@Test public void testAsShareGroupDescribeMember() { Uuid topicId1 = Uuid.randomUuid(); Uuid topicId2 = Uuid.randomUuid(); MetadataImage metadataImage = new MetadataImageBuilder() .addTopic(topicId1, "topic1", 3) .addTopic(topicId2, "topic2", 3) .build(); List<String> subscribedTopicNames = Arrays.asList("topic1", "topic2"); List<Integer> assignedPartitions = Arrays.asList(0, 1, 2); int epoch = 10; ShareGroupMemberMetadataValue record = new ShareGroupMemberMetadataValue() .setClientId("client-id") .setClientHost("host-id") .setRackId("rack-id") .setSubscribedTopicNames(subscribedTopicNames); String memberId = Uuid.randomUuid().toString(); ShareGroupMember member = new ShareGroupMember.Builder(memberId) .updateWith(record) .setMemberEpoch(epoch) .setAssignedPartitions(mkAssignment( mkTopicAssignment(topicId1, 0, 1, 2))) .build(); ShareGroupDescribeResponseData.Member actual = member.asShareGroupDescribeMember(metadataImage.topics()); ShareGroupDescribeResponseData.Member expected = new ShareGroupDescribeResponseData.Member() .setMemberId(memberId) .setMemberEpoch(epoch) .setClientId("client-id") .setRackId("rack-id") .setClientHost("host-id") .setSubscribedTopicNames(subscribedTopicNames) .setAssignment( new ShareGroupDescribeResponseData.Assignment() .setTopicPartitions(Collections.singletonList(new ShareGroupDescribeResponseData.TopicPartitions() .setTopicId(topicId1) .setTopicName("topic1") .setPartitions(assignedPartitions) )) ); assertEquals(expected, actual); }
/**
 * Joins the elements of the stream; delegates to the iterator-based overload.
 *
 * @param parts elements to join
 * @return the joined string
 */
public String join(final Stream<?> parts) { return join(parts.iterator()); }
// Verifies joining an empty collection yields the empty string.
@Test public void shouldHandleEmptyList() { assertThat(joiner.join(ImmutableList.of()), is("")); }
/**
 * Submits a Grand Exchange trade update for the given slot, comparing the live offer
 * against the previously saved offer to decide whether this is a new offer, a
 * cancellation, or an incremental fill. Ignores states other than buying/selling/
 * cancelled, and bails out on desync (saved offer no longer matches) or no change.
 *
 * @param slot  GE slot index the offer occupies
 * @param offer current offer state from the client
 */
@VisibleForTesting
void submitTrade(int slot, GrandExchangeOffer offer) {
    GrandExchangeOfferState state = offer.getState();

    // Only these four states represent something worth reporting.
    if (state != GrandExchangeOfferState.CANCELLED_BUY && state != GrandExchangeOfferState.CANCELLED_SELL
        && state != GrandExchangeOfferState.BUYING && state != GrandExchangeOfferState.SELLING) {
        return;
    }

    SavedOffer savedOffer = getOffer(slot);
    // Offers observed shortly after login arrive in a burst; flag them so the
    // backend can treat them differently.
    boolean login = client.getTickCount() <= lastLoginTick + GE_LOGIN_BURST_WINDOW;
    if (savedOffer == null && (state == GrandExchangeOfferState.BUYING || state == GrandExchangeOfferState.SELLING)
        && offer.getQuantitySold() == 0) {
        // new offer
        GrandExchangeTrade grandExchangeTrade = new GrandExchangeTrade();
        grandExchangeTrade.setBuy(state == GrandExchangeOfferState.BUYING);
        grandExchangeTrade.setItemId(offer.getItemId());
        grandExchangeTrade.setTotal(offer.getTotalQuantity());
        grandExchangeTrade.setOffer(offer.getPrice());
        grandExchangeTrade.setSlot(slot);
        grandExchangeTrade.setWorldType(getGeWorldType());
        grandExchangeTrade.setLogin(login);
        grandExchangeTrade.setSeq(tradeSeq++);
        grandExchangeTrade.setResetTime(getLimitResetTime(offer.getItemId()));

        log.debug("Submitting new trade: {}", grandExchangeTrade);
        grandExchangeClient.submit(grandExchangeTrade);
        return;
    }

    if (savedOffer == null || savedOffer.getItemId() != offer.getItemId()
        || savedOffer.getPrice() != offer.getPrice() || savedOffer.getTotalQuantity() != offer.getTotalQuantity()) {
        // desync
        return;
    }

    if (savedOffer.getState() == offer.getState() && savedOffer.getQuantitySold() == offer.getQuantitySold()) {
        // no change
        return;
    }

    if (state == GrandExchangeOfferState.CANCELLED_BUY || state == GrandExchangeOfferState.CANCELLED_SELL) {
        // Cancellation: report the final quantities/spend at cancel time.
        GrandExchangeTrade grandExchangeTrade = new GrandExchangeTrade();
        grandExchangeTrade.setBuy(state == GrandExchangeOfferState.CANCELLED_BUY);
        grandExchangeTrade.setCancel(true);
        grandExchangeTrade.setItemId(offer.getItemId());
        grandExchangeTrade.setQty(offer.getQuantitySold());
        grandExchangeTrade.setTotal(offer.getTotalQuantity());
        grandExchangeTrade.setSpent(offer.getSpent());
        grandExchangeTrade.setOffer(offer.getPrice());
        grandExchangeTrade.setSlot(slot);
        grandExchangeTrade.setWorldType(getGeWorldType());
        grandExchangeTrade.setLogin(login);
        grandExchangeTrade.setSeq(tradeSeq++);
        grandExchangeTrade.setResetTime(getLimitResetTime(offer.getItemId()));

        log.debug("Submitting cancelled: {}", grandExchangeTrade);
        grandExchangeClient.submit(grandExchangeTrade);
        saveTrade(grandExchangeTrade);
        return;
    }

    // Incremental fill: deltas must be positive or there is nothing to report.
    final int qty = offer.getQuantitySold() - savedOffer.getQuantitySold();
    final int dspent = offer.getSpent() - savedOffer.getSpent();
    if (qty <= 0 || dspent <= 0) {
        return;
    }

    GrandExchangeTrade grandExchangeTrade = new GrandExchangeTrade();
    grandExchangeTrade.setBuy(state == GrandExchangeOfferState.BUYING);
    grandExchangeTrade.setItemId(offer.getItemId());
    grandExchangeTrade.setQty(offer.getQuantitySold());
    grandExchangeTrade.setDqty(qty);
    grandExchangeTrade.setTotal(offer.getTotalQuantity());
    grandExchangeTrade.setDspent(dspent);
    grandExchangeTrade.setSpent(offer.getSpent());
    grandExchangeTrade.setOffer(offer.getPrice());
    grandExchangeTrade.setSlot(slot);
    grandExchangeTrade.setWorldType(getGeWorldType());
    grandExchangeTrade.setLogin(login);
    grandExchangeTrade.setSeq(tradeSeq++);
    grandExchangeTrade.setResetTime(getLimitResetTime(offer.getItemId()));

    log.debug("Submitting trade: {}", grandExchangeTrade);
    grandExchangeClient.submit(grandExchangeTrade);
    saveTrade(grandExchangeTrade);
}
// Verifies that a CANCELLED_BUY offer matching the saved offer produces a cancel trade
// submission carrying the final quantity, total and spend.
@Test public void testCancelTrade() { SavedOffer savedOffer = new SavedOffer(); savedOffer.setItemId(ItemID.ABYSSAL_WHIP); savedOffer.setQuantitySold(1); savedOffer.setTotalQuantity(10); savedOffer.setPrice(1000); savedOffer.setSpent(25); savedOffer.setState(GrandExchangeOfferState.BUYING); when(configManager.getRSProfileConfiguration("geoffer", "0")).thenReturn(gson.toJson(savedOffer)); GrandExchangeOffer grandExchangeOffer = mock(GrandExchangeOffer.class); when(grandExchangeOffer.getQuantitySold()).thenReturn(1); when(grandExchangeOffer.getItemId()).thenReturn(ItemID.ABYSSAL_WHIP); when(grandExchangeOffer.getTotalQuantity()).thenReturn(10); when(grandExchangeOffer.getPrice()).thenReturn(1000); when(grandExchangeOffer.getSpent()).thenReturn(25); when(grandExchangeOffer.getState()).thenReturn(GrandExchangeOfferState.CANCELLED_BUY); grandExchangePlugin.submitTrade(0, grandExchangeOffer); ArgumentCaptor<GrandExchangeTrade> captor = ArgumentCaptor.forClass(GrandExchangeTrade.class); verify(grandExchangeClient).submit(captor.capture()); GrandExchangeTrade trade = captor.getValue(); assertTrue(trade.isBuy()); assertTrue(trade.isCancel()); assertEquals(ItemID.ABYSSAL_WHIP, trade.getItemId()); assertEquals(1, trade.getQty()); assertEquals(10, trade.getTotal()); assertEquals(25, trade.getSpent()); }
/**
 * Tokenizes English text into words and punctuation. Optionally expands common
 * contractions first, then spaces out delimiters, and finally re-attaches a
 * trailing period to a known abbreviation. The order of regex applications is
 * significant and must not be changed.
 *
 * @param text input text
 * @return tokens in original order, with empty strings removed
 */
@Override
public String[] split(String text) {
    if (splitContraction) {
        // Irregular contractions first (won't/shan't/ain't), then the generic ones.
        text = WONT_CONTRACTION.matcher(text).replaceAll("$1ill not");
        text = SHANT_CONTRACTION.matcher(text).replaceAll("$1ll not");
        text = AINT_CONTRACTION.matcher(text).replaceAll("$1m not");
        for (Pattern regexp : NOT_CONTRACTIONS) {
            text = regexp.matcher(text).replaceAll("$1 not");
        }
        for (Pattern regexp : CONTRACTIONS2) {
            text = regexp.matcher(text).replaceAll("$1 $2");
        }
        for (Pattern regexp : CONTRACTIONS3) {
            text = regexp.matcher(text).replaceAll("$1 $2 $3");
        }
    }

    // Surround/space out delimiter characters so whitespace splitting isolates them.
    text = DELIMITERS[0].matcher(text).replaceAll(" $1 ");
    text = DELIMITERS[1].matcher(text).replaceAll(" $1");
    text = DELIMITERS[2].matcher(text).replaceAll(" $1");
    text = DELIMITERS[3].matcher(text).replaceAll(" . ");
    text = DELIMITERS[4].matcher(text).replaceAll(" $1 ");

    String[] words = WHITESPACE.split(text);
    // If the text ends with a lone ".", and the preceding token is a known
    // abbreviation, glue the period back onto the abbreviation.
    if (words.length > 1 && words[words.length-1].equals(".")) {
        if (EnglishAbbreviations.contains(words[words.length-2])) {
            words[words.length-2] = words[words.length-2] + ".";
        }
    }

    // Drop empty tokens produced by the splitting above.
    ArrayList<String> result = new ArrayList<>();
    for (String token : words) {
        if (!token.isEmpty()) {
            result.add(token);
        }
    }

    return result.toArray(new String[0]);
}
// Verifies abbreviations keep their trailing periods while the sentence-final period
// remains a separate token.
@Test public void testSplitAbbreviation() { System.out.println("tokenize abbreviation"); String text = "Here are some examples of abbreviations: A.B., abbr., " + "acad., A.D., alt., A.M., B.C., etc."; String[] expResult = {"Here", "are", "some", "examples", "of", "abbreviations", ":", "A.B.", ",", "abbr.", ",", "acad.", ",", "A.D.", ",", "alt.", ",", "A.M.", ",", "B.C.", ",", "etc.", "."}; SimpleTokenizer instance = new SimpleTokenizer(); String[] result = instance.split(text); assertEquals(expResult.length, result.length); for (int i = 0; i < result.length; i++) { assertEquals(expResult[i], result[i]); } }
/**
 * Processes one incoming row: appends the configured constant fields to it and
 * forwards the widened row downstream. Lazily builds the output row metadata on
 * the first row.
 *
 * @param smi step metadata interface (unused directly here)
 * @param sdi step data interface (unused directly here)
 * @return {@code true} to keep processing, {@code false} when input is exhausted
 * @throws KettleException on row-handling failures
 */
@Override
public boolean processRow( StepMetaInterface smi, StepDataInterface sdi ) throws KettleException {

    Object[] r = null;
    r = getRow();

    if ( r == null ) { // no more rows to be expected from the previous step(s)
      setOutputDone();
      return false;
    }

    if ( data.firstRow ) {
      // The output meta is the original input meta + the
      // additional constant fields.
      data.firstRow = false;

      data.outputMeta = getInputRowMeta().clone();
      meta.getFields( data.outputMeta, getStepname(), null, null, this, repository, metaStore );
    }

    // Add the constant data to the end of the row.
    r = RowDataUtil.addRowData( r, getInputRowMeta().size(), data.getConstants().getData() );

    putRow( data.outputMeta, r );

    if ( log.isRowLevel() ) {
      logRowlevel( BaseMessages.getString( PKG, "Constant.Log.Wrote.Row", Long.toString( getLinesWritten() ), getInputRowMeta().getString( r ) ) );
    }

    if ( checkFeedback( getLinesWritten() ) ) {
      if ( log.isBasic() ) {
        logBasic( BaseMessages.getString( PKG, "Constant.Log.LineNr", Long.toString( getLinesWritten() ) ) );
      }
    }

    return true;
}
// Verifies processRow returns false (done) when the previous step yields no rows.
@Test public void testProcessRow_fail() throws Exception { doReturn( null ).when( constantSpy ).getRow(); doReturn( null ).when( constantSpy ).getInputRowMeta(); boolean success = constantSpy.processRow( constantMeta, constantData ); assertFalse( success ); }
/**
 * Looks up the remoting descriptor previously registered for the given bean.
 *
 * @param bean the bean instance used as the map key
 * @return the descriptor, or {@code null} if the bean was never parsed
 */
public RemotingDesc getRemotingBeanDesc(Object bean) { return remotingServiceMap.get(bean); }
// Verifies that a bean parsed via parserRemotingServiceInfo is afterwards resolvable
// through getRemotingBeanDesc.
@Test public void testGetRemotingBeanDesc() { SimpleRemoteBean remoteBean = new SimpleRemoteBean(); remotingParser.parserRemotingServiceInfo(remoteBean, remoteBean.getClass().getName(), new SimpleRemotingParser()); assertNotNull(remotingParser.getRemotingBeanDesc(remoteBean)); }
/**
 * Parses an Artifactory checksum-search response into Maven artifacts. Streams the
 * JSON, verifying each hit's checksums against the dependency and deriving GAV
 * coordinates from the repository path.
 *
 * @param dependency dependency whose hashes are being matched
 * @param conn       open connection to the Artifactory search endpoint
 * @return one artifact per matching file
 * @throws IOException           on read/parse failure
 * @throws FileNotFoundException if the response contains no matches
 * @throws IllegalStateException if a result path does not match the expected layout
 */
protected List<MavenArtifact> processResponse(Dependency dependency, HttpURLConnection conn) throws IOException {
    final List<MavenArtifact> result = new ArrayList<>();
    try (InputStreamReader streamReader = new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8);
         JsonParser parser = objectReader.getFactory().createParser(streamReader)) {
        if (init(parser) && parser.nextToken() == com.fasterxml.jackson.core.JsonToken.START_OBJECT) {
            // at least one result
            do {
                final FileImpl file = objectReader.readValue(parser);
                // Reject hits whose checksums do not match the dependency's hashes.
                checkHashes(dependency, file.getChecksums());
                final Matcher pathMatcher = PATH_PATTERN.matcher(file.getPath());
                if (!pathMatcher.matches()) {
                    throw new IllegalStateException("Cannot extract the Maven information from the path "
                            + "retrieved in Artifactory " + file.getPath());
                }
                // Repository path segments map onto Maven coordinates.
                final String groupId = pathMatcher.group("groupId").replace('/', '.');
                final String artifactId = pathMatcher.group("artifactId");
                final String version = pathMatcher.group("version");
                result.add(new MavenArtifact(groupId, artifactId, version, file.getDownloadUri(),
                        MavenArtifact.derivePomUrl(artifactId, version, file.getDownloadUri())));
            } while (parser.nextToken() == com.fasterxml.jackson.core.JsonToken.START_OBJECT);
        } else {
            throw new FileNotFoundException("Artifact " + dependency + " not found in Artifactory");
        }
    }
    return result;
}
// Verifies that a payload containing two matching files yields two artifacts with
// correctly derived GAV coordinates and artifact/POM URLs.
@Test public void shouldProcessCorrectlyArtifactoryAnswerWithMultipleMatches() throws IOException { // Given Dependency dependency = new Dependency(); dependency.setSha1sum("94a9ce681a42d0352b3ad22659f67835e560d107"); dependency.setMd5sum("03dcfdd88502505cc5a805a128bfdd8d"); final HttpURLConnection urlConnection = mock(HttpURLConnection.class); final byte[] payload = multipleMatchesPayload(); when(urlConnection.getInputStream()).thenReturn(new ByteArrayInputStream(payload)); // When final List<MavenArtifact> mavenArtifacts = searcher.processResponse(dependency, urlConnection); // Then assertEquals(2, mavenArtifacts.size()); final MavenArtifact artifact1 = mavenArtifacts.get(0); assertEquals("axis", artifact1.getGroupId()); assertEquals("axis", artifact1.getArtifactId()); assertEquals("1.4", artifact1.getVersion()); assertEquals("https://artifactory.techno.ingenico.com/artifactory/gradle-libs-cache/axis/axis/1.4/axis-1.4.jar", artifact1.getArtifactUrl()); assertEquals("https://artifactory.techno.ingenico.com/artifactory/gradle-libs-cache/axis/axis/1.4/axis-1.4.pom", artifact1.getPomUrl()); final MavenArtifact artifact2 = mavenArtifacts.get(1); assertEquals("org.apache.axis", artifact2.getGroupId()); assertEquals("axis", artifact2.getArtifactId()); assertEquals("1.4", artifact2.getVersion()); assertEquals("https://artifactory.techno.ingenico.com/artifactory/gradle-libs-cache/org/apache/axis/axis/1.4/axis-1.4.jar", artifact2.getArtifactUrl()); assertEquals("https://artifactory.techno.ingenico.com/artifactory/gradle-libs-cache/org/apache/axis/axis/1.4/axis-1.4.pom", artifact2.getPomUrl()); }
/**
 * Creates an {@code Interval} for the given interval name, dispatching on the concrete
 * {@code TimeRange} type (keyword, absolute or relative).
 *
 * @param interval the interval specification (e.g. "minute")
 * @param timeRange the time range whose type determines how the interval is built
 * @return the interval appropriate for the range type
 * @throws IllegalArgumentException if the time range type is not recognized
 */
public static Interval of(String interval, TimeRange timeRange) {
    switch (timeRange.type()) {
        case TimeRange.KEYWORD:
            return timestampInterval(interval);
        case TimeRange.ABSOLUTE:
            return ofAbsoluteRange(interval, (AbsoluteRange) timeRange);
        case TimeRange.RELATIVE:
            return ofRelativeRange(interval, (RelativeRange) timeRange);
        default:
            // IllegalArgumentException is a RuntimeException subclass, so existing catch
            // sites keep working while the failure is reported more precisely than a raw
            // RuntimeException; the default case also makes the switch exhaustive.
            throw new IllegalArgumentException("Unable to parse time range type: " + timeRange.type());
    }
}
// Verifies that a relative range beyond the configured limits falls back to a scaled
// AutoInterval instead of the requested fixed "minute" interval.
@Test
public void approximatesAutoIntervalWithScalingIfRelativeRangeAndBeyondLimits() {
    // 7200 seconds = 2 hours relative range
    final RelativeRange relativeRange = RelativeRange.create(7200);
    final Interval interval = ApproximatedAutoIntervalFactory.of("minute", relativeRange);
    assertThat(interval).isEqualTo(AutoInterval.create(2.0));
}
/**
 * Expands a (key, value) term into the range features and the edge feature used for
 * predicate range matching, invoking the handlers with hashed feature strings.
 *
 * @param key the attribute key the features are built from
 * @param value the query value to expand
 * @param rangeHandler receives one hashed range feature per arity level
 * @param edgeHandler receives the hashed edge feature plus the offset within that edge
 */
public void expand(String key, long value, RangeHandler rangeHandler, EdgeHandler edgeHandler) {
    if (value < lowerBound || value > upperBound) {
        // Value outside bounds -> expand to nothing.
        return;
    }
    // Negative values have their own level count; sign drives the range direction below.
    int maxLevels = value > 0 ? maxPositiveLevels : maxNegativeLevels;
    int sign = value > 0 ? 1 : -1;

    // Append key to feature string builder
    StringBuilder builder = new StringBuilder(128);
    builder.append(key).append('=');

    long levelSize = arity;
    // Edge interval: value truncated down to the nearest multiple of arity (toward zero).
    long edgeInterval = (value / arity) * arity;
    edgeHandler.handleEdge(createEdgeFeatureHash(builder, edgeInterval),
            (int) Math.abs(value - edgeInterval));
    for (int i = 0; i < maxLevels; ++i) {
        long start = (value / levelSize) * levelSize;
        if (Math.abs(start) + levelSize - 1 < 0) {  // overflow
            break;
        }
        rangeHandler.handleRange(createRangeFeatureHash(builder, start, start + sign * (levelSize - 1)));
        levelSize *= arity;
        if (levelSize <= 0 && levelSize != Long.MIN_VALUE) {  //overflow
            break;
        }
    }
}
// Verifies that range expansion stops at the configured upper bound (9999): levels that
// would exceed it are clipped and no further labels are produced.
@Test
void requireThatUpperBoundIsUsed() {
    PredicateRangeTermExpander expander = new PredicateRangeTermExpander(10, -99, 9999);
    Iterator<String> expectedLabels = List.of(
            "key=40-49",
            "key=0-99",
            "key=0-999",
            "key=0-9999").iterator();
    expander.expand("key", 42,
            range -> assertEquals(PredicateHash.hash64(expectedLabels.next()), range),
            (edge, value) -> {
                // Edge feature is the arity bucket containing 42, offset 2 within it.
                assertEquals(PredicateHash.hash64("key=40"), edge);
                assertEquals(2, value);
            });
    assertFalse(expectedLabels.hasNext());
}
/**
 * Returns a matcher that matches requests whose RPC method equals {@code method}.
 *
 * @param method the exact method name to match; must be non-null and non-empty
 * @throws NullPointerException if {@code method} is null or empty
 *     (NPE is used for both cases, Guava-style, matching this API's other guards)
 */
public static <Req extends RpcRequest> Matcher<Req> methodEquals(String method) {
    if (method == null) throw new NullPointerException("method == null");
    if (method.isEmpty()) throw new NullPointerException("method is empty");
    return new RpcMethodEquals<Req>(method);
}
// A request without a method (null) must not match a concrete method name.
@Test
void methodEquals_unmatched_null() {
    assertThat(methodEquals("Check").matches(request)).isFalse();
}
static Node selectReferenceNode(FlowRule rule, Context context, DefaultNode node) { String refResource = rule.getRefResource(); int strategy = rule.getStrategy(); if (StringUtil.isEmpty(refResource)) { return null; } if (strategy == RuleConstant.STRATEGY_RELATE) { return ClusterBuilderSlot.getClusterNode(refResource); } if (strategy == RuleConstant.STRATEGY_CHAIN) { if (!refResource.equals(context.getName())) { return null; } return node; } // No node. return null; }
// CHAIN strategy: the entrance node is returned only when the rule's refResource matches
// the current context name; otherwise no reference node is selected.
@Test
public void testSelectReferenceNodeForContextEntrance() {
    String contextName = "good_context";
    DefaultNode node = mock(DefaultNode.class);
    Context context = mock(Context.class);
    FlowRule rule = new FlowRule("testSelectReferenceNodeForContextEntrance")
            .setCount(1)
            .setStrategy(RuleConstant.STRATEGY_CHAIN)
            .setRefResource(contextName);
    when(context.getName()).thenReturn(contextName);
    assertEquals(node, FlowRuleChecker.selectReferenceNode(rule, context, node));
    when(context.getName()).thenReturn("other_context");
    assertNull(FlowRuleChecker.selectReferenceNode(rule, context, node));
}
/**
 * Releases the given result partitions asynchronously on the I/O executor so that a slow
 * or blocking release never stalls the caller's thread.
 *
 * @param partitionIds the partitions to release; each is released with a {@code null} cause
 */
@Override
public void releasePartitionsLocally(Collection<ResultPartitionID> partitionIds) {
    ioExecutor.execute(
            () -> partitionIds.forEach(
                    partitionId -> resultPartitionManager.releasePartition(partitionId, null)));
}
// Verifies that releasePartitionsLocally returns immediately even when the underlying
// partition release blocks: the release must run on the I/O executor, not the caller.
@Test
void testSlowIODoesNotBlockRelease() throws Exception {
    BlockerSync sync = new BlockerSync();
    // Partition manager that blocks inside releasePartition until told to continue.
    ResultPartitionManager blockingResultPartitionManager =
            new ResultPartitionManager() {
                @Override
                public void releasePartition(ResultPartitionID partitionId, Throwable cause) {
                    sync.blockNonInterruptible();
                    super.releasePartition(partitionId, cause);
                }
            };
    NettyShuffleEnvironment shuffleEnvironment =
            new NettyShuffleEnvironmentBuilder()
                    .setResultPartitionManager(blockingResultPartitionManager)
                    .setIoExecutor(Executors.newFixedThreadPool(1))
                    .build();
    // Must not block despite the blocking release running in the background.
    shuffleEnvironment.releasePartitionsLocally(Collections.singleton(new ResultPartitionID()));
    sync.awaitBlocker();
    sync.releaseBlocker();
}
/**
 * Adds another grouped stream and its aggregator to this cogrouping.
 *
 * @param groupedStream the grouped stream to cogroup; must not be null
 * @param aggregator the aggregator applied to records of that stream; must not be null
 * @return this instance, for chaining
 * @throws NullPointerException if either argument is null
 */
@SuppressWarnings("unchecked")
@Override
public <VIn> CogroupedKStream<K, VOut> cogroup(final KGroupedStream<K, VIn> groupedStream,
                                               final Aggregator<? super K, ? super VIn, VOut> aggregator) {
    Objects.requireNonNull(groupedStream, "groupedStream can't be null");
    Objects.requireNonNull(aggregator, "aggregator can't be null");
    // Erase the per-stream value type so heterogeneous streams can share one map;
    // safe because each aggregator is only ever applied to its own stream's values.
    final KGroupedStreamImpl<K, ?> streamImpl = (KGroupedStreamImpl<K, ?>) groupedStream;
    final Aggregator<? super K, ? super Object, VOut> erasedAggregator =
            (Aggregator<? super K, ? super Object, VOut>) aggregator;
    groupPatterns.put(streamImpl, erasedAggregator);
    return this;
}
// Null grouped stream must be rejected eagerly with a NullPointerException.
@Test
public void shouldThrowNPEInCogroupIfKGroupedStreamIsNull() {
    assertThrows(NullPointerException.class, () -> cogroupedStream.cogroup(null, MockAggregator.TOSTRING_ADDER));
}
/**
 * Derives the pairing threshold as a weighted sum of a time component and a distance
 * component.
 *
 * <p>The time component uses 7000 because this equals 7 seconds (in milliseconds). Radar
 * hits are normally updated every 13 seconds or less, so any two aircraft will have radar
 * hits within 6.5 seconds of each other; 6500 is rounded up to 7000 for simplicity. The
 * distance component converts the pairing distance from nautical miles to feet.
 *
 * @return timeCoef * 7000 + distCoef * trackPairingDistanceInNM * feetPerNM
 */
public double pairingThreshold() {
    return timeCoef() * 7_000
        + distCoef() * trackPairingDistanceInNM() * Spherical.feetPerNM();
}
// With distCoef = 0 the distance component vanishes, so the threshold reduces to the
// time component alone: timeCoef (1) * 7000.
@Test
public void testDerivedPairThresholdReflectsDistCoef() {
    double TOLERANCE = 0.0001;
    PairingConfig noDistProps = new PairingConfig(timeWindow, 10, 1, 0);
    assertEquals(
        noDistProps.pairingThreshold(),
        7000.0,
        TOLERANCE
    );
}
/**
 * Extracts the MSH-18 (character set) field from an HL7 message.
 *
 * @param hl7Message the raw HL7 message bytes; may be null or empty
 * @param charset charset used to decode the extracted field
 * @return the MSH-18 value, or the empty string if the message is null/empty, the MSH
 *         segment has too few fields, or the field itself is empty
 */
public String findMsh18(byte[] hl7Message, Charset charset) {
    if (hl7Message == null || hl7Message.length == 0) {
        return "";
    }
    // Field separators of the first (MSH) segment; MSH-18 sits between the 17th and
    // 18th separators because MSH-1 is the separator character itself.
    List<Integer> separatorIndexes = findFieldSeparatorIndicesInSegment(hl7Message, 0);
    if (separatorIndexes.size() <= 17) {
        return "";
    }
    int fieldStart = separatorIndexes.get(16) + 1;
    int fieldLength = separatorIndexes.get(17) - separatorIndexes.get(16) - 1;
    return fieldLength > 0 ? new String(hl7Message, fieldStart, fieldLength, charset) : "";
}
// MSH-18 must be extracted correctly even when a trailing field separator follows it.
@Test
public void testFindMsh18WhenExistsWithTrailingPipe() {
    final String testMessage = MSH_SEGMENT + "||||||8859/1|" + '\r' + REMAINING_SEGMENTS;
    assertEquals("8859/1", hl7util.findMsh18(testMessage.getBytes(), charset));
}
/**
 * Builds the argparse4j parser for the producer-performance tool, declaring all
 * supported command-line options (topic/record counts, payload sources, throughput,
 * producer configuration, and transaction settings).
 *
 * @return the fully configured argument parser
 */
static ArgumentParser argParser() {
    ArgumentParser parser = ArgumentParsers
        .newArgumentParser("producer-performance")
        .defaultHelp(true)
        .description("This tool is used to verify the producer performance. To enable transactions, "
            + "you can specify a transaction id or set a transaction duration using --transaction-duration-ms. "
            + "There are three ways to specify the transaction id: set transaction.id=<id> via --producer-props, "
            + "set transaction.id=<id> in the config file via --producer.config, or use --transaction-id <id>.");

    // Exactly one payload source may be chosen.
    MutuallyExclusiveGroup payloadOptions = parser
        .addMutuallyExclusiveGroup()
        .required(true)
        .description("either --record-size or --payload-file must be specified but not both.");

    // --- Required target and volume options ---
    parser.addArgument("--topic")
        .action(store())
        .required(true)
        .type(String.class)
        .metavar("TOPIC")
        .help("produce messages to this topic");

    parser.addArgument("--num-records")
        .action(store())
        .required(true)
        .type(Long.class)
        .metavar("NUM-RECORDS")
        .dest("numRecords")
        .help("number of messages to produce");

    // --- Payload source options (mutually exclusive) ---
    payloadOptions.addArgument("--record-size")
        .action(store())
        .required(false)
        .type(Integer.class)
        .metavar("RECORD-SIZE")
        .dest("recordSize")
        .help("message size in bytes. Note that you must provide exactly one of --record-size or --payload-file "
            + "or --payload-monotonic.");

    payloadOptions.addArgument("--payload-file")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("PAYLOAD-FILE")
        .dest("payloadFile")
        .help("file to read the message payloads from. This works only for UTF-8 encoded text files. "
            + "Payloads will be read from this file and a payload will be randomly selected when sending messages. "
            + "Note that you must provide exactly one of --record-size or --payload-file or --payload-monotonic.");

    payloadOptions.addArgument("--payload-monotonic")
        .action(storeTrue())
        .type(Boolean.class)
        .metavar("PAYLOAD-MONOTONIC")
        .dest("payloadMonotonic")
        .help("payload is monotonically increasing integer. Note that you must provide exactly one of --record-size "
            + "or --payload-file or --payload-monotonic.");

    parser.addArgument("--payload-delimiter")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("PAYLOAD-DELIMITER")
        .dest("payloadDelimiter")
        .setDefault("\\n")
        .help("provides delimiter to be used when --payload-file is provided. "
            + "Defaults to new line. "
            + "Note that this parameter will be ignored if --payload-file is not provided.");

    // --- Throughput and producer configuration ---
    parser.addArgument("--throughput")
        .action(store())
        .required(true)
        .type(Double.class)
        .metavar("THROUGHPUT")
        .help("throttle maximum message throughput to *approximately* THROUGHPUT messages/sec. Set this to -1 to disable throttling.");

    parser.addArgument("--producer-props")
        .nargs("+")
        .required(false)
        .metavar("PROP-NAME=PROP-VALUE")
        .type(String.class)
        .dest("producerConfig")
        .help("kafka producer related configuration properties like bootstrap.servers,client.id etc. "
            + "These configs take precedence over those passed via --producer.config.");

    parser.addArgument("--producer.config")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("CONFIG-FILE")
        .dest("producerConfigFile")
        .help("producer config properties file.");

    parser.addArgument("--print-metrics")
        .action(storeTrue())
        .type(Boolean.class)
        .metavar("PRINT-METRICS")
        .dest("printMetrics")
        .help("print out metrics at the end of the test.");

    // --- Transaction options ---
    parser.addArgument("--transactional-id")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("TRANSACTIONAL-ID")
        .dest("transactionalId")
        .help("The transactional id to use. This config takes precedence over the transactional.id "
            + "specified via --producer.config or --producer-props. Note that if the transactional id "
            + "is not specified while --transaction-duration-ms is provided, the default value for the "
            + "transactional id will be performance-producer- followed by a random uuid.");

    parser.addArgument("--transaction-duration-ms")
        .action(store())
        .required(false)
        .type(Long.class)
        .metavar("TRANSACTION-DURATION")
        .dest("transactionDurationMs")
        .help("The max age of each transaction. The commitTransaction will be called after this time has elapsed. "
            + "The value should be greater than 0. If the transactional id is specified via --producer-props, "
            + "--producer.config, or --transactional-id but --transaction-duration-ms is not specified, "
            + "the default value will be 3000.");

    return parser;
}
// End-to-end check of ConfigPostProcessor: parsed CLI arguments must be reflected in the
// processed configuration, including transaction enablement and producer properties.
@Test
public void testConfigPostProcessor() throws IOException, ArgumentParserException {
    ArgumentParser parser = ProducerPerformance.argParser();
    String[] args = new String[]{
        "--topic", "Hello-Kafka",
        "--num-records", "5",
        "--throughput", "100",
        "--record-size", "100",
        "--print-metrics",
        "--producer-props", "bootstrap.servers=localhost:9000",
        "--transactional-id", "foobar",
        "--transaction-duration-ms", "5000",
    };
    ProducerPerformance.ConfigPostProcessor configs = new ProducerPerformance.ConfigPostProcessor(parser, args);
    assertEquals("Hello-Kafka", configs.topicName);
    assertEquals(5, configs.numRecords);
    assertEquals(100, configs.throughput);
    assertEquals(100, configs.recordSize);
    assertFalse(configs.payloadMonotonic);
    assertTrue(configs.shouldPrintMetrics);
    // --record-size was used, so no payload list is built.
    assertTrue(configs.payloadByteList.isEmpty());
    Properties props = configs.producerProps;
    assertEquals(5, props.size());
    // Providing a transactional id enables transactions with the given duration.
    assertTrue(configs.transactionsEnabled);
    assertEquals(5000, configs.transactionDurationMs);
}
/**
 * REST endpoint returning a single dead-letter job by id.
 *
 * @param jobId the id of the dead-letter job to fetch
 * @return the job rendered as a REST response; a missing job is handled by
 *         {@code getDeadLetterJobById} (404 per the declared API responses)
 */
@ApiOperation(value = "Get a single deadletter job", tags = { "Jobs" })
@ApiResponses(value = {
        @ApiResponse(code = 200, message = "Indicates the suspended job exists and is returned."),
        @ApiResponse(code = 404, message = "Indicates the requested job does not exist.")
})
@GetMapping(value = "/cmmn-management/deadletter-jobs/{jobId}", produces = "application/json")
public JobResponse getDeadletterJob(@ApiParam(name = "jobId") @PathVariable String jobId) {
    Job job = getDeadLetterJobById(jobId);
    return restResponseFactory.createDeadLetterJobResponse(job);
}
// Moves a timer job to the dead-letter queue and verifies the REST endpoint returns the
// dead-letter job with all expected fields and a self URL.
@Test
@CmmnDeployment(resources = { "org/flowable/cmmn/rest/service/api/management/timerEventListenerCase.cmmn" })
public void testGetDeadLetterJob() throws Exception {
    CaseInstance caseInstance = runtimeService.createCaseInstanceBuilder().caseDefinitionKey("testTimerExpression").start();
    Job timerJob = managementService.createTimerJobQuery().caseInstanceId(caseInstance.getId()).singleResult();
    assertThat(timerJob).isNotNull();
    // Initially there is no dead-letter job.
    Job deadLetterJob = managementService.createDeadLetterJobQuery().caseInstanceId(caseInstance.getId()).singleResult();
    assertThat(deadLetterJob).isNull();
    // Move the timer job to the dead-letter queue; the timer job disappears.
    managementService.moveJobToDeadLetterJob(timerJob.getId());
    timerJob = managementService.createTimerJobQuery().caseInstanceId(caseInstance.getId()).singleResult();
    assertThat(timerJob).isNull();
    deadLetterJob = managementService.createDeadLetterJobQuery().caseInstanceId(caseInstance.getId()).singleResult();
    assertThat(deadLetterJob).isNotNull();
    CloseableHttpResponse response = executeRequest(
            new HttpGet(SERVER_URL_PREFIX + CmmnRestUrls.createRelativeResourceUrl(CmmnRestUrls.URL_DEADLETTER_JOB, deadLetterJob.getId())),
            HttpStatus.SC_OK);
    JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
    closeResponse(response);
    assertThat(responseNode).isNotNull();
    // Compare the JSON payload field by field (extra fields ignored).
    assertThatJson(responseNode)
            .when(Option.IGNORING_EXTRA_FIELDS)
            .isEqualTo("{"
                    + "id: '" + deadLetterJob.getId() + "',"
                    + "exceptionMessage: " + deadLetterJob.getExceptionMessage() + ","
                    + "planItemInstanceId: '" + deadLetterJob.getSubScopeId() + "',"
                    + "caseDefinitionId: '" + deadLetterJob.getScopeDefinitionId() + "',"
                    + "caseInstanceId: '" + deadLetterJob.getScopeId() + "',"
                    + "elementId: 'timerListener',"
                    + "elementName: 'Timer listener',"
                    + "handlerType: 'cmmn-trigger-timer',"
                    + "retries: " + deadLetterJob.getRetries() + ","
                    + "dueDate: " + new TextNode(getISODateStringWithTZ(deadLetterJob.getDuedate())) + ","
                    + "tenantId: ''"
                    + "}");
    assertThat(responseNode.path("url").asText(null))
            .endsWith(CmmnRestUrls.createRelativeResourceUrl(CmmnRestUrls.URL_DEADLETTER_JOB, deadLetterJob.getId()));
}
/**
 * Applies this extractor to a message: evaluates the run condition, executes the
 * extraction, writes the result field(s), optionally cuts the matched text out of the
 * source field, and finally runs the configured converters.
 *
 * @param msg the message to extract from and write results into
 */
public void runExtractor(Message msg) {
    try(final Timer.Context ignored = completeTimer.time()) {
        final String field;
        try (final Timer.Context ignored2 = conditionTimer.time()) {
            // We can only work on Strings.
            if (!(msg.getField(sourceField) instanceof String)) {
                conditionMissesCounter.inc();
                return;
            }
            field = (String) msg.getField(sourceField);
            // Decide if to extract at all.
            if (conditionType.equals(ConditionType.STRING)) {
                if (field.contains(conditionValue)) {
                    conditionHitsCounter.inc();
                } else {
                    conditionMissesCounter.inc();
                    return;
                }
            } else if (conditionType.equals(ConditionType.REGEX)) {
                if (regexConditionPattern.matcher(field).find()) {
                    conditionHitsCounter.inc();
                } else {
                    conditionMissesCounter.inc();
                    return;
                }
            }
        }
        try (final Timer.Context ignored2 = executionTimer.time()) {
            Result[] results;
            try {
                results = run(field);
            } catch (ExtractorException e) {
                // Extraction failed: record a processing error on the message and stop.
                final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
                msg.addProcessingError(new Message.ProcessingError(
                        ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
                return;
            }
            // Nothing extracted (or a null value) -> leave the message untouched.
            if (results == null || results.length == 0
                    || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
                return;
            } else if (results.length == 1 && results[0].target == null) {
                // results[0].target is null if this extractor cannot produce multiple fields use targetField in that case
                msg.addField(targetField, results[0].getValue());
            } else {
                for (final Result result : results) {
                    msg.addField(result.getTarget(), result.getValue());
                }
            }
            // Remove original from message?
            if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField)
                    && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
                final StringBuilder sb = new StringBuilder(field);
                // Delete matched spans from the end toward the start so earlier indices stay valid.
                final List<Result> reverseList = Arrays.stream(results)
                        .sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
                        .collect(Collectors.toList());
                // remove all from reverse so that the indices still match
                for (final Result result : reverseList) {
                    sb.delete(result.getBeginIndex(), result.getEndIndex());
                }
                final String builtString = sb.toString();
                final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
                msg.removeField(sourceField);
                // TODO don't add an empty field back, or rather don't add fullyCutByExtractor
                msg.addField(sourceField, finalResult);
            }
            runConverters(msg);
        }
    }
}
@Test // Test for https://github.com/Graylog2/graylog2-server/issues/11495 // The Extractor returns a string that is not directly assignable to the timestamp field. // The converter however, will parse that string and everything is fine. public void testConvertersWithTimestamp() throws Exception { final Converter converter = new DateConverter(ImmutableMap.of( "date_format", "yyyy-MM-dd HH:mm:ss,SSS" )); final TestExtractor extractor = new TestExtractor.Builder() .targetField("timestamp") .converters(Collections.singletonList(converter)) .callback(() -> new Result[]{new Result("2021-10-20 09:05:39,892", -1, -1)}) .build(); final Message msg = createMessage("the message"); extractor.runExtractor(msg); assertThat(msg.getTimestamp()).isEqualTo(new DateTime(2021, 10, 20, 9, 5, 39, 892, UTC)); }
/**
 * Static factory for a {@code UriTemplate} with encoding enabled.
 *
 * @param template the URI template, possibly containing {name} expressions
 * @param charset charset used when encoding expanded variable values
 * @return a new template instance that encodes expanded values
 */
public static UriTemplate create(String template, Charset charset) {
    return new UriTemplate(template, true, charset);
}
// A variable value that is already percent-encoded must not be double-encoded on expansion.
@Test
void skipAlreadyEncodedVariable() {
    String template = "https://www.example.com/testing/{foo}";
    UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8);
    String encodedVariable = UriUtils.encode("Johnny Appleseed", Util.UTF_8);
    Map<String, Object> variables = new LinkedHashMap<>();
    variables.put("foo", encodedVariable);
    assertThat(uriTemplate.expand(variables))
        .isEqualToIgnoringCase("https://www.example.com/testing/" + encodedVariable);
}
/**
 * Derives the parent host name of a node by stripping the node-suffix portion of the
 * host's first label, using the two recognized node-name patterns.
 *
 * @param nodeName the fully qualified node host name
 * @return the parent host name, or {@code null} if the name matches neither pattern
 */
static String findParentHost(String nodeName) {
    String parentHost = null;
    if (expectedNodeName1.matcher(nodeName).matches()) {
        parentHost = replaceNodeName1.matcher(nodeName).replaceFirst(".");
    } else if (expectedNodeName2.matcher(nodeName).matches()) {
        parentHost = replaceNodeName2.matcher(nodeName).replaceFirst(".");
    }
    return parentHost;
}
// Covers both recognized node-name shapes (letter suffix and -v6-N suffix) and several
// names that must not be treated as child nodes (null result).
@Test
void testFindParentHost() {
    String result;
    result = OpenTelemetryConfigGenerator.findParentHost("n1234c.foo.bar.some.cloud");
    assertEquals("n1234.foo.bar.some.cloud", result);
    result = OpenTelemetryConfigGenerator.findParentHost("n1234-v6-7.foo.bar.some.cloud");
    assertEquals("n1234.foo.bar.some.cloud", result);
    result = OpenTelemetryConfigGenerator.findParentHost("2000a.foo.bar.some.cloud");
    assertEquals("2000.foo.bar.some.cloud", result);
    result = OpenTelemetryConfigGenerator.findParentHost("2000-v6-10.foo.bar.some.cloud");
    assertEquals("2000.foo.bar.some.cloud", result);
    // Names without a recognizable node suffix have no parent host.
    result = OpenTelemetryConfigGenerator.findParentHost("foobar.some.cloud");
    assertNull(result);
    result = OpenTelemetryConfigGenerator.findParentHost("foo123bar.some.cloud");
    assertNull(result);
    result = OpenTelemetryConfigGenerator.findParentHost("foo123.some.cloud");
    assertNull(result);
}
/**
 * Resolves HIVE_TOO_MANY_OPEN_PARTITIONS failures on the test cluster: if the test
 * table's bucket count exceeds what the cluster's workers can open concurrently
 * (workers * maxBucketPerWriter), the failure is attributed to insufficient workers.
 *
 * @param controlQueryStats stats of the control query (unused here)
 * @param queryException the exception raised by the test query
 * @param test the test query bundle; resolution is skipped when absent
 * @return a resolution message when the failure is explained, otherwise empty
 */
@Override
public Optional<String> resolveQueryFailure(QueryStats controlQueryStats, QueryException queryException, Optional<QueryObjectBundle> test) {
    if (!test.isPresent()) {
        return Optional.empty();
    }
    // Decouple from com.facebook.presto.hive.HiveErrorCode.HIVE_TOO_MANY_OPEN_PARTITIONS
    ErrorCodeSupplier errorCodeSupplier = new ErrorCodeSupplier() {
        @Override
        public ErrorCode toErrorCode() {
            // Error codes are offset by a connector-specific mask; 21 is the Hive code.
            int errorCodeMask = 0x0100_0000;
            return new ErrorCode(21 + errorCodeMask, "HIVE_TOO_MANY_OPEN_PARTITIONS", ErrorType.USER_ERROR);
        }
    };
    return mapMatchingPrestoException(queryException, TEST_MAIN, ImmutableSet.of(errorCodeSupplier),
        e -> {
            try {
                // Fetch the table DDL to read its bucket_count property.
                ShowCreate showCreate = new ShowCreate(TABLE, test.get().getObjectName());
                String showCreateResult = getOnlyElement(prestoAction.execute(showCreate, DESCRIBE, resultSet -> Optional.of(resultSet.getString(1))).getResults());
                CreateTable createTable = (CreateTable) sqlParser.createStatement(showCreateResult, ParsingOptions.builder().setDecimalLiteralTreatment(AS_DOUBLE).build());
                List<Property> bucketCountProperty = createTable.getProperties().stream()
                    .filter(property -> property.getName().getValue().equals("bucket_count"))
                    .collect(toImmutableList());
                if (bucketCountProperty.size() != 1) {
                    return Optional.empty();
                }
                long bucketCount = ((LongLiteral) getOnlyElement(bucketCountProperty).getValue()).getValue();
                int testClusterSize = this.testClusterSizeSupplier.get();
                // Too many buckets for the available writers -> failure is environmental.
                if (testClusterSize * maxBucketPerWriter < bucketCount) {
                    return Optional.of("Not enough workers on test cluster");
                }
                return Optional.empty();
            } catch (Throwable t) {
                // Best-effort resolution: never let the resolver itself fail the check.
                log.warn(t, "Exception when resolving HIVE_TOO_MANY_OPEN_PARTITIONS");
                return Optional.empty();
            }
        });
}
// With only 100 buckets, the cluster has enough workers, so the failure must NOT be
// resolved as "not enough workers".
@Test
public void testUnresolvedSufficientWorker() {
    createTable.set(format("CREATE TABLE %s (x varchar, ds varchar) WITH (partitioned_by = ARRAY[\"ds\"], bucket_count = 100)", TABLE_NAME));
    getFailureResolver().resolveQueryFailure(CONTROL_QUERY_STATS, HIVE_TOO_MANY_OPEN_PARTITIONS_EXCEPTION, Optional.of(TEST_BUNDLE));
    assertFalse(getFailureResolver().resolveQueryFailure(CONTROL_QUERY_STATS, HIVE_TOO_MANY_OPEN_PARTITIONS_EXCEPTION, Optional.of(TEST_BUNDLE)).isPresent());
}
/**
 * Persists a single job inside its own transaction and notifies the job-stats listeners
 * once the commit succeeds.
 *
 * @param jobToSave the job to persist
 * @return the persisted job (as returned by the job table)
 * @throws StorageException if any SQL error occurs
 */
@Override
public Job save(Job jobToSave) {
    try (final Connection connection = dataSource.getConnection();
         final Transaction tx = new Transaction(connection)) {
        final Job persistedJob = jobTable(connection).save(jobToSave);
        tx.commit();
        // Listeners are only told about committed changes.
        notifyJobStatsOnChangeListeners();
        return persistedJob;
    } catch (SQLException e) {
        throw new StorageException(e);
    }
}
// An empty batch-update result (not all rows written) must surface as a JobRunrException.
@Test
void saveJobs_NotAllJobsAreSavedThenThrowConcurrentSqlModificationException() throws SQLException {
    when(preparedStatement.executeBatch()).thenReturn(new int[]{});
    assertThatThrownBy(() -> jobStorageProvider.save(singletonList(anEnqueuedJob().build()))).isInstanceOf(JobRunrException.class);
}
/**
 * Fetches values for {@code key} in the given time window, iterating backward, from the
 * first underlying store that has matching data.
 *
 * @param key the record key; must not be null
 * @param timeFrom window start (inclusive)
 * @param timeTo window end (inclusive)
 * @return an iterator over matching values, or an empty iterator if no store has data
 * @throws NullPointerException if {@code key} is null
 * @throws InvalidStateStoreException if an underlying store has been migrated away
 */
@Override
public WindowStoreIterator<V> backwardFetch(final K key,
                                            final Instant timeFrom,
                                            final Instant timeTo) throws IllegalArgumentException {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
    for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
        try {
            final WindowStoreIterator<V> result = windowStore.backwardFetch(key, timeFrom, timeTo);
            if (!result.hasNext()) {
                // Empty result: release the iterator and try the next store.
                result.close();
            } else {
                return result;
            }
        } catch (final InvalidStateStoreException e) {
            // Preserve the original exception as the cause so the underlying failure
            // (which store, what went wrong) is not lost from the stack trace.
            throw new InvalidStateStoreException(
                "State store is not available anymore and may have been migrated to another instance; " +
                    "please re-discover its location from the state metadata.", e);
        }
    }
    return KeyValueIterators.emptyWindowStoreIterator();
}
// The composite store must only return data from its own underlying store, never from an
// unrelated store holding the same key.
@Test
public void shouldNotGetValuesBackwardFromOtherStores() {
    otherUnderlyingStore.put("some-key", "some-value", 0L);
    underlyingWindowStore.put("some-key", "my-value", 1L);
    final List<KeyValue<Long, String>> results =
        StreamsTestUtils.toList(windowStore.backwardFetch("some-key", ofEpochMilli(0L), ofEpochMilli(2L)));
    assertEquals(Collections.singletonList(new KeyValue<>(1L, "my-value")), results);
}
/**
 * Creates a service from a grouped service name, delegating to the {@code Service}-based
 * overload after resolving the name within the namespace.
 *
 * @param namespaceId the namespace the service belongs to
 * @param serviceName the grouped service name (group@@service form)
 * @param metadata the metadata to attach; its ephemeral flag drives name resolution
 * @throws NacosException if creation fails
 */
@Override
public void create(String namespaceId, String serviceName, ServiceMetadata metadata) throws NacosException {
    Service service = getServiceFromGroupedServiceName(namespaceId, serviceName, metadata.isEphemeral());
    create(service, metadata);
}
// Creating a service must update the service metadata through the metadata operator.
@Test
void testCreate() throws NacosException {
    serviceOperatorV2.create("A", "B", new ServiceMetadata());
    Mockito.verify(metadataOperateService).updateServiceMetadata(Mockito.any(), Mockito.any());
}
/**
 * Deletes the given files from S3 in three phases: multipart-upload aborts and key
 * collection per bucket, batched key deletion per bucket, and finally bucket deletion
 * for any containers in the selection.
 *
 * @param files files (and possibly buckets) to delete, keyed by path
 * @param prompt callback for credentials if re-authentication is needed
 * @param callback notified before each file/bucket deletion
 * @throws BackgroundException on any service failure
 */
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    final Map<Path, List<ObjectKeyAndVersion>> map = new HashMap<>();
    final List<Path> containers = new ArrayList<>();
    for(Path file : files.keySet()) {
        // Buckets are deleted last, after their contents.
        if(containerService.isContainer(file)) {
            containers.add(file);
            continue;
        }
        callback.delete(file);
        final Path bucket = containerService.getContainer(file);
        if(file.getType().contains(Path.Type.upload)) {
            // In-progress multipart upload
            try {
                multipartService.delete(new MultipartUpload(file.attributes().getVersionId(),
                    bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
            }
            catch(NotfoundException ignored) {
                log.warn(String.format("Ignore failure deleting multipart upload %s", file));
            }
        }
        else {
            final List<ObjectKeyAndVersion> keys = new ArrayList<>();
            // Always returning 204 even if the key does not exist. Does not return 404 for non-existing keys
            keys.add(new ObjectKeyAndVersion(containerService.getKey(file), file.attributes().getVersionId()));
            if(map.containsKey(bucket)) {
                map.get(bucket).addAll(keys);
            }
            else {
                map.put(bucket, keys);
            }
        }
    }
    // Iterate over all containers and delete list of keys
    for(Map.Entry<Path, List<ObjectKeyAndVersion>> entry : map.entrySet()) {
        final Path container = entry.getKey();
        final List<ObjectKeyAndVersion> keys = entry.getValue();
        this.delete(container, keys, prompt);
    }
    for(Path file : containers) {
        callback.delete(file);
        // Finally delete bucket itself
        try {
            final String bucket = containerService.getContainer(file).getName();
            session.getClient().deleteBucket(bucket);
            // Drop the cached region mapping for the removed bucket.
            session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket);
        }
        catch(ServiceException e) {
            throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
        }
    }
}
// A directory placeholder object must be deletable and subsequently not found.
@Test
public void testDeletePlaceholder() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    final Path test = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(
        new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    assertTrue(new S3FindFeature(session, acl).find(test));
    assertTrue(new DefaultFindFeature(session).find(test));
    new S3MultipleDeleteFeature(session, acl).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new S3FindFeature(session, acl).find(test));
}
public static ObjectNode convertFromGHResponse(GHResponse ghResponse, TranslationMap translationMap, Locale locale, DistanceConfig distanceConfig) { ObjectNode json = JsonNodeFactory.instance.objectNode(); if (ghResponse.hasErrors()) throw new IllegalStateException( "If the response has errors, you should use the method NavigateResponseConverter#convertFromGHResponseError"); PointList waypoints = ghResponse.getBest().getWaypoints(); final ArrayNode routesJson = json.putArray("routes"); List<ResponsePath> paths = ghResponse.getAll(); for (int i = 0; i < paths.size(); i++) { ResponsePath path = paths.get(i); ObjectNode pathJson = routesJson.addObject(); putRouteInformation(pathJson, path, i, translationMap, locale, distanceConfig); } final ArrayNode waypointsJson = json.putArray("waypoints"); for (int i = 0; i < waypoints.size(); i++) { ObjectNode waypointJson = waypointsJson.addObject(); // TODO get names waypointJson.put("name", ""); putLocation(waypoints.getLat(i), waypoints.getLon(i), waypointJson); } json.put("code", "Ok"); // TODO: Maybe we need a different format... uuid: "cji4ja4f8004o6xrsta8w4p4h" json.put("uuid", UUID.randomUUID().toString().replaceAll("-", "")); return json; }
// The Mapbox-compatible response must extend the last pre-arrival step's geometry all
// the way to the arrival coordinate, unlike the raw GraphHopper instructions.
@Test
public void arriveGeometryTest() {
    GHResponse rsp = hopper.route(new GHRequest(42.554851, 1.536198, 42.510071, 1.548128).setProfile(profile));
    ObjectNode json = NavigateResponseConverter.convertFromGHResponse(rsp, trMap, Locale.ENGLISH, distanceConfig);
    JsonNode steps = json.get("routes").get(0).get("legs").get(0).get("steps");
    // Step 17 is the last before arrival
    JsonNode step = steps.get(17);
    PointList expectedArrivePointList = rsp.getBest().getInstructions().get(17).getPoints().clone(false);
    PointList ghArrive = rsp.getBest().getInstructions().get(18).getPoints();
    // We expect that the Mapbox compatible response builds the geometry to the
    // arrival coordinate
    expectedArrivePointList.add(ghArrive);
    String encodedExpected = ResponsePathSerializer.encodePolyline(expectedArrivePointList, false, 1e6);
    assertEquals(encodedExpected, step.get("geometry").asText());
}
/**
 * Fuzzes the request's GET parameters with the payload, using the path-oriented fuzzing
 * modifier (values are treated as file-system paths). Delegates to the generic
 * {@code fuzzGetParameters} with no extra target parameter.
 *
 * @param request the request whose query parameters are fuzzed
 * @param payload the payload to inject into each parameter value
 * @return the fuzzed request variants
 */
public static ImmutableList<HttpRequest> fuzzGetParametersExpectingPathValues(
    HttpRequest request, String payload) {
    return fuzzGetParameters(
        request, payload, Optional.empty(), ImmutableSet.of(FuzzingModifier.FUZZING_PATHS));
}
// When a GET parameter value carries a file extension, path fuzzing must keep the
// extension after a null byte so extension-based server checks still pass.
@Test
public void
    fuzzGetParametersExpectingPathValues_whenGetParameterValueHasFileExtension_appendsFileExtensionToPayload() {
    HttpRequest requestWithFileExtension =
        HttpRequest.get("https://google.com?key=value.jpg").withEmptyHeaders().build();
    HttpRequest requestWithFuzzedGetParameterWithFileExtension =
        HttpRequest.get("https://google.com?key=<payload>%00.jpg").withEmptyHeaders().build();
    assertThat(
            FuzzingUtils.fuzzGetParametersExpectingPathValues(
                requestWithFileExtension, "<payload>"))
        .contains(requestWithFuzzedGetParameterWithFileExtension);
}
/**
 * Unsupported: this cache implementation is read-only, so rows can never be stored.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public void storeRowInCache( DatabaseLookupMeta meta, RowMetaInterface lookupMeta, Object[] lookupRow, Object[] add ) {
    throw new UnsupportedOperationException( "This cache is read-only" );
}
// The read-only cache must reject any attempt to store a row.
@Test( expected = UnsupportedOperationException.class )
public void storeRowInCache_ThrowsException() throws Exception {
    buildCache( "" ).storeRowInCache( new DatabaseLookupMeta(), keysMeta.clone(), keys[ 0 ], data[ 0 ] );
}
/**
 * Returns whether this node currently holds leadership.
 * NOTE(review): returns the cached flag; freshness depends on how {@code isLeader} is
 * updated elsewhere in this class — confirm against the election loop.
 */
@Override
public boolean isLeader() {
    return isLeader;
}
// If lock acquisition succeeds once and then keeps throwing, the service must publish
// leader-change events and end up reporting that it is not the leader.
@Test
void handlesConsistentFailure() {
    Lock lock = mock(Lock.class);
    when(lockService.lock(any(), isNull()))
        .thenReturn(Optional.of(lock))
        .thenThrow(new RuntimeException("ouch"));
    leaderElectionService.startAsync().awaitRunning();
    // Two events: gained leadership, then lost it after the failure.
    verify(eventBus, timeout(10_000).times(2)).post(any(LeaderChangedEvent.class));
    assertThat(leaderElectionService.isLeader()).isFalse();
}
/**
 * Combines the superclass hash with {@code uniqueId}, consistent with {@code equals}.
 *
 * @return the combined hash code
 */
@Override
public int hashCode() {
    int result = super.hashCode();
    // Long.hashCode(v) computes (int) (v ^ (v >>> 32)) — identical value to the
    // previous manual fold, but states the intent directly.
    result = 31 * result + Long.hashCode(uniqueId);
    return result;
}
// hashCode contract: stable across calls, equal for equal attributes, and (where the
// runtime produces distinct hashes) different for differing uniqueId or name.
@Test
public void testHashCode() {
    assertEquals(notifyKey.hashCode(), notifyKey.hashCode());
    assertEquals(notifyKey.hashCode(), notifyKeySameAttributes.hashCode());
    // Hash collisions are legal; only assert inequality when the platform guarantees it.
    assumeDifferentHashCodes();
    assertNotEquals(notifyKey.hashCode(), notifyKeyOtherUniqueId.hashCode());
    assertNotEquals(notifyKey.hashCode(), notifyKeyOtherName.hashCode());
}
/**
 * Handles an OffsetCommit request: validates the group, keeps the classic-group
 * session alive when applicable, and builds one offset-commit record per accepted
 * partition together with a per-partition response code.
 *
 * @param context the request context.
 * @param request the OffsetCommit request payload.
 * @return a result carrying the records to append and the response to return.
 * @throws ApiException if the request fails validation.
 */
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
    RequestContext context,
    OffsetCommitRequestData request
) throws ApiException {
    Group group = validateOffsetCommit(context, request);

    // In the old consumer group protocol, the offset commits maintain the session if
    // the group is in Stable or PreparingRebalance state.
    if (group.type() == Group.GroupType.CLASSIC) {
        ClassicGroup classicGroup = (ClassicGroup) group;
        if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
            groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
                classicGroup,
                classicGroup.member(request.memberId())
            );
        }
    }

    final OffsetCommitResponseData response = new OffsetCommitResponseData();
    final List<CoordinatorRecord> records = new ArrayList<>();
    final long currentTimeMs = time.milliseconds();
    // Absolute expiry derived once from the request-level retention time, if any.
    final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);

    request.topics().forEach(topic -> {
        final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
        response.topics().add(topicResponse);

        topic.partitions().forEach(partition -> {
            if (isMetadataInvalid(partition.committedMetadata())) {
                // Reject only this partition for over-sized metadata; others proceed.
                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
            } else {
                log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
                    request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
                    request.memberId(), partition.committedLeaderEpoch());

                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.NONE.code()));

                final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
                    partition,
                    currentTimeMs,
                    expireTimestampMs
                );

                records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
                    request.groupId(),
                    topic.name(),
                    partition.partitionIndex(),
                    offsetAndMetadata,
                    metadataImage.features().metadataVersion()
                ));
            }
        });
    });

    if (!records.isEmpty()) {
        // Record the number of committed offsets against the commits sensor.
        metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
    }

    return new CoordinatorResult<>(records, response);
}
// An offset commit from a classic ("generic") group member in Stable state must
// reschedule the member's session heartbeat, extending the session by a full timeout.
@Test
public void testGenericGroupOffsetCommitMaintainsSession() {
    OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();

    // Create a group.
    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
        "foo",
        true
    );

    // Add member.
    ClassicGroupMember member = mkGenericMember("member", Optional.empty());
    group.add(member);

    // Transition to next generation.
    group.transitionTo(ClassicGroupState.PREPARING_REBALANCE);
    group.initNextGeneration();
    assertEquals(1, group.generationId());
    group.transitionTo(ClassicGroupState.STABLE);

    // Schedule session timeout. This would be normally done when
    // the group transitions to stable.
    context.groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(group, member);

    // Advance time by half of the session timeout. No timeouts are
    // expired.
    assertEquals(Collections.emptyList(), context.sleep(5000 / 2));

    // Commit.
    context.commitOffset(
        new OffsetCommitRequestData()
            .setGroupId("foo")
            .setMemberId("member")
            .setGenerationIdOrMemberEpoch(1)
            .setRetentionTimeMs(1234L)
            .setTopics(Collections.singletonList(
                new OffsetCommitRequestData.OffsetCommitRequestTopic()
                    .setName("bar")
                    .setPartitions(Collections.singletonList(
                        new OffsetCommitRequestData.OffsetCommitRequestPartition()
                            .setPartitionIndex(0)
                            .setCommittedOffset(100L)
                    ))
            ))
    );

    // Advance time by half of the session timeout. No timeouts are
    // expired. (The commit above reset the session timer.)
    assertEquals(Collections.emptyList(), context.sleep(5000 / 2));

    // Advance time by half of the session timeout again. The timeout should
    // expire and the member is removed from the group.
    List<MockCoordinatorTimer.ExpiredTimeout<Void, CoordinatorRecord>> timeouts =
        context.sleep(5000 / 2);
    assertEquals(1, timeouts.size());
    assertFalse(group.hasMember(member.memberId()));
}
/**
 * Creates a fresh {@link PipelineOptions} instance viewed through the requested
 * options interface {@code klass}.
 */
public static <T extends PipelineOptions> T as(Class<T> klass) {
  Builder builder = new Builder();
  return builder.as(klass);
}
// A getter re-declared in a sub-interface with the same @Default value must keep
// that default after converting between the two options interfaces.
@Test
public void testGettersAnnotatedWithConsistentDefault() throws Exception {
  GetterWithConsistentDefault options =
      PipelineOptionsFactory.as(GetterWithDefault.class).as(GetterWithConsistentDefault.class);

  assertEquals(1, options.getObject());
}
/**
 * Pre-commit hook for a lake-table transaction: records the set of dirty partitions,
 * merges per-tablet dictionary-cache validity info, enforces the ingest-slowdown
 * commit-rate limit, and verifies that every tablet of each dirty partition has
 * reported completion.
 *
 * @param txnState        the transaction being pre-committed.
 * @param finishedTablets commit info for tablets that finished writing.
 * @param failedTablets   tablets that failed writing (unused here).
 * @throws TransactionException if the table is in RESTORE state, the commit rate
 *         is exceeded, or a dirty partition has unfinished tablets.
 */
@Override
public void preCommit(TransactionState txnState, List<TabletCommitInfo> finishedTablets,
                      List<TabletFailInfo> failedTablets) throws TransactionException {
    Preconditions.checkState(txnState.getTransactionStatus() != TransactionStatus.COMMITTED);
    txnState.clearAutomaticPartitionSnapshot();
    if (!finishedTablets.isEmpty()) {
        txnState.setTabletCommitInfos(finishedTablets);
    }
    if (table.getState() == OlapTable.OlapTableState.RESTORE) {
        throw new TransactionCommitFailedException("Cannot write RESTORE state table \"" + table.getName() + "\"");
    }
    dirtyPartitionSet = Sets.newHashSet();
    invalidDictCacheColumns = Sets.newHashSet();
    validDictCacheColumns = Maps.newHashMap();

    Set<Long> finishedTabletsOfThisTable = Sets.newHashSet();
    TabletInvertedIndex tabletInvertedIndex = dbTxnMgr.getGlobalStateMgr().getTabletInvertedIndex();
    List<Long> tabletIds = finishedTablets.stream().map(TabletCommitInfo::getTabletId).collect(Collectors.toList());
    List<TabletMeta> tabletMetaList = tabletInvertedIndex.getTabletMetaList(tabletIds);
    for (int i = 0; i < tabletMetaList.size(); i++) {
        TabletMeta tabletMeta = tabletMetaList.get(i);
        // Skip tablets that no longer exist or belong to a different table.
        if (tabletMeta == TabletInvertedIndex.NOT_EXIST_TABLET_META) {
            continue;
        }
        if (tabletMeta.getTableId() != table.getId()) {
            continue;
        }
        if (table.getPhysicalPartition(tabletMeta.getPartitionId()) == null) {
            // this can happen when partitionId == -1 (tablet being dropping) or partition really not exist.
            continue;
        }
        dirtyPartitionSet.add(tabletMeta.getPartitionId());

        // Invalid column set should union
        invalidDictCacheColumns.addAll(finishedTablets.get(i).getInvalidDictCacheColumns());
        // Valid column set should intersect and remove all invalid columns
        // Only need to add valid column set once
        if (validDictCacheColumns.isEmpty() && !finishedTablets.get(i).getValidDictCacheColumns().isEmpty()) {
            TabletCommitInfo tabletCommitInfo = finishedTablets.get(i);
            List<Long> validDictCollectedVersions = tabletCommitInfo.getValidDictCollectedVersions();
            List<ColumnId> validDictCacheColumns = tabletCommitInfo.getValidDictCacheColumns();
            for (int j = 0; j < validDictCacheColumns.size(); j++) {
                long version = 0;
                // validDictCollectedVersions != validDictCacheColumns means be has not upgrade
                if (validDictCollectedVersions.size() == validDictCacheColumns.size()) {
                    version = validDictCollectedVersions.get(j);
                }
                // BUGFIX: key by the j-th column in this tablet's valid-column list.
                // The previous code used get(i), where i indexes tablets, which keyed
                // the wrong column (or threw IndexOutOfBoundsException).
                this.validDictCacheColumns.put(validDictCacheColumns.get(j), version);
            }
        }
        if (i == tabletMetaList.size() - 1) {
            // After the last tablet, drop any column invalidated by some tablet.
            validDictCacheColumns.entrySet().removeIf(entry -> invalidDictCacheColumns.contains(entry.getKey()));
        }

        finishedTabletsOfThisTable.add(finishedTablets.get(i).getTabletId());
    }

    if (enableIngestSlowdown()) {
        long currentTimeMs = System.currentTimeMillis();
        new CommitRateLimiter(compactionMgr, txnState, table.getId()).check(dirtyPartitionSet, currentTimeMs);
    }

    // Every tablet of every dirty partition must have reported completion.
    List<Long> unfinishedTablets = null;
    for (Long partitionId : dirtyPartitionSet) {
        PhysicalPartition partition = table.getPhysicalPartition(partitionId);
        List<MaterializedIndex> allIndices = txnState.getPartitionLoadedTblIndexes(table.getId(), partition);
        for (MaterializedIndex index : allIndices) {
            Optional<Tablet> unfinishedTablet =
                    index.getTablets().stream().filter(t -> !finishedTabletsOfThisTable.contains(t.getId()))
                            .findAny();
            if (!unfinishedTablet.isPresent()) {
                continue;
            }
            if (unfinishedTablets == null) {
                unfinishedTablets = Lists.newArrayList();
            }
            unfinishedTablets.add(unfinishedTablet.get().getId());
        }
    }

    if (unfinishedTablets != null && !unfinishedTablets.isEmpty()) {
        throw new TransactionCommitFailedException(
                "table '" + table.getName() + "\" has unfinished tablets: " + unfinishedTablets);
    }
}
@Test
public void testCommitRateExceeded() {
    // Force the ingest-slowdown path on so the commit-rate limiter is consulted.
    new MockUp<LakeTableTxnStateListener>() {
        @Mock
        boolean enableIngestSlowdown() {
            return true;
        }
    };
    LakeTable table = buildLakeTable();
    DatabaseTransactionMgr databaseTransactionMgr = addDatabaseTransactionMgr();
    LakeTableTxnStateListener listener = new LakeTableTxnStateListener(databaseTransactionMgr, table);
    // Push the compaction score past the slowdown threshold so preCommit must fail.
    makeCompactionScoreExceedSlowdownThreshold();
    Assert.assertThrows(CommitRateExceededException.class, () -> {
        listener.preCommit(newTransactionState(), buildFullTabletCommitInfo(), Collections.emptyList());
    });
}
/**
 * Extracts the fields at the configured {@code order} positions from the given
 * array-like input into a new typed array, preserving the configured order.
 */
@SuppressWarnings("unchecked")
@Override
public OUT[] extract(Object in) {
    final int fieldCount = order.length;
    // Reflectively allocate so the result carries the concrete component type.
    OUT[] result = (OUT[]) Array.newInstance(clazz, fieldCount);
    for (int pos = 0; pos < fieldCount; pos++) {
        result[pos] = (OUT) Array.get(in, order[pos]);
    }
    return result;
}
@Test void testIntegerArray() { // check single field extraction for (int i = 0; i < testIntegerArray.length; i++) { Integer[] tmp = {testIntegerArray[i]}; arrayEqualityCheck( tmp, new FieldsFromArray<>(Integer.class, i).extract(testIntegerArray)); } // check reverse order Integer[] reverseOrder = new Integer[testIntegerArray.length]; for (int i = 0; i < testIntegerArray.length; i++) { reverseOrder[i] = testIntegerArray[testIntegerArray.length - i - 1]; } arrayEqualityCheck( reverseOrder, new FieldsFromArray<>(Integer.class, 4, 3, 2, 1, 0).extract(testIntegerArray)); // check picking fields and reorder Integer[] crazyOrder = {testIntegerArray[4], testIntegerArray[1], testIntegerArray[2]}; arrayEqualityCheck( crazyOrder, new FieldsFromArray<>(Integer.class, 4, 1, 2).extract(testIntegerArray)); }
/**
 * Registers an instance under the given service and group, dispatching to the
 * ephemeral or persistent registration path based on the instance type.
 *
 * @throws NacosException if registration fails.
 */
@Override
public void registerService(String serviceName, String groupName, Instance instance) throws NacosException {
    NAMING_LOGGER.info("[REGISTER-SERVICE] {} registering service {} with instance {}", namespaceId, serviceName,
            instance);
    // Persistent instances take the direct registration path; ephemeral ones go
    // through the ephemeral flow (gRPC with redo support).
    if (!instance.isEphemeral()) {
        doRegisterServiceForPersistent(serviceName, groupName, instance);
    } else {
        registerServiceForEphemeral(serviceName, groupName, instance);
    }
}
@Test
void testRegisterService() throws NacosException {
    client.registerService(SERVICE_NAME, GROUP_NAME, instance);
    // The (ephemeral) instance must be registered via exactly one gRPC
    // InstanceRequest of type REGISTER_INSTANCE.
    verify(this.rpcClient, times(1)).request(argThat(request -> {
        if (request instanceof InstanceRequest) {
            InstanceRequest request1 = (InstanceRequest) request;
            return request1.getType().equals(NamingRemoteConstants.REGISTER_INSTANCE);
        }
        return false;
    }));
}
/**
 * Validates a prior decode: picks one real input, pretends it was erased, decodes it
 * back from the produced outputs plus the remaining inputs, and checks the result is
 * byte-identical to the real input.
 *
 * @param inputs        original input buffers (nulls mark units that were erased).
 * @param erasedIndexes indexes that were erased and decoded into {@code outputs}.
 * @param outputs       decoder outputs corresponding to {@code erasedIndexes}.
 * @throws IOException if the re-decoded data does not match the original input.
 */
public void validate(ByteBuffer[] inputs, int[] erasedIndexes, ByteBuffer[] outputs) throws IOException {
  markBuffers(outputs);

  try {
    ByteBuffer validInput = CoderUtil.findFirstValidInput(inputs);
    boolean isDirect = validInput.isDirect();
    int capacity = validInput.capacity();
    int remaining = validInput.remaining();

    // Init buffer (lazily, reused across calls when type/size still fit)
    if (buffer == null || buffer.isDirect() != isDirect
        || buffer.capacity() < remaining) {
      buffer = allocateBuffer(isDirect, capacity);
    }
    buffer.clear().limit(remaining);

    // Create newInputs and newErasedIndex for validation
    ByteBuffer[] newInputs = new ByteBuffer[inputs.length];
    int count = 0;
    // The previously-decoded outputs now serve as inputs at the erased positions.
    for (int i = 0; i < erasedIndexes.length; i++) {
      newInputs[erasedIndexes[i]] = outputs[i];
      count++;
    }
    newErasedIndex = -1;
    boolean selected = false;
    int numValidIndexes = CoderUtil.getValidIndexes(inputs).length;
    for (int i = 0; i < newInputs.length; i++) {
      if (count == numValidIndexes) {
        // Enough units gathered for a decode; stop filling.
        break;
      } else if (!selected && inputs[i] != null) {
        // The first real input becomes the unit to "erase" and re-decode.
        newErasedIndex = i;
        newInputs[i] = null;
        selected = true;
      } else if (newInputs[i] == null) {
        newInputs[i] = inputs[i];
        if (inputs[i] != null) {
          count++;
        }
      }
    }

    // Keep it for testing
    newValidIndexes = CoderUtil.getValidIndexes(newInputs);

    decoder.decode(newInputs, new int[]{newErasedIndex}, new ByteBuffer[]{buffer});

    // The re-decoded unit must match the original input byte-for-byte.
    if (!buffer.equals(inputs[newErasedIndex])) {
      throw new InvalidDecodingException("Failed to validate decoding");
    }
  } finally {
    // Restore caller-visible buffer positions/limits regardless of outcome.
    toLimits(inputs);
    resetBuffers(outputs);
  }
}
// Runs the decode-validation round trip with both direct and heap buffers.
@Test
public void testValidate() {
  prepare(null, numDataUnits, numParityUnits, erasedDataIndexes, erasedParityIndexes);
  testValidate(true);
  testValidate(false);
}
/**
 * Validates a topic name, reporting any violation as an
 * {@link InvalidTopicException} via the detailed overload.
 *
 * @param topic the topic name to validate.
 */
public static void validate(String topic) {
    validate(topic, "Topic name", message -> {
        throw new InvalidTopicException(message);
    });
}
@Test
public void shouldAcceptValidTopicNames() {
    // Names may mix case, digits, '_', '-' and '.', and may be up to 249 chars long.
    String maxLengthString = TestUtils.randomString(249);
    String[] validTopicNames = {"valid", "TOPIC", "nAmEs", "ar6", "VaL1d", "_0-9_.", "...", maxLengthString};

    for (String name : validTopicNames) {
        // validate() throws on rejection, so simply not throwing is the assertion.
        Topic.validate(name);
    }
}
/**
 * Builds the Connect worker configuration for the herder handling the given
 * source->target replication flow: cluster-level props, shared top-level props,
 * config-provider transformation, then defaults for anything still unset.
 *
 * @param sourceAndTarget the replication flow this worker serves.
 * @return the assembled worker properties.
 */
public Map<String, String> workerConfig(SourceAndTarget sourceAndTarget) {
    Map<String, String> props = new HashMap<>();
    props.putAll(clusterProps(sourceAndTarget.target()));

    // Accept common top-level configs that are otherwise ignored by MM2.
    // N.B. all other worker properties should be configured for specific herders,
    // e.g. primary->backup.client.id
    props.putAll(stringsWithPrefix("offset.storage"));
    props.putAll(stringsWithPrefix("config.storage"));
    props.putAll(stringsWithPrefix("status.storage"));
    props.putAll(stringsWithPrefix("key.converter"));
    props.putAll(stringsWithPrefix("value.converter"));
    props.putAll(stringsWithPrefix("header.converter"));
    props.putAll(stringsWithPrefix("task"));
    props.putAll(stringsWithPrefix("worker"));
    props.putAll(stringsWithPrefix("replication.policy"));

    // transform any expression like ${provider:path:key}, since the worker doesn't do so
    props = transform(props);
    // Re-add provider configs after transformation so the worker can resolve later.
    props.putAll(stringsWithPrefix(CONFIG_PROVIDERS_CONFIG));

    // fill in reasonable defaults
    props.putIfAbsent(CommonClientConfigs.CLIENT_ID_CONFIG, sourceAndTarget.toString());
    props.putIfAbsent(GROUP_ID_CONFIG, sourceAndTarget.source() + "-mm2");
    props.putIfAbsent(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG, "mm2-offsets."
            + sourceAndTarget.source() + ".internal");
    props.putIfAbsent(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "mm2-status."
            + sourceAndTarget.source() + ".internal");
    props.putIfAbsent(DistributedConfig.CONFIG_TOPIC_CONFIG, "mm2-configs."
            + sourceAndTarget.source() + ".internal");
    props.putIfAbsent(KEY_CONVERTER_CLASS_CONFIG, BYTE_ARRAY_CONVERTER_CLASS);
    props.putIfAbsent(VALUE_CONVERTER_CLASS_CONFIG, BYTE_ARRAY_CONVERTER_CLASS);
    props.putIfAbsent(HEADER_CONVERTER_CLASS_CONFIG, BYTE_ARRAY_CONVERTER_CLASS);

    return props;
}
@Test public void testWorkerConfigs() { MirrorMakerConfig mirrorConfig = new MirrorMakerConfig(makeProps( "clusters", "a, b", "config.providers", "fake", "config.providers.fake.class", FakeConfigProvider.class.getName(), "replication.policy.separator", "__", "offset.storage.replication.factor", "123", "b.status.storage.replication.factor", "456", "b.producer.client.id", "client-one", "b.security.protocol", "PLAINTEXT", "b.producer.security.protocol", "SASL", "ssl.truststore.password", "secret1", "ssl.key.password", "${fake:secret:password}", // resolves to "secret2" "b.xxx", "yyy")); SourceAndTarget a = new SourceAndTarget("b", "a"); SourceAndTarget b = new SourceAndTarget("a", "b"); Map<String, String> aProps = mirrorConfig.workerConfig(a); assertEquals("b->a", aProps.get("client.id")); assertEquals("123", aProps.get("offset.storage.replication.factor")); assertEquals("__", aProps.get("replication.policy.separator")); assertEquals("fake", aProps.get("config.providers")); Map<String, String> bProps = mirrorConfig.workerConfig(b); assertEquals("a->b", bProps.get("client.id")); assertEquals("456", bProps.get("status.storage.replication.factor")); assertEquals("client-one", bProps.get("producer.client.id"), "producer props should be passed through to worker producer config: " + bProps); assertEquals("SASL", bProps.get("producer.security.protocol"), "replication-level security props should be passed through to worker producer config"); assertEquals("SASL", bProps.get("producer.security.protocol"), "replication-level security props should be passed through to worker producer config"); assertEquals("PLAINTEXT", bProps.get("consumer.security.protocol"), "replication-level security props should be passed through to worker consumer config"); assertEquals("secret1", bProps.get("ssl.truststore.password"), "security properties should be passed through to worker config: " + bProps); assertEquals("secret1", bProps.get("producer.ssl.truststore.password"), "security properties 
should be passed through to worker producer config: " + bProps); assertEquals("secret2", bProps.get("ssl.key.password"), "security properties should be transformed in worker config"); assertEquals("secret2", bProps.get("producer.ssl.key.password"), "security properties should be transformed in worker producer config"); assertEquals("__", bProps.get("replication.policy.separator")); }
/**
 * Estimates scan statistics against the latest snapshot of the configured branch
 * by delegating to the snapshot-specific overload.
 */
@Override
public Statistics estimateStatistics() {
    return estimateStatistics(SnapshotUtil.latestSnapshot(table, branch));
}
// Writing 10k rows to a fresh table must yield an estimated row count of exactly 10000.
@TestTemplate
public void testEstimatedRowCount() throws NoSuchTableException {
  sql(
      "CREATE TABLE %s (id BIGINT, date DATE) USING iceberg TBLPROPERTIES('%s' = '%s')",
      tableName, TableProperties.DEFAULT_FILE_FORMAT, format);

  // 10000 rows, one per day from the epoch, coalesced into a single data file.
  Dataset<Row> df =
      spark
          .range(10000)
          .withColumn("date", date_add(expr("DATE '1970-01-01'"), expr("CAST(id AS INT)")))
          .select("id", "date");
  df.coalesce(1).writeTo(tableName).append();

  Table table = validationCatalog.loadTable(tableIdent);
  SparkScanBuilder scanBuilder =
      new SparkScanBuilder(spark, table, CaseInsensitiveStringMap.empty());
  SparkScan scan = (SparkScan) scanBuilder.build();

  Statistics stats = scan.estimateStatistics();
  assertThat(stats.numRows().getAsLong()).isEqualTo(10000L);
}
/**
 * Flushes and fsyncs the temporary file, closes it, then atomically renames it over
 * the destination file so readers only ever see a complete file. On any failure the
 * temporary file is deleted instead, and the destination is left untouched.
 *
 * @throws IOException if flushing, syncing, closing, or the final rename fails.
 */
@Override
public void close() throws IOException {
  boolean triedToClose = false, success = false;
  try {
    flush();
    // Force data to disk before the file becomes visible at its final name.
    ((FileOutputStream)out).getChannel().force(true);

    triedToClose = true;
    super.close();
    success = true;
  } finally {
    if (success) {
      boolean renamed = tmpFile.renameTo(origFile);
      if (!renamed) {
        // On windows, renameTo does not replace.
        if (origFile.exists()) {
          try {
            Files.delete(origFile.toPath());
          } catch (IOException e) {
            throw new IOException("Could not delete original file " + origFile, e);
          }
        }
        try {
          // Fall back to the native rename, which replaces atomically.
          NativeIO.renameTo(tmpFile, origFile);
        } catch (NativeIOException e) {
          throw new IOException("Could not rename temporary file " + tmpFile
              + " to " + origFile + " due to failure in native rename. "
              + e.toString());
        }
      }
    } else {
      if (!triedToClose) {
        // If we failed when flushing, try to close it to not leak an FD
        IOUtils.closeStream(out);
      }
      // close wasn't successful, try to delete the tmp file
      if (!tmpFile.delete()) {
        LOG.warn("Unable to delete tmp file " + tmpFile);
      }
    }
  }
}
// The destination file must not become visible until close() succeeds, at which
// point it atomically contains the complete contents.
@Test
public void testWriteNewFile() throws IOException {
  OutputStream fos = new AtomicFileOutputStream(DST_FILE);
  assertFalse(DST_FILE.exists());
  fos.write(TEST_STRING.getBytes());
  fos.flush();
  // Still only in the temporary file: flush alone must not publish the data.
  assertFalse(DST_FILE.exists());
  fos.close();
  assertTrue(DST_FILE.exists());

  String readBackData = DFSTestUtil.readFile(DST_FILE);
  assertEquals(TEST_STRING, readBackData);
}
/**
 * JAX-RS handler for all HttpFS GET operations: dispatches on the {@code op} query
 * parameter to the matching filesystem operation and renders the result as JSON or
 * an octet stream (for OPEN). In write-only mode, only GETFILESTATUS and LISTSTATUS
 * are permitted; everything else returns 403.
 *
 * @param path    the filesystem path (made absolute below).
 * @param uriInfo request URI info, used to build redirect URLs.
 * @param op      the requested operation.
 * @param params  parsed operation parameters.
 * @param request the raw servlet request (for the remote address).
 * @return the HTTP response for the operation.
 * @throws IOException               on filesystem errors or an unknown operation.
 * @throws FileSystemAccessException on filesystem access errors.
 */
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path,
                    @Context UriInfo uriInfo,
                    @QueryParam(OperationParam.NAME) OperationParam op,
                    @Context Parameters params,
                    @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
  if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
          (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
          accessMode == AccessMode.WRITEONLY) {
    return Response.status(Response.Status.FORBIDDEN).build();
  }
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
  case OPEN: {
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    if (noRedirect) {
      // Client asked for the redirect target as JSON instead of a 307.
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      //Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      UserGroupInformation ugi = UserGroupInformation
          .createProxyUser(user.getShortUserName(),
              UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        LOG.warn("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
          new Object[] { path, offset, len });
      // Stream the requested byte range back to the client.
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
          .build();
    }
    break;
  }
  case GETFILESTATUS: {
    FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS: {
    String filter = params.get(FilterParam.NAME, FilterParam.class);
    FSOperations.FSListStatus command =
        new FSOperations.FSListStatus(path, filter);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETHOMEDIRECTORY: {
    enforceRootPath(op.value(), path);
    FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("Home Directory for [{}]", user);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case INSTRUMENTATION: {
    // Admin-only endpoint: caller must belong to the configured admin group.
    enforceRootPath(op.value(), path);
    Groups groups = HttpFSServerWebApp.get().get(Groups.class);
    Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
    if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
      throw new AccessControlException(
          "User not in HttpFSServer admin group");
    }
    Instrumentation instrumentation =
        HttpFSServerWebApp.get().get(Instrumentation.class);
    Map snapshot = instrumentation.getSnapshot();
    response = Response.ok(snapshot).build();
    break;
  }
  case GETCONTENTSUMMARY: {
    FSOperations.FSContentSummary command =
        new FSOperations.FSContentSummary(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Content summary for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETQUOTAUSAGE: {
    FSOperations.FSQuotaUsage command =
        new FSOperations.FSQuotaUsage(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Quota Usage for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILECHECKSUM: {
    FSOperations.FSFileChecksum command =
        new FSOperations.FSFileChecksum(path);
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    AUDIT_LOG.info("[{}]", path);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      Map json = fsExecute(user, command);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    }
    break;
  }
  case GETFILEBLOCKLOCATIONS: {
    // Missing/non-positive offset and len default to "the whole file".
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocations command =
        new FSOperations.FSFileBlockLocations(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("BlockLocations", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETACLSTATUS: {
    FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("ACL status for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETXATTRS: {
    List<String> xattrNames =
        params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
    XAttrCodec encoding =
        params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
    FSOperations.FSGetXAttrs command =
        new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttrs for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTXATTRS: {
    FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttr names for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS_BATCH: {
    String startAfter = params.get(
        HttpFSParametersProvider.StartAfterParam.NAME,
        HttpFSParametersProvider.StartAfterParam.class);
    byte[] token = HttpFSUtils.EMPTY_BYTES;
    if (startAfter != null) {
      token = startAfter.getBytes(StandardCharsets.UTF_8);
    }
    FSOperations.FSListStatusBatch command = new FSOperations
        .FSListStatusBatch(path, token);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] token [{}]", path, token);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOT: {
    FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETALLSTORAGEPOLICY: {
    FSOperations.FSGetAllStoragePolicies command =
        new FSOperations.FSGetAllStoragePolicies();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTORAGEPOLICY: {
    FSOperations.FSGetStoragePolicy command =
        new FSOperations.FSGetStoragePolicy(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFF: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    FSOperations.FSGetSnapshotDiff command =
        new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
            snapshotName);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFFLISTING: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    String snapshotDiffStartPath = params
        .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
            HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
    Integer snapshotDiffIndex =
        params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
            HttpFSParametersProvider.SnapshotDiffIndexParam.class);
    FSOperations.FSGetSnapshotDiffListing command =
        new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
            snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTTABLEDIRECTORYLIST: {
    FSOperations.FSGetSnapshottableDirListing command =
        new FSOperations.FSGetSnapshottableDirListing();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTLIST: {
    FSOperations.FSGetSnapshotListing command =
        new FSOperations.FSGetSnapshotListing(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSERVERDEFAULTS: {
    FSOperations.FSGetServerDefaults command =
        new FSOperations.FSGetServerDefaults();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case CHECKACCESS: {
    String mode = params.get(FsActionParam.NAME, FsActionParam.class);
    FsActionParam fsparam = new FsActionParam(mode);
    FSOperations.FSAccess command = new FSOperations.FSAccess(path,
        FsAction.getFsAction(fsparam.value()));
    fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    // Access granted: an empty 200 is the success signal; denial throws.
    response = Response.ok().build();
    break;
  }
  case GETECPOLICY: {
    FSOperations.FSGetErasureCodingPolicy command =
        new FSOperations.FSGetErasureCodingPolicy(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECPOLICIES: {
    FSOperations.FSGetErasureCodingPolicies command =
        new FSOperations.FSGetErasureCodingPolicies();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECCODECS: {
    FSOperations.FSGetErasureCodingCodecs command =
        new FSOperations.FSGetErasureCodingCodecs();
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GET_BLOCK_LOCATIONS: {
    // Legacy variant of GETFILEBLOCKLOCATIONS; same offset/len defaulting.
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocationsLegacy command =
        new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILELINKSTATUS: {
    FSOperations.FSFileLinkStatus command =
        new FSOperations.FSFileLinkStatus(path);
    @SuppressWarnings("rawtypes")
    Map js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTATUS: {
    FSOperations.FSStatus command = new FSOperations.FSStatus(path);
    @SuppressWarnings("rawtypes")
    Map js = fsExecute(user, command);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOTS: {
    Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
    FSOperations.FSGetTrashRoots command =
        new FSOperations.FSGetTrashRoots(allUsers);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("allUsers [{}]", allUsers);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  default: {
    throw new IOException(
        MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
  }
  }
  return response;
}
// Verifies that the "unmaskedpermission" parameter on MKDIRS bypasses the umask so
// inherited default ACL entries keep their full effective permissions.
@Test
@TestDir
@TestJetty
@TestHdfs
public void testMkdirWithUnmaskedPermissions() throws Exception {
  createHttpFSServer(false, false);

  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  // Create a folder with a default acl default:user2:rw-
  fs.mkdirs(new Path("/tmp"));
  AclEntry acl = new org.apache.hadoop.fs.permission.AclEntry.Builder()
      .setType(AclEntryType.USER)
      .setScope(AclEntryScope.DEFAULT)
      .setName("user2")
      .setPermission(FsAction.READ_WRITE)
      .build();
  fs.setAcl(new Path("/tmp"), new ArrayList<AclEntry>(Arrays.asList(acl)));

  String notUnmaskedDir = "/tmp/notUnmaskedDir";
  String unmaskedDir = "/tmp/unmaskedDir";

  // Create a file inside the folder. It should inherit the default acl
  // but the mask should affect the ACL permissions. The mask is controlled
  // by the group permissions, which are 0, and hence the mask will make
  // the effective permission of the inherited ACL be NONE.
  createDirWithHttp(notUnmaskedDir, "700", null);

  // Pull the relevant ACL from the FS object and check the mask has affected
  // its permissions.
  AclStatus aclStatus = fs.getAclStatus(new Path(notUnmaskedDir));
  AclEntry theAcl = findAclWithName(aclStatus, "user2");
  Assert.assertNotNull(theAcl);
  Assert.assertEquals(FsAction.NONE,
      aclStatus.getEffectivePermission(theAcl));

  // Create another file, this time pass a mask of 777. Now the inherited
  // permissions should be as expected
  createDirWithHttp(unmaskedDir, "700", "777");

  aclStatus = fs.getAclStatus(new Path(unmaskedDir));
  theAcl = findAclWithName(aclStatus, "user2");
  Assert.assertNotNull(theAcl);
  Assert.assertEquals(FsAction.READ_WRITE,
      aclStatus.getEffectivePermission(theAcl));
}
@Override
protected Future<KafkaBridgeStatus> createOrUpdate(Reconciliation reconciliation, KafkaBridge assemblyResource) {
    // Reconciles all Kubernetes resources owned by a KafkaBridge custom
    // resource (ServiceAccount, ClusterRoleBinding, Service, ConfigMap, PDB,
    // Deployment) and reports the outcome through the returned status future.
    KafkaBridgeStatus kafkaBridgeStatus = new KafkaBridgeStatus();
    String namespace = reconciliation.namespace();
    KafkaBridgeCluster bridge;
    try {
        // Building the model validates the custom resource; failures are
        // reflected into the status and returned as a failed future instead
        // of being thrown.
        bridge = KafkaBridgeCluster.fromCrd(reconciliation, assemblyResource, sharedEnvironmentProvider);
    } catch (Exception e) {
        LOGGER.warnCr(reconciliation, e);
        StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaBridgeStatus, e);
        return Future.failedFuture(new ReconciliationException(kafkaBridgeStatus, e));
    }
    KafkaClientAuthentication auth = assemblyResource.getSpec().getAuthentication();
    List<CertSecretSource> trustedCertificates = assemblyResource.getSpec().getTls() == null ? Collections.emptyList() : assemblyResource.getSpec().getTls().getTrustedCertificates();
    Promise<KafkaBridgeStatus> createOrUpdatePromise = Promise.promise();
    boolean bridgeHasZeroReplicas = bridge.getReplicas() == 0;
    String initCrbName = KafkaBridgeResources.initContainerClusterRoleBindingName(bridge.getCluster(), namespace);
    ClusterRoleBinding initCrb = bridge.generateClusterRoleBinding();
    LOGGER.debugCr(reconciliation, "Updating Kafka Bridge cluster");
    // The compose chain below runs the reconciliation steps strictly in
    // sequence; each step only starts after the previous one succeeded.
    kafkaBridgeServiceAccount(reconciliation, namespace, bridge)
        .compose(i -> bridgeInitClusterRoleBinding(reconciliation, initCrbName, initCrb))
        // Scale down first (if the desired replica count shrank), then
        // reconcile the remaining resources, then scale up.
        .compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, bridge.getComponentName(), bridge.getReplicas(), operationTimeoutMs))
        .compose(scale -> serviceOperations.reconcile(reconciliation, namespace, KafkaBridgeResources.serviceName(bridge.getCluster()), bridge.generateService()))
        .compose(i -> MetricsAndLoggingUtils.metricsAndLogging(reconciliation, configMapOperations, bridge.logging(), null))
        .compose(metricsAndLogging -> configMapOperations.reconcile(reconciliation, namespace, KafkaBridgeResources.metricsAndLogConfigMapName(reconciliation.name()),
                bridge.generateMetricsAndLogConfigMap(metricsAndLogging)))
        .compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, bridge.getComponentName(), bridge.generatePodDisruptionBudget()))
        // The auth/TLS hash is stored as a pod annotation so the Deployment
        // rolls when credentials or trusted certificates change.
        .compose(i -> VertxUtil.authTlsHash(secretOperations, namespace, auth, trustedCertificates))
        .compose(hash -> deploymentOperations.reconcile(reconciliation, namespace, bridge.getComponentName(), bridge.generateDeployment(Collections.singletonMap(Annotations.ANNO_STRIMZI_AUTH_HASH, Integer.toString(hash)), pfa.isOpenshift(), imagePullPolicy, imagePullSecrets)))
        .compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, bridge.getComponentName(), bridge.getReplicas(), operationTimeoutMs))
        .compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, bridge.getComponentName(), 1_000, operationTimeoutMs))
        // Readiness cannot be awaited when there are no pods to become ready.
        .compose(i -> bridgeHasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(reconciliation, namespace, bridge.getComponentName(), 1_000, operationTimeoutMs))
        .onComplete(reconciliationResult -> {
            // Status is populated for both success and failure paths.
            StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaBridgeStatus, reconciliationResult.mapEmpty().cause());
            if (!bridgeHasZeroReplicas) {
                int port = KafkaBridgeCluster.DEFAULT_REST_API_PORT;
                if (bridge.getHttp() != null) {
                    port = bridge.getHttp().getPort();
                }
                // Advertise the REST endpoint URL only when replicas exist.
                kafkaBridgeStatus.setUrl(KafkaBridgeResources.url(bridge.getCluster(), namespace, port));
            }
            kafkaBridgeStatus.setReplicas(bridge.getReplicas());
            kafkaBridgeStatus.setLabelSelector(bridge.getSelectorLabels().toSelectorString());
            if (reconciliationResult.succeeded()) {
                createOrUpdatePromise.complete(kafkaBridgeStatus);
            } else {
                // Wrap the failure together with the partially-filled status
                // so the caller can still publish it on the custom resource.
                createOrUpdatePromise.fail(new ReconciliationException(kafkaBridgeStatus, reconciliationResult.cause()));
            }
        });
    return createOrUpdatePromise.future();
}
@Test
public void testCreateOrUpdateWithReplicasScaleUpToOne(VertxTestContext context) {
    // Starts from a bridge deployed with 0 replicas and reconciles a spec
    // with 1 replica, verifying the operator requests a deployment scale-up.
    final int scaleTo = 1;

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
    var mockBridgeOps = supplier.kafkaBridgeOperator;
    DeploymentOperator mockDcOps = supplier.deploymentOperations;
    PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    ServiceOperator mockServiceOps = supplier.serviceOperations;

    String kbName = "foo";
    String kbNamespace = "test";

    KafkaBridge kb = ResourceUtils.createEmptyKafkaBridge(kbNamespace, kbName);
    kb.getSpec().setReplicas(0);
    // The model is built from the 0-replica spec; the spec is then bumped so
    // the reconciliation observes a higher desired replica count.
    KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kb, SHARED_ENV_PROVIDER);
    kb.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleUp

    // Stub every Kubernetes operation touched by createOrUpdate to succeed.
    when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb);
    when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kb));
    when(mockBridgeOps.updateStatusAsync(any(), any(KafkaBridge.class))).thenReturn(Future.succeededFuture());
    when(mockServiceOps.get(kbNamespace, bridge.getComponentName())).thenReturn(bridge.generateService());
    Deployment dep = bridge.generateDeployment(new HashMap<>(), true, null, null);
    when(mockDcOps.get(kbNamespace, bridge.getComponentName())).thenReturn(dep);
    when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockServiceOps.reconcile(any(), eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture());
    when(mockDcOps.reconcile(any(), eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture());
    doAnswer(i -> Future.succeededFuture(scaleTo))
        .when(mockDcOps).scaleUp(any(), eq(kbNamespace), eq(bridge.getComponentName()), eq(scaleTo), anyLong());
    doAnswer(i -> Future.succeededFuture(scaleTo))
        .when(mockDcOps).scaleDown(any(), eq(kbNamespace), eq(bridge.getComponentName()), eq(scaleTo), anyLong());
    when(mockBridgeOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaBridge())));
    when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
    when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget())));

    KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx,
        new PlatformFeaturesAvailability(true, kubernetesVersion),
        new MockCertManager(), new PasswordGenerator(10, "a", "a"),
        supplier,
        ResourceUtils.dummyClusterOperatorConfig(VERSIONS));

    Checkpoint async = context.checkpoint();
    ops.createOrUpdate(new Reconciliation("test-trigger", KafkaBridge.RESOURCE_KIND, kbNamespace, kbName), kb)
        .onComplete(context.succeeding(v -> context.verify(() -> {
            // The scale-up to the new desired replica count must be requested.
            verify(mockDcOps).scaleUp(any(), eq(kbNamespace), eq(bridge.getComponentName()), eq(scaleTo), anyLong());
            async.flag();
        })));
}
@Override
public void visit(final Entry target) {
    // Resolves an action for this menu entry when it has a non-empty name and
    // no action attached yet, then records the result (possibly null).
    final String name = target.getName();
    if (name.isEmpty()) {
        return;
    }
    final EntryAccessor accessor = new EntryAccessor();
    if (accessor.getAction(target) != null) {
        return;
    }
    AFreeplaneAction resolved = freeplaneActions.getAction(name);
    if (resolved == null) {
        // Fall back to synthesizing a property action when the entry name
        // follows the "<ActionClassSimpleName>.<propertyName>" convention.
        for (final Class<? extends AFreeplaneAction> candidate : Arrays.asList(
                SetBooleanPropertyAction.class,
                SetBooleanMapPropertyAction.class,
                SetBooleanMapViewPropertyAction.class,
                SetStringPropertyAction.class)) {
            final String prefix = candidate.getSimpleName() + ".";
            if (name.startsWith(prefix)) {
                resolved = createAction(candidate, name.substring(prefix.length()));
                if (resolved != null) {
                    freeplaneActions.addAction(resolved);
                }
                break;
            }
        }
    }
    // Record the outcome even when resolution failed (null), matching the
    // original behavior so the entry is not re-resolved differently later.
    accessor.setAction(target, resolved);
}
@Test
public void attachesSetBooleanPropertyAction() {
    // An entry named "SetBooleanPropertyAction.<property>" with no registered
    // action should get one synthesized, registered, and attached.
    final FreeplaneActions freeplaneActions = mock(FreeplaneActions.class);
    final SetBooleanPropertyAction expectedAction = Mockito.mock(SetBooleanPropertyAction.class);
    final String propertyActionName = "SetBooleanPropertyAction.property";
    when(freeplaneActions.getAction(propertyActionName)).thenReturn(null);

    final Entry entry = new Entry();
    entry.setName(propertyActionName);

    // Override createAction so the finder produces our mocked action.
    final ActionFinder actionFinder = new ActionFinder(freeplaneActions) {
        @Override
        protected AFreeplaneAction createAction(Class<? extends AFreeplaneAction> actionClass, String propertyName) {
            return expectedAction;
        }
    };

    actionFinder.visit(entry);

    // The synthesized action must be both registered and attached to the entry.
    Mockito.verify(freeplaneActions).addAction(expectedAction);
    assertThat(new EntryAccessor().getAction(entry), CoreMatchers.<Object> equalTo(expectedAction));
}
public static boolean allOf(Object collection, Object value) { if (collection == null) { throw new IllegalArgumentException("collection cannot be null"); } if (value == null) { throw new IllegalArgumentException("value cannot be null"); } // collection to check against Collection targetCollection = getTargetCollection(collection, value); // elements to check if (DMNParseUtil.isParseableCollection(value)) { Collection valueCollection = DMNParseUtil.parseCollection(value, targetCollection); return valueCollection != null && targetCollection.containsAll(valueCollection); } else if (DMNParseUtil.isJavaCollection(value)) { return targetCollection.containsAll((Collection) value); } else if (DMNParseUtil.isArrayNode(value)) { Collection valueCollection = DMNParseUtil.getCollectionFromArrayNode((ArrayNode) value); return valueCollection != null && targetCollection.containsAll(valueCollection); } else { Object formattedValue = DMNParseUtil.getFormattedValue(value, targetCollection); return targetCollection.contains(formattedValue); } }
@Test
public void allOf() {
    // Java collections on both sides: true only when every element of the
    // second argument is present in the first.
    assertThat(CollectionUtil.allOf(Arrays.asList("group1", "group2"), Arrays.asList("group3", "group4"))).isFalse();
    assertThat(CollectionUtil.allOf(Arrays.asList("group1", "group2"), Arrays.asList("group1", "group2"))).isTrue();
    assertThat(CollectionUtil.allOf(Arrays.asList("group1", "group2"), Arrays.asList("group2", "group3"))).isFalse();

    // Collection checked against a single scalar value.
    assertThat(CollectionUtil.allOf(Arrays.asList("group1", "group2"), "group3")).isFalse();
    assertThat(CollectionUtil.allOf(Arrays.asList("group1", "group2"), "group2")).isTrue();

    // Comma-separated strings parsed into collections on both sides.
    assertThat(CollectionUtil.allOf("group1, group2", "group3, group4")).isFalse();
    assertThat(CollectionUtil.allOf("group1, group2", "group1, group2")).isTrue();
    assertThat(CollectionUtil.allOf("group1, group2", "group2, group3")).isFalse();

    // Jackson ArrayNode representations on both sides.
    ObjectMapper mapper = new ObjectMapper();
    assertThat(CollectionUtil.allOf(mapper.valueToTree(Arrays.asList("group1", "group2")), mapper.valueToTree(Arrays.asList("group3", "group4"))))
        .isFalse();
    assertThat(CollectionUtil.allOf(mapper.valueToTree(Arrays.asList("group1", "group2")), mapper.valueToTree(Arrays.asList("group1", "group2")))).isTrue();
    assertThat(CollectionUtil.allOf(mapper.valueToTree(Arrays.asList("group1", "group2")), mapper.valueToTree(Arrays.asList("group2", "group3"))))
        .isFalse();
}
public CreateStreamCommand createStreamCommand(final KsqlStructuredDataOutputNode outputNode) {
    // Builds the DDL command that registers the sink of the given output node
    // as a stream, copying schema, timestamp column, topic name, serde
    // formats, and key windowing info from the node's Kafka topic.
    return new CreateStreamCommand(
        outputNode.getSinkName().get(),
        outputNode.getSchema(),
        outputNode.getTimestampColumn(),
        outputNode.getKsqlTopic().getKafkaTopicName(),
        Formats.from(outputNode.getKsqlTopic()),
        outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(),
        // Propagates the CREATE OR REPLACE flag from the plan node.
        Optional.of(outputNode.getOrReplace()),
        // Trailing flag is always false here — presumably "is source stream";
        // TODO(review): confirm against CreateStreamCommand's constructor.
        Optional.of(false)
    );
}
@Test public void shouldCreateStreamCommandWithSingleValueWrappingFromDefaultConfig() { // Given: final CreateStream statement = new CreateStream(SOME_NAME, ONE_KEY_ONE_VALUE, false, true, withProperties, false); // When: final CreateStreamCommand cmd = createSourceFactory .createStreamCommand(statement, ksqlConfig); // Then: assertThat(cmd.getFormats().getValueFeatures(), is(SerdeFeatures.of())); }
static void verifyFixInvalidValues(final List<KiePMMLMiningField> notTargetMiningFields,
                                   final PMMLRequestData requestData) {
    // For every non-target mining field that has a matching request parameter,
    // checks whether the supplied value is valid; invalid values are handled
    // by manageInvalidValues, which may mark the parameter for removal.
    logger.debug("verifyFixInvalidValues {} {}", notTargetMiningFields, requestData);
    final Collection<ParameterInfo> requestParams = requestData.getRequestParams();
    final List<ParameterInfo> toRemove = new ArrayList<>();
    for (KiePMMLMiningField miningField : notTargetMiningFields) {
        ParameterInfo parameterInfo = requestParams.stream()
                .filter(paramInfo -> miningField.getName().equals(paramInfo.getName()))
                .findFirst()
                .orElse(null);
        if (parameterInfo == null) {
            continue;
        }
        if (!isMatching(parameterInfo, miningField)) {
            // Invalid value: manageInvalidValues may replace it or queue the
            // parameter for removal depending on the treatment method.
            manageInvalidValues(miningField, parameterInfo, toRemove);
        }
    }
    // Remove rejected parameters once, after all fields have been inspected.
    // Previously this ran inside the loop, re-issuing the removals for every
    // subsequent field and mutating the live request-parameter collection
    // while later iterations were still streaming over it.
    toRemove.forEach(requestData::removeRequestParam);
}
@Test
void verifyFixInvalidValuesInvalidAsIs() {
    // With INVALID_VALUE_TREATMENT_METHOD.AS_IS, invalid input values must be
    // left untouched: neither replaced by the declared replacement nor removed
    // from the request data.
    KiePMMLMiningField miningField0 = KiePMMLMiningField.builder("FIELD-0", null)
        .withDataType(DATA_TYPE.STRING)
        .withInvalidValueTreatmentMethod(INVALID_VALUE_TREATMENT_METHOD.AS_IS)
        .withInvalidValueReplacement("123")
        .withAllowedValues(Arrays.asList("123", "124", "125"))
        .build();
    KiePMMLMiningField miningField1 = KiePMMLMiningField.builder("FIELD-1", null)
        .withDataType(DATA_TYPE.DOUBLE)
        .withInvalidValueTreatmentMethod(INVALID_VALUE_TREATMENT_METHOD.AS_IS)
        .withInvalidValueReplacement("1.23")
        .withAllowedValues(Arrays.asList("1.23", "12.4", "1.25"))
        .build();
    // FIELD-2 constrains validity with intervals rather than allowed values.
    List<KiePMMLInterval> intervals = Arrays.asList(new KiePMMLInterval(0.0, 12.4, CLOSURE.CLOSED_CLOSED),
                                                    new KiePMMLInterval(12.6, 14.5, CLOSURE.OPEN_CLOSED));
    KiePMMLMiningField miningField2 = KiePMMLMiningField.builder("FIELD-2", null)
        .withDataType(DATA_TYPE.DOUBLE)
        .withInvalidValueTreatmentMethod(INVALID_VALUE_TREATMENT_METHOD.AS_IS)
        .withInvalidValueReplacement("12.3")
        .withIntervals(intervals)
        .build();
    List<KiePMMLMiningField> miningFields = Arrays.asList(miningField0, miningField1, miningField2);
    // Every supplied value is outside its field's allowed values/intervals.
    PMMLRequestData pmmlRequestData = new PMMLRequestData("123", "modelName");
    pmmlRequestData.addRequestParam("FIELD-0", "122");
    pmmlRequestData.addRequestParam("FIELD-1", 12.5);
    pmmlRequestData.addRequestParam("FIELD-2", 14.6);
    PreProcess.verifyFixInvalidValues(miningFields, pmmlRequestData);
    // AS_IS: the invalid values survive pre-processing unchanged.
    Map<String, ParameterInfo> mappedRequestParams = pmmlRequestData.getMappedRequestParams();
    assertThat(mappedRequestParams.get("FIELD-0").getValue()).isEqualTo("122");
    assertThat(mappedRequestParams.get("FIELD-1").getValue()).isEqualTo(12.5);
    assertThat(mappedRequestParams.get("FIELD-2").getValue()).isEqualTo(14.6);
}
public List<String> searchTags(@Nullable String textQuery, int page, int size) {
    // Returns one page of distinct project tags, alphabetically ordered and
    // optionally filtered by a substring match on textQuery.
    int maxPageSize = 100;
    int maxPage = 20;
    checkArgument(size <= maxPageSize, "Page size must be lower than or equals to " + maxPageSize);
    checkArgument(page > 0 && page <= maxPage, "Page must be between 0 and " + maxPage);
    if (size <= 0) {
        return emptyList();
    }

    // ES terms aggregations have no offset, so request the first page*size
    // buckets and drop the earlier pages client-side below.
    TermsAggregationBuilder tagsAggregation = AggregationBuilders.terms(FIELD_TAGS)
        .field(FIELD_TAGS)
        .size(size * page)
        .minDocCount(1)
        .order(BucketOrder.key(true));
    if (textQuery != null) {
        String bucketPattern = ".*" + escapeSpecialRegexChars(textQuery) + ".*";
        tagsAggregation.includeExclude(new IncludeExclude(bucketPattern, null));
    }

    SearchSourceBuilder source = new SearchSourceBuilder()
        // Restrict to projects the current user is authorized to see.
        .query(authorizationTypeSupport.createQueryFilter())
        .fetchSource(false)
        .aggregation(tagsAggregation);
    SearchResponse searchResponse = client.search(
        EsClient.prepareSearch(TYPE_PROJECT_MEASURES.getMainType()).source(source));

    Terms tags = searchResponse.getAggregations().get(FIELD_TAGS);
    return tags.getBuckets().stream()
        .skip((page - 1) * size)
        .map(Bucket::getKeyAsString)
        .toList();
}
@Test
public void fail_if_page_greater_than_20() {
    // Page 21 exceeds the hard upper limit of 20 enforced by searchTags, so
    // the call must be rejected with the documented argument-check message.
    assertThatThrownBy(() -> underTest.searchTags("whatever", 21, 100))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessageContaining("Page must be between 0 and 20");
}