focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Loads the given cloud objects as a Spark {@link Dataset}.
 *
 * @param spark                the active Spark session used to build the reader
 * @param cloudObjectMetadata  distinct cloud objects to load; empty/null short-circuits
 * @param fileFormat           Spark datasource format (e.g. "json", "parquet")
 * @param schemaProviderOption optional schema provider; when present and non-null,
 *                             its Avro schema is converted and applied to the reader
 * @param numPartitions        target partition count for the resulting dataset
 * @return {@code Option.empty()} when there is nothing to load, otherwise the dataset
 */
public Option<Dataset<Row>> loadAsDataset(SparkSession spark, List<CloudObjectMetadata> cloudObjectMetadata, String fileFormat, Option<SchemaProvider> schemaProviderOption, int numPartitions) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Extracted distinct files " + cloudObjectMetadata.size()
            + " and some samples " + cloudObjectMetadata.stream().map(CloudObjectMetadata::getPath).limit(10).collect(Collectors.toList()));
    }
    // Nothing to load — avoid creating a reader for an empty path list.
    if (isNullOrEmpty(cloudObjectMetadata)) {
        return Option.empty();
    }
    DataFrameReader reader = spark.read().format(fileFormat);
    String datasourceOpts = getStringWithAltKeys(properties, CloudSourceConfig.SPARK_DATASOURCE_OPTIONS, true);
    if (schemaProviderOption.isPresent()) {
        Schema sourceSchema = schemaProviderOption.get().getSourceSchema();
        // NULL_SCHEMA is a sentinel meaning "no schema"; skip it so Spark infers instead.
        if (sourceSchema != null && !sourceSchema.equals(InputBatch.NULL_SCHEMA)) {
            reader = reader.schema(AvroConversionUtils.convertAvroSchemaToStructType(sourceSchema));
        }
    }
    if (StringUtils.isNullOrEmpty(datasourceOpts)) {
        // fall back to legacy config for BWC. TODO consolidate in HUDI-6020
        datasourceOpts = getStringWithAltKeys(properties, S3EventsHoodieIncrSourceConfig.SPARK_DATASOURCE_OPTIONS, true);
    }
    if (StringUtils.nonEmpty(datasourceOpts)) {
        // Options are supplied as a JSON map of string key/values.
        final ObjectMapper mapper = new ObjectMapper();
        Map<String, String> sparkOptionsMap = null;
        try {
            sparkOptionsMap = mapper.readValue(datasourceOpts, Map.class);
        } catch (IOException e) {
            throw new HoodieException(String.format("Failed to parse sparkOptions: %s", datasourceOpts), e);
        }
        LOG.info(String.format("sparkOptions loaded: %s", sparkOptionsMap));
        reader = reader.options(sparkOptionsMap);
    }
    List<String> paths = new ArrayList<>();
    for (CloudObjectMetadata o : cloudObjectMetadata) {
        paths.add(o.getPath());
    }
    // Some readers expect a single comma-separated path string instead of varargs.
    boolean isCommaSeparatedPathFormat = properties.getBoolean(SPARK_DATASOURCE_READER_COMMA_SEPARATED_PATH_FORMAT.key(), false);
    Dataset<Row> dataset;
    if (isCommaSeparatedPathFormat) {
        dataset = reader.load(String.join(",", paths));
    } else {
        dataset = reader.load(paths.toArray(new String[cloudObjectMetadata.size()]));
    }
    // add partition column from source path if configured
    if (containsConfigProperty(properties, PATH_BASED_PARTITION_FIELDS)) {
        String[] partitionKeysToAdd = getStringWithAltKeys(properties, PATH_BASED_PARTITION_FIELDS).split(",");
        // Add partition column for all path-based partition keys. If key is not present in path, the value will be null.
        for (String partitionKey : partitionKeysToAdd) {
            String partitionPathPattern = String.format("%s=", partitionKey);
            LOG.info(String.format("Adding column %s to dataset", partitionKey));
            // Extract the value between "<key>=" and the next "/" in the source file path.
            dataset = dataset.withColumn(partitionKey, split(split(input_file_name(), partitionPathPattern).getItem(1), "/").getItem(0));
        }
    }
    dataset = coalesceOrRepartition(dataset, numPartitions);
    return Option.of(dataset);
}
@Test
public void emptyMetadataReturnsEmptyOption() {
    // With no cloud objects to read, the selector must short-circuit to Option.empty().
    CloudObjectsSelectorCommon selector = new CloudObjectsSelectorCommon(new TypedProperties());
    Option<Dataset<Row>> loaded =
        selector.loadAsDataset(sparkSession, Collections.emptyList(), "json", Option.empty(), 1);
    Assertions.assertFalse(loaded.isPresent());
}
@Override public AuthenticationToken authenticate(HttpServletRequest request, HttpServletResponse response) throws IOException, AuthenticationException { String authorization = request.getHeader(HttpConstants.AUTHORIZATION_HEADER); if (authorization != null) { for (Map.Entry<String, AuthenticationHandler> entry : schemeToAuthHandlerMapping.entrySet()) { if (AuthenticationHandlerUtil.matchAuthScheme( entry.getKey(), authorization)) { AuthenticationToken token = entry.getValue().authenticate(request, response); logger.trace("Token generated with type {}", token.getType()); return token; } } } // Handle the case when (authorization == null) or an invalid authorization // header (e.g. a header value without the scheme name). response.setStatus(HttpServletResponse.SC_UNAUTHORIZED); for (String scheme : schemeToAuthHandlerMapping.keySet()) { response.addHeader(HttpConstants.WWW_AUTHENTICATE_HEADER, scheme); } return null; }
/**
 * Verifies that bad credentials yield a {@code null} token, a 401 status, and a
 * {@code WWW-Authenticate} challenge for each supported scheme (BASIC and NEGOTIATE).
 */
@Test(timeout = 60000)
public void testRequestWithInvalidAuthorization() throws Exception {
    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
    final Base64 base64 = new Base64(0);
    // Deliberately wrong password so authentication must fail.
    String credentials = "bjones:invalidpassword";
    Mockito.when(request.getHeader(AUTHORIZATION_HEADER))
        .thenReturn(base64.encodeToString(credentials.getBytes()));
    Assert.assertNull(handler.authenticate(request, response));
    // Both configured schemes must be offered back to the client as challenges.
    Mockito.verify(response).addHeader(WWW_AUTHENTICATE_HEADER, BASIC);
    Mockito.verify(response).addHeader(WWW_AUTHENTICATE_HEADER, NEGOTIATE);
    Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
}
/**
 * No-op implementation of profile property removal.
 *
 * <p>This override intentionally does nothing — presumably a disabled/stub variant
 * of the API (TODO confirm against the enclosing class's purpose).
 *
 * @param property name of the profile property that would be unset; ignored
 */
@Override
public void profileUnset(String property) {
}
/**
 * Verifies that {@code profileUnset} emits no tracking event: the callback fails
 * the test if any event reaches it.
 */
@Test
public void profileUnset() {
    mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            // Any event delivery means profileUnset incorrectly tracked something.
            Assert.fail();
            return false;
        }
    });
    mSensorsAPI.profileUnset("abcde");
}
/**
 * Returns the identifier quote character for this dialect (backtick, MySQL-style).
 */
@Override
public QuoteCharacter getQuoteCharacter() {
    return QuoteCharacter.BACK_QUOTE;
}
@Test
void assertGetQuoteCharacter() {
    // The dialect must quote identifiers with backticks.
    QuoteCharacter actual = dialectDatabaseMetaData.getQuoteCharacter();
    assertThat(actual, is(QuoteCharacter.BACK_QUOTE));
}
/**
 * Removes all elements by draining through the iterator, so that any
 * subclass-specific removal semantics of {@code iterator().remove()} are honored.
 */
@Override
public void clear() {
    Iterator<E> it = this.iterator();
    while (it.hasNext()) {
        it.next();
        it.remove();
    }
}
@Test
public void testClear() {
    // A non-empty collection must be empty after clear().
    Collection<Object> collection = new Collection<>();
    collection.add(new Object());
    collection.clear();
    assertTrue(collection.isEmpty());
}
/**
 * Looks up a property and expands environment-variable references in its value.
 *
 * @param key property name
 * @return the property value with env vars resolved; passes through whatever
 *         {@code resolveEnvVars} does for a null/absent value
 */
@Override
public String getProperty(String key) {
    return resolveEnvVars(super.getProperty(key));
}
/**
 * End-to-end check that an env-var reference in the Flume configuration
 * (${NC_PORT}) is resolved to the environment value "6667".
 */
@Test
public void getProperty() throws Exception {
    SystemLambda.withEnvironmentVariable("NC_PORT", "6667").execute(() -> {
        // Switch Flume to the env-var-resolving properties implementation.
        System.setProperty("propertiesImplementation", "org.apache.pulsar.io.flume.node.EnvVarResolverProperties");
        assertEquals(provider.getFlumeConfiguration()
            .getConfigurationFor("a1")
            .getSourceContext().get("r1").getParameters().get("port"), "6667");
    });
}
/**
 * Looks up error statistics for a given route and cause.
 *
 * @return the stats entry, or {@code null} when the route or cause is unknown
 */
public ErrorStatsData getStats(String route, String cause) {
    final Map<String, ErrorStatsData> causeMap = routeMap.get(route);
    return causeMap == null ? null : causeMap.get(cause);
}
@Test
void testGetStats() {
    // A freshly recorded (route, cause) pair must be retrievable.
    ErrorStatsManager manager = new ErrorStatsManager();
    assertNotNull(manager);
    manager.putStats("test", "cause");
    assertNotNull(manager.getStats("test", "cause"));
}
/**
 * Opens a download stream for a Google Drive file.
 *
 * <p>Placeholders (Google Docs shortcuts) are rendered as a web-link file rather
 * than downloaded. Regular files honor range requests for resumed transfers,
 * support reading a previous revision for duplicates, and — on an abuse-flagged
 * {@code AccessDeniedException} — prompt the user before retrying the download
 * with {@code acknowledgeAbuse} set.
 *
 * @param file     file to read
 * @param status   transfer status; append mode triggers a Range header
 * @param callback prompts the user to acknowledge downloading flagged content
 * @throws BackgroundException on mapped I/O failures
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    if(file.isPlaceholder()) {
        final DescriptiveUrl link = new DriveUrlProvider().toUrl(file).find(DescriptiveUrl.Type.http);
        if(DescriptiveUrl.EMPTY.equals(link)) {
            log.warn(String.format("Missing web link for file %s", file));
            // No link available; serve an empty stream of the advertised size.
            return new NullInputStream(file.attributes().getSize());
        }
        // Write web link file
        return IOUtils.toInputStream(UrlFileWriterFactory.get().write(link), Charset.defaultCharset());
    }
    else {
        final HttpHeaders headers = new HttpHeaders();
        headers.setContentType(MEDIA_TYPE);
        if(status.isAppend()) {
            // Resume: request only the remaining byte range.
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            headers.setRange(header);
            // Disable compression
            headers.setAcceptEncoding("identity");
        }
        if(file.attributes().isDuplicate()) {
            // Read previous version
            try {
                final Drive.Revisions.Get request = session.getClient().revisions().get(fileid.getFileId(file), file.attributes().getVersionId());
                request.setRequestHeaders(headers);
                return request.executeMediaAsInputStream();
            }
            catch(IOException e) {
                throw new DriveExceptionMappingService(fileid).map("Download {0} failed", e, file);
            }
        }
        else {
            try {
                try {
                    final Drive.Files.Get request = session.getClient().files().get(fileid.getFileId(file));
                    request.setRequestHeaders(headers);
                    request.setSupportsTeamDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"));
                    return request.executeMediaAsInputStream();
                }
                catch(IOException e) {
                    throw new DriveExceptionMappingService(fileid).map("Download {0} failed", e, file);
                }
            }
            catch(RetriableAccessDeniedException e) {
                // Retriable failures propagate unchanged so the caller can retry.
                throw e;
            }
            catch(AccessDeniedException e) {
                // Abuse-flagged file: ask the user once per host, then retry with acknowledgeAbuse.
                if(!PreferencesFactory.get().getBoolean(String.format("connection.unsecure.download.%s", session.getHost().getHostname()))) {
                    // Not previously dismissed
                    callback.warn(session.getHost(), MessageFormat.format(LocaleFactory.localizedString("Download {0} failed", "Error"), file.getName()),
                        "Acknowledge the risk of downloading known malware or other abusive file.",
                        LocaleFactory.localizedString("Continue", "Credentials"), LocaleFactory.localizedString("Cancel", "Localizable"),
                        String.format("connection.unsecure.download.%s", session.getHost().getHostname()));
                }
                try {
                    final Drive.Files.Get request = session.getClient().files().get(fileid.getFileId(file));
                    request.setAcknowledgeAbuse(true);
                    request.setRequestHeaders(headers);
                    request.setSupportsTeamDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"));
                    return request.executeMediaAsInputStream();
                }
                catch(IOException f) {
                    throw new DriveExceptionMappingService(fileid).map("Download {0} failed", f, file);
                }
            }
        }
    }
}
/**
 * Integration test: reading a freshly created zero-byte file yields a stream
 * that delivers exactly zero bytes. Cleans up the created file and directory.
 */
@Test
public void testReadEmpty() throws Exception {
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    final Path directory = new DriveDirectoryFeature(session, fileid).mkdir(new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    // File name contains a space on purpose to exercise URL encoding.
    final Path file = new DriveTouchFeature(session, fileid).touch(new Path(directory, String.format("t %s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertEquals(0, new DriveAttributesFinderFeature(session, fileid).find(file).getSize());
    final CountingInputStream in = new CountingInputStream(new DriveReadFeature(session, fileid).read(file, new TransferStatus(), new DisabledConnectionCallback()));
    in.close();
    assertEquals(0L, in.getByteCount(), 0L);
    new DriveDeleteFeature(session, fileid).delete(Arrays.asList(file, directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Compiles the rows of a {@link ResultSet} using a template loaded from the
 * classpath, delegating to {@link #compile(ResultSet, InputStream)}.
 *
 * @param rs       result set providing the data rows
 * @param template classpath resource path of the template
 * @return the generated output
 * @throws IllegalArgumentException if the template resource cannot be found —
 *         previously a missing resource surfaced as an obscure NPE downstream
 */
public String compile(final ResultSet rs, final String template) {
    final InputStream templateStream = this.getClass().getResourceAsStream(template);
    if (templateStream == null) {
        throw new IllegalArgumentException("Template resource not found on classpath: " + template);
    }
    return compile(rs, templateStream);
}
/**
 * End-to-end: rule rows stored in an in-memory HSQL table are converted to DRL
 * via the template generator, compiled into a knowledge base, and fired against
 * matching facts; exactly one rule is expected to activate.
 */
@Test
public void testResultSet() throws Exception {
    // setup the HSQL database with our rules.
    Class.forName("org.hsqldb.jdbcDriver");
    Connection conn = DriverManager.getConnection("jdbc:hsqldb:mem:drools-templates", "sa", "");
    try {
        update("CREATE TABLE cheese_rules ( id INTEGER IDENTITY, persons_age INTEGER, birth_date DATE, cheese_type VARCHAR(256), log VARCHAR(256) )", conn);
        update("INSERT INTO cheese_rules(persons_age,birth_date,cheese_type,log) VALUES(42, '1950-01-01', 'stilton', 'Old man stilton')", conn);
        update("INSERT INTO cheese_rules(persons_age,birth_date,cheese_type,log) VALUES(10, '2009-01-01', 'cheddar', 'Young man cheddar')", conn);
    } catch (SQLException e) {
        throw new IllegalStateException("Could not initialize in memory database", e);
    }
    // query the DB for the rule rows, convert them using the template.
    Statement sta = conn.createStatement();
    ResultSet rs = sta.executeQuery("SELECT persons_age, cheese_type, log " + " FROM cheese_rules");
    final ResultSetGenerator converter = new ResultSetGenerator();
    final String drl = converter.compile(rs, getRulesStream());
    System.out.println(drl);
    sta.close();
    // Compile the generated DRL and make sure it is valid.
    KnowledgeBuilder kbuilder = KnowledgeBuilderFactory.newKnowledgeBuilder();
    kbuilder.add(ResourceFactory.newByteArrayResource(drl.getBytes()), ResourceType.DRL);
    assertThat(kbuilder.hasErrors()).isFalse();
    InternalKnowledgeBase kbase = KnowledgeBaseFactory.newKnowledgeBase();
    kbase.addPackages(kbuilder.getKnowledgePackages());
    KieSession kSession = kbase.newKieSession();
    //now create some test data
    kSession.insert(new Cheese("stilton", 42));
    kSession.insert(new Person("michael", "stilton", 42));
    List<String> list = new ArrayList<String>();
    kSession.setGlobal("list", list);
    kSession.fireAllRules();
    // Only the stilton rule should match the inserted facts.
    assertThat(list.size()).isEqualTo(1);
}
/**
 * Renders an AST back into SQL text, stripping any trailing newlines the
 * formatter appends.
 */
public static String formatSql(final AstNode root) {
    final StringBuilder sql = new StringBuilder();
    new Formatter(sql).process(root, 0);
    return StringUtils.stripEnd(sql.toString(), "\n");
}
@Test public void shouldFormatShowStreamsExtended() { // Given: final ListStreams listStreams = new ListStreams(Optional.empty(), true); // When: final String formatted = SqlFormatter.formatSql(listStreams); // Then: assertThat(formatted, is("SHOW STREAMS EXTENDED")); }
/**
 * Computes the next fire time strictly after the given time that satisfies this
 * cron expression.
 *
 * <p>The algorithm advances a {@link Calendar} field by field (second, minute,
 * hour, day, month, year); whenever a field rolls over it resets the smaller
 * fields and restarts the loop (`continue`) until a candidate survives every
 * field check.
 *
 * @param afterTime the reference time; {@code null} yields {@code null}
 * @return the next matching time, or {@code null} when none exists (e.g. the
 *         year limit is exceeded or the configured years run out)
 */
public Date getTimeAfter(Date afterTime) {
    // Computation is based on Gregorian year only.
    final Calendar cl = new java.util.GregorianCalendar(getTimeZone());
    if (afterTime == null) {
        return null;
    }
    // move ahead one second, since we're computing the time *after* the given time
    afterTime = new Date(afterTime.getTime() + 1000);
    // CronTrigger does not deal with milliseconds
    cl.setTime(afterTime);
    cl.set(Calendar.MILLISECOND, 0);
    boolean gotOne = false;
    // loop until we've computed the next time, or we've past the endTime
    while (!gotOne) {
        //if (endTime != null && cl.getTime().after(endTime)) return null;
        if (cl.get(Calendar.YEAR) > 2999) { // prevent endless loop...
            return null;
        }
        SortedSet<Integer> st = null;
        int t = 0;
        int sec = cl.get(Calendar.SECOND);
        int min = cl.get(Calendar.MINUTE);
        // get second.................................................
        st = seconds.tailSet(sec);
        if (st != null && st.size() != 0) {
            sec = st.first();
        } else {
            // No matching second remains this minute; wrap and bump the minute.
            sec = seconds.first();
            min++;
            cl.set(Calendar.MINUTE, min);
        }
        cl.set(Calendar.SECOND, sec);
        min = cl.get(Calendar.MINUTE);
        int hr = cl.get(Calendar.HOUR_OF_DAY);
        t = -1;
        // get minute.................................................
        st = minutes.tailSet(min);
        if (st != null && st.size() != 0) {
            t = min;
            min = st.first();
        } else {
            min = minutes.first();
            hr++;
        }
        if (min != t) {
            // Minute changed: reset seconds and restart the scan.
            cl.set(Calendar.SECOND, 0);
            cl.set(Calendar.MINUTE, min);
            setCalendarHour(cl, hr);
            continue;
        }
        cl.set(Calendar.MINUTE, min);
        hr = cl.get(Calendar.HOUR_OF_DAY);
        int day = cl.get(Calendar.DAY_OF_MONTH);
        t = -1;
        // get hour...................................................
        st = hours.tailSet(hr);
        if (st != null && st.size() != 0) {
            t = hr;
            hr = st.first();
        } else {
            hr = hours.first();
            day++;
        }
        if (hr != t) {
            // Hour changed: reset smaller fields and restart the scan.
            cl.set(Calendar.SECOND, 0);
            cl.set(Calendar.MINUTE, 0);
            cl.set(Calendar.DAY_OF_MONTH, day);
            setCalendarHour(cl, hr);
            continue;
        }
        cl.set(Calendar.HOUR_OF_DAY, hr);
        day = cl.get(Calendar.DAY_OF_MONTH);
        int mon = cl.get(Calendar.MONTH) + 1;
        // '+ 1' because calendar is 0-based for this field, and we are 1-based
        t = -1;
        int tmon = mon;
        // get day...................................................
        final boolean dayOfMSpec = !daysOfMonth.contains(NO_SPEC);
        final boolean dayOfWSpec = !daysOfWeek.contains(NO_SPEC);
        if (dayOfMSpec && !dayOfWSpec) { // get day by day of month rule
            st = daysOfMonth.tailSet(day);
            if (lastdayOfMonth) {
                if (!nearestWeekday) {
                    // "L" (optionally with an offset): last day of month.
                    t = day;
                    day = getLastDayOfMonth(mon, cl.get(Calendar.YEAR));
                    day -= lastdayOffset;
                    day = t > day ? 1 : day;
                    if (t > day && ++mon > 12) {
                        mon = 1;
                        tmon = 3333; // ensure test of mon != tmon further below fails
                        cl.add(Calendar.YEAR, 1);
                    }
                } else {
                    // "LW": nearest weekday to the last day of the month.
                    t = day;
                    day = getLastDayOfMonth(mon, cl.get(Calendar.YEAR));
                    day -= lastdayOffset;
                    final Calendar tcal = Calendar.getInstance(getTimeZone());
                    tcal.set(Calendar.SECOND, 0);
                    tcal.set(Calendar.MINUTE, 0);
                    tcal.set(Calendar.HOUR_OF_DAY, 0);
                    tcal.set(Calendar.DAY_OF_MONTH, day);
                    tcal.set(Calendar.MONTH, mon - 1);
                    tcal.set(Calendar.YEAR, cl.get(Calendar.YEAR));
                    final int ldom = getLastDayOfMonth(mon, cl.get(Calendar.YEAR));
                    final int dow = tcal.get(Calendar.DAY_OF_WEEK);
                    // Shift Saturday/Sunday onto the nearest weekday within the month.
                    if (dow == Calendar.SATURDAY && day == 1) {
                        day += 2;
                    } else if (dow == Calendar.SATURDAY) {
                        day -= 1;
                    } else if (dow == Calendar.SUNDAY && day == ldom) {
                        day -= 2;
                    } else if (dow == Calendar.SUNDAY) {
                        day += 1;
                    }
                    tcal.set(Calendar.SECOND, sec);
                    tcal.set(Calendar.MINUTE, min);
                    tcal.set(Calendar.HOUR_OF_DAY, hr);
                    tcal.set(Calendar.DAY_OF_MONTH, day);
                    tcal.set(Calendar.MONTH, mon - 1);
                    final Date nTime = tcal.getTime();
                    if (nTime.before(afterTime)) {
                        // Candidate already passed; roll into next month.
                        day = 1;
                        mon++;
                    }
                }
            } else if (nearestWeekday) {
                // "nW": nearest weekday to day n.
                t = day;
                day = daysOfMonth.first();
                final Calendar tcal = Calendar.getInstance(getTimeZone());
                tcal.set(Calendar.SECOND, 0);
                tcal.set(Calendar.MINUTE, 0);
                tcal.set(Calendar.HOUR_OF_DAY, 0);
                tcal.set(Calendar.DAY_OF_MONTH, day);
                tcal.set(Calendar.MONTH, mon - 1);
                tcal.set(Calendar.YEAR, cl.get(Calendar.YEAR));
                final int ldom = getLastDayOfMonth(mon, cl.get(Calendar.YEAR));
                final int dow = tcal.get(Calendar.DAY_OF_WEEK);
                if (dow == Calendar.SATURDAY && day == 1) {
                    day += 2;
                } else if (dow == Calendar.SATURDAY) {
                    day -= 1;
                } else if (dow == Calendar.SUNDAY && day == ldom) {
                    day -= 2;
                } else if (dow == Calendar.SUNDAY) {
                    day += 1;
                }
                tcal.set(Calendar.SECOND, sec);
                tcal.set(Calendar.MINUTE, min);
                tcal.set(Calendar.HOUR_OF_DAY, hr);
                tcal.set(Calendar.DAY_OF_MONTH, day);
                tcal.set(Calendar.MONTH, mon - 1);
                final Date nTime = tcal.getTime();
                if (nTime.before(afterTime)) {
                    day = daysOfMonth.first();
                    mon++;
                }
            } else if (st != null && st.size() != 0) {
                t = day;
                day = st.first();
                // make sure we don't over-run a short month, such as february
                final int lastDay = getLastDayOfMonth(mon, cl.get(Calendar.YEAR));
                if (day > lastDay) {
                    day = daysOfMonth.first();
                    mon++;
                }
            } else {
                day = daysOfMonth.first();
                mon++;
            }
            if (day != t || mon != tmon) {
                cl.set(Calendar.SECOND, 0);
                cl.set(Calendar.MINUTE, 0);
                cl.set(Calendar.HOUR_OF_DAY, 0);
                cl.set(Calendar.DAY_OF_MONTH, day);
                cl.set(Calendar.MONTH, mon - 1);
                // '- 1' because calendar is 0-based for this field, and we
                // are 1-based
                continue;
            }
        } else if (dayOfWSpec && !dayOfMSpec) { // get day by day of week rule
            if (lastdayOfWeek) {
                // are we looking for the last XXX day of the month?
                final int dow = daysOfWeek.first(); // desired d-o-w
                final int cDow = cl.get(Calendar.DAY_OF_WEEK); // current d-o-w
                int daysToAdd = 0;
                if (cDow < dow) {
                    daysToAdd = dow - cDow;
                }
                if (cDow > dow) {
                    daysToAdd = dow + (7 - cDow);
                }
                final int lDay = getLastDayOfMonth(mon, cl.get(Calendar.YEAR));
                if (day + daysToAdd > lDay) { // did we already miss the last one?
                    cl.set(Calendar.SECOND, 0);
                    cl.set(Calendar.MINUTE, 0);
                    cl.set(Calendar.HOUR_OF_DAY, 0);
                    cl.set(Calendar.DAY_OF_MONTH, 1);
                    cl.set(Calendar.MONTH, mon); // no '- 1' here because we are promoting the month
                    continue;
                }
                // find date of last occurrence of this day in this month...
                while ((day + daysToAdd + 7) <= lDay) {
                    daysToAdd += 7;
                }
                day += daysToAdd;
                if (daysToAdd > 0) {
                    cl.set(Calendar.SECOND, 0);
                    cl.set(Calendar.MINUTE, 0);
                    cl.set(Calendar.HOUR_OF_DAY, 0);
                    cl.set(Calendar.DAY_OF_MONTH, day);
                    cl.set(Calendar.MONTH, mon - 1); // '- 1' here because we are not promoting the month
                    continue;
                }
            } else if (nthdayOfWeek != 0) {
                // are we looking for the Nth XXX day in the month?
                final int dow = daysOfWeek.first(); // desired d-o-w
                final int cDow = cl.get(Calendar.DAY_OF_WEEK); // current d-o-w
                int daysToAdd = 0;
                if (cDow < dow) {
                    daysToAdd = dow - cDow;
                } else if (cDow > dow) {
                    daysToAdd = dow + (7 - cDow);
                }
                boolean dayShifted = false;
                if (daysToAdd > 0) {
                    dayShifted = true;
                }
                day += daysToAdd;
                int weekOfMonth = day / 7;
                if (day % 7 > 0) {
                    weekOfMonth++;
                }
                daysToAdd = (nthdayOfWeek - weekOfMonth) * 7;
                day += daysToAdd;
                if (daysToAdd < 0 || day > getLastDayOfMonth(mon, cl.get(Calendar.YEAR))) {
                    cl.set(Calendar.SECOND, 0);
                    cl.set(Calendar.MINUTE, 0);
                    cl.set(Calendar.HOUR_OF_DAY, 0);
                    cl.set(Calendar.DAY_OF_MONTH, 1);
                    cl.set(Calendar.MONTH, mon); // no '- 1' here because we are promoting the month
                    continue;
                } else if (daysToAdd > 0 || dayShifted) {
                    cl.set(Calendar.SECOND, 0);
                    cl.set(Calendar.MINUTE, 0);
                    cl.set(Calendar.HOUR_OF_DAY, 0);
                    cl.set(Calendar.DAY_OF_MONTH, day);
                    cl.set(Calendar.MONTH, mon - 1); // '- 1' here because we are NOT promoting the month
                    continue;
                }
            } else {
                // Plain day-of-week list: advance to the next matching d-o-w.
                final int cDow = cl.get(Calendar.DAY_OF_WEEK); // current d-o-w
                int dow = daysOfWeek.first(); // desired d-o-w
                st = daysOfWeek.tailSet(cDow);
                if (st != null && st.size() > 0) {
                    dow = st.first();
                }
                int daysToAdd = 0;
                if (cDow < dow) {
                    daysToAdd = dow - cDow;
                }
                if (cDow > dow) {
                    daysToAdd = dow + (7 - cDow);
                }
                final int lDay = getLastDayOfMonth(mon, cl.get(Calendar.YEAR));
                if (day + daysToAdd > lDay) { // will we pass the end of the month?
                    cl.set(Calendar.SECOND, 0);
                    cl.set(Calendar.MINUTE, 0);
                    cl.set(Calendar.HOUR_OF_DAY, 0);
                    cl.set(Calendar.DAY_OF_MONTH, 1);
                    cl.set(Calendar.MONTH, mon); // no '- 1' here because we are promoting the month
                    continue;
                } else if (daysToAdd > 0) { // are we switching days?
                    cl.set(Calendar.SECOND, 0);
                    cl.set(Calendar.MINUTE, 0);
                    cl.set(Calendar.HOUR_OF_DAY, 0);
                    cl.set(Calendar.DAY_OF_MONTH, day + daysToAdd);
                    cl.set(Calendar.MONTH, mon - 1);
                    // '- 1' because calendar is 0-based for this field,
                    // and we are 1-based
                    continue;
                }
            }
        } else {
            // both day-of-month and day-of-week were specified — unsupported combination
            throw new UnsupportedOperationException(
                "Support for specifying both a day-of-week AND a day-of-month parameter is not implemented.");
        }
        cl.set(Calendar.DAY_OF_MONTH, day);
        mon = cl.get(Calendar.MONTH) + 1;
        // '+ 1' because calendar is 0-based for this field, and we are 1-based
        int year = cl.get(Calendar.YEAR);
        t = -1;
        // test for expressions that never generate a valid fire date,
        // but keep looping...
        if (year > MAX_YEAR) {
            return null;
        }
        // get month...................................................
        st = months.tailSet(mon);
        if (st != null && st.size() != 0) {
            t = mon;
            mon = st.first();
        } else {
            mon = months.first();
            year++;
        }
        if (mon != t) {
            cl.set(Calendar.SECOND, 0);
            cl.set(Calendar.MINUTE, 0);
            cl.set(Calendar.HOUR_OF_DAY, 0);
            cl.set(Calendar.DAY_OF_MONTH, 1);
            cl.set(Calendar.MONTH, mon - 1);
            // '- 1' because calendar is 0-based for this field, and we are 1-based
            cl.set(Calendar.YEAR, year);
            continue;
        }
        cl.set(Calendar.MONTH, mon - 1);
        // '- 1' because calendar is 0-based for this field, and we are 1-based
        year = cl.get(Calendar.YEAR);
        t = -1;
        // get year...................................................
        st = years.tailSet(year);
        if (st != null && st.size() != 0) {
            t = year;
            year = st.first();
        } else {
            return null; // ran out of years...
        }
        if (year != t) {
            cl.set(Calendar.SECOND, 0);
            cl.set(Calendar.MINUTE, 0);
            cl.set(Calendar.HOUR_OF_DAY, 0);
            cl.set(Calendar.DAY_OF_MONTH, 1);
            cl.set(Calendar.MONTH, 0);
            // '- 1' because calendar is 0-based for this field, and we are 1-based
            cl.set(Calendar.YEAR, year);
            continue;
        }
        cl.set(Calendar.YEAR, year);
        gotOne = true;
    } // while( !done )
    return cl.getTime();
}
@Test
void getTimeAfter() throws ParseException {
    // Yearly expression: next Jan 1st, 08:00 must exist.
    CronExpression yearly = new CronExpression("0 0 8 1 JAN ?");
    Date next = yearly.getTimeAfter(new Date());
    System.out.println(next);
    assertThat(next).isNotNull();

    // Every-5-minutes expression: a next fire time must always exist.
    CronExpression everyFiveMinutes = new CronExpression("0 */5 * * * ?");
    next = everyFiveMinutes.getTimeAfter(new Date());
    System.out.println(next);
    assertThat(next).isNotNull();
}
/**
 * Creates a retrying proxy around a metastore handler and initializes it.
 *
 * <p>Retry interval and attempt count come from metastore configuration. The
 * base handler's {@code init} is invoked via {@link #invoke} rather than
 * directly so transient init failures get the same retry treatment.
 *
 * @throws MetaException wrapping any failure from the delegated init
 */
public RetryingHMSHandler(Configuration conf, IHMSHandler baseHandler, boolean local) throws MetaException {
    super(conf, baseHandler, local);
    retryInterval = MetastoreConf.getTimeVar(origConf, ConfVars.HMS_HANDLER_INTERVAL, TimeUnit.MILLISECONDS);
    retryLimit = MetastoreConf.getIntVar(origConf, ConfVars.HMS_HANDLER_ATTEMPTS);
    try {
        //invoking init method of baseHandler this way since it adds the retry logic
        //in case of transient failures in init method
        invoke(baseHandler, baseHandler.getClass().getDeclaredMethod("init", (Class<?>[]) null), null);
    } catch (Throwable e) {
        LOG.error("HMSHandler Fatal error: " + ExceptionUtils.getStackTrace(e));
        // Preserve the original failure as the cause of the thrown MetaException.
        MetaException me = new MetaException(e.getMessage());
        me.initCause(e);
        throw me;
    }
}
/**
 * Verifies retry behavior via listener call counts: each create operation is
 * presumably failed once by AlternateFailurePreListener and then retried, so
 * the listener sees two invocations per successful create (2, then 4) —
 * TODO confirm against the listener's failure pattern.
 */
@Test
public void testRetryingHMSHandler() throws Exception {
    String dbName = "hive4159";
    String tblName = "tmptbl";
    new DatabaseBuilder()
        .setName(dbName)
        .create(msc, conf);
    Assert.assertEquals(2, AlternateFailurePreListener.getCallCount());
    new TableBuilder()
        .setDbName(dbName)
        .setTableName(tblName)
        .addCol("c1", ColumnType.STRING_TYPE_NAME)
        .create(msc, conf);
    Assert.assertEquals(4, AlternateFailurePreListener.getCallCount());
}
/**
 * Builds the weather API query URL for the configured location by delegating
 * to the location-parameterized overload.
 *
 * @return the assembled query URL
 * @throws Exception propagated from the delegated overload
 */
public String getQuery() throws Exception {
    return getQuery(weatherConfiguration.getLocation());
}
/**
 * Verifies that a Station-API configuration with lat/lon, count, mode, language
 * and app id renders the exact expected OpenWeatherMap station/find URL.
 */
@Test
public void testFindStationInCircleQuery() throws Exception {
    WeatherConfiguration weatherConfiguration = new WeatherConfiguration();
    weatherConfiguration.setLat(LATITUDE);
    weatherConfiguration.setLon(LONGITUDE);
    weatherConfiguration.setCnt(25);
    weatherConfiguration.setMode(WeatherMode.JSON);
    weatherConfiguration.setLanguage(WeatherLanguage.nl);
    weatherConfiguration.setAppid(APPID);
    weatherConfiguration.setWeatherApi(WeatherApi.Station);
    WeatherQuery weatherQuery = new WeatherQuery(weatherConfiguration);
    weatherConfiguration.setGeoLocationProvider(geoLocationProvider);
    String query = weatherQuery.getQuery();
    // Exact URL match pins parameter order and formatting.
    assertThat(query, is(
        "http://api.openweathermap.org/data/2.5/station/find?lat=51.98&lon=4.13&lang=nl&cnt=25&APPID=9162755b2efa555823cfe0451d7fff38"));
}
/**
 * Synchronous tail insertion: blocks on the async variant and returns its result.
 *
 * @param e element to append
 * @return whatever {@code offerLastAsync} resolves to
 */
@Override
public boolean offerLast(V e) {
    return get(offerLastAsync(e));
}
@Test
public void testOfferLastOrigin() {
    // Baseline against the JDK deque: offerLast appends in call order,
    // and poll removes from the head.
    Deque<Integer> deque = new ArrayDeque<Integer>();
    for (int value : new int[] {1, 2, 3}) {
        deque.offerLast(value);
    }
    assertThat(deque).containsExactly(1, 2, 3);
    Assertions.assertEquals((Integer) 1, deque.poll());
}
/**
 * Removes the entry for the given key, delegating to the base implementation
 * with a fixed secondary argument of 0 — presumably a default segment/slot
 * selector (TODO confirm against remove0's contract).
 *
 * @param key key to remove
 * @return true if an entry was removed
 */
@Override
public boolean remove(long key) {
    return super.remove0(key, 0);
}
@Test
public void testRemove() {
    final long key = random.nextLong();
    insert(key);
    // First removal succeeds; a second removal of the same key must not.
    assertTrue(hsa.remove(key));
    assertFalse(hsa.remove(key));
}
/**
 * Varargs convenience overload: removes from {@code values} every element that
 * matches any of the given patterns, delegating to the List-based overload.
 *
 * @param values   collection mutated in place
 * @param patterns patterns to match against
 */
public static void removeMatching(Collection<String> values, String... patterns) {
    removeMatching(values, Arrays.asList(patterns));
}
@Test
public void testRemoveMatchingWithNoMatchingPattern() throws Exception {
    // A pattern that matches nothing must leave the collection untouched.
    Collection<String> values = stringToList("A");
    StringCollectionUtil.removeMatching(values, "B");
    assertTrue(values.contains("A"));
}
/**
 * Returns all tag names associated with a configuration identified by
 * (dataId, group, tenant).
 *
 * @return list of tag names; empty when none are associated
 */
@Override
public List<String> selectTagByConfig(String dataId, String group, String tenant) {
    ConfigTagsRelationMapper configTagsRelationMapper = mapperManager.findMapper(
        dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION);
    // SELECT tag_name WHERE data_id = ? AND group_id = ? AND tenant_id = ?
    String sql = configTagsRelationMapper.select(Collections.singletonList("tag_name"),
        Arrays.asList("data_id", "group_id", "tenant_id"));
    return databaseOperate.queryMany(sql, new Object[] {dataId, group, tenant}, String.class);
}
/**
 * Verifies that the service passes (dataId, group, tenant) through to the
 * database layer in that order and returns the query result unchanged.
 */
@Test
void testSelectTagByConfig() {
    String dataId = "dataId4567222";
    String group = "group3456789";
    String tenant = "tenant4567890";
    //mock page list
    List<String> tagStrings = Arrays.asList("", "", "");
    // The eq() on the args array pins the exact parameter binding order.
    when(databaseOperate.queryMany(anyString(), eq(new Object[] {dataId, group, tenant}), eq(String.class))).thenReturn(tagStrings);
    List<String> configTags = embeddedConfigInfoPersistService.selectTagByConfig(dataId, group, tenant);
    assertEquals(tagStrings, configTags);
}
/**
 * Converts a SeaTunnel {@link Column} back into a XuguDB physical type definition.
 *
 * <p>DECIMAL precision/scale and TIME/TIMESTAMP scales are clamped into XuguDB's
 * supported ranges, with a warning logged for each adjustment. Unsupported SQL
 * types raise a connector conversion error.
 *
 * <p>Fix: the TIME-scale warning previously reported {@code MAX_SCALE} as the
 * limit while the value was actually clamped to {@code MAX_TIME_SCALE}; the log
 * now reports the constant that is applied.
 *
 * @param column source column definition
 * @return the XuguDB type definition
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case BOOLEAN:
            builder.columnType(XUGU_BOOLEAN);
            builder.dataType(XUGU_BOOLEAN);
            break;
        case TINYINT:
            builder.columnType(XUGU_TINYINT);
            builder.dataType(XUGU_TINYINT);
            break;
        case SMALLINT:
            builder.columnType(XUGU_SMALLINT);
            builder.dataType(XUGU_SMALLINT);
            break;
        case INT:
            builder.columnType(XUGU_INTEGER);
            builder.dataType(XUGU_INTEGER);
            break;
        case BIGINT:
            builder.columnType(XUGU_BIGINT);
            builder.dataType(XUGU_BIGINT);
            break;
        case FLOAT:
            builder.columnType(XUGU_FLOAT);
            builder.dataType(XUGU_FLOAT);
            break;
        case DOUBLE:
            builder.columnType(XUGU_DOUBLE);
            builder.dataType(XUGU_DOUBLE);
            break;
        case DECIMAL:
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            // Clamp precision and scale into XuguDB's supported numeric range.
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                // Shrink the scale by the amount the precision overflows.
                scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%s,%s)", XUGU_NUMERIC, precision, scale));
            builder.dataType(XUGU_NUMERIC);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case BYTES:
            // Unknown or oversized binary length falls back to BLOB.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(XUGU_BLOB);
                builder.dataType(XUGU_BLOB);
            } else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
                builder.columnType(XUGU_BINARY);
                builder.dataType(XUGU_BINARY);
            } else {
                builder.columnType(XUGU_BLOB);
                builder.dataType(XUGU_BLOB);
            }
            break;
        case STRING:
            // Unknown length gets the maximum VARCHAR; oversized falls back to CLOB.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(String.format("%s(%s)", XUGU_VARCHAR, MAX_VARCHAR_LENGTH));
                builder.dataType(XUGU_VARCHAR);
            } else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", XUGU_VARCHAR, column.getColumnLength()));
                builder.dataType(XUGU_VARCHAR);
            } else {
                builder.columnType(XUGU_CLOB);
                builder.dataType(XUGU_CLOB);
            }
            break;
        case DATE:
            builder.columnType(XUGU_DATE);
            builder.dataType(XUGU_DATE);
            break;
        case TIME:
            builder.dataType(XUGU_TIME);
            if (column.getScale() != null && column.getScale() > 0) {
                Integer timeScale = column.getScale();
                if (timeScale > MAX_TIME_SCALE) {
                    timeScale = MAX_TIME_SCALE;
                    // BUGFIX: report the limit actually applied (MAX_TIME_SCALE),
                    // not MAX_SCALE as before.
                    log.warn(
                            "The time column {} type time({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to time({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIME_SCALE,
                            timeScale);
                }
                builder.columnType(String.format("%s(%s)", XUGU_TIME, timeScale));
                builder.scale(timeScale);
            } else {
                builder.columnType(XUGU_TIME);
            }
            break;
        case TIMESTAMP:
            if (column.getScale() == null || column.getScale() <= 0) {
                builder.columnType(XUGU_TIMESTAMP);
            } else {
                int timestampScale = column.getScale();
                if (column.getScale() > MAX_TIMESTAMP_SCALE) {
                    timestampScale = MAX_TIMESTAMP_SCALE;
                    log.warn(
                            "The timestamp column {} type timestamp({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to timestamp({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIMESTAMP_SCALE,
                            timestampScale);
                }
                builder.columnType(String.format("TIMESTAMP(%s)", timestampScale));
                builder.scale(timestampScale);
            }
            builder.dataType(XUGU_TIMESTAMP);
            break;
        default:
            throw CommonError.convertToConnectorTypeError(
                    DatabaseIdentifier.XUGU,
                    column.getDataType().getSqlType().name(),
                    column.getName());
    }
    return builder.build();
}
@Test
public void testReconvertBytes() {
    // Null length: bytes must map to BLOB.
    Column nullLength = PhysicalColumn.builder()
            .name("test")
            .dataType(PrimitiveByteArrayType.INSTANCE)
            .columnLength(null)
            .build();
    BasicTypeDefine blobDefine = XuguTypeConverter.INSTANCE.reconvert(nullLength);
    Assertions.assertEquals(nullLength.getName(), blobDefine.getName());
    Assertions.assertEquals(XuguTypeConverter.XUGU_BLOB, blobDefine.getColumnType());
    Assertions.assertEquals(XuguTypeConverter.XUGU_BLOB, blobDefine.getDataType());

    // At the binary-length limit: bytes must map to BINARY.
    Column maxBinary = PhysicalColumn.builder()
            .name("test")
            .dataType(PrimitiveByteArrayType.INSTANCE)
            .columnLength(MAX_BINARY_LENGTH)
            .build();
    BasicTypeDefine binaryDefine = XuguTypeConverter.INSTANCE.reconvert(maxBinary);
    Assertions.assertEquals(maxBinary.getName(), binaryDefine.getName());
    Assertions.assertEquals(XuguTypeConverter.XUGU_BINARY, binaryDefine.getColumnType());
    Assertions.assertEquals(XuguTypeConverter.XUGU_BINARY, binaryDefine.getDataType());
}
/**
 * Re-binds the given LDAP connection using a simple bind with the supplied DN and
 * decrypted password.
 *
 * @param connection the already-established LDAP connection to re-bind
 * @param bindDn     the DN to bind as; must be non-empty
 * @param password   the encrypted password; must be set and non-null
 * @return true when the bind succeeded and the connection's last bind request matches
 *         the one we issued; false when the server rejected the credentials
 * @throws IllegalArgumentException if the DN or password preconditions fail
 * @throws LDAPException            on non-bind LDAP errors
 */
public boolean authenticate(LDAPConnection connection, String bindDn, EncryptedValue password) throws LDAPException {
    checkArgument(!isNullOrEmpty(bindDn), "Binding with empty principal is forbidden.");
    checkArgument(password != null, "Binding with null credentials is forbidden.");
    checkArgument(password.isSet(), "Binding with empty credentials is forbidden.");
    final SimpleBindRequest bindRequest = new SimpleBindRequest(bindDn, encryptedValueService.decrypt(password));
    LOG.trace("Re-binding with DN <{}> using password", bindDn);
    try {
        final BindResult bind = connection.bind(bindRequest);
        if (!bind.getResultCode().equals(ResultCode.SUCCESS)) {
            // Non-success result codes that did not raise LDAPBindException are treated as
            // unexpected and surfaced as a runtime failure rather than "false".
            LOG.trace("Re-binding DN <{}> failed", bindDn);
            throw new RuntimeException(bind.toString());
        }
        // Sanity check: the connection's recorded last bind must be the one we just sent.
        final boolean authenticated = connection.getLastBindRequest().equals(bindRequest);
        LOG.trace("Binding DN <{}> did not throw, connection authenticated: {}", bindDn, authenticated);
        return authenticated;
    } catch (LDAPBindException e) {
        // Invalid credentials: report as a failed authentication, not an error.
        LOG.trace("Re-binding DN <{}> failed", bindDn);
        return false;
    }
}
// A null bind DN must be rejected up front with IllegalArgumentException
// (Preconditions.checkArgument), before any LDAP traffic happens.
@Test
public void authenticateThrowsIllegalArgumentExceptionIfPrincipalIsNull() throws LDAPException {
    assertThatThrownBy(() -> connector.authenticate(connection, null, encryptedValueService.encrypt("secret")))
            .hasMessageContaining("Binding with empty principal is forbidden.")
            .isInstanceOf(IllegalArgumentException.class);
}
/**
 * Loads every content pack installation whose content-pack id matches the given model id.
 * The MongoJack cursor is closed by try-with-resources; results are snapshotted into an
 * immutable set before the cursor goes away.
 */
public Set<ContentPackInstallation> findByContentPackId(ModelId id) {
    final DBQuery.Query query = DBQuery.is(ContentPackInstallation.FIELD_CONTENT_PACK_ID, id);
    try (final DBCursor<ContentPackInstallation> installations = dbCollection.find(query)) {
        // DBCursor is both a cursor and an Iterator; the cast selects the Iterator overload.
        return ImmutableSet.copyOf((Iterator<ContentPackInstallation>) installations);
    }
}
// Against the fixture data, looking up one content pack id must return exactly the two
// installations of that pack, and nothing else.
@Test
@MongoDBFixtures("ContentPackInstallationPersistenceServiceTest.json")
public void findByContentPackId() {
    final ModelId id = ModelId.of("4e3d7025-881e-6870-da03-cafebabe0001");
    final Set<ContentPackInstallation> contentPacks = persistenceService.findByContentPackId(id);

    assertThat(contentPacks)
            .hasSize(2)
            .allSatisfy(contentPackInstallation -> assertThat(contentPackInstallation.contentPackId()).isEqualTo(id))
            .anySatisfy(contentPackInstallation -> assertThat(contentPackInstallation.id()).isEqualTo(new ObjectId("5b4c935b4b900a0000000001")))
            .anySatisfy(contentPackInstallation -> assertThat(contentPackInstallation.id()).isEqualTo(new ObjectId("5b4c935b4b900a0000000002")));
}
/**
 * Value equality over topic, expression type and expression. Requires an exact
 * runtime class match, so subclasses never compare equal to this type.
 */
@Override
public boolean equals(Object o) {
    // Identity short-circuit.
    if (o == this) {
        return true;
    }
    // Null or different concrete class can never be equal.
    if (!(o != null && getClass() == o.getClass())) {
        return false;
    }
    final SimpleSubscriptionData other = (SimpleSubscriptionData) o;
    return Objects.equals(topic, other.topic)
            && Objects.equals(expressionType, other.expressionType)
            && Objects.equals(expression, other.expression);
}
// Two subscriptions with the same topic, type and (equal-valued) expression strings
// must be equal; the trailing numeric constructor argument does not take part in equals().
@Test
public void testEqual() {
    String topic = "test-topic";
    String expressionType = "TAG";
    String expression1 = "test-expression-1";
    String expression2 = "test-expression-1";
    SimpleSubscriptionData simpleSubscriptionData1 = new SimpleSubscriptionData(topic, expressionType, expression1, 1);
    SimpleSubscriptionData simpleSubscriptionData2 = new SimpleSubscriptionData(topic, expressionType, expression2, 1);
    assertThat(simpleSubscriptionData1.equals(simpleSubscriptionData2)).isTrue();
}
@Override public void execute(String mapName, Predicate predicate, Collection<Integer> partitions, Result result) { RetryableHazelcastException storedException = null; for (Integer partitionId : partitions) { try { partitionScanRunner.run(mapName, predicate, partitionId, result); } catch (RetryableHazelcastException e) { // RetryableHazelcastException are stored and re-thrown later. this is to ensure all partitions // are touched as when the parallel execution was used. // see discussion at https://github.com/hazelcast/hazelcast/pull/5049#discussion_r28773099 for details. if (storedException == null) { storedException = e; } } } if (storedException != null) { throw storedException; } }
// When the runner throws RetryableHazelcastException for the scanned partitions,
// the executor must surface that exception to the caller (after touching all partitions).
@Test
public void execute_fail_retryable() {
    PartitionScanRunner runner = mock(PartitionScanRunner.class);
    CallerRunsPartitionScanExecutor executor = new CallerRunsPartitionScanExecutor(runner);
    Predicate<Object, Object> predicate = Predicates.equal("attribute", 1);
    QueryResult queryResult = new QueryResult(IterationType.ENTRY, null, null, Long.MAX_VALUE, false);
    doThrow(new RetryableHazelcastException()).when(runner).run(anyString(), eq(predicate), anyInt(), eq(queryResult));
    List<Integer> list = asList(1, 2, 3);
    assertThatThrownBy(() -> executor.execute("Map", predicate, list, queryResult))
            .isInstanceOf(RetryableHazelcastException.class);
}
/**
 * Creates a while-loop template node from a condition and a body.
 * NOTE(review): the body is unconditionally cast to USimpleStatement, so passing a
 * non-simple statement would fail with a ClassCastException here — presumably all
 * callers supply simple statements or blocks; confirm against call sites.
 */
public static UWhileLoop create(UExpression condition, UStatement body) {
    return new AutoValue_UWhileLoop(condition, (USimpleStatement) body);
}
// Equality contract for UWhileLoop: two structurally identical while-loops built over
// "itr" belong to one equality group, the same shape built over "elements" to another.
@Test
public void equality() {
    new EqualsTester()
        .addEqualityGroup(
            UWhileLoop.create(
                UParens.create(
                    UMethodInvocation.create(
                        UMemberSelect.create(
                            UFreeIdent.create("itr"),
                            "hasNext",
                            UMethodType.create(UPrimitiveType.BOOLEAN)))),
                UBlock.create(
                    UExpressionStatement.create(
                        UMethodInvocation.create(
                            UMemberSelect.create(
                                UFreeIdent.create("itr"),
                                "next",
                                UMethodType.create(UTypeVar.create("E"))))))))
        .addEqualityGroup(
            UWhileLoop.create(
                UParens.create(
                    UMethodInvocation.create(
                        UMemberSelect.create(
                            UFreeIdent.create("elements"),
                            "hasNext",
                            UMethodType.create(UPrimitiveType.BOOLEAN)))),
                UBlock.create(
                    UExpressionStatement.create(
                        UMethodInvocation.create(
                            UMemberSelect.create(
                                UFreeIdent.create("elements"),
                                "next",
                                UMethodType.create(UTypeVar.create("E"))))))))
        .testEquals();
}
public FEELFnResult<TemporalAccessor> invoke(@ParameterName("from") String val) { if ( val == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } try { TemporalAccessor parsed = FEEL_TIME.parse(val); if (parsed.query(TemporalQueries.offset()) != null) { // it is an offset-zoned time, so I can know for certain an OffsetTime OffsetTime asOffSetTime = parsed.query(OffsetTime::from); return FEELFnResult.ofResult(asOffSetTime); } else if (parsed.query(TemporalQueries.zone()) == null) { // if it does not contain any zone information at all, then I know for certain is a local time. LocalTime asLocalTime = parsed.query(LocalTime::from); return FEELFnResult.ofResult(asLocalTime); } else if (parsed.query(TemporalQueries.zone()) != null) { boolean hasSeconds = timeStringWithSeconds(val); LocalTime asLocalTime = parsed.query(LocalTime::from); ZoneId zoneId = parsed.query(TemporalQueries.zone()); ZoneTime zoneTime = ZoneTime.of(asLocalTime, zoneId, hasSeconds); return FEELFnResult.ofResult(zoneTime); } return FEELFnResult.ofResult(parsed); } catch (DateTimeException e) { return manageDateTimeException(e, val); } }
// A time literal with an IANA region zone ("@Europe/Paris") must keep both the
// local time and the region-based ZoneId queryable on the result.
@Test
void parseWithZoneIANA() {
    final TemporalAccessor parsedResult = timeFunction.invoke("00:01:00@Europe/Paris").getOrElse(null);
    assertThat(parsedResult.query(TemporalQueries.localTime())).isEqualTo(LocalTime.of(0, 1, 0));
    assertThat(parsedResult.query(TemporalQueries.zone())).isEqualTo(ZoneId.of("Europe/Paris"));
}
/**
 * Resolves secret placeholders in the scalar configuration value, then converts the
 * resulting string to the target type via Stapler's registered converter.
 */
@NonNull
@Override
public Object configure(CNode config, ConfigurationContext context) throws ConfiguratorException {
    return Stapler.lookupConverter(target)
            .convert(
                    target,
                    context.getSecretSourceResolver()
                            .resolve(config.asScalar().toString()));
}
// Enum values are configurable from scalars because Jenkins registers a
// StaplerConverter for them; "NORMAL" must resolve to Node.Mode.NORMAL.
@Test
public void _enum() throws Exception {
    // Jenkins do register a StaplerConverter for it.
    Configurator<Node.Mode> c = registry.lookupOrFail(Node.Mode.class);
    final Node.Mode value = c.configure(new Scalar("NORMAL"), context);
    assertEquals(Node.Mode.NORMAL, value);
}
/**
 * Draw hook of this ball item; the demo implementation only logs the call so tests
 * can observe that the twin delegated to it.
 */
@Override
public void doDraw() {
    LOGGER.info("doDraw");
}
// draw() must log both its own "draw" line and the delegated "doDraw" line (exactly
// two log entries) without invoking anything on the twin thread itself.
@Test
void testDoDraw() {
    final var ballItem = new BallItem();
    final var ballThread = mock(BallThread.class);
    ballItem.setTwin(ballThread);
    ballItem.draw();
    assertTrue(appender.logContains("draw"));
    assertTrue(appender.logContains("doDraw"));
    verifyNoMoreInteractions(ballThread);
    assertEquals(2, appender.getLogSize());
}
/**
 * Deletes a pipeline and detaches it from every stream connection that references it.
 * A connection left with no pipelines is removed outright; otherwise the pruned
 * connection set is saved back. The pipeline itself is deleted last.
 */
@Override
public void delete(PipelineDao nativeEntity) {
    final Set<PipelineConnections> pipelineConnections = connectionsService.loadByPipelineId(nativeEntity.id());
    for (PipelineConnections connections : pipelineConnections) {
        // Drop this pipeline's id from the connection's pipeline set.
        final Set<String> pipelineIds = connections.pipelineIds().stream()
                .filter(pipelineId -> !pipelineId.equals(nativeEntity.id()))
                .collect(Collectors.toSet());
        if (pipelineIds.isEmpty()) {
            LOG.trace("Removing pipeline connections for stream {}", connections.streamId());
            connectionsService.delete(connections.streamId());
        } else {
            final PipelineConnections newConnections = connections.toBuilder()
                    .pipelineIds(pipelineIds)
                    .build();
            LOG.trace("Saving updated pipeline connections: {}", newConnections);
            connectionsService.save(newConnections);
        }
    }
    pipelineService.delete(nativeEntity.id());
}
// Deleting the facade's pipeline must remove it from the service: loadAll becomes
// empty and loading the old id raises NotFoundException.
@Test
@MongoDBFixtures("PipelineFacadeTest/pipelines.json")
public void delete() throws NotFoundException {
    final PipelineDao pipelineDao = pipelineService.load("5a85c4854b900afd5d662be3");

    assertThat(pipelineService.loadAll()).hasSize(1);
    facade.delete(pipelineDao);

    assertThat(pipelineService.loadAll()).isEmpty();
    assertThatThrownBy(() -> pipelineService.load("5a85c4854b900afd5d662be3"))
            .isInstanceOf(NotFoundException.class);
}
/**
 * Reports whether any remote OAP instance advertises a host address from the
 * cluster-mode blacklist (ILLEGAL_NODE_ADDRESS_IN_CLUSTER_MODE).
 *
 * @param remoteInstances cluster members to inspect; null/empty means "no illegal address"
 * @return true when at least one instance's host is blacklisted
 */
public static boolean hasIllegalNodeAddress(List<RemoteInstance> remoteInstances) {
    if (CollectionUtils.isEmpty(remoteInstances)) {
        return false;
    }
    // True as soon as one host matches the blacklist; equivalent to a non-empty
    // intersection of the two host sets.
    return remoteInstances.stream()
            .map(instance -> instance.getAddress().getHost())
            .anyMatch(ILLEGAL_NODE_ADDRESS_IN_CLUSTER_MODE::contains);
}
// An empty instance list can never contain an illegal address.
@Test
public void hasIllegalNodeAddressWithEmptySet() {
    boolean flag = OAPNodeChecker.hasIllegalNodeAddress(Lists.newArrayList());
    Assertions.assertFalse(flag);
}
/**
 * Records the wall-clock time (epoch millis) when the directory scanner last finished.
 */
@Override
public void setLastDirScannerFinishTime(long time) {
    this.lastDirScannerFinishTime = time;
}
// The scanner-finish timestamp starts at 0 and reflects the value written by the setter.
@Test
public void testSetLastDirScannerFinishTime() throws IOException {
    assertEquals(dataset.getLastDirScannerFinishTime(), 0L);
    dataset.setLastDirScannerFinishTime(System.currentTimeMillis());
    assertNotEquals(0L, dataset.getLastDirScannerFinishTime());
}
protected void squashDuplicateRoutes() { Map<String, String> squashMap = new TreeMap<>(); // create a map using the 'from' URI as the key to eliminate duplicates routeMap.forEach((key, route) -> { Map<String, Object> from = (Map<String, Object>) route.getComponentsMap().get(FROM); String uri = from.get(URI).toString(); squashMap.put(uri, key); }); // use the de-duplicated URIs to create a route map with unique routes, regardless of the ID Map<String, Route> squashedRouteMap = new TreeMap<>(); squashMap.forEach((key, value) -> squashedRouteMap.put(value, routeMap.get(value))); routeMap.clear(); routeMap.putAll(squashedRouteMap); }
// Two routes sharing the same 'from' URI (TestUtil.route() produces identical routes)
// must be squashed into a single entry in the internal routeMap.
@Test
public void testSquashDuplicateRoutes() throws IllegalAccessException {
    @SuppressWarnings("unchecked")
    Map<String, Route> result = (Map<String, Route>) FieldUtils.readDeclaredField(processor, "routeMap", true);
    result.clear();
    result.put("route1", TestUtil.route());
    result.put("route2", TestUtil.route());
    assertAll(
            () -> assertNotNull(result),
            () -> assertEquals(2, result.size()));
    processor.squashDuplicateRoutes();
    assertAll(
            () -> assertNotNull(result),
            () -> assertEquals(1, result.size()));
}
/**
 * Validates that running this plan would keep the persistent queries' combined cache
 * usage within the global KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING limit.
 * Note: the limit is read via getConfig(false), i.e. WITHOUT session overrides —
 * per-request overrides of the global cap are deliberately ignored.
 */
@Override
public void validateQuery(
    final SessionConfig config,
    final ExecutionPlan executionPlan,
    final Collection<QueryMetadata> runningQueries
) {
    validateCacheBytesUsage(
        // Only persistent queries count toward the cache budget.
        runningQueries.stream()
            .filter(q -> q instanceof PersistentQueryMetadata)
            .collect(Collectors.toList()),
        config,
        config.getConfig(false)
            .getLong(KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING)
    );
}
// The override attempting to raise the global cap to 500 must be ignored; the
// server-side limit of 30 applies and validation fails.
@Test
public void shouldIgnoreGlobalLimitSetInOverrides() {
    // Given:
    final SessionConfig config = SessionConfig.of(
        new KsqlConfig(ImmutableMap.of(KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING, 30)),
        ImmutableMap.of(
            StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 50,
            KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING, 500
        )
    );

    // When/Then:
    assertThrows(
        KsqlException.class,
        () -> queryValidator.validateQuery(config, plan, queries)
    );
}
/**
 * Returns the aggregation value of the given row as text. Aggregation result sets
 * expose exactly one synthetic column, so any column index other than 0 is rejected.
 *
 * @throws IllegalArgumentException when columnIndex != 0
 */
@Override
public String getString(int rowIndex, int columnIndex) {
    if (columnIndex == 0) {
        return _groupByResults.get(rowIndex).get("value").asText();
    }
    throw new IllegalArgumentException("Column index must always be 0 for aggregation result sets");
}
// Any column index other than 0 must raise IllegalArgumentException.
@Test(expectedExceptions = IllegalArgumentException.class)
public void testGetStringExceptionState() {
    // Run the test
    final String result = _groupByResultSetUnderTest.getString(0, 1);
}
/**
 * The server-side assignor name configured for this member, or empty when the
 * broker default should be used.
 */
public Optional<String> serverAssignor() {
    return this.serverAssignor;
}
// serverAssignor() is empty unless a name was passed at construction time.
@Test
public void testMembershipManagerServerAssignor() {
    ConsumerMembershipManager membershipManager = createMembershipManagerJoiningGroup();
    assertEquals(Optional.empty(), membershipManager.serverAssignor());

    membershipManager = createMembershipManagerJoiningGroup("instance1", "Uniform");
    assertEquals(Optional.of("Uniform"), membershipManager.serverAssignor());
}
/**
 * Rewrites Sort-over-SortExchange by pushing a copy of the sort (with a combined
 * offset+fetch limit) below the exchange, so each sender pre-sorts/pre-limits its
 * partition. Bails out when the input already satisfies the collation and limit,
 * or when there is a collation but no (small enough) fetch worth pushing.
 */
@Override
public void onMatch(RelOptRuleCall call) {
    final Sort sort = call.rel(0);
    final SortExchange exchange = call.rel(1);
    final RelMetadataQuery metadataQuery = call.getMetadataQuery();

    if (RelMdUtil.checkInputForCollationAndLimit(
        metadataQuery,
        exchange.getInput(),
        sort.getCollation(),
        sort.offset,
        sort.fetch)) {
        // Don't rewrite anything if the input is already sorted AND the
        // input node would already return fewer than sort.offset + sort.fetch
        // rows (e.g. there is already an inner limit applied)
        return;
    }

    RelCollation collation = sort.getCollation();
    Preconditions.checkArgument(
        collation.equals(exchange.getCollation()),
        "Expected collation on exchange and sort to be the same"
    );

    // The pushed-down sort must produce offset + fetch rows so the outer sort can
    // still apply the offset; combine the two into a single fetch literal.
    final RexNode fetch;
    if (sort.fetch == null) {
        fetch = null;
    } else if (sort.offset == null) {
        fetch = sort.fetch;
    } else {
        int total = RexExpressionUtils.getValueAsInt(sort.fetch) + RexExpressionUtils.getValueAsInt(sort.offset);
        fetch = REX_BUILDER.makeLiteral(total, TYPE_FACTORY.createSqlType(SqlTypeName.INTEGER));
    }

    // do not transform sort-exchange copy when there's no fetch limit, or fetch amount is larger than threshold
    if (!collation.getFieldCollations().isEmpty()
        && (fetch == null || RexExpressionUtils.getValueAsInt(fetch) > DEFAULT_SORT_EXCHANGE_COPY_THRESHOLD)) {
        return;
    }

    // Inner sort below the exchange: no offset, combined fetch.
    final RelNode newExchangeInput = sort.copy(sort.getTraitSet(), exchange.getInput(), collation, null, fetch);
    final RelNode exchangeCopy = exchange.copy(exchange.getTraitSet(), newExchangeInput, exchange.getDistribution());
    // Outer sort keeps the original offset/fetch (offset normalized to REX_ZERO).
    final RelNode sortCopy = sort.copy(sort.getTraitSet(), exchangeCopy, collation,
        sort.offset == null ? REX_ZERO : sort.offset,
        sort.fetch);

    call.transformTo(sortCopy);
}
// With an empty collation and fetch=2/offset=1, the rule must still fire and push an
// inner sort below the exchange whose fetch is the combined value 3 (2 + 1), with no
// inner offset and no sort keys.
@Test
public void shouldMatchNoSortAndPushDownLimitPlusOffset() {
    // Given:
    SortExchange exchange = PinotLogicalSortExchange.create(
        _input, RelDistributions.SINGLETON, RelCollations.EMPTY, false, true);
    Sort sort = LogicalSort.create(exchange, RelCollations.EMPTY, literal(2), literal(1));
    Mockito.when(_call.rel(0)).thenReturn(sort);
    Mockito.when(_call.rel(1)).thenReturn(exchange);

    // When:
    PinotSortExchangeCopyRule.SORT_EXCHANGE_COPY.onMatch(_call);

    // Then:
    ArgumentCaptor<RelNode> sortCopyCapture = ArgumentCaptor.forClass(LogicalSort.class);
    Mockito.verify(_call, Mockito.times(1)).transformTo(sortCopyCapture.capture());

    RelNode sortCopy = sortCopyCapture.getValue();
    Assert.assertTrue(sortCopy instanceof LogicalSort);
    Assert.assertTrue(((LogicalSort) sortCopy).getInput() instanceof PinotLogicalSortExchange);
    Assert.assertTrue(((LogicalSort) sortCopy).getInput().getInput(0) instanceof LogicalSort);

    LogicalSort innerSort = (LogicalSort) ((LogicalSort) sortCopy).getInput().getInput(0);
    Assert.assertEquals(innerSort.getCollation().getKeys().size(), 0);
    Assert.assertNull((innerSort).offset);
    Assert.assertEquals((innerSort).fetch, literal(3));
}
/**
 * Upload filter step: when the target file already exists, rename it out of the way
 * (using the configured rename format with a timestamp) before the upload proceeds,
 * then clear the exists flags so the upload is treated as a fresh write.
 */
@Override
public void apply(final Path file, final Local local, final TransferStatus status, final ProgressListener listener) throws BackgroundException {
    // Rename existing file before putting new file in place
    if(status.isExists()) {
        Path rename;
        // Probe candidate names until one does not exist remotely.
        do {
            final String proposal = MessageFormat.format(PreferencesFactory.get().getProperty("queue.upload.file.rename.format"),
                FilenameUtils.getBaseName(file.getName()),
                // ':' and path delimiters are not safe in filenames on all backends.
                UserDateFormatterFactory.get().getMediumFormat(System.currentTimeMillis(), false).replace(Path.DELIMITER, '-').replace(':', '-'),
                StringUtils.isNotBlank(file.getExtension()) ? String.format(".%s", file.getExtension()) : StringUtils.EMPTY);
            rename = new Path(file.getParent(), proposal, file.getType());
        }
        while(find.find(rename));
        if(log.isInfoEnabled()) {
            log.info(String.format("Rename existing file %s to %s", file, rename));
        }
        move.move(file, rename, new TransferStatus().exists(false), new Delete.DisabledCallback(), new DisabledConnectionCallback());
        if(log.isDebugEnabled()) {
            log.debug(String.format("Clear exist flag for file %s", file));
        }
        // The original target no longer exists after the rename.
        status.exists(false).getDisplayname().exists(false);
    }
    super.apply(file, local, status, listener);
}
// End-to-end filter test with a stubbed session: an existing remote file must be
// (1) renamed out of the way, then (2) the temporary upload name moved into place —
// exactly two Move invocations, in that order, and no Write calls.
@Test
public void testFileUploadWithTemporaryFilename() throws Exception {
    final Path file = new Path("/t", EnumSet.of(Path.Type.file));
    final AtomicBoolean found = new AtomicBoolean();
    final AtomicInteger moved = new AtomicInteger();
    // Find stub: only the original target "exists" remotely.
    final Find find = new Find() {
        @Override
        public boolean find(final Path f, final ListProgressListener listener) {
            if(f.equals(file)) {
                found.set(true);
                return true;
            }
            return false;
        }
    };
    final AttributesFinder attributes = new AttributesFinder() {
        @Override
        public PathAttributes find(final Path file, final ListProgressListener listener) {
            return new PathAttributes();
        }
    };
    final Host host = new Host(new TestProtocol());
    final NullSession session = new NullSession(host) {
        @Override
        @SuppressWarnings("unchecked")
        public <T> T _getFeature(final Class<T> type) {
            if(type.equals(Move.class)) {
                return (T) new Move() {
                    @Override
                    public Path move(final Path source, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) {
                        if(moved.incrementAndGet() == 1) {
                            // Rename existing target file
                            assertEquals(file, source);
                        }
                        else if(moved.get() == 2) {
                            // Move temporary renamed file in place
                            assertEquals(file, renamed);
                        }
                        else {
                            fail();
                        }
                        return renamed;
                    }

                    @Override
                    public boolean isRecursive(final Path source, final Path target) {
                        return true;
                    }
                };
            }
            if(type.equals(Write.class)) {
                // The filter must never open a write stream itself.
                return (T) new Write<Void>() {
                    @Override
                    public StatusOutputStream write(final Path file, final TransferStatus status, final ConnectionCallback callback) {
                        fail();
                        return null;
                    }
                };
            }
            return null;
        }
    };
    final UploadFilterOptions options = new UploadFilterOptions(host).withTemporary(true);
    final RenameExistingFilter f = new RenameExistingFilter(new DisabledUploadSymlinkResolver(), session, options);
    f.withFinder(find).withAttributes(attributes);
    assertTrue(options.temporary);
    final TransferStatus status = f.prepare(file, new NullLocal("t"), new TransferStatus().exists(true), new DisabledProgressListener());
    f.apply(file, new NullLocal("t"), status, new DisabledProgressListener());
    // After apply, the exists flags are cleared and a temporary rename is recorded.
    assertFalse(status.isExists());
    assertFalse(status.getDisplayname().exists);
    assertNotNull(status.getRename());
    assertNotNull(status.getRename().remote);
    assertEquals(file, status.getDisplayname().remote);
    assertNull(status.getRename().local);
    f.apply(file, new NullLocal("t"), status, new DisabledProgressListener());
    // Complete
    status.setComplete();
    f.complete(file, new NullLocal("t"), status, new DisabledProgressListener());
    assertTrue(found.get());
    assertEquals(2, moved.get());
}
/**
 * Parses a URL-encoded search query string ("key:value" pairs plus bare terms) into a
 * SearchQuery. Bare terms are assigned to the default field; keys are translated
 * through dbFieldMapping, keys not in the mapping are recorded as disallowed; a
 * leading '-' on a key negates the value. "*" or an empty string yields a match-all
 * query.
 */
public SearchQuery parse(String encodedQueryString) {
    if (Strings.isNullOrEmpty(encodedQueryString) || "*".equals(encodedQueryString)) {
        return new SearchQuery(encodedQueryString);
    }
    final var queryString = URLDecoder.decode(encodedQueryString, StandardCharsets.UTF_8);

    final Matcher matcher = querySplitterMatcher(requireNonNull(queryString).trim());
    final ImmutableMultimap.Builder<String, FieldValue> builder = ImmutableMultimap.builder();
    final ImmutableSet.Builder<String> disallowedKeys = ImmutableSet.builder();

    while (matcher.find()) {
        final String entry = matcher.group();

        // Bare term with no "key:" prefix -> default field.
        if (!entry.contains(":")) {
            builder.put(withPrefixIfNeeded(defaultField), createFieldValue(defaultFieldKey.getFieldType(), entry, false));
            continue;
        }

        final Iterator<String> entryFields = FIELD_VALUE_SPLITTER.splitToList(entry).iterator();

        checkArgument(entryFields.hasNext(), INVALID_ENTRY_MESSAGE, entry);
        final String key = entryFields.next();

        // Skip if there are no valid k/v pairs. (i.e. "action:")
        if (!entryFields.hasNext()) {
            continue;
        }

        // "-key:value" negates the match for this key.
        final boolean negate = key.startsWith("-");
        final String cleanKey = key.replaceFirst("^-", "");

        final String value = entryFields.next();
        // Values may be comma-separated; each one becomes its own field value.
        VALUE_SPLITTER.splitToList(value).forEach(v -> {
            if (!dbFieldMapping.containsKey(cleanKey)) {
                disallowedKeys.add(cleanKey);
            }
            final SearchQueryField translatedKey = dbFieldMapping.get(cleanKey);
            if (translatedKey != null) {
                builder.put(withPrefixIfNeeded(translatedKey.getDbField()), createFieldValue(translatedKey.getFieldType(), v, negate));
            } else {
                // Unknown keys fall back to the default field (still recorded as disallowed above).
                builder.put(withPrefixIfNeeded(defaultField), createFieldValue(defaultFieldKey.getFieldType(), v, negate));
            }
        });

        checkArgument(!entryFields.hasNext(), INVALID_ENTRY_MESSAGE, entry);
    }

    return new SearchQuery(queryString, builder.build(), disallowedKeys.build());
}
// A null field prefix must leave parsing untouched: bare term goes to the default
// field "name", "breed:terrier" maps through, and no keys are disallowed.
@Test
void nullFieldPrefixDoesNotChangeDefaultBehavior() {
    final SearchQueryParser parser = new SearchQueryParser("name", ImmutableSet.of("name", "breed"), null);
    final SearchQuery searchQuery = parser.parse("Bobby breed:terrier");
    final Multimap<String, SearchQueryParser.FieldValue> queryMap = searchQuery.getQueryMap();
    assertThat(queryMap.keySet().size()).isEqualTo(2);
    assertThat(queryMap.keySet()).containsOnly("name", "breed");
    assertThat(queryMap.get("name")).containsOnly(new SearchQueryParser.FieldValue("Bobby", false));
    assertThat(queryMap.get("breed")).containsOnly(new SearchQueryParser.FieldValue("terrier", false));
    assertThat(searchQuery.hasDisallowedKeys()).isFalse();
}
/**
 * Applies the raw issue's clean-code attribute, recording a field change when it
 * differs from the previous value.
 *
 * @return true when the issue was updated, false when the attribute is unchanged
 */
public boolean setCleanCodeAttribute(DefaultIssue raw, @Nullable CleanCodeAttribute previousCleanCodeAttribute, IssueChangeContext changeContext) {
    // The raw issue must always carry a clean-code attribute at this point.
    CleanCodeAttribute current = requireNonNull(raw.getCleanCodeAttribute());
    if (Objects.equals(previousCleanCodeAttribute, current)) {
        return false;
    }
    // Record the diff, apply the new attribute and mark the issue as changed.
    raw.setFieldChange(changeContext, CLEAN_CODE_ATTRIBUTE, previousCleanCodeAttribute, current.name());
    raw.setCleanCodeAttribute(current);
    raw.setUpdateDate(changeContext.date());
    raw.setChanged(true);
    return true;
}
// An unchanged attribute must not mark the issue as updated, and the attribute stays as-is.
@Test
void setCleanCodeAttribute_whenCleanCodeAttributeNotChanged_shouldNotUpdateIssue() {
    issue.setCleanCodeAttribute(CleanCodeAttribute.CLEAR);
    boolean updated = underTest.setCleanCodeAttribute(issue, CleanCodeAttribute.CLEAR, context);
    assertThat(updated).isFalse();
    assertThat(issue.getCleanCodeAttribute()).isEqualTo(CleanCodeAttribute.CLEAR);
}
/**
 * Forwards a priority/dependency update to the byte distributor. Argument validity is
 * only asserted (enabled with -ea), because callers such as Http2FrameReader are
 * expected to have validated already.
 */
@Override
public void updateDependencyTree(int childStreamId, int parentStreamId, short weight, boolean exclusive) {
    // It is assumed there are all validated at a higher level. For example in the Http2FrameReader.
    assert weight >= MIN_WEIGHT && weight <= MAX_WEIGHT : "Invalid weight";
    assert childStreamId != parentStreamId : "A stream cannot depend on itself";
    assert childStreamId > 0 && parentStreamId >= 0 : "childStreamId must be > 0. parentStreamId must be >= 0.";

    streamByteDistributor.updateDependencyTree(childStreamId, parentStreamId, weight, exclusive);
}
// A negative parent stream id violates the assertion (requires tests to run with -ea).
@Test
public void invalidParentStreamIdThrows() {
    assertThrows(AssertionError.class, new Executable() {
        @Override
        public void execute() throws Throwable {
            controller.updateDependencyTree(STREAM_D, -1, DEFAULT_PRIORITY_WEIGHT, true);
        }
    });
}
/**
 * Compiles the parsed pattern node list into a linked chain of Converters, returning the
 * head of the chain. Literals become LiteralConverters; composite keywords recurse into a
 * child Compiler; simple keywords resolve through the converter map, falling back to a
 * "%PARSER_ERROR[...]" literal when no converter is found.
 */
Converter<E> compile() {
    head = tail = null;
    for (Node n = top; n != null; n = n.next) {
        switch (n.type) {
        case Node.LITERAL:
            addToList(new LiteralConverter<E>((String) n.getValue()));
            break;
        case Node.COMPOSITE_KEYWORD:
            CompositeNode cn = (CompositeNode) n;
            CompositeConverter<E> compositeConverter = createCompositeConverter(cn);
            if (compositeConverter == null) {
                // Unknown composite keyword: surface the error inline in the output.
                addError("Failed to create converter for [%" + cn.getValue() + "] keyword");
                addToList(new LiteralConverter<E>("%PARSER_ERROR[" + cn.getValue() + "]"));
                break;
            }
            compositeConverter.setFormattingInfo(cn.getFormatInfo());
            compositeConverter.setOptionList(cn.getOptions());
            // Compile the composite's children with a nested compiler sharing the context.
            Compiler<E> childCompiler = new Compiler<E>(cn.getChildNode(), converterMap);
            childCompiler.setContext(context);
            Converter<E> childConverter = childCompiler.compile();
            compositeConverter.setChildConverter(childConverter);
            addToList(compositeConverter);
            break;
        case Node.SIMPLE_KEYWORD:
            SimpleKeywordNode kn = (SimpleKeywordNode) n;
            DynamicConverter<E> dynaConverter = createConverter(kn);
            if (dynaConverter != null) {
                dynaConverter.setFormattingInfo(kn.getFormatInfo());
                dynaConverter.setOptionList(kn.getOptions());
                addToList(dynaConverter);
            } else {
                // if the appropriate dynaconverter cannot be found, then replace
                // it with a dummy LiteralConverter indicating an error.
                Converter<E> errConveter = new LiteralConverter<E>("%PARSER_ERROR[" + kn.getValue() + "]");
                addStatus(new ErrorStatus("[" + kn.getValue() + "] is not a valid conversion word", this));
                addToList(errConveter);
            }
            // No break needed: SIMPLE_KEYWORD is the last case of the switch.
        }
    }
    return head;
}
// Exercises min/max-width formatting on composite groups: right pad, left pad ("-"),
// left truncation (".2"), right truncation (".-2"), and padding applied to a nested
// keyword inside a composite.
@Test
public void testCompositeFormatting() throws Exception {
    {
        // Min width 4: "ABC" padded on the left to 4 chars.
        Parser<Object> p = new Parser<Object>("xyz %4.10(ABC)");
        p.setContext(context);
        Node t = p.parse();
        Converter<Object> head = p.compile(t, converterMap);
        String result = write(head, new Object());
        assertEquals("xyz  ABC", result);
    }

    {
        // "-" flag: pad on the right instead.
        Parser<Object> p = new Parser<Object>("xyz %-4.10(ABC)");
        p.setContext(context);
        Node t = p.parse();
        Converter<Object> head = p.compile(t, converterMap);
        String result = write(head, new Object());
        assertEquals("xyz ABC ", result);
    }

    {
        // Max width 2: keep the LAST two characters of "ABC Hello".
        Parser<Object> p = new Parser<Object>("xyz %.2(ABC %hello)");
        p.setContext(context);
        Node t = p.parse();
        Converter<Object> head = p.compile(t, converterMap);
        String result = write(head, new Object());
        assertEquals("xyz lo", result);
    }

    {
        // Negative max width: truncate from the right instead (keep the first two chars).
        Parser<Object> p = new Parser<Object>("xyz %.-2(ABC)");
        p.setContext(context);
        Node t = p.parse();
        Converter<Object> head = p.compile(t, converterMap);
        String result = write(head, new Object());
        assertEquals("xyz AB", result);
    }

    {
        // Inner %20hello padding applies before the outer 30.30 constraint.
        Parser<Object> p = new Parser<Object>("xyz %30.30(ABC %20hello)");
        p.setContext(context);
        Node t = p.parse();
        Converter<Object> head = p.compile(t, converterMap);
        String result = write(head, new Object());
        assertEquals("xyz       ABC                Hello", result);
    }
}
/**
 * Handles a LOCK_BATCH_MQ request. Locks the requested message queues locally first;
 * when strict-mode replication is enabled and the request may fan out, it also asks the
 * other brokers in the member group to lock the locally-acquired queues and only reports
 * a queue as locked when a quorum (replicas/2 + 1) of brokers agreed. Remote lock calls
 * are bounded by a 1s per-call timeout and a 2s overall wait on the latch.
 */
private RemotingCommand lockBatchMQ(ChannelHandlerContext ctx,
    RemotingCommand request) throws RemotingCommandException {
    final RemotingCommand response = RemotingCommand.createResponseCommand(null);
    LockBatchRequestBody requestBody = LockBatchRequestBody.decode(request.getBody(), LockBatchRequestBody.class);

    Set<MessageQueue> lockOKMQSet = new HashSet<>();
    // Always try the local lock first.
    Set<MessageQueue> selfLockOKMQSet = this.brokerController.getRebalanceLockManager().tryLockBatch(
        requestBody.getConsumerGroup(),
        requestBody.getMqSet(),
        requestBody.getClientId());
    if (requestBody.isOnlyThisBroker() || !brokerController.getBrokerConfig().isLockInStrictMode()) {
        // Local-only request or strict mode disabled: local result is authoritative.
        lockOKMQSet = selfLockOKMQSet;
    } else {
        requestBody.setOnlyThisBroker(true);
        int replicaSize = this.brokerController.getMessageStoreConfig().getTotalReplicas();

        int quorum = replicaSize / 2 + 1;

        if (quorum <= 1) {
            lockOKMQSet = selfLockOKMQSet;
        } else {
            // Per-queue vote count, seeded with the local broker's successful locks.
            final ConcurrentMap<MessageQueue, Integer> mqLockMap = new ConcurrentHashMap<>();
            for (MessageQueue mq : selfLockOKMQSet) {
                if (!mqLockMap.containsKey(mq)) {
                    mqLockMap.put(mq, 0);
                }
                mqLockMap.put(mq, mqLockMap.get(mq) + 1);
            }

            BrokerMemberGroup memberGroup = this.brokerController.getBrokerMemberGroup();

            if (memberGroup != null) {
                Map<Long, String> addrMap = new HashMap<>(memberGroup.getBrokerAddrs());
                // Don't ask ourselves again.
                addrMap.remove(this.brokerController.getBrokerConfig().getBrokerId());
                final CountDownLatch countDownLatch = new CountDownLatch(addrMap.size());
                // Only propagate the queues we managed to lock locally.
                requestBody.setMqSet(selfLockOKMQSet);
                requestBody.setOnlyThisBroker(true);
                for (Long brokerId : addrMap.keySet()) {
                    try {
                        this.brokerController.getBrokerOuterAPI().lockBatchMQAsync(addrMap.get(brokerId),
                            requestBody, 1000, new LockCallback() {
                                @Override
                                public void onSuccess(Set<MessageQueue> lockOKMQSet) {
                                    // Count this broker's vote for each queue it locked.
                                    for (MessageQueue mq : lockOKMQSet) {
                                        if (!mqLockMap.containsKey(mq)) {
                                            mqLockMap.put(mq, 0);
                                        }
                                        mqLockMap.put(mq, mqLockMap.get(mq) + 1);
                                    }
                                    countDownLatch.countDown();
                                }

                                @Override
                                public void onException(Throwable e) {
                                    LOGGER.warn("lockBatchMQAsync on {} failed, {}", addrMap.get(brokerId), e);
                                    countDownLatch.countDown();
                                }
                            });
                    } catch (Exception e) {
                        LOGGER.warn("lockBatchMQAsync on {} failed, {}", addrMap.get(brokerId), e);
                        countDownLatch.countDown();
                    }
                }

                try {
                    countDownLatch.await(2000, TimeUnit.MILLISECONDS);
                } catch (InterruptedException e) {
                    LOGGER.warn("lockBatchMQ exception on {}, {}", this.brokerController.getBrokerConfig().getBrokerName(), e);
                }
            }

            // A queue counts as locked only when a quorum of brokers locked it.
            for (MessageQueue mq : mqLockMap.keySet()) {
                if (mqLockMap.get(mq) >= quorum) {
                    lockOKMQSet.add(mq);
                }
            }
        }
    }

    LockBatchResponseBody responseBody = new LockBatchResponseBody();
    responseBody.setLockOKMQSet(lockOKMQSet);

    response.setBody(responseBody.encode());
    response.setCode(ResponseCode.SUCCESS);
    response.setRemark(null);
    return response;
}
// A LOCK_BATCH_MQ request with an empty queue set must still succeed.
@Test
public void testLockBatchMQ() throws Exception {
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.LOCK_BATCH_MQ, null);
    LockBatchRequestBody lockBatchRequestBody = new LockBatchRequestBody();
    lockBatchRequestBody.setClientId("1111");
    lockBatchRequestBody.setConsumerGroup("group");
    request.setBody(JSON.toJSON(lockBatchRequestBody).toString().getBytes());
    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/**
 * Returns the day-of-month of the n-th solar term (1..24) in year y (1900..2100),
 * decoded from the packed S_TERM_INFO hex table; -1 for out-of-range arguments.
 */
public static int getTerm(int y, int n) {
    if (y < 1900 || y > 2100) {
        return -1;
    }
    if (n < 1 || n > 24) {
        return -1;
    }

    final String _table = S_TERM_INFO[y - 1900];
    // Each year row is 6 groups of 5 hex chars; each group decodes to a 6-digit
    // decimal number packing four day values as 1+2+1+2 digits.
    Integer[] _info = new Integer[6];
    for (int i = 0; i < 6; i++) {
        _info[i] = Integer.parseInt(_table.substring(i * 5, 5 * (i + 1)), 16);
    }
    String[] _calday = new String[24];
    for (int i = 0; i < 6; i++) {
        // NOTE(review): this assumes every decoded value renders as exactly six decimal
        // digits (no leading zero lost by Integer.toString); presumably guaranteed by
        // how the table was generated — confirm if the table is ever regenerated.
        _calday[4 * i] = _info[i].toString().substring(0, 1);
        _calday[4 * i + 1] = _info[i].toString().substring(1, 3);
        _calday[4 * i + 2] = _info[i].toString().substring(3, 4);
        _calday[4 * i + 3] = _info[i].toString().substring(4, 6);
    }
    return NumberUtil.parseInt(_calday[n - 1]);
}
// The 3rd solar term of 1987 falls on day 4 (per the packed table).
@Test
public void getTermTest1(){
    final int term = SolarTerms.getTerm(1987, 3);
    assertEquals(4, term);
}
static Supplier<Configuration> initializeConfiguration(Supplier<Configuration> configurationSupplier) { try { Configuration configuration = configurationSupplier.get(); return () -> configuration; } catch (Exception e) { // We have to be careful not to throw anything here as this static block gets executed during plugin scanning and any exceptions will // cause the worker to fail during startup, even if it's not configured to use the basic auth extension. return () -> { throw new ConnectException("Failed to retrieve JAAS configuration", e); }; } }
// When the supplier throws, initializeConfiguration must not propagate the failure;
// instead the returned supplier throws a ConnectException wrapping the original cause.
@Test
public void testBadJaasConfigInitialization() {
    SecurityException jaasConfigurationException = new SecurityException(new IOException("Bad JAAS config is bad"));
    Supplier<Configuration> configuration = BasicAuthSecurityRestExtension.initializeConfiguration(() -> {
        throw jaasConfigurationException;
    });
    ConnectException thrownException = assertThrows(ConnectException.class, configuration::get);
    assertEquals(jaasConfigurationException, thrownException.getCause());
}
/**
 * One scheduling-edit pass of the preemption policy: refresh config if needed, snapshot
 * the cluster resources, and walk the queue tree preempting or killing containers.
 * Synchronized because the scheduler may invoke edits concurrently with config updates.
 */
@Override
public synchronized void editSchedule() {
    updateConfigIfNeeded();
    long startTs = clock.getTime();
    CSQueue root = scheduler.getRootQueue();
    // Work on a clone so concurrent cluster changes don't skew this pass.
    Resource clusterResources = Resources.clone(scheduler.getClusterResource());
    containerBasedPreemptOrKill(root, clusterResources);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms.");
    }
}
// With queue A 10 over its 10% guarantee unmet and B holding 60 against a 40% guarantee,
// the policy must preempt 10 containers from appA's over-allocated queue.
@Test
public void testProportionalPreemption() {
    int[][] qData = new int[][]{
        //  /   A   B   C  D
        { 100, 10, 40, 20, 30 },  // abs
        { 100, 100, 100, 100, 100 },  // maxCap
        { 100, 30, 60, 10, 0 },  // used
        { 45, 20, 5, 20, 0 },  // pending
        { 0, 0, 0, 0, 0 },  // reserved
        { 3, 1, 1, 1, 0 },  // apps
        { -1, 1, 1, 1, 1 },  // req granularity
        { 4, 0, 0, 0, 0 },  // subqueues
    };
    ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
    policy.editSchedule();
    // A will preempt guaranteed-allocated.
    verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appA)));
}
/**
 * Retains only the elements of this set that are also present in {@code objects}.
 * Non-CharSequence elements in the argument are ignored; a {@code null} argument
 * is treated as "no change" and returns {@code false}.
 *
 * @return {@code true} if the set was modified
 */
@Override
public boolean retainAll(Collection<?> objects) {
    // Guard clause: null input never modifies the set.
    if (objects == null) {
        return false;
    }
    // Wrap each CharSequence candidate so comparison matches the internal
    // wrapper-based representation.
    Set<CharSequenceWrapper> toRetain = objects.stream()
        .filter(obj -> obj instanceof CharSequence)
        .map(obj -> CharSequenceWrapper.wrap((CharSequence) obj))
        .collect(Collectors.toSet());
    return Iterables.retainAll(wrapperSet, toRetain);
}
// retainAll must ignore non-CharSequence elements (the Integer 123 does not
// "match" the string "123"), report modification correctly, and empty the set
// when nothing in the argument is a CharSequence.
@Test
public void testRetainAll() {
    CharSequenceSet set = CharSequenceSet.of(ImmutableList.of("123", "456"));
    // "123" is dropped: Integer 123 is not a CharSequence.
    assertThat(set.retainAll(ImmutableList.of("456", "789", 123)))
        .overridingErrorMessage("Set should be changed")
        .isTrue();
    assertThat(set).hasSize(1).contains("456");

    set = CharSequenceSet.of(ImmutableList.of("123", "456"));
    assertThat(set.retainAll(ImmutableList.of("123", "456")))
        .overridingErrorMessage("Set should not be changed")
        .isFalse();

    // Only Integers in the argument -> nothing retained, set becomes empty.
    assertThat(set.retainAll(ImmutableList.of(123, 456)))
        .overridingErrorMessage("Set should be changed")
        .isTrue();
    assertThat(set).isEmpty();
}
/**
 * Returns a read-only view of the class types allowed for (de)serialization.
 * The returned set is an unmodifiable view, so it reflects later changes to
 * the underlying registry.
 */
public static Set<Class<?>> getAllowClassType() {
    return Collections.unmodifiableSet(ALLOW_CLAZZ_SET);
}
// The allow-list must contain common value types and concrete message classes,
// but not abstract bases (AbstractBranchEndRequest) or unrelated types (Version).
@Test
public void getAllowClassType() {
    Assertions.assertTrue(SerializerSecurityRegistry.getAllowClassType().contains(Long.class));
    Assertions.assertTrue(SerializerSecurityRegistry.getAllowClassType().contains(Integer.class));
    Assertions.assertTrue(SerializerSecurityRegistry.getAllowClassType().contains(HeartbeatMessage.class));
    Assertions.assertTrue(SerializerSecurityRegistry.getAllowClassType().contains(BranchCommitRequest.class));
    Assertions.assertTrue(SerializerSecurityRegistry.getAllowClassType().contains(BranchCommitResponse.class));
    Assertions.assertFalse(SerializerSecurityRegistry.getAllowClassType().contains(AbstractBranchEndRequest.class));
    Assertions.assertFalse(SerializerSecurityRegistry.getAllowClassType().contains(Version.class));
}
/**
 * Creates a lookup result holding a single value; the same value is also
 * exposed in the multi-value map under {@code SINGLE_VALUE_KEY}.
 */
public static LookupResult single(final CharSequence singleValue) {
    return multi(singleValue, Collections.singletonMap(SINGLE_VALUE_KEY, singleValue));
}
// A single(true) result must serialize with the value both as "single_value"
// and under the multi-value map's "value" key, with the default (max) TTL.
@Test
public void serializeSingleBoolean() {
    final LookupResult lookupResult = LookupResult.single(true);
    final JsonNode node = objectMapper.convertValue(lookupResult, JsonNode.class);
    assertThat(node.isNull()).isFalse();
    assertThat(node.path("single_value").asBoolean()).isTrue();
    assertThat(node.path("multi_value").path("value").asBoolean()).isTrue();
    assertThat(node.path("ttl").asLong()).isEqualTo(Long.MAX_VALUE);
}
/**
 * Collects the plugin locations under {@code topPath} that should go on a plugin
 * classloader's classpath.
 *
 * <p>Performs an iterative depth-first walk of the directory tree, resolving
 * symbolic links to their real paths and tracking visited paths to avoid cycles.
 * Archives are accumulated in sorted order; if only loose class files are found,
 * the top directory itself is returned as the single classpath entry.
 *
 * @param topPath the plugin location; may itself be an archive
 * @return the archives found, or a singleton list containing {@code topPath}
 *     when it is an archive or contains class files only
 * @throws IOException if listing a directory fails
 */
public static List<Path> pluginUrls(Path topPath) throws IOException {
    boolean containsClassFiles = false;
    // TreeSet yields a deterministic (sorted) ordering of discovered archives.
    Set<Path> archives = new TreeSet<>();
    LinkedList<DirectoryEntry> dfs = new LinkedList<>();
    Set<Path> visited = new HashSet<>();

    // A single archive is itself the entire plugin.
    if (isArchive(topPath)) {
        return Collections.singletonList(topPath);
    }

    DirectoryStream<Path> topListing = Files.newDirectoryStream(
            topPath,
            PLUGIN_PATH_FILTER
    );
    dfs.push(new DirectoryEntry(topListing));
    visited.add(topPath);
    try {
        while (!dfs.isEmpty()) {
            Iterator<Path> neighbors = dfs.peek().iterator;
            if (!neighbors.hasNext()) {
                // Directory exhausted: close its stream and backtrack.
                dfs.pop().stream.close();
                continue;
            }

            Path adjacent = neighbors.next();
            if (Files.isSymbolicLink(adjacent)) {
                try {
                    Path symlink = Files.readSymbolicLink(adjacent);
                    // if symlink is absolute resolve() returns the absolute symlink itself
                    Path parent = adjacent.getParent();
                    if (parent == null) {
                        continue;
                    }
                    Path absolute = parent.resolve(symlink).toRealPath();
                    if (Files.exists(absolute)) {
                        adjacent = absolute;
                    } else {
                        continue;
                    }
                } catch (IOException e) {
                    // See https://issues.apache.org/jira/browse/KAFKA-6288 for a reported
                    // failure. Such a failure at this stage is not easily reproducible and
                    // therefore an exception is caught and ignored after issuing a
                    // warning. This allows class scanning to continue for non-broken plugins.
                    log.warn(
                            "Resolving symbolic link '{}' failed. Ignoring this path.",
                            adjacent,
                            e
                    );
                    continue;
                }
            }

            if (!visited.contains(adjacent)) {
                visited.add(adjacent);
                if (isArchive(adjacent)) {
                    archives.add(adjacent);
                } else if (isClassFile(adjacent)) {
                    containsClassFiles = true;
                } else {
                    DirectoryStream<Path> listing = Files.newDirectoryStream(
                            adjacent,
                            PLUGIN_PATH_FILTER
                    );
                    dfs.push(new DirectoryEntry(listing));
                }
            }
        }
    } finally {
        // Close any streams still open (e.g. when an exception unwinds the walk).
        while (!dfs.isEmpty()) {
            dfs.pop().stream.close();
        }
    }
    if (containsClassFiles) {
        if (archives.isEmpty()) {
            // Pure class-file layout: the top directory is the classpath entry.
            return Collections.singletonList(topPath);
        }
        log.warn("Plugin path contains both java archives and class files. Returning only the"
                + " archives");
    }
    return Arrays.asList(archives.toArray(new Path[0]));
}
// An absolute symlink inside the plugin path must be followed: the archive that
// lives in the (external) link target directory is expected in the result.
@Test
public void testPluginUrlsWithAbsoluteSymlink() throws Exception {
    createBasicDirectoryLayout();

    // Create a directory outside the plugin path and point a symlink at it.
    Path anotherPath = rootDir.resolve("moreplugins");
    Files.createDirectories(anotherPath);
    anotherPath = anotherPath.toRealPath();
    Files.createDirectories(anotherPath.resolve("connectorB-deps"));
    Files.createSymbolicLink(
        pluginPath.resolve("connectorB/deps/symlink"),
        anotherPath.resolve("connectorB-deps")
    );

    List<Path> expectedUrls = createBasicExpectedUrls();
    expectedUrls.add(Files.createFile(anotherPath.resolve("connectorB-deps/converter.jar")));

    assertUrls(expectedUrls, PluginUtils.pluginUrls(pluginPath));
}
/**
 * Normalizes the array in place to unit norm by delegating to
 * {@link #unitize2} (the companion test asserts the resulting norm is 1).
 *
 * @param array the vector to normalize; modified in place
 */
public static void unitize(double[] array) {
    unitize2(array);
}
// After unitize, the vector's norm must be 1 (within 1e-7).
@Test
public void testUnitize() {
    System.out.println("unitize");
    double[] data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    MathEx.unitize(data);
    assertEquals(1, MathEx.norm(data), 1E-7);
}
/**
 * Evaluates the template against the given API-level event by downcasting to
 * the concrete {@code Event} implementation.
 *
 * @throws IllegalStateException if the event is not the expected concrete class
 * @throws JsonProcessingException if template evaluation fails to serialize
 */
public static String evaluate(final co.elastic.logstash.api.Event event, final String template) throws JsonProcessingException {
    // Guard clause: only the concrete Event implementation is supported here.
    if (!(event instanceof Event)) {
        throw new IllegalStateException("Unknown event concrete class: " + event.getClass().getName());
    }
    return evaluate((Event) event, template);
}
// A template consisting solely of a date tag ("%{+YYYY}") must be interpolated
// from the test event's timestamp (year 2015).
@Test
public void TestStringIsOneDateTag() throws IOException {
    Event event = getTestEvent();
    String path = "%{+YYYY}";
    assertEquals("2015", StringInterpolation.evaluate(event, path));
}
/**
 * Returns the current counter value by blocking on the async counter.
 * NOTE(review): complete(...) presumably unwraps the future and translates
 * failures (e.g. into StorageException, per the companion test) — confirm
 * against its definition.
 */
@Override
public long get() {
    return complete(asyncCounter.get());
}
// When the underlying async counter completes exceptionally, the synchronous
// get() must surface the failure as a StorageException.
@Test(expected = StorageException.class)
public void testExecutionError() {
    AtomicCounterWithErrors atomicCounter = new AtomicCounterWithErrors();
    atomicCounter.setErrorState(TestingCompletableFutures.ErrorState.EXECUTION_EXCEPTION);
    DefaultAtomicCounter counter = new DefaultAtomicCounter(atomicCounter, 1000);
    counter.get();
}
/**
 * Resolves the glob against the shell state and walks the metadata tree.
 * Relative globs are anchored at the current working directory. When the
 * recursive walk matches nothing, the handler is notified with an empty result.
 */
@Override
public void accept(MetadataShellState state) {
    // Anchor relative globs at the current working directory.
    String fullGlob;
    if (glob.startsWith("/")) {
        fullGlob = glob;
    } else {
        fullGlob = state.workingDirectory() + "/" + glob;
    }
    List<String> globComponents =
        CommandUtils.stripDotPathComponents(CommandUtils.splitPath(fullGlob));
    MetadataNode root = state.root();
    if (root == null) {
        throw new RuntimeException("Invalid null root");
    }
    // Explicitly signal "no match" when the walk accepts nothing.
    if (!accept(globComponents, 0, root, new String[0])) {
        handler.accept(Optional.empty());
    }
}
// The ".." glob from the working directory must resolve back to the root node
// (dot-path components are stripped during resolution).
@Test
public void testDotDot() {
    InfoConsumer consumer = new InfoConsumer();
    GlobVisitor visitor = new GlobVisitor("..", consumer);
    visitor.accept(DATA);
    assertEquals(Optional.of(Collections.singletonList(
        new MetadataNodeInfo(new String[0], DATA.root()))), consumer.infos);
}
/**
 * Maps a binlog event type code to its human-readable name.
 * Unrecognized codes yield {@code "Unknown type:<code>"}.
 *
 * @param type the numeric event type constant
 * @return the display name for the event type
 */
public static String getTypeName(final int type) {
    switch (type) {
        case START_EVENT_V3: return "Start_v3";
        case STOP_EVENT: return "Stop";
        case QUERY_EVENT: return "Query";
        case ROTATE_EVENT: return "Rotate";
        case INTVAR_EVENT: return "Intvar";
        case LOAD_EVENT: return "Load";
        case NEW_LOAD_EVENT: return "New_load";
        case SLAVE_EVENT: return "Slave";
        case CREATE_FILE_EVENT: return "Create_file";
        case APPEND_BLOCK_EVENT: return "Append_block";
        case DELETE_FILE_EVENT: return "Delete_file";
        case EXEC_LOAD_EVENT: return "Exec_load";
        case RAND_EVENT: return "RAND";
        case XID_EVENT: return "Xid";
        case USER_VAR_EVENT: return "User var";
        case FORMAT_DESCRIPTION_EVENT: return "Format_desc";
        case TABLE_MAP_EVENT: return "Table_map";
        case PRE_GA_WRITE_ROWS_EVENT: return "Write_rows_event_old";
        case PRE_GA_UPDATE_ROWS_EVENT: return "Update_rows_event_old";
        case PRE_GA_DELETE_ROWS_EVENT: return "Delete_rows_event_old";
        case WRITE_ROWS_EVENT_V1: return "Write_rows_v1";
        case UPDATE_ROWS_EVENT_V1: return "Update_rows_v1";
        case DELETE_ROWS_EVENT_V1: return "Delete_rows_v1";
        case BEGIN_LOAD_QUERY_EVENT: return "Begin_load_query";
        case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query";
        case INCIDENT_EVENT: return "Incident";
        // Both heartbeat variants share one display name.
        case HEARTBEAT_LOG_EVENT:
        case HEARTBEAT_LOG_EVENT_V2: return "Heartbeat";
        case IGNORABLE_LOG_EVENT: return "Ignorable";
        case ROWS_QUERY_LOG_EVENT: return "Rows_query";
        case WRITE_ROWS_EVENT: return "Write_rows";
        case UPDATE_ROWS_EVENT: return "Update_rows";
        case DELETE_ROWS_EVENT: return "Delete_rows";
        case GTID_LOG_EVENT: return "Gtid";
        case ANONYMOUS_GTID_LOG_EVENT: return "Anonymous_Gtid";
        case PREVIOUS_GTIDS_LOG_EVENT: return "Previous_gtids";
        case PARTIAL_UPDATE_ROWS_EVENT: return "Update_rows_partial";
        case TRANSACTION_CONTEXT_EVENT : return "Transaction_context";
        case VIEW_CHANGE_EVENT : return "view_change";
        case XA_PREPARE_LOG_EVENT : return "Xa_prepare";
        case TRANSACTION_PAYLOAD_EVENT : return "transaction_payload";
        default: return "Unknown type:" + type;
    }
}
// Event type code 14 must map to the "User var" display name.
@Test
public void getTypeNameInputPositiveOutputNotNull33() {
    // Arrange
    final int type = 14;
    // Act
    final String actual = LogEvent.getTypeName(type);
    // Assert result
    Assert.assertEquals("User var", actual);
}
/**
 * Searches Maven Central for artifacts matching the given SHA-1 hash.
 *
 * <p>Results (including negative "not found" results, cached as an empty list)
 * are cached when a cache is configured. The response is parsed as XML and each
 * matching document contributes one {@link MavenArtifact}, with jar/pom URLs
 * built from the configured Central content URL when those packagings exist.
 *
 * @param sha1 a 40-character hexadecimal SHA-1 digest
 * @return the matching artifacts (never empty; absence raises an exception)
 * @throws IllegalArgumentException if {@code sha1} is not a valid SHA-1 string
 * @throws FileNotFoundException if no artifact matches the hash
 * @throws TooManyRequestsException if Central responds with HTTP 429
 * @throws IOException on connection failures or unparseable responses
 */
public List<MavenArtifact> searchSha1(String sha1) throws IOException, TooManyRequestsException {
    if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
        throw new IllegalArgumentException("Invalid SHA1 format");
    }
    if (cache != null) {
        final List<MavenArtifact> cached = cache.get(sha1);
        if (cached != null) {
            LOGGER.debug("cache hit for Central: " + sha1);
            // An empty cached list records a previous "not found" answer.
            if (cached.isEmpty()) {
                throw new FileNotFoundException("Artifact not found in Central");
            }
            return cached;
        }
    }
    final List<MavenArtifact> result = new ArrayList<>();
    final URL url = new URL(String.format(query, rootURL, sha1));
    LOGGER.trace("Searching Central url {}", url);

    // Determine if we need to use a proxy. The rules:
    // 1) If the proxy is set, AND the setting is set to true, use the proxy
    // 2) Otherwise, don't use the proxy (either the proxy isn't configured,
    // or proxy is specifically set to false)
    final URLConnectionFactory factory = new URLConnectionFactory(settings);
    final HttpURLConnection conn = factory.createHttpURLConnection(url, useProxy);
    conn.setDoOutput(true);
    // JSON would be more elegant, but there's not currently a dependency
    // on JSON, so don't want to add one just for this
    conn.addRequestProperty("Accept", "application/xml");
    conn.connect();
    if (conn.getResponseCode() == 200) {
        boolean missing = false;
        try {
            final DocumentBuilder builder = XmlUtils.buildSecureDocumentBuilder();
            final Document doc = builder.parse(conn.getInputStream());
            final XPath xpath = XPathFactory.newInstance().newXPath();
            final String numFound = xpath.evaluate("/response/result/@numFound", doc);
            if ("0".equals(numFound)) {
                missing = true;
            } else {
                final NodeList docs = (NodeList) xpath.evaluate("/response/result/doc", doc, XPathConstants.NODESET);
                for (int i = 0; i < docs.getLength(); i++) {
                    final String g = xpath.evaluate("./str[@name='g']", docs.item(i));
                    LOGGER.trace("GroupId: {}", g);
                    final String a = xpath.evaluate("./str[@name='a']", docs.item(i));
                    LOGGER.trace("ArtifactId: {}", a);
                    final String v = xpath.evaluate("./str[@name='v']", docs.item(i));
                    // The 'ec' array lists which packagings/extensions exist for the artifact.
                    final NodeList attributes = (NodeList) xpath.evaluate("./arr[@name='ec']/str", docs.item(i), XPathConstants.NODESET);
                    boolean pomAvailable = false;
                    boolean jarAvailable = false;
                    for (int x = 0; x < attributes.getLength(); x++) {
                        final String tmp = xpath.evaluate(".", attributes.item(x));
                        if (".pom".equals(tmp)) {
                            pomAvailable = true;
                        } else if (".jar".equals(tmp)) {
                            jarAvailable = true;
                        }
                    }
                    final String centralContentUrl = settings.getString(Settings.KEYS.CENTRAL_CONTENT_URL);
                    String artifactUrl = null;
                    String pomUrl = null;
                    if (jarAvailable) {
                        //org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.pom
                        artifactUrl = centralContentUrl + g.replace('.', '/') + '/' + a + '/'
                                + v + '/' + a + '-' + v + ".jar";
                    }
                    if (pomAvailable) {
                        //org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.pom
                        pomUrl = centralContentUrl + g.replace('.', '/') + '/' + a + '/'
                                + v + '/' + a + '-' + v + ".pom";
                    }
                    result.add(new MavenArtifact(g, a, v, artifactUrl, pomUrl));
                }
            }
        } catch (ParserConfigurationException | IOException | SAXException | XPathExpressionException e) {
            // Anything else is jacked up XML stuff that we really can't recover from well
            final String errorMessage = "Failed to parse MavenCentral XML Response: " + e.getMessage();
            throw new IOException(errorMessage, e);
        }
        if (missing) {
            // Cache the negative result (empty list) so repeat lookups fail fast.
            if (cache != null) {
                cache.put(sha1, result);
            }
            throw new FileNotFoundException("Artifact not found in Central");
        }
    } else if (conn.getResponseCode() == 429) {
        final String errorMessage = "Too many requests sent to MavenCentral; additional requests are being rejected.";
        throw new TooManyRequestsException(errorMessage);
    } else {
        final String errorMessage = "Could not connect to MavenCentral (" + conn.getResponseCode() + "): "
                + conn.getResponseMessage();
        throw new IOException(errorMessage);
    }
    if (cache != null) {
        cache.put(sha1, result);
    }
    return result;
}
// A SHA-1 known to match multiple artifacts in Central must yield more than one
// result. Connection failures on CI are tolerated via Assume.
@Test
public void testMultipleReturns() throws Exception {
    try {
        List<MavenArtifact> ma = searcher.searchSha1("94A9CE681A42D0352B3AD22659F67835E560D107");
        assertTrue(ma.size() > 1);
    } catch (IOException ex) {
        //we hit a failure state on the CI
        Assume.assumeFalse(StringUtils.contains(ex.getMessage(), "Could not connect to MavenCentral"));
        throw ex;
    }
}
/**
 * Stores a user-defined property on this message.
 *
 * @throws RuntimeException if the property name is reserved by the system
 * @throws IllegalArgumentException if the name or value is null or blank
 */
public void putUserProperty(final String name, final String value) {
    // Reserved names are checked first, matching the original validation order.
    if (MessageConst.STRING_HASH_SET.contains(name)) {
        throw new RuntimeException(String.format(
            "The Property<%s> is used by system, input another please", name));
    }

    final boolean valueBlank = value == null || value.trim().isEmpty();
    final boolean nameBlank = name == null || name.trim().isEmpty();
    if (valueBlank || nameBlank) {
        throw new IllegalArgumentException(
            "The name or value of property can not be null or blank string!"
        );
    }

    this.putProperty(name, value);
}
// A blank (whitespace-only) property value must be rejected with
// IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void putUserEmptyValuePropertyWithException() throws Exception {
    Message m = new Message();
    m.putUserProperty("prop1", " ");
}
/**
 * Builds the CLI argument list for the retention job of this table:
 * table name, retention column, granularity and count, plus the optional
 * column pattern when one is configured.
 */
@Override
protected List<String> getArgs() {
    TableMetadata tableMetadata = getMetadata();
    RetentionConfig config = tablesClient.getTableRetention(tableMetadata).get();

    // Mandatory arguments shared by every retention job.
    List<String> jobArgs =
        Stream.of(
                "--tableName", tableMetadata.fqtn(),
                "--columnName", config.getColumnName(),
                "--granularity", config.getGranularity().getValue(),
                "--count", Integer.toString(config.getCount()))
            .collect(Collectors.toList());

    // Optional pattern: only appended when configured (non-blank).
    String columnPattern = config.getColumnPattern();
    if (!StringUtils.isBlank(columnPattern)) {
        jobArgs.add("--columnPattern");
        jobArgs.add(columnPattern);
    }
    return jobArgs;
}
// With an empty column pattern, getArgs() must emit only the four mandatory
// argument pairs and omit --columnPattern entirely.
@Test
void testRetentionJobArgsForTableWithoutPattern() {
    TableRetentionTask tableRetentionTask =
        new TableRetentionTask(jobsClient, tablesClient, tableMetadata);
    String columnPattern = "";
    String columnName = "testColumnName";
    int count = 1;
    Retention.GranularityEnum retentionGranularity = Retention.GranularityEnum.DAY;
    RetentionConfig retentionConfigMock = Mockito.mock(RetentionConfig.class);
    Mockito.when(retentionConfigMock.getColumnPattern()).thenReturn(columnPattern);
    Mockito.when(retentionConfigMock.getColumnName()).thenReturn(columnName);
    Mockito.when(retentionConfigMock.getGranularity()).thenReturn(retentionGranularity);
    Mockito.when(retentionConfigMock.getCount()).thenReturn(count);
    Mockito.when(tablesClient.getTableRetention(tableMetadata))
        .thenReturn(Optional.of(retentionConfigMock));
    List<String> expectedArgs =
        Stream.of(
                "--tableName", tableMetadata.fqtn(),
                "--columnName", columnName,
                "--granularity", retentionGranularity.getValue(),
                "--count", String.valueOf(count))
            .collect(Collectors.toList());
    Assertions.assertEquals(expectedArgs, tableRetentionTask.getArgs());
}
public boolean run() throws ExecutionException, InterruptedException, ScanningWorkflowException, IOException { String logId = mainCliOptions.getLogId(); // TODO(b/171405612): Find a way to print the log ID at every log line. logger.atInfo().log("%sTsunamiCli starting...", logId); ImmutableList<Process> languageServerProcesses = remoteServerLoader.runServerProcesses(); ScanResults scanResults = scanningWorkflow.run(buildScanTarget()); languageServerProcesses.forEach(Process::destroy); logger.atInfo().log("Tsunami scan finished, saving results."); saveResults(scanResults); if (hasSuccessfulResults(scanResults)) { logger.atInfo().log("TsunamiCli finished..."); return true; } else { logger.atInfo().log( "Tsunami scan has failed status, message = %s.", scanResults.getStatusMessage()); return false; } }
// End-to-end wiring test: when every VulnDetector fails, run() must return
// false and archive a ScanResults with FAILED status and the aggregate
// "All VulnDetectors failed." message.
@Test
public void run_whenScanFailed_generatesFailedScanResults()
    throws InterruptedException, ExecutionException, IOException {
  try (ScanResult scanResult = new ClassGraph().enableAllInfo().scan()) {
    // Assemble a full injector with fake scanner modules and a failing detector.
    Guice.createInjector(
            new AbstractModule() {
              @Override
              protected void configure() {
                bind(ScanResultsArchiver.class).toInstance(scanResultsArchiver);
                install(
                    new ConfigModule(scanResult, TsunamiConfig.fromYamlData(ImmutableMap.of())));
                install(
                    new CliOptionsModule(
                        scanResult,
                        "TsunamiCliTest",
                        new String[] {
                          "--ip-v4-target=" + IP_TARGET, "--hostname-target=" + HOSTNAME_TARGET
                        }));
                install(new FakeUtcClockModule());
                install(new FakePluginExecutionModule());
                install(new FakePortScannerBootstrapModule());
                install(new FailedVulnDetectorBootstrapModule());
                install(new RemoteServerLoaderModule(ImmutableList.of()));
              }
            })
        .injectMembers(this);

    boolean scanSucceeded = tsunamiCli.run();

    assertThat(scanSucceeded).isFalse();
    verify(scanResultsArchiver, times(1)).archive(scanResultsCaptor.capture());
    ScanResults storedScanResult = scanResultsCaptor.getValue();
    assertThat(storedScanResult.getScanStatus()).isEqualTo(ScanStatus.FAILED);
    assertThat(storedScanResult.getStatusMessage()).isEqualTo("All VulnDetectors failed.");
  }
}
/** Returns the authority component of this origin name. */
public String getAuthority() {
    return authority;
}
// For an OriginName built from (vip, app), the authority must be the app name.
@Test
void getAuthority() {
    OriginName trusted = OriginName.fromVipAndApp("woodly-doodly", "westerndigital");
    assertEquals("westerndigital", trusted.getAuthority());
}
/**
 * Parses the input and returns the root node of the resulting parse tree
 * (delegates to the top-level grammar production {@code E()}).
 *
 * @throws ScanException if tokenization fails
 */
public Node parse() throws ScanException {
    return E();
}
// A plain string input must parse to a single LITERAL node carrying the text.
@Test
public void testBasic() throws Exception {
    Parser<Object> p = new Parser("hello");
    Node t = p.parse();
    assertEquals(Node.LITERAL, t.getType());
    assertEquals("hello", t.getValue());
}
/**
 * Lists the object keys contained in the named bucket.
 *
 * @throws Exception if the bucket cannot be found
 */
public String[] getS3ObjectsNames( String bucketName ) throws Exception {
    Bucket bucket = getBucket( bucketName );
    // Guard: a missing bucket is reported with a localized message.
    if ( bucket == null ) {
      throw new Exception( Messages.getString( "S3DefaultService.Exception.UnableToFindBucket.Message", bucketName ) );
    }
    return getS3Objects( bucket ).getObjectSummaries().stream()
      .map( summary -> summary.getKey() )
      .toArray( String[]::new );
  }
// An existing but empty bucket must yield an empty (non-null) name array.
@Test
public void testGetObjectsNamesInEmptyBucket() throws Exception {
    String[] actual = provider.getS3ObjectsNames( BUCKET3_NAME );
    logArray( actual );
    assertEquals( 0, actual.length );
}
/**
 * Returns a shallow copy of this partition via {@link Object#clone},
 * narrowed to the concrete {@code RangePartition} type.
 */
@Override
public RangePartition clone() throws CloneNotSupportedException {
    return (RangePartition)super.clone();
}
// clone() must produce an equal but distinct instance.
@Test
void requireThatCloneIsImplemented() throws CloneNotSupportedException {
    RangePartition node1 = new RangePartition("foo=300-399");
    RangePartition node2 = node1.clone();
    assertEquals(node1, node2);
    assertNotSame(node1, node2);
}
/**
 * Parses the gallery detail URL in strict mode (convenience overload for
 * {@code parse(url, true)}).
 *
 * @return the parsed result, or {@code null} when the URL cannot be parsed
 */
@Nullable
public static Result parse(String url) {
    return parse(url, true);
}
// Parameterized check: for each (url, strict) case, parse either returns null
// or a result carrying the expected gid and token.
@Test
public void testParse() {
    GalleryDetailUrlParser.Result result = GalleryDetailUrlParser.parse(url, strict);
    if (isNull) {
        assertNull(result);
    } else {
        assertEquals(gid, result.gid);
        assertEquals(token, result.token);
    }
}
/**
 * Infers the table schema by asking a live backend (or, in shared-data mode,
 * possibly a compute node) to inspect the input files over BRPC.
 *
 * @return the inferred columns (all nullable), or an empty list when there
 *     are no input files
 * @throws DdlException when no node is alive, the RPC fails, or the remote
 *     side reports a non-OK status
 */
private List<Column> getFileSchema() throws DdlException {
    if (fileStatuses.isEmpty()) {
        return Lists.newArrayList();
    }
    TNetworkAddress address;
    List<Long> nodeIds = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendIds(true);
    // Shared-data deployments may also route schema probes to compute nodes.
    if (RunMode.isSharedDataMode()) {
        nodeIds.addAll(GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getComputeNodeIds(true));
    }
    if (nodeIds.isEmpty()) {
        // The error message distinguishes the two run modes.
        if (RunMode.isSharedNothingMode()) {
            throw new DdlException("Failed to send proxy request. No alive backends");
        } else {
            throw new DdlException("Failed to send proxy request. No alive backends or compute nodes");
        }
    }
    // Pick a random alive node for load spreading.
    Collections.shuffle(nodeIds);
    ComputeNode node = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendOrComputeNode(nodeIds.get(0));
    address = new TNetworkAddress(node.getHost(), node.getBrpcPort());

    PGetFileSchemaResult result;
    try {
        PGetFileSchemaRequest request = getGetFileSchemaRequest(fileStatuses);
        Future<PGetFileSchemaResult> future = BackendServiceClient.getInstance().getFileSchema(address, request);
        result = future.get();
    } catch (InterruptedException e) {
        // Preserve the interrupt flag before translating to a DdlException.
        Thread.currentThread().interrupt();
        throw new DdlException("failed to get file schema", e);
    } catch (Exception e) {
        throw new DdlException("failed to get file schema: " + e.getMessage());
    }

    if (TStatusCode.findByValue(result.status.statusCode) != TStatusCode.OK) {
        throw new DdlException("failed to get file schema, path: " + path + ", error: " + result.status.errorMsgs);
    }

    // All inferred columns are created as nullable.
    List<Column> columns = new ArrayList<>();
    for (PSlotDescriptor slot : result.schema) {
        columns.add(new Column(slot.colName, Type.fromProtobuf(slot.slotType), true));
    }
    return columns;
}
// Drives getFileSchema (via reflection) through its node-discovery branches:
// 1) shared-nothing with no backends -> "No alive backends" error,
// 2) shared-data with no backends or compute nodes -> extended error,
// 3) one alive backend -> the RPC path is taken without the discovery error.
@Test
public void testGetFileSchema(@Mocked GlobalStateMgr globalStateMgr,
                              @Mocked SystemInfoService systemInfoService) throws Exception {
    new Expectations() {
        {
            globalStateMgr.getCurrentState().getNodeMgr().getClusterInfo();
            result = systemInfoService;
            minTimes = 0;

            systemInfoService.getBackendIds(anyBoolean);
            result = new ArrayList<>();
            minTimes = 0;
        }
    };
    TableFunctionTable t = new TableFunctionTable(newProperties());
    Method method = TableFunctionTable.class.getDeclaredMethod("getFileSchema", null);
    method.setAccessible(true);
    try {
        method.invoke(t, null);
    } catch (Exception e) {
        Assert.assertTrue(e.getCause().getMessage().contains("Failed to send proxy request. No alive backends"));
    }

    // Switch to shared-data mode: the error message must mention compute nodes too.
    new MockUp<RunMode>() {
        @Mock
        public RunMode getCurrentRunMode() {
            return RunMode.SHARED_DATA;
        }
    };
    try {
        method.invoke(t, null);
    } catch (Exception e) {
        Assert.assertTrue(e.getCause().getMessage().
                contains("Failed to send proxy request. No alive backends or compute nodes"));
    }

    // One alive backend available: discovery succeeds and the RPC path is exercised.
    Backend backend = new Backend(1L, "192.168.1.1", 9050);
    backend.setBrpcPort(8050);
    List<Long> nodeList = new ArrayList<>();
    nodeList.add(1L);
    new Expectations() {
        {
            systemInfoService.getBackendIds(anyBoolean);
            result = nodeList;
            minTimes = 0;

            systemInfoService.getComputeNodeIds(anyBoolean);
            result = new ArrayList<>();
            minTimes = 0;

            systemInfoService.getBackendOrComputeNode(anyLong);
            result = backend;
            minTimes = 0;
        }
    };
    try {
        method.invoke(t, null);
    } catch (Exception e) {
        Assert.assertFalse(false);
    }
}
/**
 * Returns the number of bytes remaining before end of file, clamped to
 * {@code Integer.MAX_VALUE} per the {@link java.io.InputStream} contract.
 * Returns 0 once the stream has reached EOF.
 *
 * @throws IOException if the stream is closed
 */
@Override
public synchronized int available() throws IOException {
    checkNotClosed();
    // Nothing left to read once EOF has been observed.
    if (finished) {
        return 0;
    }
    long remaining = file.size() - pos;
    if (remaining < 0) {
        remaining = 0;
    }
    return Ints.saturatedCast(remaining);
}
// available() must track the remaining byte count exactly as reads (single-byte
// and array variants) consume the 8-byte stream, ending at 0.
@Test
public void testAvailable() throws IOException {
    JimfsInputStream in = newInputStream(1, 2, 3, 4, 5, 6, 7, 8);
    assertThat(in.available()).isEqualTo(8);

    assertThat(in.read()).isEqualTo(1);
    assertThat(in.available()).isEqualTo(7);

    assertThat(in.read(new byte[3])).isEqualTo(3);
    assertThat(in.available()).isEqualTo(4);

    assertThat(in.read(new byte[10], 1, 2)).isEqualTo(2);
    assertThat(in.available()).isEqualTo(2);

    assertThat(in.read(new byte[10])).isEqualTo(2);
    assertThat(in.available()).isEqualTo(0);
}
/**
 * Returns the encryption algorithms available for the given file's container,
 * augmented with the AWS KMS keys (as aliased algorithms) readable in that
 * region. If KMS access is denied, falls back to offering the default
 * SSE-KMS algorithm rather than failing.
 *
 * @throws BackgroundException on non-access-denied failures talking to KMS
 */
@Override
public Set<Algorithm> getKeys(final Path file, final LoginCallback prompt) throws BackgroundException {
    final Path container = containerService.getContainer(file);
    final Set<Algorithm> keys = super.getKeys(container, prompt);
    // KMS keys are scoped to a bucket's region; nothing to add at the root.
    if(container.isRoot()) {
        return keys;
    }
    try {
        final AWSKMS client = this.client(container);
        try {
            // Map key IDs to alias names so keys can be displayed by alias.
            final Map<String, String> aliases = new HashMap<>();
            for(AliasListEntry entry : client.listAliases().getAliases()) {
                aliases.put(entry.getTargetKeyId(), entry.getAliasName());
            }
            for(KeyListEntry entry : client.listKeys().getKeys()) {
                keys.add(new AliasedAlgorithm(entry, aliases.get(entry.getKeyId())));
            }
        }
        catch(AmazonClientException e) {
            throw new AmazonServiceExceptionMappingService().map("Cannot read AWS KMS configuration", e);
        }
        finally {
            // Always release the KMS client's resources.
            client.shutdown();
        }
    }
    catch(AccessDeniedException e) {
        // Best effort: without list permissions still offer the default SSE-KMS key.
        log.warn(String.format("Ignore failure reading keys from KMS. %s", e.getMessage()));
        keys.add(SSE_KMS_DEFAULT);
    }
    return keys;
}
// Against a bucket in ap-southeast-2 with no accessible KMS keys, the feature
// must still offer exactly the two baseline algorithms: NONE and SSE-AES256.
@Test
public void testGetKeys_ap_southeast_2() throws Exception {
    final KMSEncryptionFeature kms = new KMSEncryptionFeature(session, new S3LocationFeature(session),
        new S3AccessControlListFeature(session), new DisabledX509TrustManager(), new DefaultX509KeyManager());
    final Set<Encryption.Algorithm> keys = kms.getKeys(
        new Path("test-ap-southeast-2-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory)),
        new DisabledLoginCallback());
    assertTrue(keys.contains(Encryption.Algorithm.NONE));
    assertTrue(keys.contains(S3EncryptionFeature.SSE_AES256));
    assertEquals(2, keys.size());
}
/**
 * Returns the configured SAML identity-provider display name,
 * failing fast when the mandatory setting is absent.
 *
 * @throws IllegalArgumentException if the provider name is not configured
 */
String getProviderName() {
    return configuration.get(PROVIDER_NAME).orElseThrow(() -> new IllegalArgumentException("Provider Name is missing"));
}
// The provider name must be read from the "sonar.auth.saml.providerName" setting.
@Test
public void return_provider_name() {
    settings.setProperty("sonar.auth.saml.providerName", "MyProviderName");
    assertThat(underTest.getProviderName()).isEqualTo("MyProviderName");
}
@Override public void postProcessBeforeDestruction(Object bean, String beanName) throws BeansException { try { // note: Spring will call close() on AutoCloseable beans. if (bean instanceof Startable startable) { startable.stop(); } } catch (Exception e) { LoggerFactory.getLogger(StartableBeanPostProcessor.class) .warn("Dispose of component {} failed", bean.getClass().getCanonicalName(), e); } }
// A Startable bean must have stop() — and only stop() — invoked before destruction.
@Test
public void stops_api_startable() {
    Startable startable = mock(Startable.class);
    underTest.postProcessBeforeDestruction(startable, "startable");
    verify(startable).stop();
    verifyNoMoreInteractions(startable);
}
@Override public String evaluate(EvaluationContext evaluationContext, String... args) { SimpleDateFormat dateFormat = null; if (args != null) { switch (args.length) { case 1: dateFormat = new SimpleDateFormat(args[0]); return dateFormat.format(Calendar.getInstance().getTime()); case 2: Calendar now = Calendar.getInstance(); // 2nd argument is a delta with unit. String unit = args[1].substring(args[1].length() - 1); String amountStr = args[1].substring(0, args[1].length() - 1); if (isInteger(amountStr)) { int amount = Integer.parseInt(amountStr); switch (unit) { case "m": now.add(Calendar.MINUTE, amount); break; case "H": now.add(Calendar.HOUR, amount); break; case "d": now.add(Calendar.DATE, amount); break; case "M": now.add(Calendar.MONTH, amount); break; case "y": now.add(Calendar.YEAR, amount); break; default: break; } } dateFormat = new SimpleDateFormat(args[0]); return dateFormat.format(now.getTime()); default: return String.valueOf(System.currentTimeMillis()); } } return String.valueOf(System.currentTimeMillis()); }
// A one-argument call must format the current instant with the given pattern;
// the date portion is compared against a dd/MM/yyyy string built from the
// Calendar captured just before evaluation (Calendar.MONTH is zero-based,
// hence the increment).
@Test
void testPatternEvaluation() {
    Calendar currentDate = Calendar.getInstance();

    // Compute evaluation.
    NowELFunction function = new NowELFunction();
    String result = function.evaluate(null, "dd/MM/yyyy HH:mm:ss");

    // Assert formatting.
    int day = currentDate.get(Calendar.DAY_OF_MONTH);
    int month = currentDate.get(Calendar.MONTH);
    int year = currentDate.get(Calendar.YEAR);
    String dateString = (day < 10 ? "0" + day : day) + "/"
        + (++month < 10 ? "0" + month : month) + "/" + year;
    assertTrue(result.startsWith(dateString));
}
/**
 * Returns the counters for the given range that should appear in reports:
 * displayed counters only, excluding job counters. The returned list is
 * unmodifiable.
 *
 * @throws IOException propagated from reading the range counters
 */
public List<Counter> getRangeCountersToBeDisplayed(Range range) throws IOException {
    // Work on a mutable copy, then prune everything that is hidden or job-related.
    final List<Counter> displayed = new ArrayList<>(getRangeCounters(range));
    displayed.removeIf(counter -> !(counter.isDisplayed() && !counter.isJobCounter()));
    return Collections.unmodifiableList(displayed);
}
// After several collections of a single displayed counter, every period range
// (day/week/month/year/all) and a custom range must report exactly one counter.
@Test
public void testGetRangeCountersToBeDisplayed() throws IOException {
    final Counter counter = createCounter();
    final Collector collector = new Collector(TEST, Collections.singletonList(counter));
    if (collector.getCounters().isEmpty()) {
        fail("getCounters");
    }
    final JavaInformations javaInformations = new JavaInformations(null, true);
    final List<JavaInformations> javaInformationsList = Collections
            .singletonList(javaInformations);
    counter.addRequest("test1", 0, 0, 0, false, 1000);
    collector.collectWithoutErrors(javaInformationsList);
    counter.addRequest("test1", 0, 0, 0, false, 1000);
    collector.collectWithoutErrors(javaInformationsList);
    collector.collectWithoutErrors(javaInformationsList);
    assertEquals("jour", 1, getSizeOfCountersToBeDisplayed(collector, Period.JOUR));
    assertEquals("semaine", 1, getSizeOfCountersToBeDisplayed(collector, Period.SEMAINE));
    assertEquals("mois", 1, getSizeOfCountersToBeDisplayed(collector, Period.MOIS));
    assertEquals("année", 1, getSizeOfCountersToBeDisplayed(collector, Period.ANNEE));
    assertEquals("tout", 1, getSizeOfCountersToBeDisplayed(collector, Period.TOUT));
    assertEquals("custom", 1, collector
            .getRangeCountersToBeDisplayed(Range.createCustomRange(new Date(), new Date()))
            .size());
}
/**
 * Serves a client's configuration query for the given app/cluster/namespace.
 *
 * <p>Normalizes the namespace (strips the {@code .properties} suffix, fixes
 * character case), loads the app-private release plus — when the namespace is
 * not owned by the app — the associated public release, and merges them.
 * Responds 404 when no release is found and 304 when the merged release key
 * matches the client's current key.
 *
 * @return the merged config, or {@code null} after a 404/304 has been written
 */
@GetMapping(value = "/{appId}/{clusterName}/{namespace:.+}")
public ApolloConfig queryConfig(@PathVariable String appId, @PathVariable String clusterName,
                                @PathVariable String namespace,
                                @RequestParam(value = "dataCenter", required = false) String dataCenter,
                                @RequestParam(value = "releaseKey", defaultValue = "-1") String clientSideReleaseKey,
                                @RequestParam(value = "ip", required = false) String clientIp,
                                @RequestParam(value = "label", required = false) String clientLabel,
                                @RequestParam(value = "messages", required = false) String messagesAsString,
                                HttpServletRequest request, HttpServletResponse response) throws IOException {
    String originalNamespace = namespace;
    //strip out .properties suffix
    namespace = namespaceUtil.filterNamespaceName(namespace);
    //fix the character case issue, such as FX.apollo <-> fx.apollo
    namespace = namespaceUtil.normalizeNamespace(appId, namespace);

    if (Strings.isNullOrEmpty(clientIp)) {
        clientIp = WebUtils.tryToGetClientIp(request);
    }

    ApolloNotificationMessages clientMessages = transformMessages(messagesAsString);

    List<Release> releases = Lists.newLinkedList();

    String appClusterNameLoaded = clusterName;
    // The placeholder app id means "public namespace only" — skip the private lookup.
    if (!ConfigConsts.NO_APPID_PLACEHOLDER.equalsIgnoreCase(appId)) {
        Release currentAppRelease = configService.loadConfig(appId, clientIp, clientLabel, appId, clusterName, namespace,
            dataCenter, clientMessages);

        if (currentAppRelease != null) {
            releases.add(currentAppRelease);
            //we have cluster search process, so the cluster name might be overridden
            appClusterNameLoaded = currentAppRelease.getClusterName();
        }
    }

    //if namespace does not belong to this appId, should check if there is a public configuration
    if (!namespaceBelongsToAppId(appId, namespace)) {
        Release publicRelease = this.findPublicConfig(appId, clientIp, clientLabel, clusterName, namespace,
            dataCenter, clientMessages);
        if (Objects.nonNull(publicRelease)) {
            releases.add(publicRelease);
        }
    }

    if (releases.isEmpty()) {
        response.sendError(HttpServletResponse.SC_NOT_FOUND,
            String.format(
                "Could not load configurations with appId: %s, clusterName: %s, namespace: %s",
                appId, clusterName, originalNamespace));
        Tracer.logEvent("Apollo.Config.NotFound",
            assembleKey(appId, clusterName, originalNamespace, dataCenter));
        return null;
    }

    auditReleases(appId, clusterName, dataCenter, clientIp, releases);

    // The merged key identifies the combination of all contributing releases.
    String mergedReleaseKey = releases.stream().map(Release::getReleaseKey)
            .collect(Collectors.joining(ConfigConsts.CLUSTER_NAMESPACE_SEPARATOR));

    if (mergedReleaseKey.equals(clientSideReleaseKey)) {
        // Client side configuration is the same with server side, return 304
        response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
        Tracer.logEvent("Apollo.Config.NotModified",
            assembleKey(appId, appClusterNameLoaded, originalNamespace, dataCenter));
        return null;
    }

    ApolloConfig apolloConfig = new ApolloConfig(appId, appClusterNameLoaded, originalNamespace,
        mergedReleaseKey);
    apolloConfig.setConfigurations(mergeReleaseConfigurations(releases));

    Tracer.logEvent("Apollo.Config.Found", assembleKey(appId, appClusterNameLoaded,
        originalNamespace, dataCenter));
    return apolloConfig;
}
@Test public void testQueryConfigForNoAppIdPlaceHolderWithPublicNamespace() throws Exception { String someClientSideReleaseKey = "1"; String someServerSideReleaseKey = "2"; HttpServletResponse someResponse = mock(HttpServletResponse.class); String somePublicAppId = "somePublicAppId"; AppNamespace somePublicAppNamespace = assemblePublicAppNamespace(somePublicAppId, somePublicNamespaceName); String appId = ConfigConsts.NO_APPID_PLACEHOLDER; when(appNamespaceService.findPublicNamespaceByName(somePublicNamespaceName)) .thenReturn(somePublicAppNamespace); when(configService.loadConfig(appId, someClientIp, someClientLabel, somePublicAppId, someClusterName, somePublicNamespaceName, someDataCenter, someNotificationMessages)).thenReturn(somePublicRelease); when(somePublicRelease.getReleaseKey()).thenReturn(someServerSideReleaseKey); when(namespaceUtil.normalizeNamespace(appId, somePublicNamespaceName)).thenReturn(somePublicNamespaceName); ApolloConfig result = configController.queryConfig(appId, someClusterName, somePublicNamespaceName, someDataCenter, someClientSideReleaseKey, someClientIp, someClientLabel, someMessagesAsString, someRequest, someResponse); verify(configService, never()).loadConfig(appId, someClientIp, someClientLabel, appId, someClusterName, somePublicNamespaceName, someDataCenter, someNotificationMessages); assertEquals(someServerSideReleaseKey, result.getReleaseKey()); assertEquals(appId, result.getAppId()); assertEquals(someClusterName, result.getCluster()); assertEquals(somePublicNamespaceName, result.getNamespaceName()); assertEquals("foo", result.getConfigurations().get("apollo.public.bar")); }
@Override public boolean next() throws SQLException { if (skipAll) { return false; } if (!paginationContext.getActualRowCount().isPresent()) { return getMergedResult().next(); } return rowNumber++ < paginationContext.getActualRowCount().get() && getMergedResult().next(); }
/**
 * Verifies that an Oracle row-number pagination of "row_id >= Integer.MAX_VALUE" skips
 * every row: the merged result's next() must return false without consuming results.
 */
@Test
void assertNextForSkipAll() throws SQLException {
    // Build an Oracle SELECT over a subquery whose TOP projection is aliased "row_id",
    // filtered by row_id >= Integer.MAX_VALUE (i.e. skip everything).
    OracleSelectStatement selectStatement = new OracleSelectStatement();
    selectStatement.setProjections(new ProjectionsSegment(0, 0));
    WhereSegment whereSegment = mock(WhereSegment.class);
    BinaryOperationExpression binaryOperationExpression = mock(BinaryOperationExpression.class);
    when(binaryOperationExpression.getLeft()).thenReturn(new ColumnSegment(0, 0, new IdentifierValue("row_id")));
    when(binaryOperationExpression.getRight()).thenReturn(new LiteralExpressionSegment(0, 0, Integer.MAX_VALUE));
    when(binaryOperationExpression.getOperator()).thenReturn(">=");
    when(whereSegment.getExpr()).thenReturn(binaryOperationExpression);
    SubqueryTableSegment subqueryTableSegment = mock(SubqueryTableSegment.class);
    SubquerySegment subquerySegment = mock(SubquerySegment.class);
    SelectStatement subSelectStatement = mock(MySQLSelectStatement.class);
    ProjectionsSegment subProjectionsSegment = mock(ProjectionsSegment.class);
    TopProjectionSegment topProjectionSegment = mock(TopProjectionSegment.class);
    when(topProjectionSegment.getAlias()).thenReturn("row_id");
    when(subProjectionsSegment.getProjections()).thenReturn(Collections.singletonList(topProjectionSegment));
    when(subSelectStatement.getProjections()).thenReturn(subProjectionsSegment);
    when(subquerySegment.getSelect()).thenReturn(subSelectStatement);
    when(subqueryTableSegment.getSubquery()).thenReturn(subquerySegment);
    selectStatement.setFrom(subqueryTableSegment);
    selectStatement.setWhere(whereSegment);
    ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "Oracle"));
    ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), null, selectStatement,
            DefaultDatabase.LOGIC_NAME, Collections.emptyList());
    when(database.getName()).thenReturn(DefaultDatabase.LOGIC_NAME);
    // Merge four query results; the skip-all predicate must yield no rows at all.
    MergedResult actual = resultMerger.merge(Arrays.asList(mockQueryResult(), mockQueryResult(), mockQueryResult(), mockQueryResult()), selectStatementContext, database, mock(ConnectionContext.class));
    assertFalse(actual.next());
}
public synchronized ImmutableList<Struct> readTableRecords(String tableId, String... columnNames) throws IllegalStateException { return readTableRecords(tableId, ImmutableList.copyOf(columnNames)); }
@Test public void testReadRecordsShouldWorkWhenSpannerReadSucceeds() throws ExecutionException, InterruptedException { // arrange prepareTable(); when(resultSet.next()).thenReturn(true).thenReturn(true).thenReturn(false); Struct struct1 = Struct.newBuilder() .set("SingerId") .to(int64(1)) .set("FirstName") .to(string("Marc")) .set("LastName") .to(string("Richards")) .build(); Struct struct2 = Struct.newBuilder() .set("SingerId") .to(int64(2)) .set("FirstName") .to(string("Catalina")) .set("LastName") .to(string("Smith")) .build(); when(resultSet.getCurrentRowAsStruct()).thenReturn(struct1).thenReturn(struct2); when(spanner.getDatabaseClient(any()).singleUse().read(any(), any(), any())) .thenReturn(resultSet); // act ImmutableList<Struct> actual = testManager.readTableRecords("Singers", "SingerId", "FirstName", "LastName"); // assert ImmutableList<Struct> expected = ImmutableList.of(struct1, struct2); assertThat(actual).containsExactlyElementsIn(expected); }
boolean visit(ResourceRequest rr) { Priority priority = rr.getPriority(); Resource capability = rr.getCapability(); Map<Resource, TrackerPerPriorityResource> subMap = map.get(priority); if (subMap == null) { subMap = new HashMap<>(); map.put(priority, subMap); } TrackerPerPriorityResource tracker = subMap.get(capability); if (tracker == null) { tracker = new TrackerPerPriorityResource(); subMap.put(capability, tracker); } return tracker.visit(rr.getResourceName()); }
@Test public void testVisitAnyRequestFirst() { VisitedResourceRequestTracker tracker = new VisitedResourceRequestTracker(nodeTracker); // Visit ANY request first assertTrue(FIRST_CALL_FAILURE, tracker.visit(anyRequest)); // All other requests should return false assertFalse(ANY_VISITED + RACK_FAILURE, tracker.visit(rackRequest)); assertFalse(ANY_VISITED + NODE_FAILURE, tracker.visit(node1Request)); assertFalse(ANY_VISITED + NODE_FAILURE, tracker.visit(node2Request)); }
public static NamesrvController start(final NamesrvController controller) throws Exception { if (null == controller) { throw new IllegalArgumentException("NamesrvController is null"); } boolean initResult = controller.initialize(); if (!initResult) { controller.shutdown(); System.exit(-3); } Runtime.getRuntime().addShutdownHook(new ShutdownHookThread(log, (Callable<Void>) () -> { controller.shutdown(); return null; })); controller.start(); return controller; }
@Test public void testStart() throws Exception { NamesrvController controller = NamesrvStartup.start(namesrvController); Assert.assertNotNull(controller); }
/**
 * Parses and validates publication parameters from a channel URI.
 *
 * <p>The initial-term-id / term-id / term-offset parameters must be supplied as a complete
 * set; when present they are range- and alignment-checked and {@code hasPosition} is set.
 *
 * @param channelUri the parsed channel URI to read parameters from
 * @param ctx the media driver context supplying defaults
 * @param driverConductor conductor used to resolve entity tags and session ids
 * @param isIpc whether the channel is IPC (affects defaults inside PublicationParams)
 * @return the populated parameter holder
 * @throws IllegalArgumentException if the position params are an incomplete set, or the
 *         term offset is out of range, beyond the term length, or misaligned
 * @throws IllegalStateException if term-id precedes initial-term-id by more than 2^31 - 1
 */
static PublicationParams getPublicationParams(
    final ChannelUri channelUri,
    final MediaDriver.Context ctx,
    final DriverConductor driverConductor,
    final boolean isIpc)
{
    final PublicationParams params = new PublicationParams(ctx, isIpc);

    // Each getter parses its URI parameter and stores the result on params.
    params.getEntityTag(channelUri, driverConductor);
    params.getSessionId(channelUri, driverConductor);
    params.getTermBufferLength(channelUri);
    params.getMtuLength(channelUri);
    params.getLingerTimeoutNs(channelUri);
    params.getEos(channelUri);
    params.getSparse(channelUri, ctx);
    params.getSpiesSimulateConnection(channelUri, ctx);
    params.getUntetheredWindowLimitTimeout(channelUri, ctx);
    params.getUntetheredRestingTimeout(channelUri, ctx);
    params.getMaxResend(channelUri);

    // Count how many of the three position parameters were supplied: all or none is valid.
    int count = 0;

    final String initialTermIdStr = channelUri.get(INITIAL_TERM_ID_PARAM_NAME);
    count = initialTermIdStr != null ? count + 1 : count;

    final String termIdStr = channelUri.get(TERM_ID_PARAM_NAME);
    count = termIdStr != null ? count + 1 : count;

    final String termOffsetStr = channelUri.get(TERM_OFFSET_PARAM_NAME);
    count = termOffsetStr != null ? count + 1 : count;

    if (count > 0)
    {
        if (count < 3)
        {
            throw new IllegalArgumentException("params must be used as a complete set: " +
                INITIAL_TERM_ID_PARAM_NAME + " " + TERM_ID_PARAM_NAME + " " + TERM_OFFSET_PARAM_NAME +
                " channel=" + channelUri);
        }

        params.initialTermId = Integer.parseInt(initialTermIdStr);
        params.termId = Integer.parseInt(termIdStr);
        params.termOffset = Integer.parseInt(termOffsetStr);

        if (params.termOffset > params.termLength)
        {
            throw new IllegalArgumentException(
                TERM_OFFSET_PARAM_NAME + "=" + params.termOffset + " > " +
                TERM_LENGTH_PARAM_NAME + "=" + params.termLength + ": channel=" + channelUri);
        }

        if (params.termOffset < 0 || params.termOffset > LogBufferDescriptor.TERM_MAX_LENGTH)
        {
            throw new IllegalArgumentException(
                TERM_OFFSET_PARAM_NAME + "=" + params.termOffset + " out of range: channel=" + channelUri);
        }

        // Frame-aligned offsets only.
        if ((params.termOffset & (FrameDescriptor.FRAME_ALIGNMENT - 1)) != 0)
        {
            throw new IllegalArgumentException(
                TERM_OFFSET_PARAM_NAME + "=" + params.termOffset +
                " must be a multiple of FRAME_ALIGNMENT: channel=" + channelUri);
        }

        // Signed subtraction detects a wrap of more than 2^31 - 1 terms.
        if (params.termId - params.initialTermId < 0)
        {
            throw new IllegalStateException(
                "difference greater than 2^31 - 1: " + INITIAL_TERM_ID_PARAM_NAME + "=" +
                params.initialTermId + " when " + TERM_ID_PARAM_NAME + "=" + params.termId +
                " channel=" + channelUri);
        }

        params.hasPosition = true;
    }

    params.isResponse = CONTROL_MODE_RESPONSE.equals(channelUri.get(MDC_CONTROL_MODE_PARAM_NAME));
    // Defaults to -1 when no response correlation id is supplied.
    params.responseCorrelationId = Long.parseLong(channelUri.get(RESPONSE_CORRELATION_ID_PARAM_NAME, "-1"));

    return params;
}
@Test void hasMaxRetransmits() { final ChannelUri uri = ChannelUri.parse("aeron:udp?endpoint=localhost:1010|" + CommonContext.MAX_RESEND_PARAM_NAME + "=100"); final PublicationParams params = PublicationParams.getPublicationParams(uri, ctx, conductor, false); assertTrue(params.hasMaxResend); assertEquals(100, params.maxResend); }
public IssuesChangesNotification newIssuesChangesNotification(Set<DefaultIssue> issues, Map<String, UserDto> assigneesByUuid) { AnalysisChange change = new AnalysisChange(analysisMetadataHolder.getAnalysisDate()); Set<ChangedIssue> changedIssues = issues.stream() .map(issue -> new ChangedIssue.Builder(issue.key()) .setAssignee(getAssignee(issue.assignee(), assigneesByUuid)) .setNewStatus(issue.status()) .setNewIssueStatus(issue.status() != null ? IssueStatus.of(issue.status(), issue.resolution()) : null) .setRule(getRuleByRuleKey(issue.ruleKey())) .setProject(getProject()) .build()) .collect(Collectors.toSet()); return issuesChangesSerializer.serialize(new IssuesChangesNotificationBuilder(changedIssues, change)); }
@Test public void newIssuesChangesNotification_fails_with_NPE_if_issue_has_no_key() { RuleKey ruleKey = RuleKey.of("foo", "bar"); DefaultIssue issue = new DefaultIssue() .setRuleKey(ruleKey); Map<String, UserDto> assigneesByUuid = nonEmptyAssigneesByUuid(); ruleRepository.add(ruleKey); treeRootHolder.setRoot(ReportComponent.builder(PROJECT, 1).build()); analysisMetadata.setAnalysisDate(new Random().nextLong()); analysisMetadata.setBranch(mock(Branch.class)); assertThatThrownBy(() -> underTest.newIssuesChangesNotification(ImmutableSet.of(issue), assigneesByUuid)) .isInstanceOf(NullPointerException.class) .hasMessage("key can't be null"); }
/**
 * Converts a PostgreSQL column definition into a SeaTunnel {@link Column}, mapping the
 * Postgres data type name to the corresponding SeaTunnel type and carrying over name,
 * nullability, default value and comment.
 *
 * @param typeDefine the Postgres column metadata to convert
 * @return the converted column
 * @throws SeaTunnelRuntimeException (via CommonError) for unsupported Postgres types
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    PhysicalColumn.PhysicalColumnBuilder builder =
            PhysicalColumn.builder()
                    .name(typeDefine.getName())
                    .sourceType(typeDefine.getColumnType())
                    .nullable(typeDefine.isNullable())
                    .defaultValue(typeDefine.getDefaultValue())
                    .comment(typeDefine.getComment());
    // Postgres type names are matched case-insensitively.
    String pgDataType = typeDefine.getDataType().toLowerCase();
    switch (pgDataType) {
        case PG_BOOLEAN:
            builder.dataType(BasicType.BOOLEAN_TYPE);
            break;
        case PG_BOOLEAN_ARRAY:
            builder.dataType(ArrayType.BOOLEAN_ARRAY_TYPE);
            break;
        case PG_SMALLSERIAL:
        case PG_SMALLINT:
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case PG_SMALLINT_ARRAY:
            builder.dataType(ArrayType.SHORT_ARRAY_TYPE);
            break;
        case PG_INTEGER:
        case PG_SERIAL:
            builder.dataType(BasicType.INT_TYPE);
            break;
        case PG_INTEGER_ARRAY:
            builder.dataType(ArrayType.INT_ARRAY_TYPE);
            break;
        case PG_BIGINT:
        case PG_BIGSERIAL:
            builder.dataType(BasicType.LONG_TYPE);
            break;
        case PG_BIGINT_ARRAY:
            builder.dataType(ArrayType.LONG_ARRAY_TYPE);
            break;
        case PG_REAL:
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case PG_REAL_ARRAY:
            builder.dataType(ArrayType.FLOAT_ARRAY_TYPE);
            break;
        case PG_DOUBLE_PRECISION:
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case PG_DOUBLE_PRECISION_ARRAY:
            builder.dataType(ArrayType.DOUBLE_ARRAY_TYPE);
            break;
        case PG_NUMERIC:
            // Fall back to the default precision/scale when Postgres reports no precision.
            DecimalType decimalType;
            if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
                decimalType =
                        new DecimalType(
                                typeDefine.getPrecision().intValue(), typeDefine.getScale());
            } else {
                decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
            }
            builder.dataType(decimalType);
            break;
        case PG_MONEY:
            // -92233720368547758.08 to +92233720368547758.07, With the sign bit it's 20, we use
            // 30 precision to save it
            DecimalType moneyDecimalType;
            moneyDecimalType = new DecimalType(30, 2);
            builder.dataType(moneyDecimalType);
            builder.columnLength(30L);
            builder.scale(2);
            break;
        case PG_CHAR:
        case PG_CHARACTER:
            builder.dataType(BasicType.STRING_TYPE);
            // char defaults to length 1 when no length is declared; lengths are widened to
            // a 4-byte-per-char budget.
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
                builder.sourceType(pgDataType);
            } else {
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
                builder.sourceType(String.format("%s(%s)", pgDataType, typeDefine.getLength()));
            }
            break;
        case PG_VARCHAR:
        case PG_CHARACTER_VARYING:
            builder.dataType(BasicType.STRING_TYPE);
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.sourceType(pgDataType);
            } else {
                builder.sourceType(String.format("%s(%s)", pgDataType, typeDefine.getLength()));
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
            }
            break;
        case PG_TEXT:
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case PG_UUID:
            builder.dataType(BasicType.STRING_TYPE);
            builder.sourceType(pgDataType);
            builder.columnLength(128L);
            break;
        case PG_JSON:
        case PG_JSONB:
        case PG_XML:
        case PG_GEOMETRY:
        case PG_GEOGRAPHY:
            // Structured/spatial types are surfaced as plain strings.
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case PG_CHAR_ARRAY:
        case PG_VARCHAR_ARRAY:
        case PG_TEXT_ARRAY:
            builder.dataType(ArrayType.STRING_ARRAY_TYPE);
            break;
        case PG_BYTEA:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case PG_DATE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case PG_TIME:
        case PG_TIME_TZ:
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            // Clamp over-precise time scales to the supported maximum.
            if (typeDefine.getScale() != null && typeDefine.getScale() > MAX_TIME_SCALE) {
                builder.scale(MAX_TIME_SCALE);
                log.warn(
                        "The scale of time type is larger than {}, it will be truncated to {}",
                        MAX_TIME_SCALE,
                        MAX_TIME_SCALE);
            } else {
                builder.scale(typeDefine.getScale());
            }
            break;
        case PG_TIMESTAMP:
        case PG_TIMESTAMP_TZ:
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            // Clamp over-precise timestamp scales to the supported maximum.
            if (typeDefine.getScale() != null && typeDefine.getScale() > MAX_TIMESTAMP_SCALE) {
                builder.scale(MAX_TIMESTAMP_SCALE);
                log.warn(
                        "The scale of timestamp type is larger than {}, it will be truncated to {}",
                        MAX_TIMESTAMP_SCALE,
                        MAX_TIMESTAMP_SCALE);
            } else {
                builder.scale(typeDefine.getScale());
            }
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                    identifier(), typeDefine.getDataType(), typeDefine.getName());
    }
    return builder.build();
}
@Test public void testConvertFloat() { BasicTypeDefine<Object> typeDefine = BasicTypeDefine.builder() .name("test") .columnType("float4") .dataType("float4") .build(); Column column = PostgresTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(BasicType.FLOAT_TYPE, column.getDataType()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase()); }
public static ConfigChangeContentBuilder convertJsonString(String content) { return GSON.fromJson(content, ConfigChangeContentBuilder.class); }
@Test public void testConvertJsonString() { ConfigChangeContentBuilder contentBuilder = ConfigChangeContentBuilder.convertJsonString(configString); assertNotNull(contentBuilder.getCreateItems()); assertNotNull(contentBuilder.getUpdateItems().get(0).oldItem); assertNotNull(contentBuilder.getUpdateItems().get(0).newItem); assertNotNull(contentBuilder.getDeleteItems()); }
protected static Map<String, String> appendParameter(Map<String, String> parameters, String key, String value) { if (parameters == null) { parameters = new HashMap<>(); } parameters.put(key, value); return parameters; }
@Test void appendParameters2() { Map<String, String> source = new HashMap<>(); source.put("default.num", "one1"); source.put("num", "ONE1"); source = AbstractBuilder.appendParameter(source, "default.num", "one"); source = AbstractBuilder.appendParameter(source, "num", "ONE"); Assertions.assertTrue(source.containsKey("default.num")); Assertions.assertEquals("ONE", source.get("num")); }
@Override public String getName() { return TransformFunctionType.COALESCE.getName(); }
@Test public void testCoalesceIntColumnsAndLiterals() { final int intLiteral = 313; TransformFunction coalesceFunc = TransformFunctionFactory.get( RequestContextUtils.getExpression(String.format("COALESCE(%s,%s)", INT_SV_NULL_COLUMN, intLiteral)), _dataSourceMap); Assert.assertEquals(coalesceFunc.getName(), "coalesce"); int[] expectedResults = new int[NUM_ROWS]; for (int i = 0; i < NUM_ROWS; i++) { if (isNullRow(i)) { expectedResults[i] = intLiteral; } else { expectedResults[i] = _intSVValues[i]; } } testTransformFunction(coalesceFunc, expectedResults); }
public static void readBySax(String path, int rid, RowHandler rowHandler) { readBySax(FileUtil.file(path), rid, rowHandler); }
@Test public void doAfterAllAnalysedTest() { final String path = "readBySax.xls"; final AtomicInteger doAfterAllAnalysedTime = new AtomicInteger(0); ExcelUtil.readBySax(path, -1, new RowHandler() { @Override public void handle(final int sheetIndex, final long rowIndex, final List<Object> rowCells) { //Console.log("sheetIndex={};rowIndex={},rowCells={}",sheetIndex,rowIndex,rowCells); } @Override public void doAfterAllAnalysed() { doAfterAllAnalysedTime.addAndGet(1); } }); //总共2个sheet页,读取所有sheet时,一共执行doAfterAllAnalysed2次。 assertEquals(2, doAfterAllAnalysedTime.intValue()); }
@Override public void filter(ContainerRequestContext requestContext) throws IOException { if (!requestContext.getUriInfo().getPath().endsWith(targetPath)) { return; } final List<MediaType> acceptedFormats = requestContext.getAcceptableMediaTypes(); final Map<MediaType, ExportFormat> exportFormatCandidates = supportedFormats.entrySet() .stream() .filter(entry -> acceptedFormats.stream().anyMatch(acceptedFormat -> entry.getKey().isCompatible(acceptedFormat))) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); if (exportFormatCandidates.isEmpty()) { requestContext.abortWith(Response.status(Response.Status.UNSUPPORTED_MEDIA_TYPE).build()); return; } final Map<MediaType, Optional<String>> candidateErrors = exportFormatCandidates.entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().hasError())); if (candidateErrors.values().stream().allMatch(Optional::isPresent)) { final String errorMessage = candidateErrors.values().stream() .map(optionalError -> optionalError.orElse("")) .collect(Collectors.joining("\n")); requestContext.abortWith(Response.status(Response.Status.UNSUPPORTED_MEDIA_TYPE) .entity(errorMessage) .type(MoreMediaTypes.TEXT_PLAIN_TYPE) .build()); return; } final List<String> allowedMediaTypes = candidateErrors.entrySet().stream() .filter(entry -> !entry.getValue().isPresent()) .map(Map.Entry::getKey) .map(MediaType::toString) .collect(Collectors.toList()); requestContext.getHeaders().put(HttpHeaders.ACCEPT, allowedMediaTypes); }
@Test void returns415IfAcceptedFormatIsNotEnabled() throws Exception { final ContainerRequestFilter filter = new MessageExportFormatFilter(Collections.singleton(disabledJsonExportFormat)); final ContainerRequestContext requestContext = mockRequestContext(Collections.emptyList()); filter.filter(requestContext); verifyRequestAborted(requestContext); }
public static KeyStore loadKeyStore(File certificateChainFile, File privateKeyFile, String keyPassword) throws IOException, GeneralSecurityException { PrivateKey key; try { key = createPrivateKey(privateKeyFile, keyPassword); } catch (OperatorCreationException | IOException | GeneralSecurityException | PKCSException e) { throw new GeneralSecurityException("Private Key issues", e); } List<X509Certificate> certificateChain = readCertificateChain(certificateChainFile); if (certificateChain.isEmpty()) { throw new CertificateException("Certificate file does not contain any certificates: " + certificateChainFile); } KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null, null); keyStore.setKeyEntry("key", key, keyPassword.toCharArray(), certificateChain.stream().toArray(Certificate[]::new)); return keyStore; }
@Test void testParsingPKCS1WithPassword() throws IOException, GeneralSecurityException { KeyStore keystore = PEMImporter.loadKeyStore(pemCert, privkeyWithPasswordPKCS1, "test"); assertEquals(1, keystore.size()); assertTrue(keystore.containsAlias("key")); assertEquals(1, keystore.getCertificateChain("key").length); }
@Override public void configure(final Map<String, ?> config) { configure( config, new Options(), org.rocksdb.LRUCache::new, org.rocksdb.WriteBufferManager::new ); }
@Test public void shouldNotUseCacheForWriteBufferIfNotConfigured() { // When: CONFIG_PROPS.put(KsqlBoundedMemoryRocksDBConfig.ACCOUNT_WRITE_BUFFER_AGAINST_CACHE, false); KsqlBoundedMemoryRocksDBConfigSetter.configure( CONFIG_PROPS, rocksOptions, cacheFactory, bufferManagerFactory ); // Then: verify(bufferManagerFactory).create(anyLong(), same(writeCache)); }
static Schema sortKeySchema(Schema schema, SortOrder sortOrder) { List<SortField> sortFields = sortOrder.fields(); int size = sortFields.size(); List<Types.NestedField> transformedFields = Lists.newArrayListWithCapacity(size); for (int i = 0; i < size; ++i) { int sourceFieldId = sortFields.get(i).sourceId(); Types.NestedField sourceField = schema.findField(sourceFieldId); Preconditions.checkArgument( sourceField != null, "Cannot find source field: %s", sourceFieldId); Type transformedType = sortFields.get(i).transform().getResultType(sourceField.type()); // There could be multiple transformations on the same source column, like in the PartitionKey // case. To resolve the collision, field id is set to transform index and field name is set to // sourceFieldName_transformIndex Types.NestedField transformedField = Types.NestedField.of( i, sourceField.isOptional(), sourceField.name() + '_' + i, transformedType, sourceField.doc()); transformedFields.add(transformedField); } return new Schema(transformedFields); }
@Test public void testResultSchema() { Schema schema = new Schema( Types.NestedField.required(1, "id", Types.StringType.get()), Types.NestedField.required(2, "ratio", Types.DoubleType.get()), Types.NestedField.optional( 3, "user", Types.StructType.of( Types.NestedField.required(11, "name", Types.StringType.get()), Types.NestedField.required(12, "ts", Types.TimestampType.withoutZone()), Types.NestedField.optional(13, "device_id", Types.UUIDType.get()), Types.NestedField.optional( 14, "location", Types.StructType.of( Types.NestedField.required(101, "lat", Types.FloatType.get()), Types.NestedField.required(102, "long", Types.FloatType.get()), Types.NestedField.required(103, "blob", Types.BinaryType.get())))))); SortOrder sortOrder = SortOrder.builderFor(schema) .asc("ratio") .sortBy(Expressions.hour("user.ts"), SortDirection.ASC, NullOrder.NULLS_FIRST) .sortBy( Expressions.bucket("user.device_id", 16), SortDirection.ASC, NullOrder.NULLS_FIRST) .sortBy( Expressions.truncate("user.location.blob", 16), SortDirection.ASC, NullOrder.NULLS_FIRST) .build(); assertThat(SortKeyUtil.sortKeySchema(schema, sortOrder).asStruct()) .isEqualTo( Types.StructType.of( Types.NestedField.required(0, "ratio_0", Types.DoubleType.get()), Types.NestedField.required(1, "ts_1", Types.IntegerType.get()), Types.NestedField.optional(2, "device_id_2", Types.IntegerType.get()), Types.NestedField.required(3, "blob_3", Types.BinaryType.get()))); }
@Override public void register(ProviderConfig config) { throw new UnsupportedOperationException("DomainRegistry not support register providerConfig:" + config.getInterfaceId()); }
@Test(expected = UnsupportedOperationException.class) public void testRegister() { domainRegistry.register(new ProviderConfig<>()); }
@Override public String getName() { return name.toLowerCase(ENGLISH); }
/**
 * Verifies the Pinot-schema-to-Presto-column mapping across dimension, metric, time and
 * date-time fields (with date/timestamp inference enabled): expected Presto types and
 * field-type comments are checked for every generated column, and all columns are
 * non-nullable.
 */
@Test
public void testParsePinotSchemaToPinotColumns() {
  PinotConfig pinotConfig = new PinotConfig();
  pinotConfig.setInferDateTypeInSchema(true);
  pinotConfig.setInferTimestampTypeInSchema(true);
  // A schema exercising every field kind: single/multi-value dimensions of each data
  // type, metrics, a legacy time column, and EPOCH date-time columns of varying units.
  Schema testPinotSchema = new Schema.SchemaBuilder()
      .addSingleValueDimension("singleValueIntDimension", FieldSpec.DataType.INT)
      .addSingleValueDimension("singleValueLongDimension", FieldSpec.DataType.LONG)
      .addSingleValueDimension("singleValueFloatDimension", FieldSpec.DataType.FLOAT)
      .addSingleValueDimension("singleValueDoubleDimension", FieldSpec.DataType.DOUBLE)
      .addSingleValueDimension("singleValueBytesDimension", FieldSpec.DataType.BYTES)
      .addSingleValueDimension("singleValueBooleanDimension", FieldSpec.DataType.BOOLEAN)
      .addSingleValueDimension("singleValueStringDimension", FieldSpec.DataType.STRING)
      .addMultiValueDimension("multiValueIntDimension", FieldSpec.DataType.INT)
      .addMultiValueDimension("multiValueLongDimension", FieldSpec.DataType.LONG)
      .addMultiValueDimension("multiValueFloatDimension", FieldSpec.DataType.FLOAT)
      .addMultiValueDimension("multiValueDoubleDimension", FieldSpec.DataType.DOUBLE)
      .addMultiValueDimension("multiValueBytesDimension", FieldSpec.DataType.BYTES)
      .addMultiValueDimension("multiValueBooleanDimension", FieldSpec.DataType.BOOLEAN)
      .addMultiValueDimension("multiValueStringDimension", FieldSpec.DataType.STRING)
      .addMetric("intMetric", FieldSpec.DataType.INT)
      .addMetric("longMetric", FieldSpec.DataType.LONG)
      .addMetric("floatMetric", FieldSpec.DataType.FLOAT)
      .addMetric("doubleMetric", FieldSpec.DataType.DOUBLE)
      .addMetric("bytesMetric", FieldSpec.DataType.BYTES)
      .addTime(new TimeGranularitySpec(FieldSpec.DataType.INT, TimeUnit.DAYS, "daysSinceEpoch"),
          new TimeGranularitySpec(FieldSpec.DataType.INT, TimeUnit.DAYS, "daysSinceEpoch"))
      .addDateTime("epochDayDateTime", FieldSpec.DataType.INT, "1:DAYS:EPOCH", "1:DAYS")
      .addDateTime("epochMillisDateTime", FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:SECONDS")
      .addDateTime("epochTenDayDateTime", FieldSpec.DataType.INT, "10:DAYS:EPOCH", "1:DAYS")
      .addDateTime("epochSecondsDateTime", FieldSpec.DataType.LONG, "1:SECONDS:EPOCH", "1:SECONDS")
      .build();
  // Expected Presto type per Pinot field; note 1-DAY EPOCH columns infer DATE and
  // 1-MILLISECOND EPOCH columns infer TIMESTAMP, while other granularities stay numeric.
  Map<String, Type> expectedTypeMap = new ImmutableMap.Builder<String, Type>()
      .put("singleValueIntDimension", INTEGER)
      .put("singleValueLongDimension", BIGINT)
      .put("singleValueFloatDimension", DOUBLE)
      .put("singleValueDoubleDimension", DOUBLE)
      .put("singleValueBytesDimension", VARBINARY)
      .put("singleValueBooleanDimension", BOOLEAN)
      .put("singleValueStringDimension", VARCHAR)
      .put("multiValueIntDimension", new ArrayType(INTEGER))
      .put("multiValueLongDimension", new ArrayType(BIGINT))
      .put("multiValueFloatDimension", new ArrayType(DOUBLE))
      .put("multiValueDoubleDimension", new ArrayType(DOUBLE))
      .put("multiValueBytesDimension", new ArrayType(VARBINARY))
      .put("multiValueBooleanDimension", new ArrayType(BOOLEAN))
      .put("multiValueStringDimension", new ArrayType(VARCHAR))
      .put("intMetric", INTEGER)
      .put("longMetric", BIGINT)
      .put("floatMetric", DOUBLE)
      .put("doubleMetric", DOUBLE)
      .put("bytesMetric", VARBINARY)
      .put("daysSinceEpoch", DateType.DATE)
      .put("epochDayDateTime", DateType.DATE)
      .put("epochMillisDateTime", TimestampType.TIMESTAMP)
      .put("epochTenDayDateTime", INTEGER)
      .put("epochSecondsDateTime", BIGINT)
      .build();
  // Expected column comment: the Pinot field type (DIMENSION/METRIC/TIME/DATE_TIME).
  Map<String, String> expectedComment = new ImmutableMap.Builder<String, String>()
      .put("sd1", FieldSpec.FieldType.DIMENSION.name())
      .put("singleValueIntDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("singleValueLongDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("singleValueFloatDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("singleValueDoubleDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("singleValueBytesDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("singleValueBooleanDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("singleValueStringDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("multiValueIntDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("multiValueLongDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("multiValueFloatDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("multiValueDoubleDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("multiValueBytesDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("multiValueBooleanDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("multiValueStringDimension", FieldSpec.FieldType.DIMENSION.name())
      .put("intMetric", FieldSpec.FieldType.METRIC.name())
      .put("longMetric", FieldSpec.FieldType.METRIC.name())
      .put("floatMetric", FieldSpec.FieldType.METRIC.name())
      .put("doubleMetric", FieldSpec.FieldType.METRIC.name())
      .put("bytesMetric", FieldSpec.FieldType.METRIC.name())
      .put("daysSinceEpoch", FieldSpec.FieldType.TIME.name())
      .put("epochDayDateTime", FieldSpec.FieldType.DATE_TIME.name())
      .put("epochMillisDateTime", FieldSpec.FieldType.DATE_TIME.name())
      .put("epochTenDayDateTime", FieldSpec.FieldType.DATE_TIME.name())
      .put("epochSecondsDateTime", FieldSpec.FieldType.DATE_TIME.name())
      .build();
  List<PinotColumn> pinotColumns = PinotColumnUtils.getPinotColumnsForPinotSchema(testPinotSchema, pinotConfig.isInferDateTypeInSchema(), pinotConfig.isInferTimestampTypeInSchema());
  for (PinotColumn column : pinotColumns) {
    assertEquals(column.getType(), expectedTypeMap.get(column.getName()), "Failed to compare column type for field - " + column.getName());
    assertEquals(column.getComment(), expectedComment.get(column.getName()), "Failed to compare column comment for field - " + column.getName());
    assertEquals(column.isNullable(), false);
  }
}
@VisibleForTesting public Account updateLastSeen(Account account, Device device) { // compute a non-negative integer between 0 and 86400. long n = Util.ensureNonNegativeLong(account.getUuid().getLeastSignificantBits()); final long lastSeenOffsetSeconds = n % ChronoUnit.DAYS.getDuration().toSeconds(); // produce a truncated timestamp which is either today at UTC midnight // or yesterday at UTC midnight, based on per-user randomized offset used. final long todayInMillisWithOffset = Util.todayInMillisGivenOffsetFromNow(clock, Duration.ofSeconds(lastSeenOffsetSeconds).negated()); // only update the device's last seen time when it falls behind the truncated timestamp. // this ensures a few things: // (1) each account will only update last-seen at most once per day // (2) these updates will occur throughout the day rather than all occurring at UTC midnight. if (device.getLastSeen() < todayInMillisWithOffset) { Metrics.summary(DAYS_SINCE_LAST_SEEN_DISTRIBUTION_NAME, IS_PRIMARY_DEVICE_TAG, String.valueOf(device.isPrimary())) .record(Duration.ofMillis(todayInMillisWithOffset - device.getLastSeen()).toDays()); return accountsManager.updateDeviceLastSeen(account, device, Util.todayInMillis(clock)); } return account; }
@Test void testUpdateLastSeenEndOfDay() { clock.pin(Instant.ofEpochMilli(today + 86_400_000L - 1)); final Device device1 = acct1.getDevices().stream().findFirst().get(); final Device device2 = acct2.getDevices().stream().findFirst().get(); final Account updatedAcct1 = accountAuthenticator.updateLastSeen(acct1, device1); final Account updatedAcct2 = accountAuthenticator.updateLastSeen(acct2, device2); verify(accountsManager).updateDeviceLastSeen(eq(acct1), eq(device1), anyLong()); verify(accountsManager).updateDeviceLastSeen(eq(acct2), eq(device2), anyLong()); assertThat(device1.getLastSeen()).isEqualTo(today); assertThat(device2.getLastSeen()).isEqualTo(today); assertThat(updatedAcct1).isNotSameAs(acct1); assertThat(updatedAcct2).isNotSameAs(acct2); }
/**
 * Returns {@code true} when the current cluster version is either unknown
 * or greater than or equal to the given {@code version}.
 */
protected boolean isClusterVersionUnknownOrGreaterOrEqual(Version version) {
    return getNodeEngine().getClusterService().getClusterVersion().isUnknownOrGreaterOrEqual(version);
}
@Test
public void testClusterVersion_isUnknownGreaterOrEqual_currentVersion() {
  // The current cluster version trivially satisfies "unknown or >= current".
  assertTrue(object.isClusterVersionUnknownOrGreaterOrEqual(CURRENT_CLUSTER_VERSION));
}
/**
 * Assembles the complete Elasticsearch settings map by delegating to the
 * per-concern configure* methods, then logs the resolved listen addresses.
 *
 * @return the mutable map of Elasticsearch setting keys to values
 */
public Map<String, String> build() {
  Map<String, String> settings = new HashMap<>();
  // Each method mutates the map in place; order mirrors the logical concerns.
  configureFileSystem(settings);
  configureNetwork(settings);
  configureCluster(settings);
  configureSecurity(settings);
  configureOthers(settings);
  LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]",
      settings.get(ES_HTTP_HOST_KEY), settings.get(ES_HTTP_PORT_KEY),
      settings.get(ES_TRANSPORT_HOST_KEY), settings.get(ES_TRANSPORT_PORT_KEY));
  return settings;
}
@Test
public void configureSecurity_givenClusterSearchPasswordNotProvided_dontAddXpackParameters() throws Exception {
  // minProps(true) builds a clustered configuration with no search password.
  Props props = minProps(true);
  EsSettings settings = new EsSettings(props, new EsInstallation(props), system);

  Map<String, String> outputParams = settings.build();

  // Without a password, X-Pack security settings must be absent entirely.
  assertThat(outputParams.get("xpack.security.transport.ssl.enabled")).isNull();
}
@GET
@Path("/health")
@Operation(summary = "Health check endpoint to verify worker readiness and liveness")
public Response healthCheck() throws Throwable {
  WorkerStatus workerStatus;
  int statusCode;
  try {
    // Ask the herder to confirm it can service requests, bounded by the
    // configured health-check timeout.
    FutureCallback<Void> cb = new FutureCallback<>();
    herder.healthCheck(cb);

    long timeoutNs = TimeUnit.MILLISECONDS.toNanos(requestTimeout.healthCheckTimeoutMs());
    long deadlineNs = timeoutNs + time.nanoseconds();
    time.waitForFuture(cb, deadlineNs);

    statusCode = Response.Status.OK.getStatusCode();
    workerStatus = WorkerStatus.healthy();
  } catch (TimeoutException e) {
    // A staged timeout carries a description of the in-progress stage;
    // a plain timeout yields no details.
    String statusDetails = e instanceof StagedTimeoutException
        ? ((StagedTimeoutException) e).stage().summarize()
        : null;
    if (!herder.isReady()) {
      // Still starting up: report 503 so orchestrators keep waiting.
      statusCode = Response.Status.SERVICE_UNAVAILABLE.getStatusCode();
      workerStatus = WorkerStatus.starting(statusDetails);
    } else {
      // Started but unresponsive: report 500 as unhealthy.
      statusCode = Response.Status.INTERNAL_SERVER_ERROR.getStatusCode();
      workerStatus = WorkerStatus.unhealthy(statusDetails);
    }
  } catch (ExecutionException e) {
    // Surface the underlying failure rather than the ExecutionException wrapper.
    throw e.getCause();
  }

  return Response.status(statusCode).entity(workerStatus).build();
}
@Test
public void testHealthCheckStarting() throws Throwable {
  // A timed-out health check while the herder is not yet ready means "starting".
  expectHealthCheck(new TimeoutException());
  when(herder.isReady()).thenReturn(false);

  Response response = rootResource.healthCheck();
  assertEquals(Response.Status.SERVICE_UNAVAILABLE.getStatusCode(), response.getStatus());

  // A plain TimeoutException carries no stage details, hence starting(null).
  WorkerStatus expectedStatus = WorkerStatus.starting(null);
  WorkerStatus actualStatus = workerStatus(response);
  assertEquals(expectedStatus, actualStatus);
}
@DELETE
@Path("/{connector}/offsets")
@Operation(summary = "Reset the offsets for the specified connector")
public Response resetConnectorOffsets(final @Parameter(hidden = true) @QueryParam("forward") Boolean forward,
                                      final @Context HttpHeaders headers,
                                      final @PathParam("connector") String connector) throws Throwable {
  // Kick off the reset on the herder; completion (or forwarding to the
  // leader) is handled uniformly by the request handler.
  FutureCallback<Message> callback = new FutureCallback<>();
  herder.resetConnectorOffsets(connector, callback);
  String forwardPath = "/connectors/" + connector + "/offsets";
  Message result = requestHandler.completeOrForwardRequest(
      callback, forwardPath, "DELETE", headers, null,
      new TypeReference<Message>() { }, new IdentityTranslator<>(), forward);
  return Response.ok().entity(result).build();
}
@Test
public void testResetOffsetsNotLeader() throws Throwable {
  // Capture the herder callback so the stub can complete it with a
  // not-leader error, forcing the resource to forward to the leader.
  final ArgumentCaptor<Callback<Message>> callbackCaptor = ArgumentCaptor.forClass(Callback.class);
  expectAndCallbackNotLeaderException(callbackCaptor).when(herder)
      .resetConnectorOffsets(eq(CONNECTOR_NAME), callbackCaptor.capture());

  final String forwardedUrl = LEADER_URL + "connectors/" + CONNECTOR_NAME + "/offsets?forward=true";
  when(restClient.httpRequest(eq(forwardedUrl), eq("DELETE"), isNull(), isNull(), any()))
      .thenReturn(new RestClient.HttpResponse<>(200, new HashMap<>(), new Message("")));

  connectorsResource.resetConnectorOffsets(null, NULL_HEADERS, CONNECTOR_NAME);
}
@Override public FlowEntry decode(ObjectNode json, CodecContext context) { checkNotNull(json, "JSON object cannot be null"); // decode FlowRule-specific fields using the FlowRule codec FlowRule flowRule = context.codec(FlowRule.class).decode(json, context); JsonNode stateNode = json.get(STATE); FlowEntry.FlowEntryState state = (null == stateNode) ? FlowEntry.FlowEntryState.ADDED : FlowEntry.FlowEntryState.valueOf(stateNode.asText()); JsonNode lifeNode = json.get(LIFE); long life = (null == lifeNode) ? 0 : lifeNode.asLong(); JsonNode liveTypeNode = json.get(LIVE_TYPE); FlowEntry.FlowLiveType liveType = (null == liveTypeNode) ? FlowEntry.FlowLiveType.UNKNOWN : FlowEntry.FlowLiveType.valueOf(liveTypeNode.asText()); JsonNode packetsNode = json.get(PACKETS); long packets = (null == packetsNode) ? 0 : packetsNode.asLong(); JsonNode bytesNode = json.get(BYTES); long bytes = (null == bytesNode) ? 0 : bytesNode.asLong(); return new DefaultFlowEntry(flowRule, state, life, liveType, packets, bytes); }
@Test
public void testDecode() throws IOException {
  // try-with-resources: the original leaked the resource stream.
  try (InputStream jsonStream = FlowEntryCodec.class.getResourceAsStream(JSON_FILE)) {
    JsonNode json = context.mapper().readTree(jsonStream);
    // The decoded entry is the ACTUAL value; FLOW_ENTRY is the expected
    // fixture. The original named the decoded value "expected" and passed the
    // arguments in reversed expected/actual order.
    FlowEntry decoded = flowEntryCodec.decode((ObjectNode) json, context);
    assertEquals(FLOW_ENTRY, decoded);
  }
}
@PutMapping("/config")
public Result<Boolean> updateConfig(@RequestBody ConfigInfo configInfo) {
  // Group, key and content are all mandatory for an update.
  if (StringUtils.isEmpty(configInfo.getGroup()) || StringUtils.isEmpty(configInfo.getKey())
      || StringUtils.isEmpty(configInfo.getContent())) {
    return new Result<>(ResultCodeType.MISS_PARAM.getCode(), ResultCodeType.MISS_PARAM.getMessage());
  }
  // An update requires the config to already exist: a successful lookup with
  // an empty result means there is nothing to update.
  Result<List<ConfigInfo>> result = configService.getConfigList(configInfo, PluginType.OTHER, true);
  if (result.isSuccess() && CollectionUtils.isEmpty(result.getData())) {
    return new Result<>(ResultCodeType.NOT_EXISTS.getCode(), ResultCodeType.NOT_EXISTS.getMessage());
  }
  // NOTE(review): a FAILED lookup also falls through to publish — confirm this
  // is intentional rather than propagating the lookup error.
  return configService.publishConfig(configInfo);
}
@Test
public void updateConfig() {
  // Happy path: the shared fixture config exists, so the update succeeds.
  Result<Boolean> result = configController.updateConfig(configInfo);
  Assert.assertTrue(result.isSuccess());
}
/**
 * Looks up column metadata by its 1-based column index (JDBC-style),
 * delegating to the name-based lookup.
 *
 * @param columnIndex 1-based index of the column
 * @return metadata of the column at that index
 */
public PipelineColumnMetaData getColumnMetaData(final int columnIndex) {
    // Column indexes are 1-based; the backing name list is 0-based.
    String columnName = columnNames.get(columnIndex - 1);
    return getColumnMetaData(columnName);
}
@Test
void assertIndexOutOfBoundsException() {
  // The fixture table has fewer than two columns, so the 1-based index 2
  // must propagate the list's IndexOutOfBoundsException.
  assertThrows(IndexOutOfBoundsException.class, () -> pipelineTableMetaData.getColumnMetaData(2));
}
@Produces
@DefaultBean
@Singleton
BackgroundJobServerConfiguration backgroundJobServerConfiguration(BackgroundJobServerWorkerPolicy backgroundJobServerWorkerPolicy) {
    // The background job server is opt-in at build time; produce no bean otherwise.
    if (!jobRunrBuildTimeConfiguration.backgroundJobServer().enabled()) {
        return null;
    }
    final BackgroundJobServerConfiguration configuration = usingStandardBackgroundJobServerConfiguration();
    configuration.andBackgroundJobServerWorkerPolicy(backgroundJobServerWorkerPolicy);
    // Layer every optional runtime override on top of the standard defaults.
    jobRunrRuntimeConfiguration.backgroundJobServer().name().ifPresent(configuration::andName);
    jobRunrRuntimeConfiguration.backgroundJobServer().pollIntervalInSeconds().ifPresent(configuration::andPollIntervalInSeconds);
    jobRunrRuntimeConfiguration.backgroundJobServer().serverTimeoutPollIntervalMultiplicand().ifPresent(configuration::andServerTimeoutPollIntervalMultiplicand);
    jobRunrRuntimeConfiguration.backgroundJobServer().deleteSucceededJobsAfter().ifPresent(configuration::andDeleteSucceededJobsAfter);
    jobRunrRuntimeConfiguration.backgroundJobServer().permanentlyDeleteDeletedJobsAfter().ifPresent(configuration::andPermanentlyDeleteDeletedJobsAfter);
    jobRunrRuntimeConfiguration.backgroundJobServer().scheduledJobsRequestSize().ifPresent(configuration::andScheduledJobsRequestSize);
    jobRunrRuntimeConfiguration.backgroundJobServer().orphanedJobsRequestSize().ifPresent(configuration::andOrphanedJobsRequestSize);
    jobRunrRuntimeConfiguration.backgroundJobServer().succeededJobRequestSize().ifPresent(configuration::andSucceededJobsRequestSize);
    jobRunrRuntimeConfiguration.backgroundJobServer().interruptJobsAwaitDurationOnStop().ifPresent(configuration::andInterruptJobsAwaitDurationOnStopBackgroundJobServer);
    return configuration;
}
@Test
void backgroundJobServerConfigurationIsNotSetupWhenNotConfigured() {
  // When the background job server is disabled at build time, no
  // configuration bean may be produced.
  when(backgroundJobServerBuildTimeConfiguration.enabled()).thenReturn(false);

  Assertions.assertThat(jobRunrProducer.backgroundJobServerConfiguration(mock(BackgroundJobServerWorkerPolicy.class))).isNull();
}
/**
 * Normalizes a row value before it is sent to Salesforce: dates are
 * re-expressed in UTC, binary-stored strings are converted to normal storage,
 * and Kettle integers (java Long) are narrowed to java Integer.
 *
 * @param valueMeta metadata describing the value's type and storage
 * @param value the raw row value
 * @return the normalized value
 * @throws KettleValueException if the value cannot be converted
 */
public Object normalizeValue( ValueMetaInterface valueMeta, Object value ) throws KettleValueException {
  if ( valueMeta.isDate() ) {
    // Pass date field converted to UTC, see PDI-10836
    Calendar cal = Calendar.getInstance( valueMeta.getDateFormatTimeZone() );
    cal.setTime( valueMeta.getDate( value ) );
    Calendar utc = Calendar.getInstance( TimeZone.getTimeZone( "UTC" ) );
    // Reset time-related fields
    utc.clear();
    // NOTE(review): milliseconds are not copied, so the normalized date is
    // truncated to whole seconds — confirm this is intended.
    utc.set( cal.get( Calendar.YEAR ), cal.get( Calendar.MONTH ), cal.get( Calendar.DATE ),
        cal.get( Calendar.HOUR_OF_DAY ), cal.get( Calendar.MINUTE ), cal.get( Calendar.SECOND ) );
    value = utc;
  } else if ( valueMeta.isStorageBinaryString() ) {
    value = valueMeta.convertToNormalStorageType( value );
  }
  if ( ValueMetaInterface.TYPE_INTEGER == valueMeta.getType() ) {
    // Salesforce integer values can be only http://www.w3.org/2001/XMLSchema:int
    // see org.pentaho.di.ui.trans.steps.salesforceinput.SalesforceInputDialog#addFieldToTable
    // So we need convert Hitachi Vantara integer (real java Long value) to real int.
    // It will be sent correct as http://www.w3.org/2001/XMLSchema:int
    // use checked cast for prevent losing data; throws on values outside int range.
    value = Ints.checkedCast( (Long) value );
  }
  return value;
}
@Test
public void createIntObjectTest() throws KettleValueException {
  SalesforceStep step = spy( new MockSalesforceStep( smh.stepMeta, smh.stepDataInterface, 0, smh.transMeta, smh.trans ) );
  ValueMetaInterface valueMeta = Mockito.mock( ValueMetaInterface.class );
  Mockito.when( valueMeta.getType() ).thenReturn( ValueMetaInterface.TYPE_INTEGER );
  // A Kettle integer (java Long) must be narrowed to java Integer so it is
  // serialized as xsd:int for Salesforce.
  Object value = step.normalizeValue( valueMeta, 100L );
  Assert.assertTrue( value instanceof Integer );
}
/**
 * Replaces any existing {@code key} headers with a single header carrying
 * the UTF-8 bytes of {@code value}. Failures are logged, never propagated.
 */
static void replaceHeader(Headers headers, String key, String value) {
  try {
    // Remove all occurrences first so exactly one header remains afterwards.
    headers.remove(key);
    headers.add(key, value.getBytes(UTF_8));
  } catch (IllegalStateException e) {
    // Presumably thrown when the headers are read-only — best-effort only.
    log(e, "error setting header {0} in headers {1}", key, headers);
  }
}
@Test
void replaceHeader_replace() {
  // Seed an existing empty-valued header; replaceHeader must overwrite it.
  record.headers().add("b3", new byte[0]);
  KafkaHeaders.replaceHeader(record.headers(), "b3", "1");
  assertThat(record.headers().lastHeader("b3").value())
      .containsExactly('1');
}
/**
 * Completely replaces the ACL entries of each scope (access and/or default)
 * present in the spec, keeps entries of any untouched scope, then recomputes
 * masks and validates the result.
 *
 * @param existingAcl the current ACL entries
 * @param inAclSpec the replacement entries, possibly covering only one scope
 * @return the new, validated ACL
 * @throws AclException if the spec or the resulting ACL is invalid
 */
public static List<AclEntry> replaceAclEntries(List<AclEntry> existingAcl,
    List<AclEntry> inAclSpec) throws AclException {
  ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
  ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
  // Replacement is done separately for each scope: access and default.
  EnumMap<AclEntryScope, AclEntry> providedMask = Maps.newEnumMap(AclEntryScope.class);
  EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
  EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
  // Take every spec entry; masks are tracked separately so they can be
  // recalculated (or kept) per scope afterwards.
  for (AclEntry aclSpecEntry: aclSpec) {
    scopeDirty.add(aclSpecEntry.getScope());
    if (aclSpecEntry.getType() == MASK) {
      providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
      maskDirty.add(aclSpecEntry.getScope());
    } else {
      aclBuilder.add(aclSpecEntry);
    }
  }
  // Copy existing entries if the scope was not replaced.
  for (AclEntry existingEntry: existingAcl) {
    if (!scopeDirty.contains(existingEntry.getScope())) {
      if (existingEntry.getType() == MASK) {
        providedMask.put(existingEntry.getScope(), existingEntry);
      } else {
        aclBuilder.add(existingEntry);
      }
    }
  }
  copyDefaultsIfNeeded(aclBuilder);
  calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
  return buildAndValidateAcl(aclBuilder);
}
@Test
public void testReplaceAclEntriesUnchanged() throws AclException {
  // Replacing a full ACL with an identical spec (both scopes, same masks)
  // must be a no-op: the result equals the existing ACL exactly.
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", ALL))
      .add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, "sales", ALL))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", ALL))
      .add(aclEntry(DEFAULT, GROUP, READ_EXECUTE))
      .add(aclEntry(DEFAULT, GROUP, "sales", ALL))
      .add(aclEntry(DEFAULT, MASK, ALL))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, GROUP, "sales", ALL),
      aclEntry(ACCESS, MASK, ALL),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bruce", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, "sales", ALL),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE));
  assertEquals(existing, replaceAclEntries(existing, aclSpec));
}
public Map&lt;placeholder&gt;
@Test
public void parse() {
  // Both parameter placeholders ($username$, $foo$) must be reported.
  final QueryMetadata queryMetadata = queryParser.parse(Query.builder()
      .id("abc123")
      .query(ElasticsearchQueryString.of("user_name:$username$ http_method:$foo$"))
      .timerange(RelativeRange.create(600))
      .build());

  assertThat(queryMetadata.usedParameterNames())
      .containsExactlyInAnyOrder("username", "foo");
}
@Override
public List<ServiceDTO> getServiceInstances(String serviceId) {
  try {
    // Only healthy instances are requested (second argument = true).
    List<Instance> instances = namingService.selectInstances(serviceId, true);
    List<ServiceDTO> services = Lists.newLinkedList();
    for (Instance instance : instances) {
      services.add(this.toServiceDTO(instance, serviceId));
    }
    return services;
  } catch (NacosException ex) {
    // Lookup failures degrade to "no instances" rather than failing the caller.
    logger.error(ex.getMessage(), ex);
  }
  return Collections.emptyList();
}
@Test
public void testGetServiceInstancesWithInvalidServiceId() {
  // An unknown service id yields an empty instance list rather than an error.
  assertTrue(nacosDiscoveryService.getServiceInstances(someServiceId).isEmpty());
}