id
stringlengths
23
26
content
stringlengths
182
2.49k
codereview_java_data_5407
return convertDMNOutput(decisionModel.evaluateAll(dmnContext), predictInput); } - DecisionModel getDecisionModel(DecisionModels decisionModels, ModelIdentifier modelIdentifier) { String[] namespaceAndName = extractNamespaceAndName(modelIdentifier.getResourceId()); return decisionModels.getDecisionModel(namespaceAndName[0], namespaceAndName[1]); } Why not public? Default visibility is rarely useful, I would suggest `protected` at least return convertDMNOutput(decisionModel.evaluateAll(dmnContext), predictInput); } + protected DecisionModel getDecisionModel(DecisionModels decisionModels, ModelIdentifier modelIdentifier) { String[] namespaceAndName = extractNamespaceAndName(modelIdentifier.getResourceId()); return decisionModels.getDecisionModel(namespaceAndName[0], namespaceAndName[1]); }
codereview_java_data_5412
this.defaultMessageStore.getRunningFlags().makeLogicsQueueError(); } - private void multiDispatchQueue(DispatchRequest request, int maxRetries) { Map<String, String> prop = request.getPropertiesMap(); String multiDispatchQueue = prop.get(MessageConst.PROPERTY_INNER_MULTI_DISPATCH); String multiQueueOffset = prop.get(MessageConst.PROPERTY_INNER_MULTI_QUEUE_OFFSET); Would you like to reduce the nesting level? this.defaultMessageStore.getRunningFlags().makeLogicsQueueError(); } + private void multiDispatchLmqQueue(DispatchRequest request, int maxRetries) { Map<String, String> prop = request.getPropertiesMap(); String multiDispatchQueue = prop.get(MessageConst.PROPERTY_INNER_MULTI_DISPATCH); String multiQueueOffset = prop.get(MessageConst.PROPERTY_INNER_MULTI_QUEUE_OFFSET);
codereview_java_data_5419
import org.apache.thrift.TException; import org.junit.Assert; import org.junit.Test; -import static org.mockito.Matchers.anyLong; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.spy; nit: this itself gives the TableMetadataV2, no? import org.apache.thrift.TException; import org.junit.Assert; import org.junit.Test; +import org.mockito.ArgumentCaptor; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.spy;
codereview_java_data_5423
throws AccumuloException, AccumuloSecurityException; /** - * List all compactions running in Accumulo * * @return the list of active compactions * @since 2.1.0 Should mention this will return internal and external compactions. throws AccumuloException, AccumuloSecurityException; /** + * List all internal and external compactions running in Accumulo. * * @return the list of active compactions * @since 2.1.0
codereview_java_data_5431
AlertDialog alert = alertDialogBuilder.create(); alert.show(); } } show a toast in the `else` block that location service already enabled. AlertDialog alert = alertDialogBuilder.create(); alert.show(); + }else{ + Toast.makeText(CropImage.this,"GPS is enabled",Toast.LENGTH_SHORT).show(); } }
codereview_java_data_5436
} private void testOldClientNewServer() throws Exception { - // TODO fails in pagestore mode - if (!config.mvStore) { - return; - } Server server = org.h2.tools.Server.createTcpServer(); server.start(); int port = server.getPort(); The whole test is PageStore-only. It should not be disabled, especially in such way. I sent a possible fix in #1123. } private void testOldClientNewServer() throws Exception { Server server = org.h2.tools.Server.createTcpServer(); server.start(); int port = server.getPort();
codereview_java_data_5437
super.onCreate(savedInstanceState); wasPreviouslyDarkTheme = systemThemeUtils.isDeviceInNightMode(); setTheme(wasPreviouslyDarkTheme ? R.style.DarkAppTheme : R.style.LightAppTheme); - float fontScale = android.provider.Settings.System.getFloat(getBaseContext().getContentResolver(),android.provider.Settings.System.FONT_SCALE, 1f); adjustFontScale(getResources().getConfiguration(),fontScale); } There should be a new line after line 31 super.onCreate(savedInstanceState); wasPreviouslyDarkTheme = systemThemeUtils.isDeviceInNightMode(); setTheme(wasPreviouslyDarkTheme ? R.style.DarkAppTheme : R.style.LightAppTheme); + float fontScale = android.provider.Settings.System.getFloat(getBaseContext().getContentResolver() + , android.provider.Settings.System.FONT_SCALE, 1f); adjustFontScale(getResources().getConfiguration(),fontScale); }
codereview_java_data_5439
+ "Examples of invalid host lists are '', ':1000', and 'localhost:80000'"), @SuppressWarnings("unchecked") - PORT("port", Predicates.or(new Bounds(1024, 65535), in(true, "0"), new Matches("\\d{1,5}-\\d{1,5}")), "An positive integer in the range 1024-65535 (not already in use or specified elsewhere in the configuration),\n" + "zero to indicate any open ephemeral port, or a range of positive integers specified as M-N"), The regex could be a bit more restrictive, like `\\d{4,5}-\\d{4,5}` Would there be a benefit for creating a Predicate to verify the port range, like maybe fail sooner (when setting a prop in shell). Instead of validating in getPorts(). + "Examples of invalid host lists are '', ':1000', and 'localhost:80000'"), @SuppressWarnings("unchecked") + PORT("port", Predicates.or(new Bounds(1024, 65535), in(true, "0"), new PortRange("\\d{4,5}-\\d{4,5}")), "An positive integer in the range 1024-65535 (not already in use or specified elsewhere in the configuration),\n" + "zero to indicate any open ephemeral port, or a range of positive integers specified as M-N"),
codereview_java_data_5442
public void pendingTransactionRetentionPeriod() { final int pendingTxRetentionHours = 999; parseCommand("--tx-pool-retention-hours", String.valueOf(pendingTxRetentionHours)); - verify(mockTransactionPoolConfigurationBuilder) - .pendingTxRetentionPeriod(pendingTxRetentionHours); assertThat(commandOutput.toString()).isEmpty(); assertThat(commandErrorOutput.toString()).isEmpty(); } You're not actually checking that the value 999 is set. public void pendingTransactionRetentionPeriod() { final int pendingTxRetentionHours = 999; parseCommand("--tx-pool-retention-hours", String.valueOf(pendingTxRetentionHours)); + verify(mockControllerBuilder) + .transactionPoolConfiguration(transactionPoolConfigurationArgumentCaptor.capture()); + assertThat(transactionPoolConfigurationArgumentCaptor.getValue().getPendingTxRetentionPeriod()) + .isEqualTo(pendingTxRetentionHours); assertThat(commandOutput.toString()).isEmpty(); assertThat(commandErrorOutput.toString()).isEmpty(); }
codereview_java_data_5447
*/ JAR, /** - * Represents a ZIP file contains JAR files. */ JARS_IN_ZIP } "contains" -> "containing" or "that contains" */ JAR, /** + * Represents a ZIP file that contains JAR files. */ JARS_IN_ZIP }
codereview_java_data_5474
boolean pass = l % 2 == 0; if (!pass) { - Metrics.metric("dropped").inc(); } - Metrics.metric("total").inc(); return pass; }) We should use the thread-safe version here. We could actually stress-test it a bit by having lot more items and check that the total matches. boolean pass = l % 2 == 0; if (!pass) { + Metrics.metric("dropped").increment(); } + Metrics.metric("total").increment(); return pass; })
codereview_java_data_5480
String serviceName = WebUtils.required(request, CommonParams.SERVICE_NAME); String namespaceId = WebUtils.optional(request, CommonParams.NAMESPACE_ID, Constants.DEFAULT_NAMESPACE_ID); - String agent = request.getHeader(HttpHeaderConsts.USER_AGENT_HEADER); ClientInfo clientInfo = new ClientInfo(agent); Ditto below Suggestions, here can be changed to a similar Map. getDefault (request, key, defaultKey) strategy String serviceName = WebUtils.required(request, CommonParams.SERVICE_NAME); String namespaceId = WebUtils.optional(request, CommonParams.NAMESPACE_ID, Constants.DEFAULT_NAMESPACE_ID); + String agent = WebUtils.getUserAgent(request); ClientInfo clientInfo = new ClientInfo(agent);
codereview_java_data_5486
return defaultMQAdminExtImpl.queryMessage(topic, key, maxNum, begin, end); } - public QueryResult queryMessageByUniqueKey(String topic, String key, int maxNum, long begin, long end) throws MQClientException, InterruptedException { return defaultMQAdminExtImpl.queryMessageByUniqKey(topic, key, maxNum, begin, end); The function name, queryMessageByUniqueKey is inconsistent with queryMessageByUniqKey, I suggest keep the same return defaultMQAdminExtImpl.queryMessage(topic, key, maxNum, begin, end); } + public QueryResult queryMessageByUniqKey(String topic, String key, int maxNum, long begin, long end) throws MQClientException, InterruptedException { return defaultMQAdminExtImpl.queryMessageByUniqKey(topic, key, maxNum, begin, end);
codereview_java_data_5492
return null; } - public Object createNonNullArray() { return new Object @NonNull[0]; } `public Object[] createNonNullArray() {` maybe? return null; } + public Object[] createNonNullArray() { return new Object @NonNull[0]; }
codereview_java_data_5493
// PMD 6.0.0 addFilterRuleMoved("java", "controversial", "unnecessary", "UnnecessaryParentheses"); - addFilterRuleMoved("java", "unnecessary", "UnnecessaryParentheses", "UselessParentheses"); } this one should be `addFilterRuleRenamed`, isn't it? // PMD 6.0.0 addFilterRuleMoved("java", "controversial", "unnecessary", "UnnecessaryParentheses"); + addFilterRuleRenamed("java", "unnecessary", "UnnecessaryParentheses", "UselessParentheses"); }
codereview_java_data_5496
"}} ~~~~"; String creator = media.getCreator(); - if (creator == null || creator.isEmpty()) throw new RuntimeException("Failed to nominate for deletion"); String creatorName = creator.replace(" (page does not exist)", ""); return pageEditClient.prependEdit(media.filename, fileDeleteString + "\n", summary) We use braces even if it is single line statement:) "}} ~~~~"; String creator = media.getCreator(); + if (creator == null || creator.isEmpty()) { throw new RuntimeException("Failed to nominate for deletion"); + } String creatorName = creator.replace(" (page does not exist)", ""); return pageEditClient.prependEdit(media.filename, fileDeleteString + "\n", summary)
codereview_java_data_5500
@Override public PriorityQueue read(ObjectDataInput in) throws IOException { int size = in.readInt(); - Comparator comparator = in.readObject(); - PriorityQueue res = size < 1 ? new PriorityQueue(comparator) : new PriorityQueue(size, comparator); for (int i = 0; i < size; i++) { res.add(in.readObject()); } It's ok, but shorter would be: ``` PriorityQueue res = new PriorityQueue(Math.min(1, size), in.readObject()); ``` @Override public PriorityQueue read(ObjectDataInput in) throws IOException { int size = in.readInt(); + PriorityQueue res = new PriorityQueue(Math.min(1, size), in.readObject()); for (int i = 0; i < size; i++) { res.add(in.readObject()); }
codereview_java_data_5501
private final Optional<Long> healthcheckMaxTotalTimeoutSeconds; private final Optional<HealthcheckOptions> healthcheck; - private final Optional<String> healthCheckResultFilePath; private final Optional<Boolean> skipHealthchecksOnDeploy; private final Optional<Long> deployHealthTimeoutSeconds; private final Optional<Long> considerHealthyAfterRunningForSeconds; I think we can make this part of the `HealthcheckOptions` object. There are healthcheck-related fields outside that object, but in general they are deprecated, in favor of the ones consolidated in `HealthcheckOptions` private final Optional<Long> healthcheckMaxTotalTimeoutSeconds; private final Optional<HealthcheckOptions> healthcheck; private final Optional<Boolean> skipHealthchecksOnDeploy; private final Optional<Long> deployHealthTimeoutSeconds; private final Optional<Long> considerHealthyAfterRunningForSeconds;
codereview_java_data_5510
} public void handleGoAway(Channel parentChannel, int lastStreamId, GoAwayException exception) { - log.warn(() -> "Received GOAWAY on " + parentChannel + " with lastStreamId of " + lastStreamId); try { MultiplexedChannelRecord multiplexedChannel = parentChannel.attr(MULTIPLEXED_CHANNEL).get(); Still not sure if we should set it to `warn` as `GOAWAY` is not uncommon and retry normally should work. Using `warn` might raise unnecessary concerns. Can we add the message in the exception instead? } public void handleGoAway(Channel parentChannel, int lastStreamId, GoAwayException exception) { + log.debug(() -> "Received GOAWAY on " + parentChannel + " with lastStreamId of " + lastStreamId); try { MultiplexedChannelRecord multiplexedChannel = parentChannel.attr(MULTIPLEXED_CHANNEL).get();
codereview_java_data_5513
Map<String, Object> aclAccessConfigMap = AclUtils.getYamlDataObject(fileHome + File.separator + fileName, Map.class); if (aclAccessConfigMap == null || aclAccessConfigMap.isEmpty()) { - throw new AclException(String.format("%s file is not data", fileHome + File.separator + fileName)); } List<Map<String, Object>> accounts = (List<Map<String, Object>>) aclAccessConfigMap.get(AclConstants.CONFIG_ACCOUNTS); Map<String, Object> updateAccountMap = null; what is file is not data, is it right sematic? Map<String, Object> aclAccessConfigMap = AclUtils.getYamlDataObject(fileHome + File.separator + fileName, Map.class); if (aclAccessConfigMap == null || aclAccessConfigMap.isEmpty()) { + throw new AclException(String.format("%s file not found or isEmpty", fileHome + File.separator + fileName)); } List<Map<String, Object>> accounts = (List<Map<String, Object>>) aclAccessConfigMap.get(AclConstants.CONFIG_ACCOUNTS); Map<String, Object> updateAccountMap = null;
codereview_java_data_5514
+ "[^,]*hive-metastore[^,]*?\\.jar" + "|" + "[^,]*hive-hcatalog-core[^,]*?\\.jar"); } - public boolean isNotifyEnabled() { return Boolean.parseBoolean(getOptional("kylin.job.notification-enabled", FALSE)); } IMO, `isNotificationEnabled` looks better? + "[^,]*hive-metastore[^,]*?\\.jar" + "|" + "[^,]*hive-hcatalog-core[^,]*?\\.jar"); } + public boolean isNotificationEnabled() { return Boolean.parseBoolean(getOptional("kylin.job.notification-enabled", FALSE)); }
codereview_java_data_5521
* username: username * password: password * credentials-file: credentialsFile - * credentials-refresh-interval: 5000 * http-logging: HEADERS * ssl: * key-store: keystore.p12 ```suggestion * credentials-refresh-interval: 5 ``` * username: username * password: password * credentials-file: credentialsFile + * credentials-refresh-interval: 5 * http-logging: HEADERS * ssl: * key-store: keystore.p12
codereview_java_data_5531
this.brokerController.getTopicConfigManager().updateTopicConfig(topicConfig); - if (brokerController.getBrokerConfig().getRegisterNameServerPeriod() == 0) { - this.brokerController.registerBrokerAll(false, true, true); - } return null; } - private RemotingCommand deleteTopic(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException { final RemotingCommand response = RemotingCommand.createResponseCommand(null); DeleteTopicRequestHeader requestHeader = If could not get the topic route info immediately after the topic is created, this may fail some unit tests and confuse some users. It may be better to detect if the topic route info is changed, and then decide whether it is necessary to register or not. this.brokerController.getTopicConfigManager().updateTopicConfig(topicConfig); + this.brokerController.registerIncrementBrokerData(topicConfig,this.brokerController.getTopicConfigManager().getDataVersion()); return null; } + private synchronized RemotingCommand deleteTopic(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException { final RemotingCommand response = RemotingCommand.createResponseCommand(null); DeleteTopicRequestHeader requestHeader =
codereview_java_data_5537
SingleMemberAnnotationExpr retrieved = retrievedOpt.get(); assertEquals("Path", retrieved.getName().asString()); pmmlRestResourceGenerator.setPathValue(TEMPLATE); - try { - String classPrefix = getSanitizedClassName(KIE_PMML_MODEL.getName()); - String expected = URLEncoder.encode(classPrefix).replaceAll("\\+", " "); - assertEquals(expected, retrieved.getMemberValue().asStringLiteralExpr().asString()); - } catch (Exception e) { - fail(e); - } } @Test I would prefer to add throw to the method signature instead of catching the exception and calling fail. SingleMemberAnnotationExpr retrieved = retrievedOpt.get(); assertEquals("Path", retrieved.getName().asString()); pmmlRestResourceGenerator.setPathValue(TEMPLATE); + String classPrefix = getSanitizedClassName(KIE_PMML_MODEL.getName()); + String expected = URLEncoder.encode(classPrefix).replaceAll("\\+", " "); + assertEquals(expected, retrieved.getMemberValue().asStringLiteralExpr().asString()); } @Test
codereview_java_data_5539
places = getFromWikidataQuery(curLatLng, lang, radius); } catch (Exception e) { Timber.d("exception in fetching nearby places", e.getLocalizedMessage()); - return null; } Timber.d("%d results at radius: %f", places.size(), radius); if (places.size() >= MIN_RESULTS) { return an empty list instead. places = getFromWikidataQuery(curLatLng, lang, radius); } catch (Exception e) { Timber.d("exception in fetching nearby places", e.getLocalizedMessage()); + return places; } Timber.d("%d results at radius: %f", places.size(), radius); if (places.size() >= MIN_RESULTS) {
codereview_java_data_5540
} var encrypter = newCryptoService.getEncrypter(); encrypter.init(initParams); return encrypter; } Should this also load the TABLE_CRYPTO_ENCRYPT_SERVICE decrypter under the expectation that the service currently configured to encrypt will also be needed to decrypt? If not, should we add a note to the Property.java making it clear that the service much be set in both locations? } var encrypter = newCryptoService.getEncrypter(); + log.debug("New {} CryptoService({}) created. Calling init()", scope, + encrypter.getClass().getName()); encrypter.init(initParams); return encrypter; }
codereview_java_data_5549
String stringPref = prefs.getString(key, defaultEnum.name()); try { return Enum.valueOf(defaultEnum.getDeclaringClass(), stringPref); - } catch (Exception ex) { Log.w(K9.LOG_TAG, "Unable to convert preference key [" + key + "] value [" + stringPref + "] to enum of type " + defaultEnum.getDeclaringClass(), ex); should probably just catch `IllegalArgumentException` here String stringPref = prefs.getString(key, defaultEnum.name()); try { return Enum.valueOf(defaultEnum.getDeclaringClass(), stringPref); + } catch (IllegalArgumentException ex) { Log.w(K9.LOG_TAG, "Unable to convert preference key [" + key + "] value [" + stringPref + "] to enum of type " + defaultEnum.getDeclaringClass(), ex);
codereview_java_data_5550
@Test public void testFilterManuallyClosable() throws IOException { TestableCloseableIterable iterable = new TestableCloseableIterable(); - TestableCloseableIterable.TestableCloseableIterator iterator = - (TestableCloseableIterable.TestableCloseableIterator) iterable.iterator(); CloseableIterable<Integer> filtered = CloseableIterable.filter(iterable, x -> x > 5); nit: can we import `TestableCloseableIterator` and fit on one line? @Test public void testFilterManuallyClosable() throws IOException { TestableCloseableIterable iterable = new TestableCloseableIterable(); + TestableCloseableIterator iterator = (TestableCloseableIterator) iterable.iterator(); CloseableIterable<Integer> filtered = CloseableIterable.filter(iterable, x -> x > 5);
codereview_java_data_5560
private String password; private String sender; - public MailService(KylinConfig config) { - this(config.isNotifyEnabled(), config.isStarttlsEnabled(), config.getMailHost(), config.getSmtpPort(), config.getMailUsername(), config.getMailPassword(), config.getMailSender()); } private MailService(boolean enabled, boolean starttlsEnabled, String host, String port, String username, String password, String sender) { Revert these changes. private String password; private String sender; public MailService(KylinConfig config) { + this(config.isNotificationEnabled(), config.isStarttlsEnabled(), config.getMailHost(), config.getSmtpPort(), config.getMailUsername(), config.getMailPassword(), config.getMailSender()); } private MailService(boolean enabled, boolean starttlsEnabled, String host, String port, String username, String password, String sender) {
codereview_java_data_5561
@Override public SMTLibTerm transform(UninterpretedToken uninterpretedToken) { if (uninterpretedToken.sort() == Sort.KVARIABLE) { - if (binders.search(uninterpretedToken) != -1) { return new SMTLibTerm(uninterpretedToken.javaBackendValue()); } else { throw new SMTTranslationFailure("unbounded K variable: " + uninterpretedToken); contains() must be used here @Override public SMTLibTerm transform(UninterpretedToken uninterpretedToken) { if (uninterpretedToken.sort() == Sort.KVARIABLE) { + if (smtlibForallOrExistsBinders.contains(uninterpretedToken)) { return new SMTLibTerm(uninterpretedToken.javaBackendValue()); } else { throw new SMTTranslationFailure("unbounded K variable: " + uninterpretedToken);
codereview_java_data_5565
sb.append("]"); } - private static final Pattern identChar = Pattern.compile("[A-Za-z0-9\\-]"); - private static String[] asciiReadableEncodingKoreCalc() { - String[] koreEncoder = StringUtil.asciiReadableEncodingDefault; koreEncoder[0x2d] = "-"; koreEncoder[0x3c] = "-LT-"; koreEncoder[0x3e] = "-GT-"; are you sure this is actually doing a copy? it could just be assigning a pointer to the object, which would mean you're overwriting the default. You might want to use Arrays.copyOf. sb.append("]"); } private static String[] asciiReadableEncodingKoreCalc() { + String[] koreEncoder = Arrays.copyOf(StringUtil.asciiReadableEncodingDefault, StringUtil.asciiReadableEncodingDefault.length); koreEncoder[0x2d] = "-"; koreEncoder[0x3c] = "-LT-"; koreEncoder[0x3e] = "-GT-";
codereview_java_data_5578
case DECIMAL: return new BigDecimal(asString); case DATE: - return (int) LocalDate.parse(asString, DateTimeFormatter.ofPattern("yyyy-MM-dd")).toEpochDay(); default: throw new UnsupportedOperationException( "Unsupported type for fromPartitionString: " + type); As an option, we can also rely on Iceberg here: ``` Literal.of(asString).to(Types.DateType.get()).value(); ``` case DECIMAL: return new BigDecimal(asString); case DATE: + return Literal.of(asString).to(Types.DateType.get()).value(); default: throw new UnsupportedOperationException( "Unsupported type for fromPartitionString: " + type);
codereview_java_data_5584
import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.Validate; /** * Represents a completed download directory transfer to Amazon S3. It can be used to track Defensive copy in case of builder reuse? import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; /** * Represents a completed download directory transfer to Amazon S3. It can be used to track
codereview_java_data_5590
package org.apache.iceberg.mr.hive; import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory; import org.apache.iceberg.expressions.And; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.expressions.Not; I think we should probably have tests for all the filter literal types here, It seems like we are only checking Longs? Especially given the special code around other specific types. package org.apache.iceberg.mr.hive; +import java.math.BigDecimal; import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; import org.apache.iceberg.expressions.And; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.expressions.Not;
codereview_java_data_5591
account = preferences.getAccount(accountUuids[0]); List<String> folderServerIds = search.getFolderServerIds(); singleFolderMode = folderServerIds.size() == 1; } } This doesn't set `account` to null in case the condition isn't met. account = preferences.getAccount(accountUuids[0]); List<String> folderServerIds = search.getFolderServerIds(); singleFolderMode = folderServerIds.size() == 1; + } else { + account = null; } }
codereview_java_data_5603
* Serializes an AST or a partial AST to JSON. */ public class JavaParserJsonSerializer { public void serialize(Node node, JsonGenerator generator) { requireNonNull(node); Log.info("Serializing Node to JSON."); Perhaps we could use a constant for that or maybe give a way to set this name in some sort of configuration * Serializes an AST or a partial AST to JSON. */ public class JavaParserJsonSerializer { + public static final String SERIALIZED_CLASS_KEY = "!"; + public void serialize(Node node, JsonGenerator generator) { requireNonNull(node); Log.info("Serializing Node to JSON.");
codereview_java_data_5606
void createOAuth2LoginFilter(BeanReference sessionStrategy, BeanReference authManager) { Element oauth2LoginElt = DomUtils.getChildElementByTagName(this.httpElt, Elements.OAUTH2_LOGIN); if (oauth2LoginElt != null) { - OAuth2LoginBeanDefinitionParser parser = new OAuth2LoginBeanDefinitionParser(); - BeanDefinition filterBean = parser.parse(oauth2LoginElt, this.pc); - filterBean.getPropertyValues().addPropertyValue("authenticationManager", authManager); // retrieve the other bean result BeanDefinition oauth2LoginAuthProvider = parser.getOAuth2LoginAuthenticationProvider(); Rename `filterBean` to `oauth2LoginFilterBean` void createOAuth2LoginFilter(BeanReference sessionStrategy, BeanReference authManager) { Element oauth2LoginElt = DomUtils.getChildElementByTagName(this.httpElt, Elements.OAUTH2_LOGIN); if (oauth2LoginElt != null) { + OAuth2LoginBeanDefinitionParser parser = new OAuth2LoginBeanDefinitionParser(requestCache); + BeanDefinition oauth2LoginFilterBean = parser.parse(oauth2LoginElt, this.pc); + oauth2LoginFilterBean.getPropertyValues().addPropertyValue("authenticationManager", authManager); // retrieve the other bean result BeanDefinition oauth2LoginAuthProvider = parser.getOAuth2LoginAuthenticationProvider();
codereview_java_data_5607
} } - /** - * Scale up or down the number of application instances. - * @param application App in the stream to scale. - * @param count Number of instance to scale to. - */ - public void scaleApplicationInstances(StreamApplication application, int count) { - this.scaleApplicationInstances(application, count, new HashMap<>()); - } - /** * Scale up or down the number of application instances. * @param application App in the stream to scale. We can get rid of this } } /** * Scale up or down the number of application instances. * @param application App in the stream to scale.
codereview_java_data_5613
package org.apache.rocketmq.common.protocol; -import org.apache.rocketmq.remoting.protocol.RemotingSysRequestCode; - -public class RequestCode extends RemotingSysRequestCode { public static final int SEND_MESSAGE = 10; I am not sure it's a good idea that `RequestCode` in `common` module has a dependency `RemotingSysRequestCode` in `remoting` module. package org.apache.rocketmq.common.protocol; +public class RequestCode { public static final int SEND_MESSAGE = 10;
codereview_java_data_5624
} } - private File getNodePrivateKeyFile(@Nullable final File nodePrivateKeyFile) { return nodePrivateKeyFile != null ? nodePrivateKeyFile : KeyPairUtil.getDefaultKeyFile(dataDir()); Pantheon doesn't currently use `@Nullable` so better to leave it off for consistency. } } + private File getNodePrivateKeyFile() { return nodePrivateKeyFile != null ? nodePrivateKeyFile : KeyPairUtil.getDefaultKeyFile(dataDir());
codereview_java_data_5630
private static Set<String> ownedObservables(DAG dag) { return StreamSupport.stream(Spliterators.spliteratorUnknownSize(dag.iterator(), 0), false) .map(vertex -> (String) vertex.getMetaSupplier().getTags().get(ObservableRepository.OWNED_OBSERVABLE)) .collect(Collectors.toSet()); } The returned set includes `null` if there is any vertex which is not an observable sink. private static Set<String> ownedObservables(DAG dag) { return StreamSupport.stream(Spliterators.spliteratorUnknownSize(dag.iterator(), 0), false) .map(vertex -> (String) vertex.getMetaSupplier().getTags().get(ObservableRepository.OWNED_OBSERVABLE)) + .filter(Objects::nonNull) .collect(Collectors.toSet()); }
codereview_java_data_5631
try { List<ImapResponse> expungeResponses = executeSimpleCommand("EXPUNGE"); - handleExpungeResponses(expungeResponses); return ImapUtility.extractVanishedUids(expungeResponses); } catch (IOException ioe) { throw ioExceptionHandler(connection, ioe); } } - private void handleExpungeResponses(List<ImapResponse> imapResponses) { for (ImapResponse imapResponse : imapResponses) { Long highestModSeq = ImapUtility.extractHighestModSeq(imapResponse); if (highestModSeq != null) { This doesn't really 'handle' the response - it just searches them for a higher mod seq. `updateHighestModSequenceFromResponses`. try { List<ImapResponse> expungeResponses = executeSimpleCommand("EXPUNGE"); + updateHighestModSeqFromResponses(expungeResponses); return ImapUtility.extractVanishedUids(expungeResponses); } catch (IOException ioe) { throw ioExceptionHandler(connection, ioe); } } + private void updateHighestModSeqFromResponses(List<ImapResponse> imapResponses) { for (ImapResponse imapResponse : imapResponses) { Long highestModSeq = ImapUtility.extractHighestModSeq(imapResponse); if (highestModSeq != null) {
codereview_java_data_5637
import de.danoeh.antennapod.core.feed.FeedMedia; import de.danoeh.antennapod.core.gpoddernet.model.GpodnetEpisodeAction; import de.danoeh.antennapod.core.preferences.GpodnetPreferences; -import de.danoeh.antennapod.core.preferences.UserPreferences; import de.danoeh.antennapod.core.service.playback.PlaybackService; import de.danoeh.antennapod.core.storage.DBTasks; import de.danoeh.antennapod.core.storage.DBWriter; import de.danoeh.antennapod.core.storage.DownloadRequestException; import de.danoeh.antennapod.core.storage.DownloadRequester; import de.danoeh.antennapod.core.util.NetworkUtils; /** Do these need to be different? It never makes sense for them to both be set at the same time. I can see it making the code more readable. Should they be initialized to some value? import de.danoeh.antennapod.core.feed.FeedMedia; import de.danoeh.antennapod.core.gpoddernet.model.GpodnetEpisodeAction; import de.danoeh.antennapod.core.preferences.GpodnetPreferences; import de.danoeh.antennapod.core.service.playback.PlaybackService; +import de.danoeh.antennapod.core.storage.DBReader; import de.danoeh.antennapod.core.storage.DBTasks; import de.danoeh.antennapod.core.storage.DBWriter; import de.danoeh.antennapod.core.storage.DownloadRequestException; import de.danoeh.antennapod.core.storage.DownloadRequester; +import de.danoeh.antennapod.core.util.LongList; import de.danoeh.antennapod.core.util.NetworkUtils; /**
codereview_java_data_5642
package com.hazelcast.jet.impl.serialization; import com.hazelcast.internal.nio.BufferObjectDataInput; -import com.hazelcast.internal.serialization.impl.AbstractSerializationService; public interface DataInput { This is confusing with the standard `java.io.DataInput`. We should use different name, esp. when the purpose is similar. Maybe `JetDataInput`. I was confused how come you implement just few methods when `DataInput` has many more. package com.hazelcast.jet.impl.serialization; import com.hazelcast.internal.nio.BufferObjectDataInput; +import com.hazelcast.internal.serialization.InternalSerializationService; public interface DataInput {
codereview_java_data_5658
description = "Refresh delay of websocket subscription sync in milliseconds. " + "default: ${DEFAULT-VALUE}", - defaultValue = "5000" ) private void setRefreshDelay(final Long refreshDelay) { - if (refreshDelay < 1 || refreshDelay > 3600000) { throw new ParameterException( new CommandLine(this), "refreshDelay must be a positive integer smaller than 3600000 (1 hour)"); what happens if I specify 1.5 ? Why long not int? description = "Refresh delay of websocket subscription sync in milliseconds. " + "default: ${DEFAULT-VALUE}", + defaultValue = "" + WebSocketConfiguration.DEFAULT_WEBSOCKET_REFRESH_DELAY ) private void setRefreshDelay(final Long refreshDelay) { + if (refreshDelay < DEFAULT_MIN_REFRESH_DELAY || refreshDelay > DEFAULT_MAX_REFRESH_DELAY) { throw new ParameterException( new CommandLine(this), "refreshDelay must be a positive integer smaller than 3600000 (1 hour)");
codereview_java_data_5663
} if (proposedBlock.getHeader().getNumber() - != signedPayload.getPayload().getRoundIdentifier().getRoundNumber()) { LOG.info("Invalid proposal/block - message sequence does not align with block number."); return false; } this looks like it's comparing the chain height to the round number instead of the sequence number } if (proposedBlock.getHeader().getNumber() + != signedPayload.getPayload().getRoundIdentifier().getSequenceNumber()) { LOG.info("Invalid proposal/block - message sequence does not align with block number."); return false; }
codereview_java_data_5664
ConsumeQueue logic = this.findConsumeQueue(topic, queueId); if (logic != null) { long consumeQueueOffset = logic.getMinOffsetInQueue(); - MessageExt msgExt = null; long commitLogOffset = 0L; if (realOffset) { - for (;consumeQueueOffset <= getMaxOffsetInQueue(topic,queueId);consumeQueueOffset++) { commitLogOffset = getCommitLogOffsetInQueue(topic, queueId, consumeQueueOffset); - msgExt = lookMessageByOffset(commitLogOffset); - if (null != msgExt) { return consumeQueueOffset; } } IMO, this is a very CPU intensive operation. It is possible to get a lot of cold data and decode it, which will affect the stability of the broker. ConsumeQueue logic = this.findConsumeQueue(topic, queueId); if (logic != null) { long consumeQueueOffset = logic.getMinOffsetInQueue(); long commitLogOffset = 0L; if (realOffset) { + long minCommitoffset = this.commitLog.getMinOffset(); + long maxOffsetInQueue = getMaxOffsetInQueue(topic,queueId); + for (;consumeQueueOffset <= maxOffsetInQueue;consumeQueueOffset++) { commitLogOffset = getCommitLogOffsetInQueue(topic, queueId, consumeQueueOffset); + if (commitLogOffset >= minCommitoffset) { return consumeQueueOffset; } }
codereview_java_data_5670
} } - @Override - public void onConfigurationChanged(Configuration newConfig) { - super.onConfigurationChanged(newConfig); - } } Why do we need to override this if all we're doing is passing it up? } } }
codereview_java_data_5673
} public KPrint(KompileOptions kompileOptions) { - this(new KExceptionManager(new GlobalOptions()), FileUtil.testFileUtil(), new TTYInfo(false, false, false), new PrintOptions(), kompileOptions); } @Inject I should think this should be kompileOptions.global } public KPrint(KompileOptions kompileOptions) { + this(new KExceptionManager(kompileOptions.global), FileUtil.testFileUtil(), new TTYInfo(false, false, false), new PrintOptions(), kompileOptions); } @Inject
codereview_java_data_5674
import java.net.URI; import java.util.Collections; import java.util.List; -import java.util.logging.Logger; import java.util.stream.Collectors; import java.util.stream.Stream; -import zipkin2.elasticsearch.ElasticsearchStorage; final class HostsConverter { - static final Logger LOG = Logger.getLogger(ElasticsearchStorage.class.getName()); static List<URI> convert(String hosts) { if (hosts == null) return Collections.singletonList(URI.create("http://localhost:9200")); Realize this was probably copied, but since it's zipkin-server, I think it's supposed to be either log4j2 or slf4j import java.net.URI; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; final class HostsConverter { + static final Logger LOGGER = LogManager.getLogger(); static List<URI> convert(String hosts) { if (hosts == null) return Collections.singletonList(URI.create("http://localhost:9200"));
codereview_java_data_5679
private Date playbackCompletionDate; private int startPosition = -1; private int playedDurationWhenStarted; - private String lastPlaybackSpeed = null; // if null: unknown, will be checked private Boolean hasEmbeddedPicture; I think a float value fits better. private Date playbackCompletionDate; private int startPosition = -1; private int playedDurationWhenStarted; + private float lastPlaybackSpeed = LAST_PLAYBACK_SPEED_UNSET; // if null: unknown, will be checked private Boolean hasEmbeddedPicture;
codereview_java_data_5692
* @return the iterator */ public Iterator<ConfigurationError> configErrors() { - return configErrors == null ? Collections.emptyIterator() : configErrors.iterator(); } /** this should be ```suggestion return configErrors == null ? Collections<ConfigurationError>.emptyIterator() : configErrors.iterator(); ``` * @return the iterator */ public Iterator<ConfigurationError> configErrors() { + return configErrors == null ? Collections<ConfigurationError>.emptyIterator() : configErrors.iterator(); } /**
codereview_java_data_5695
} private SynchronizerConfiguration buildSyncConfig() { - synchronizerConfigurationBuilder.syncMode(DEFAULT_SYNC_MODE); synchronizerConfigurationBuilder.maxTrailingPeers(maxTrailingPeers); return synchronizerConfigurationBuilder.build(); } It would be good not to remove the sync-mode flag. I have an open PR that starts using it again. :) } private SynchronizerConfiguration buildSyncConfig() { + synchronizerConfigurationBuilder.syncMode(syncMode); synchronizerConfigurationBuilder.maxTrailingPeers(maxTrailingPeers); return synchronizerConfigurationBuilder.build(); }
codereview_java_data_5699
this.compressExt = compressExt; } - public static LogrotateCompressionSettings gzip() { return new LogrotateCompressionSettings(Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent()); } i'd consider renaming this to `default()`, since it's just an implementation detail of logrotate that it's using `gzip` to compress this.compressExt = compressExt; } + public static LogrotateCompressionSettings empty() { return new LogrotateCompressionSettings(Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent()); }
codereview_java_data_5704
final Optional<BlockHeader> genesisBlockHeader = context.getBlockchain().getBlockHeader(GENESIS_BLOCK_NUMBER); if (!genesisBlockHeader.isPresent()) { - LOG.error("Genesis block cannot be retriefed from chain."); return false; } final CliqueExtraData extraData = CliqueExtraData.decode(genesisBlockHeader.get()); ```suggestion LOG.error("Genesis block cannot be retrieved from chain."); ``` final Optional<BlockHeader> genesisBlockHeader = context.getBlockchain().getBlockHeader(GENESIS_BLOCK_NUMBER); if (!genesisBlockHeader.isPresent()) { + LOG.error("Genesis block cannot be retrieved from chain."); return false; } final CliqueExtraData extraData = CliqueExtraData.decode(genesisBlockHeader.get());
codereview_java_data_5706
} else { conf = new Configuration(); } - } - - public WriteBuilder forTable(Table table) { - schema(table.schema()); - setAll(table.properties()); - return this; } public WriteBuilder metadata(String property, String value) { I think this change is unrelated and should go in a separate PR. It's a good idea to add this, just not here. Also, I think this should do some translation from table properties to ORC properties, like taking `write.orc.compression-codec` and setting the correct property for ORC. } else { conf = new Configuration(); } + // overwrite output file by default + overwrite(true); } public WriteBuilder metadata(String property, String value) {
codereview_java_data_5708
long currentLogicOffset = mappedFile.getWrotePosition() + mappedFile.getFileFromOffset(); if (expectLogicOffset < currentLogicOffset) { log.warn("build consume queue idempotent, expectLogicOffset: {} currentLogicOffset: {} Topic: {} QID: {} Diff: {}", expectLogicOffset, currentLogicOffset, this.topic, this.queueId, expectLogicOffset - currentLogicOffset); return true; How to understand the meaning of building consume queue idempotent? long currentLogicOffset = mappedFile.getWrotePosition() + mappedFile.getFileFromOffset(); if (expectLogicOffset < currentLogicOffset) { + //commit's log repeat build consume queue. log.warn("build consume queue idempotent, expectLogicOffset: {} currentLogicOffset: {} Topic: {} QID: {} Diff: {}", expectLogicOffset, currentLogicOffset, this.topic, this.queueId, expectLogicOffset - currentLogicOffset); return true;
codereview_java_data_5718
testCancelScript(); testEncoding(); testClobPrimaryKey(); - testComment(); deleteDb("runscript"); } Please, move this test to `createTable.sql`. `SCRIPT TABLE TEST1` output is not too big and I don't see any reason to use custom checks for presence of substrings. testCancelScript(); testEncoding(); testClobPrimaryKey(); deleteDb("runscript"); }
codereview_java_data_5720
@Override public ExpireSnapshotsActionResult execute() { SparkContext context = spark().sparkContext(); JobGroupInfo info = JobGroupUtils.getJobGroupInfo(context); return withJobGroupInfo(info, () -> { Can we simplify this a bit? ``` @Override public ExpireSnapshotsActionResult execute() { JobGroupInfo info = withJobGroupInfo("EXPIRE", "EXPIRE-SNAPSHOTS"); withJobGroupInfo(info, this::doExecute) } ``` @Override public ExpireSnapshotsActionResult execute() { + JobGroupInfo info = newJobGroupInfo("EXPIRE", "EXPIRE-SNAPSHOTS"); + return withJobGroupInfo(info, this::doExecute); + } + + private ExpireSnapshotsActionResult doExecute() { SparkContext context = spark().sparkContext(); JobGroupInfo info = JobGroupUtils.getJobGroupInfo(context); return withJobGroupInfo(info, () -> {
codereview_java_data_5722
return drawerLayout != null && navDrawer != null && drawerLayout.isDrawerOpen(navDrawer); } - @Override - public boolean onCreateOptionsMenu(Menu menu) { - return super.onCreateOptionsMenu(menu); - } - - @Override - public boolean onPrepareOptionsMenu(Menu menu) { - return super.onPrepareOptionsMenu(menu); - } - public interface AudioplayerContentFragment { public void onDataSetChanged(Playable media); } If we're not doing anything here, is there any need to override this function (or the next)? This isn't an important fix. If there's nothing else I wouldn't worry about it. return drawerLayout != null && navDrawer != null && drawerLayout.isDrawerOpen(navDrawer); } public interface AudioplayerContentFragment { public void onDataSetChanged(Playable media); }
codereview_java_data_5723
import org.apache.maven.shared.dependency.analyzer.DefaultClassAnalyzer; import org.gradle.api.Project; import org.gradle.api.artifacts.Configuration; -import org.gradle.api.artifacts.ConfigurationContainer; -import org.gradle.api.artifacts.ModuleDependency; import org.gradle.api.artifacts.ResolvedArtifact; import org.gradle.api.file.Directory; import org.gradle.api.logging.Logger; Does it detect things like `Class.forName(...)` or other dynamic class loading? import org.apache.maven.shared.dependency.analyzer.DefaultClassAnalyzer; import org.gradle.api.Project; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.ResolvedArtifact; import org.gradle.api.file.Directory; import org.gradle.api.logging.Logger;
codereview_java_data_5725
if (checkInit) { DbException.throwInternalError(); } - Collections.sort(filters, TableFilter.ORDER_IN_FORM_COMPARATOR); expandColumnList(); visibleColumnCount = expressions.size(); ArrayList<String> expressionSQL; what is a "form" ? if (checkInit) { DbException.throwInternalError(); } + Collections.sort(filters, TableFilter.ORDER_IN_FROM_COMPARATOR); expandColumnList(); visibleColumnCount = expressions.size(); ArrayList<String> expressionSQL;
codereview_java_data_5726
edge.add("to", e.getDestName()); edge.add("toOrdinal", e.getDestOrdinal()); edge.add("priority", e.getPriority()); - edge.add("distributedTo", e.getDistributedTo() + ""); edge.add("type", e.getRoutingPolicy().toString().toLowerCase()); edges.add(edge); } `String.valueOf(e.getDistributedTo())` is nicer than this "append empty string" idiom. edge.add("to", e.getDestName()); edge.add("toOrdinal", e.getDestOrdinal()); edge.add("priority", e.getPriority()); + edge.add("distributedTo", String.valueOf(e.getDistributedTo())); edge.add("type", e.getRoutingPolicy().toString().toLowerCase()); edges.add(edge); }
codereview_java_data_5730
if (syncMode == FolderMode.NONE && oldSyncMode != FolderMode.NONE) { return true; } - return syncMode != FolderMode.NONE && oldSyncMode == FolderMode.NONE; } public synchronized FolderMode getFolderPushMode() { If you combine the last two `return` statements you might as well only have one. However, I think the old way makes the code easier to read/understand. if (syncMode == FolderMode.NONE && oldSyncMode != FolderMode.NONE) { return true; } + if (syncMode != FolderMode.NONE && oldSyncMode == FolderMode.NONE) { + return true; + } + return false; } public synchronized FolderMode getFolderPushMode() {
codereview_java_data_5731
@Deprecated public Date getSessionCredentitalsExpiration() { - credentialsLock.readLock().lock(); - try { - return sessionCredentialsExpiration; - } finally { - credentialsLock.readLock().unlock(); - } } public String getIdentityPoolId() { This method should call the new method, to avoid having the implementation logic duplicated in two places. @Deprecated public Date getSessionCredentitalsExpiration() { + return getSessionCredentialsExpiration(); } public String getIdentityPoolId() {
codereview_java_data_5743
* conveyor has as many 1-to-1 concurrent queues as there are upstream * tasklets contributing to it. */ -public abstract class ConcurrentInboundEdgeStream { - // Prevent subclassing private ConcurrentInboundEdgeStream() { } We prevent subclassing of an abstract class? I think we can remove the `abstract` modifier. This class became a utility class with a few implementations of `InboundEdgeStream` in it. * conveyor has as many 1-to-1 concurrent queues as there are upstream * tasklets contributing to it. */ +public final class ConcurrentInboundEdgeStream { + // Prevents instantiation private ConcurrentInboundEdgeStream() { }
codereview_java_data_5749
String pathToDelete = path + "/" + createdNodeName; LOG.debug("[{}] Failed to acquire lock in tryLock(), deleting all at path: {}", vmLockPrefix, pathToDelete); - recursiveDelete(pathToDelete, NodeMissingPolicy.SKIP); createdNodeName = null; } With ZooKeeper 3.5.x there is a static utility method `ZKUtil.deleteRecursive(zooKeeper, path);` that may eliminate the need for most, if not all of this code. (I'm not sure what the NodeMissingPolicy is expected to do here, so that may be a factor) Also, not sure if it would be possible / desirable, but maybe some kind of sanity check before calling a recursive delete? One thing that comes to mind is checking that the path starts with /accumulo/[uuid]/lock or whatever the appropriate base path for the lock is? String pathToDelete = path + "/" + createdNodeName; LOG.debug("[{}] Failed to acquire lock in tryLock(), deleting all at path: {}", vmLockPrefix, pathToDelete); + recursiveDelete(zooKeeper, pathToDelete, NodeMissingPolicy.SKIP); createdNodeName = null; }
codereview_java_data_5755
*/ @Nonnull public static <K, V, R> ProcessorMetaSupplier readHdfsP( - @Nonnull Configuration configuration, @Nonnull BiFunctionEx<K, V, R> mapper ) { configuration = SerializableConfiguration.asSerializable(configuration); if (configuration.get("mapreduce.job.inputformat.class") != null) { - return new ReadHdfsNewApiP.MetaSupplier<>(configuration, mapper); } else { - return new ReadHdfsOldApiP.MetaSupplier<>((JobConf) configuration, mapper); } } Can we replace the string with the MRJobConfig#INPUT_FORMAT_CLASS_ATTR? */ @Nonnull public static <K, V, R> ProcessorMetaSupplier readHdfsP( + @Nonnull Configuration configuration, @Nonnull BiFunctionEx<K, V, R> projectionFn ) { configuration = SerializableConfiguration.asSerializable(configuration); if (configuration.get("mapreduce.job.inputformat.class") != null) { + return new ReadHdfsNewApiP.MetaSupplier<>(configuration, projectionFn); } else { + return new ReadHdfsOldApiP.MetaSupplier<>((JobConf) configuration, projectionFn); } }
codereview_java_data_5762
} - @SuppressWarnings("WeakerAccess") // public interface - public void resetVisibleLimits() { try { getLocalStore().resetVisibleLimits(getDisplayCount()); } catch (MessagingException e) { This method is only used by this class. We could make this `private` now and increase visibility later should that become necessary. } + private void resetVisibleLimits() { try { getLocalStore().resetVisibleLimits(getDisplayCount()); } catch (MessagingException e) {
codereview_java_data_5765
} persistService.insertTenantInfoAtomic("1", namespaceId, namespaceName, namespaceDesc, "nacos", System.currentTimeMillis()); - namespaceService.addTenantId(namespaceId); return true; } replace method name by addNamespaceId? } persistService.insertTenantInfoAtomic("1", namespaceId, namespaceName, namespaceDesc, "nacos", System.currentTimeMillis()); + namespaceService.addNamespaceId(namespaceId); return true; }
codereview_java_data_5777
"2.1.0"), TABLE_COMPACTION_DISPATCHER_OPTS("table.compaction.dispatcher.opts.", null, PropertyType.PREFIX, "Options for the table compaction dispatcher", "2.1.0"), - TABLE_COMPACTION_SELECTION_EXPIRATION("table.compaction.selection.expiration.ms", "1m", PropertyType.TIMEDURATION, "User compactions select files and are then queued for compaction, preventing these files " + "from being used in system compactions. This timeout allows system compactions to cancel " + "the hold queued user compactions have on files, when its queued for more than the " + "specified time. If a system compaction cancels a hold and runs, then the user compaction" - + " can reselect and hold files after the system compaction runs. ", "2.1.0"), TABLE_COMPACTION_SELECTOR("table.compaction.selector", "", PropertyType.CLASSNAME, "A configurable selector for a table that can periodically select file for mandatory " small nit - there is some space at the end of the description. "2.1.0"), TABLE_COMPACTION_DISPATCHER_OPTS("table.compaction.dispatcher.opts.", null, PropertyType.PREFIX, "Options for the table compaction dispatcher", "2.1.0"), + TABLE_COMPACTION_SELECTION_EXPIRATION("table.compaction.selection.expiration.ms", "2m", PropertyType.TIMEDURATION, "User compactions select files and are then queued for compaction, preventing these files " + "from being used in system compactions. This timeout allows system compactions to cancel " + "the hold queued user compactions have on files, when its queued for more than the " + "specified time. If a system compaction cancels a hold and runs, then the user compaction" + + " can reselect and hold files after the system compaction runs.", "2.1.0"), TABLE_COMPACTION_SELECTOR("table.compaction.selector", "", PropertyType.CLASSNAME, "A configurable selector for a table that can periodically select file for mandatory "
codereview_java_data_5780
bottomSheetCallback.onSlide(null, 1.0f); } else if (Intent.ACTION_VIEW.equals(intent.getAction())) { handleDeeplink(intent.getData()); - } else if (Intent.ACTION_CREATE_SHORTCUT.equals(intent.getAction())) { - intent = new Intent(this, SelectSubscriptionActivity.class); - intent.setAction(Intent.ACTION_CREATE_SHORTCUT); - startActivity(intent); } // to avoid handling the intent twice when the configuration changes setIntent(new Intent(MainActivity.this, MainActivity.class)); Can't you just add `SelectSubscriptionActivity` directly to the manifest instead of opening `MainActivity` that then starts it? bottomSheetCallback.onSlide(null, 1.0f); } else if (Intent.ACTION_VIEW.equals(intent.getAction())) { handleDeeplink(intent.getData()); } // to avoid handling the intent twice when the configuration changes setIntent(new Intent(MainActivity.this, MainActivity.class));
codereview_java_data_5790
import de.danoeh.antennapod.activity.SplashActivity; import de.danoeh.antennapod.core.ApCoreEventBusIndex; import de.danoeh.antennapod.core.ClientConfig; -import de.danoeh.antennapod.core.preferences.UserPreferences; import de.danoeh.antennapod.error.CrashReportWriter; import de.danoeh.antennapod.error.RxJavaErrorHandlerSetup; import de.danoeh.antennapod.spa.SPAUtil; Please revert the unrelated change to keep the git history clean import de.danoeh.antennapod.activity.SplashActivity; import de.danoeh.antennapod.core.ApCoreEventBusIndex; import de.danoeh.antennapod.core.ClientConfig; import de.danoeh.antennapod.error.CrashReportWriter; import de.danoeh.antennapod.error.RxJavaErrorHandlerSetup; import de.danoeh.antennapod.spa.SPAUtil;
codereview_java_data_5804
public final class UpdateMapP<T, K, V> extends AsyncHazelcastWriterP { private final String mapName; private final FunctionEx<? super T, ? extends K> toKeyFn; private final BiFunctionEx<? super V, ? super T, ? extends V> updateFn; We need to apply backpressure. public final class UpdateMapP<T, K, V> extends AsyncHazelcastWriterP { + private static final int PENDING_ITEM_COUNT_LIMIT = 16_384; private final String mapName; private final FunctionEx<? super T, ? extends K> toKeyFn; private final BiFunctionEx<? super V, ? super T, ? extends V> updateFn;
codereview_java_data_5809
/* - * Copyright 2005 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. back to the future :P /* + * Copyright 2020 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
codereview_java_data_5811
} if (pref instanceof EditTextPreference) { EditTextPreference editTextPref = (EditTextPreference) pref; - if (pref.getTitle().toString().toLowerCase().contains("password")) { pref.setSummary("******"); } else if (editTextPref.getText() != null && !editTextPref.getText().equals("")) { ((EditTextPreference) pref).setDialogMessage(editTextPref.getDialogMessage()); is it possible to make this working for all languages? like use R.id somehow? } if (pref instanceof EditTextPreference) { EditTextPreference editTextPref = (EditTextPreference) pref; + if (pref.getKey().contains("danar_password")) { pref.setSummary("******"); } else if (editTextPref.getText() != null && !editTextPref.getText().equals("")) { ((EditTextPreference) pref).setDialogMessage(editTextPref.getDialogMessage());
codereview_java_data_5815
* ADDED} and {@link com.hazelcast.core.EntryEventType#UPDATED UPDATED} events * @param projectionFn the projection to map the events, you may use * {@link Util#mapEventToEntry()} to project new value from the event - * If the projection returns a {@code null} for some items, that items * will be filtered out. * @param initialPos describes which event to start receiving from * @param <T> type of emitted item Should be "If the projection returns {@code null} for an item, that item will be filtered out. There are some more instances of this text below. * ADDED} and {@link com.hazelcast.core.EntryEventType#UPDATED UPDATED} events * @param projectionFn the projection to map the events, you may use * {@link Util#mapEventToEntry()} to project new value from the event + * If the projection returns a {@code null} for an item, that item * will be filtered out. * @param initialPos describes which event to start receiving from * @param <T> type of emitted item
codereview_java_data_5820
name, generatePrivacyParameters(orion), keyFilePath, orion); } else { node = - pantheonNodeFactory.createIbftTwoNodePrivacyEnabled( name, generatePrivacyParameters(orion), keyFilePath, orion); } Not for here - but that's a horrible interface, think it might be doing 2 things (i.e. IBFT and Privacy) - maybe it should have been a builder? name, generatePrivacyParameters(orion), keyFilePath, orion); } else { node = + pantheonNodeFactory.createIbft2NodePrivacyEnabled( name, generatePrivacyParameters(orion), keyFilePath, orion); }
codereview_java_data_5822
 SYNC_HTTP_CLIENT.close(); ASYNC_HTTP_CLIENT.close(); } - catch (Exception ignore) { } logger.warn("[HttpClientManager] Destruction of the end"); } Here, you can print the exception error info. SYNC_HTTP_CLIENT.close(); ASYNC_HTTP_CLIENT.close(); } + catch (Exception ex) { + logger.error("An exception occurred when the HTTP client was closed : {}", + ExceptionUtil.getStackTrace(ex)); } logger.warn("[HttpClientManager] Destruction of the end"); }
codereview_java_data_5824
import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Throwables; import com.google.protobuf.Message; import com.pinterest.secor.common.LogFilePath; import com.pinterest.secor.common.SecorConfig; The indentation seems inconsistent with rest of the classes. In secor, we use 4 space indentation (not TAB). import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.protobuf.Message; import com.pinterest.secor.common.LogFilePath; import com.pinterest.secor.common.SecorConfig;
codereview_java_data_5838
@Override public int getItemCount() { - return contributions==null?0:contributions.size(); } public void setContributions(List<Contribution> contributionList) { how could contributions be null? Create it at the field level if you want. It should also be final. There are minor style violations in this file, no spaces around operators @Override public int getItemCount() { + return contributions.size(); } public void setContributions(List<Contribution> contributionList) {
codereview_java_data_5856
Map<String, VariableInstance> variableInstances = runtimeService.getVariableInstances(processInstance.getId()); assertThat(variableInstances.get("stringVar")) - .extracting("name", "value") .containsExactly("stringVar", "coca-cola"); List<String> variableNames = new ArrayList<>(); Let's not use reflection extraction of field names. Why not use `VariableInstance::getName, VariableInstance::getValue` instead? There are other places where this is done as well. Map<String, VariableInstance> variableInstances = runtimeService.getVariableInstances(processInstance.getId()); assertThat(variableInstances.get("stringVar")) + .extracting(VariableInstance::getName, VariableInstance::getValue) .containsExactly("stringVar", "coca-cola"); List<String> variableNames = new ArrayList<>();
codereview_java_data_5858
long length = file.length()/1024; String img_width = exif_data.getAttribute(ExifInterface.TAG_IMAGE_WIDTH); String img_height = exif_data.getAttribute(ExifInterface.TAG_IMAGE_LENGTH); - if(img_width.isEmpty() || img_height.isEmpty() || img_width.equals("0") || img_height.equals("0") ) { try { BitmapFactory.Options options = new BitmapFactory.Options(); options.inJustDecodeBounds = true; Position literals first in comparisons, if the second argument is null then NullPointerExceptions can be avoided, they will just return false. long length = file.length()/1024; String img_width = exif_data.getAttribute(ExifInterface.TAG_IMAGE_WIDTH); String img_height = exif_data.getAttribute(ExifInterface.TAG_IMAGE_LENGTH); + if("0".equals(img_width) || "0".equals(img_height)) { try { BitmapFactory.Options options = new BitmapFactory.Options(); options.inJustDecodeBounds = true;
codereview_java_data_5864
MulticastSocket multicastSocket = null; SocketAddress sa = null; NetworkInterface ni = PMS.get().getServer().getNetworkInterface(); - try { - sa = new InetSocketAddress(getIPv4MulticastAddress(), UPNP_PORT); - } catch (IOException e1) { - } - try { multicastSocket = getNewMulticastSocket(); multicastSocket.joinGroup(sa, ni); for (String NT: NT_LIST) { Is this an error we want to ignore? What are the consequences of this error if it happens? MulticastSocket multicastSocket = null; SocketAddress sa = null; NetworkInterface ni = PMS.get().getServer().getNetworkInterface(); try { multicastSocket = getNewMulticastSocket(); + sa = new InetSocketAddress(getIPv4MulticastAddress(), UPNP_PORT); multicastSocket.joinGroup(sa, ni); for (String NT: NT_LIST) {
codereview_java_data_5865
this.slaveDiskTotal = slaveDiskTotal; } - @JsonIgnore - public boolean isOverloaded() { - // Any host where load5 is > 1 is overloaded. Also consider a higher threshold for load1 to take into account spikes large enough to be disruptive - return systemLoad5Min > 1.0 || systemLoad1Min > 1.5; - } - public double getCpusUsed() { return cpusUsed; } Do we want to make these values tunable knobs as well? this.slaveDiskTotal = slaveDiskTotal; } public double getCpusUsed() { return cpusUsed; }
codereview_java_data_5868
try { if ((replServer.get() == null) && !getConfiguration().get(Property.REPLICATION_NAME).isEmpty()) { - log.info(Property.REPLICATION_NAME.getKey() + " was set, starting repl services."); replServer.set(setupReplication()); } } catch (UnknownHostException | KeeperException | InterruptedException e) { ```suggestion log.info("{} was set, starting repl services.", Property.REPLICATION_NAME.getKey()); ``` try { if ((replServer.get() == null) && !getConfiguration().get(Property.REPLICATION_NAME).isEmpty()) { + log.info("{} was set, starting repl services.", Property.REPLICATION_NAME.getKey()); replServer.set(setupReplication()); } } catch (UnknownHostException | KeeperException | InterruptedException e) {
codereview_java_data_5873
ServiceContainerUtil.registerServiceInstance(componentManager, key, implementation); Disposer.register( parentDisposable, - () -> new ComponentManagerWrapper(componentManager).unregisterComponent(key.getName())); } } Even though this can be implemented as a Wrapper, it's not directly necessary. It would also be possible to introduce a static method to `BaseSdkCompat` which takes `componentManager` and `key.getName()` as method parameters. Going for a method in `BaseSdkCompat` is more light-weight and hence preferred. Can you please switch to that approach? ServiceContainerUtil.registerServiceInstance(componentManager, key, implementation); Disposer.register( parentDisposable, + () -> BaseSdkTestCompat.unregisterComponent(componentManager, key.getName())); } }
codereview_java_data_5881
} void printNodeInfo(ILogger log, String addToProductName) { - log.info("Cluster name: " + node.getConfig().getClusterName()); log.info(versionAndAddressMessage(addToProductName)); log.fine(serializationVersionMessage()); log.info('\n' + JET_LOGO); log.info(COPYRIGHT_LINE); This makes it print before the message "Hazelcast Jet <version> starting", that line should come first I think. } void printNodeInfo(ILogger log, String addToProductName) { log.info(versionAndAddressMessage(addToProductName)); + log.info(clusterNameMessage()); log.fine(serializationVersionMessage()); log.info('\n' + JET_LOGO); log.info(COPYRIGHT_LINE);
codereview_java_data_5884
@Override
public void multicastToValidators(final MessageData message) {
- validatorNodes.stream().forEach(v -> v.handleReceivedMessage(message));
}
}
could just use validatorNodes.forEach and avoid the need to create a stream
@Override
public void multicastToValidators(final MessageData message) {
+ validatorNodes.forEach(v -> v.handleReceivedMessage(message));
}
}
codereview_java_data_5891
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import java.util.ArrayList; import java.util.List; import java.util.Random; import org.junit.Test; import net.sourceforge.pmd.lang.apex.ast.ASTMethod; Same here - please put the source code in a separate file - oh, it seems to be actually the same file, so you could reuse the file "MetadataDeployController.cls". import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Random; +import org.apache.commons.io.IOUtils; import org.junit.Test; import net.sourceforge.pmd.lang.apex.ast.ASTMethod;
codereview_java_data_5896
private void displayMediaInfo(@NonNull Playable media) { if (media.getClass() == FeedMedia.class) { String pubDateStr = DateUtils.formatAbbrev(getActivity(), ((FeedMedia) media).getPubDate()); - txtvPodcastTitle.setText(media.getFeedTitle() + ", " + pubDateStr); } else { txtvPodcastTitle.setText(media.getFeedTitle()); } Just a thought: Would a middle dot like on the queue screen work here, too? Not sure what I like more. (cc @keunes) private void displayMediaInfo(@NonNull Playable media) { if (media.getClass() == FeedMedia.class) { String pubDateStr = DateUtils.formatAbbrev(getActivity(), ((FeedMedia) media).getPubDate()); + txtvPodcastTitle.setText(media.getFeedTitle() + "・" + pubDateStr); } else { txtvPodcastTitle.setText(media.getFeedTitle()); }
codereview_java_data_5900
}; } - protected static final Schema SUPPORTED_PRIMITIVES = new Schema( required(100, "id", Types.LongType.get()), required(101, "data", Types.StringType.get()), required(102, "b", Types.BooleanType.get()), nit: Why protected? Do we expect `TestPartitionValues` to be extended and used in another package? }; } + private static final Schema SUPPORTED_PRIMITIVES = new Schema( required(100, "id", Types.LongType.get()), required(101, "data", Types.StringType.get()), required(102, "b", Types.BooleanType.get()),
codereview_java_data_5904
if (partition != null) { this.lazyPartitionSchema = (StructType) SparkSchemaUtil.convert(partition.type()); } else { - lazyPartitionSchema = new StructType(); } } I think partition metadata is the data file col's statistics. if (partition != null) { this.lazyPartitionSchema = (StructType) SparkSchemaUtil.convert(partition.type()); } else { + this.lazyPartitionSchema = new StructType(); } }
codereview_java_data_5907
// TODO: Determine reasonable defaults here public static final int DEFAULT_PIVOT_DISTANCE_FROM_HEAD = 500; public static final float DEFAULT_FULL_VALIDATION_RATE = .1f; - public static final int DEFAULT_FAST_SYNC_MINIMUM_PEERS = 1; private static final Duration DEFAULT_FAST_SYNC_MAXIMUM_PEER_WAIT_TIME = Duration.ofMinutes(5); private static final int DEFAULT_WORLD_STATE_HASH_COUNT_PER_REQUEST = 200; private static final int DEFAULT_WORLD_STATE_REQUEST_PARALLELISM = 10; Looks like this might've been a change for debugging? // TODO: Determine reasonable defaults here public static final int DEFAULT_PIVOT_DISTANCE_FROM_HEAD = 500; public static final float DEFAULT_FULL_VALIDATION_RATE = .1f; + public static final int DEFAULT_FAST_SYNC_MINIMUM_PEERS = 5; private static final Duration DEFAULT_FAST_SYNC_MAXIMUM_PEER_WAIT_TIME = Duration.ofMinutes(5); private static final int DEFAULT_WORLD_STATE_HASH_COUNT_PER_REQUEST = 200; private static final int DEFAULT_WORLD_STATE_REQUEST_PARALLELISM = 10;
codereview_java_data_5914
byte[] exp; if (byteBuffer.hasArray()) { exp = byteBuffer.array(); - pos = compressStart; } else { exp = Utils.newBytes(expLen); buff.position(compressStart).get(exp); Here we need `pos = byteBuffer.arrayOffset() + compressStart` I guess. byte[] exp; if (byteBuffer.hasArray()) { exp = byteBuffer.array(); + pos = byteBuffer.arrayOffset() + compressStart; } else { exp = Utils.newBytes(expLen); buff.position(compressStart).get(exp);
codereview_java_data_5926
LOG.warn("HivePrefix is not defined. Skip hive registration"); } } - if (hiveTableName != null && !mConfig.getSkipQuboleAddPartition()) { mQuboleClient.addPartition(hiveTableName, sb.toString()); } } catch (Exception e) { double negate might be a bit confusing. It might be better to have a positive parameter name, e.g. enableQubole = true LOG.warn("HivePrefix is not defined. Skip hive registration"); } } + if (hiveTableName != null && mConfig.getQuboleEnabled()) { mQuboleClient.addPartition(hiveTableName, sb.toString()); } } catch (Exception e) {
codereview_java_data_5938
private OAuth2AccessTokenResponseClient<OAuth2ClientCredentialsGrantRequest> clientCredentialsTokenResponseClient = new DefaultClientCredentialsTokenResponseClient(); - private Clock clock = Clock.systemUTC(); - private Duration accessTokenExpiresSkew = Duration.ofMinutes(1); - - /** * Constructs an {@code OAuth2AuthorizedClientArgumentResolver} using the provided parameters. * We need to provide a way for the user to configure `accessTokenExpiresSkew` private OAuth2AccessTokenResponseClient<OAuth2ClientCredentialsGrantRequest> clientCredentialsTokenResponseClient = new DefaultClientCredentialsTokenResponseClient(); /** * Constructs an {@code OAuth2AuthorizedClientArgumentResolver} using the provided parameters. *
codereview_java_data_5940
* * @param str An hexadecimal string representing a valid account address (strictly 20 bytes). * @return The parsed address. - * @throws NullPointerException if the provided string is {@code null}. * @throws IllegalArgumentException if the string is either not hexadecimal, or not the valid * representation of a 20 byte address. */ public static Address fromHexStringStrict(final String str) { - if (str == null) return null; return new Address(BytesValue.fromHexString(str)); } Looks like you return null rather than throw a null pointer exception when the hexstring is null * * @param str An hexadecimal string representing a valid account address (strictly 20 bytes). * @return The parsed address. + * @throws IllegalArgumentException if the provided string is {@code null}. * @throws IllegalArgumentException if the string is either not hexadecimal, or not the valid * representation of a 20 byte address. */ public static Address fromHexStringStrict(final String str) { + checkArgument(str != null); return new Address(BytesValue.fromHexString(str)); }
codereview_java_data_5944
service = services.get(csid); if (service == null) { log.error( - "Tablet {} returned non existant compaction service {} for compaction type {}. Check" + " the table compaction dispatcher configuration. Attempting to fall back to " + "{} service.", compactable.getExtent(), csid, ctype, DEFAULT_SERVICE); ```suggestion "Tablet {} returned non existent compaction service {} for compaction type {}. Check" ``` service = services.get(csid); if (service == null) { log.error( + "Tablet {} returned non-existent compaction service {} for compaction type {}. Check" + " the table compaction dispatcher configuration. Attempting to fall back to " + "{} service.", compactable.getExtent(), csid, ctype, DEFAULT_SERVICE);
codereview_java_data_5947
appendExecCmdParameters(cmd, BatchConstants.ARG_SEGMENT_ID, seg.getUuid());
appendExecCmdParameters(cmd, BatchConstants.ARG_PARTITION,
getRowkeyDistributionOutputPath(jobId) + "/part-r-00000");
if(this.seg.getConfig().isHFileDistCP()){
- String partitionOutputPath = getRealizationRootPath(jobId) + "/rowkey_stats/part-r-00000_hfile";
- appendExecCmdParameters(cmd, BatchConstants.ARG_PARTITION, partitionOutputPath);
}else {
- appendExecCmdParameters(cmd, BatchConstants.ARG_PARTITION,
- getRowkeyDistributionOutputPath(jobId) + "/part-r-00000");
}
appendExecCmdParameters(cmd, BatchConstants.ARG_CUBOID_MODE, cuboidMode.toString());
appendExecCmdParameters(cmd, BatchConstants.ARG_HBASE_CONF_PATH, getHBaseConfFilePath(jobId));
Appends parameters twice for `BatchConstants.ARG_PARTITION`.
appendExecCmdParameters(cmd, BatchConstants.ARG_SEGMENT_ID, seg.getUuid());
appendExecCmdParameters(cmd, BatchConstants.ARG_PARTITION,
getRowkeyDistributionOutputPath(jobId) + "/part-r-00000");
+ String partitionOutputPath = null;
if(this.seg.getConfig().isHFileDistCP()){
+ partitionOutputPath = getRealizationRootPath(jobId) + "/rowkey_stats/part-r-00000_hfile";
}else {
+ partitionOutputPath = getRowkeyDistributionOutputPath(jobId) + "/part-r-00000";
}
+ appendExecCmdParameters(cmd, BatchConstants.ARG_PARTITION, partitionOutputPath);
appendExecCmdParameters(cmd, BatchConstants.ARG_CUBOID_MODE, cuboidMode.toString());
appendExecCmdParameters(cmd, BatchConstants.ARG_HBASE_CONF_PATH, getHBaseConfFilePath(jobId));