id stringlengths 23 26 | content stringlengths 182 2.49k |
|---|---|
codereview_java_data_3662 | public List<Node> getNodes() {
return distributorStatus.get().getNodes().stream()
- .map(summary -> new Node(summary.getNodeId(), summary.getUri(), summary.isUp(),
- summary.getMaxSessionCount(), summary.getStereotypes()))
.collect(ImmutableList.toImmutableList());
}
Break things on to one parameter per line now.
public List<Node> getNodes() {
return distributorStatus.get().getNodes().stream()
+ .map(summary -> new Node(summary.getNodeId(), summary.getUri(), summary.isUp(), summary.getMaxSessionCount()))
.collect(ImmutableList.toImmutableList());
} |
codereview_java_data_3664 | JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
closeResponse(response);
assertThatJson(responseNode)
- .when(Option.IGNORING_EXTRA_FIELDS, Option.IGNORING_EXTRA_ARRAY_ITEMS, Option.IGNORING_ARRAY_ORDER)
.isEqualTo("{"
+ " id: '" + formDefinition.getId() + "',"
+ " key: '" + formDefinition.getKey() + "',"
The `IGNORING_EXTRA_ARRAY_ITEMS` is extra here. We should verify that there are only 2 elements. Sorry I didn't see this before
JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
closeResponse(response);
assertThatJson(responseNode)
+ .when(Option.IGNORING_EXTRA_FIELDS, Option.IGNORING_ARRAY_ORDER)
.isEqualTo("{"
+ " id: '" + formDefinition.getId() + "',"
+ " key: '" + formDefinition.getKey() + "'," |
codereview_java_data_3665 | /** Test case for
* <a href="https://issues.apache.org/jira/browse/CALCITE-4724">[CALCITE-4724]
- * As for ClickHouse do not support values in from clause.
*/
@Test void testAliasedValueForClickHouse() {
final String query = "select 1";
1. the test descript should be the same as the CALCITE-4724. 2. the java doc lack of end label about \</a>
/** Test case for
* <a href="https://issues.apache.org/jira/browse/CALCITE-4724">[CALCITE-4724]
+ * ClickHouseSqlDialect `supportsAliasedValues` should return false. </a>
*/
@Test void testAliasedValueForClickHouse() {
final String query = "select 1"; |
codereview_java_data_3668 | * @return Whether condition is supported
*/
private static boolean canJoinOnCondition(RexNode node) {
- if (node.isAlwaysTrue() || node.isAlwaysFalse()) {
- return true;
- }
final List<RexNode> operands;
switch (node.getKind()) {
case AND:
case OR:
operands = ((RexCall) node).getOperands();
Add another case condition: ``` // *************** case LITERAL: return true; ``` is more suitable ?
* @return Whether condition is supported
*/
private static boolean canJoinOnCondition(RexNode node) {
final List<RexNode> operands;
switch (node.getKind()) {
+ case LITERAL:
+ // literal on a join condition would be TRUE or FALSE
+ return true;
case AND:
case OR:
operands = ((RexCall) node).getOperands(); |
codereview_java_data_3672 | int[] pri;
// user privilege config
UserPrivilegesConfig userPrivilegesConfig = user.getPrivilegesConfig();
- boolean isCheck = userPrivilegesConfig == null || !userPrivilegesConfig.isCheck();
for (String schema : user.getSchemas()) {
- if (isCheck || userPrivilegesConfig.getSchemaPrivilege(schema) == null) {
tableName = "*";
pri = ALL_PRIVILEGES;
RowDataPacket row = getRow(userName, schema, tableName, pri, c.getCharset().getResults());
isCheck should be noNeedCheck
int[] pri;
// user privilege config
UserPrivilegesConfig userPrivilegesConfig = user.getPrivilegesConfig();
+ boolean noNeedCheck = userPrivilegesConfig == null || !userPrivilegesConfig.isCheck();
for (String schema : user.getSchemas()) {
+ if (noNeedCheck || userPrivilegesConfig.getSchemaPrivilege(schema) == null) {
tableName = "*";
pri = ALL_PRIVILEGES;
RowDataPacket row = getRow(userName, schema, tableName, pri, c.getCharset().getResults()); |
codereview_java_data_3690 | private HttpResponseHandler<? extends Throwable> exceptionResponseHandler;
private Executor executor;
private CompletableFuture<Void> future;
private Builder() {
}
I think this might be misleading. `isDone` is set to true when the the stream completes normally as well (in `onEventComplete`)
private HttpResponseHandler<? extends Throwable> exceptionResponseHandler;
private Executor executor;
private CompletableFuture<Void> future;
+ private String serviceName;
private Builder() {
} |
codereview_java_data_3698 | }
private void useRequestSlot(final EthPeer peer) throws PeerNotConnected {
- peer.getNodeData(emptyList());
}
@SuppressWarnings("unchecked")
private void assertRequestSuccessful(final PendingPeerRequest pendingRequest) {
final Consumer<ResponseStream> onSuccess = mock(Consumer.class);
- pendingRequest.thenEither(onSuccess, error -> fail("Request should have executed", error));
verify(onSuccess).accept(any());
}
Seems like `getNodeData()` should throw an exception here. For now, it's probably worth at least sending a non-empty list to this method in order to future-proof these tests.
}
private void useRequestSlot(final EthPeer peer) throws PeerNotConnected {
+ peer.getNodeData(singletonList(Hash.ZERO));
}
@SuppressWarnings("unchecked")
private void assertRequestSuccessful(final PendingPeerRequest pendingRequest) {
final Consumer<ResponseStream> onSuccess = mock(Consumer.class);
+ pendingRequest.then(onSuccess, error -> fail("Request should have executed", error));
verify(onSuccess).accept(any());
} |
codereview_java_data_3704 | public static final String EMPTY = "";
public static String newString4UTF8(byte[] bytes) {
- return new String(bytes, Charset.forName("UTF-8"));
}
public static boolean isBlank(String str) {
like before said, I think use constant value will be better.
public static final String EMPTY = "";
public static String newString4UTF8(byte[] bytes) {
+ return new String(bytes, Charset.forName(Constants.ENCODE));
}
public static boolean isBlank(String str) { |
codereview_java_data_3705 | try (BufferedWriter nsWriter = new BufferedWriter(new FileWriter(namespaceScript, UTF_8))) {
nsWriter.write(createNsFormat.format(new String[] {namespace}));
TreeMap<String,String> props = new TreeMap<>();
- for (Entry<String,String> p : accumuloClient.namespaceOperations().getPropertiesMap(namespace)
- .entrySet()) {
- props.put(p.getKey(), p.getValue());
- }
for (Entry<String,String> entry : props.entrySet()) {
String defaultValue = getDefaultConfigValue(entry.getKey());
if (defaultValue == null || !defaultValue.equals(entry.getValue())) {
Many of these can be simplified with `Map.forEach`: ```suggestion accumuloClient.namespaceOperations().getPropertiesMap(namespace).forEach(props::put); ``` Basically, if the body of the loop doesn't throw a checked exception, this is possible, and almost always better.
try (BufferedWriter nsWriter = new BufferedWriter(new FileWriter(namespaceScript, UTF_8))) {
nsWriter.write(createNsFormat.format(new String[] {namespace}));
TreeMap<String,String> props = new TreeMap<>();
+ accumuloClient.namespaceOperations().getConfiguration(namespace).forEach(props::put);
for (Entry<String,String> entry : props.entrySet()) {
String defaultValue = getDefaultConfigValue(entry.getKey());
if (defaultValue == null || !defaultValue.equals(entry.getValue())) { |
codereview_java_data_3712 | return spec;
}
public static MetricsConfig fromProperties(Map<String, String> props) {
MetricsConfig spec = new MetricsConfig();
String defaultModeAsString = props.getOrDefault(DEFAULT_WRITE_METRICS_MODE, DEFAULT_WRITE_METRICS_MODE_DEFAULT);
Will we fail jobs if the default mode is invalid? Will it make sense to fallback `DEFAULT_WRITE_METRICS_MODE_DEFAULT`?
return spec;
}
+ @SuppressWarnings("checkstyle:CatchBlockLogException")
public static MetricsConfig fromProperties(Map<String, String> props) {
MetricsConfig spec = new MetricsConfig();
String defaultModeAsString = props.getOrDefault(DEFAULT_WRITE_METRICS_MODE, DEFAULT_WRITE_METRICS_MODE_DEFAULT); |
codereview_java_data_3713 | @Nonnull SourceBufferConsumerSide<? extends T> buffer,
@Nullable WatermarkGenerationParams<? super T> wmParams
) {
this.createFn = createFn;
this.fillBufferFn = fillBufferFn;
this.destroyFn = destroyFn;
Processor is not marked as non-cooperative.
@Nonnull SourceBufferConsumerSide<? extends T> buffer,
@Nullable WatermarkGenerationParams<? super T> wmParams
) {
+ setCooperative(false);
this.createFn = createFn;
this.fillBufferFn = fillBufferFn;
this.destroyFn = destroyFn; |
codereview_java_data_3714 | try {
BatchScanner scanner = client.createBatchScanner(level.metaTable(), Authorizations.EMPTY);
- List<Range> ranges =
- extents.stream().map(e -> e.toMetaRange()).collect(Collectors.toList());
scanner.setRanges(ranges);
configureColumns(scanner);
3 possible changes here. The first is `KeyExtent::toMetaRange` for the map function. The other two changes are attempts to make this line shorter for readability. Could use `var` here, or static import `toList()`. I did both below, but it's up to you what you're comfortable with. I think it's worth trying to get it to not wrap, though. ```suggestion var ranges = extents.stream().map(KeyExtent::toMetaRange).collect(toList()); ```
try {
BatchScanner scanner = client.createBatchScanner(level.metaTable(), Authorizations.EMPTY);
+ var ranges = extents.stream().map(KeyExtent::toMetaRange).collect(toList());
scanner.setRanges(ranges);
configureColumns(scanner); |
codereview_java_data_3718 | * are enabled. Otherwise empty metrics will be returned.
* <p>
* Keep in mind that the collections may occur at different times on
- * each member, metrics from various members aren't from the same instant
- * of time.
* <p>
* When a job is restarted (or resumed after being previously suspended)
* the metrics are reset too, their values will reflect only updates
"instant of time" -> "instant"
* are enabled. Otherwise empty metrics will be returned.
* <p>
* Keep in mind that the collections may occur at different times on
+ * each member, metrics from various members aren't from the same instant.
* <p>
* When a job is restarted (or resumed after being previously suspended)
* the metrics are reset too, their values will reflect only updates |
codereview_java_data_3728 | if (sgv.getMills() > latestDateInReceivedData)
latestDateInReceivedData = sgv.getMills();
}
- // Was that sgv more than 15 mins ago ?
- boolean moreThan15MinAgo = false;
if((System.currentTimeMillis()-latestDateInReceivedData)/(60 * 1000L) < 15L )
- moreThan15MinAgo = true;
- if(Notification.isAlarmForStaleData() && moreThan15MinAgo){
MainApp.bus().post(new EventDismissNotification(Notification.NSALARM));
}
BroadcastSgvs.handleNewSgv(sgvs, MainApp.instance().getApplicationContext(), isDelta);
should not be this lessThan15MinAgo ?
if (sgv.getMills() > latestDateInReceivedData)
latestDateInReceivedData = sgv.getMills();
}
+ // Was that sgv more less 15 mins ago ?
+ boolean lessThan15MinAgo = false;
if((System.currentTimeMillis()-latestDateInReceivedData)/(60 * 1000L) < 15L )
+ lessThan15MinAgo = true;
+ if(Notification.isAlarmForStaleData() && lessThan15MinAgo){
MainApp.bus().post(new EventDismissNotification(Notification.NSALARM));
}
BroadcastSgvs.handleNewSgv(sgvs, MainApp.instance().getApplicationContext(), isDelta); |
codereview_java_data_3734 | @Override
public Iterable<FileScanTask> split(long splitSize) {
- if (file.splitOffsets() != null) {
- return () -> new OffsetsBasedSplitScanTaskIterator(file.splitOffsets(), this);
- } else if (file.format().isSplittable()) {
- return () -> new FixedSizeSplitScanTaskIterator(splitSize, this);
}
return ImmutableList.of(this);
}
The first check should still be whether the format is splittable. If it isn't, we should not split even if there are recommended locations.
@Override
public Iterable<FileScanTask> split(long splitSize) {
+ if (file.format().isSplittable()) {
+ if (file.splitOffsets() != null) {
+ return () -> new OffsetsBasedSplitScanTaskIterator(file.splitOffsets(), this);
+ } else {
+ return () -> new FixedSizeSplitScanTaskIterator(splitSize, this);
+ }
}
return ImmutableList.of(this);
} |
codereview_java_data_3738 | * @param b the second key
* @return -1 if the first key is smaller, 1 if bigger, 0 if equal
*/
final int compare(Object a, Object b) {
return keyType.compare((K)a, (K)b);
}
`@SuppressWarnings("unchecked")` is needed on this method.
* @param b the second key
* @return -1 if the first key is smaller, 1 if bigger, 0 if equal
*/
+ @SuppressWarnings("unchecked")
final int compare(Object a, Object b) {
return keyType.compare((K)a, (K)b);
} |
codereview_java_data_3740 | * it before returning it. If the elements of the list are mutated, they
* must be copied as well.
* <p>
- * The returned function must not return {@code null} for any accumulator.
*/
@Nonnull
DistributedFunction<? super A, ? extends R> exportFn();
It should just say "must not return null", the rest of the sentence just opens room for confusion. It may be taken to mean "it must not return null regardless of the accumulator".
* it before returning it. If the elements of the list are mutated, they
* must be copied as well.
* <p>
+ * The returned function must never return {@code null}. In other words,
+ * for any accumulator it must return a non-null exported value.
*/
@Nonnull
DistributedFunction<? super A, ? extends R> exportFn(); |
codereview_java_data_3753 | static class PositionReader implements ParquetValueReader<Long> {
private long rowOffsetInCurrentRowGroup = -1;
- private long rowGroupRowOffsetInfile;
@Override
public Long read(Long reuse) {
rowOffsetInCurrentRowGroup = rowOffsetInCurrentRowGroup + 1;
- return rowGroupRowOffsetInfile + rowOffsetInCurrentRowGroup;
}
@Override
Nit: Should be `rowGroupRowOffsetInFile` to maintain consistent camel casing in the variable name.
static class PositionReader implements ParquetValueReader<Long> {
private long rowOffsetInCurrentRowGroup = -1;
+ private long rowGroupRowOffsetInFile;
@Override
public Long read(Long reuse) {
rowOffsetInCurrentRowGroup = rowOffsetInCurrentRowGroup + 1;
+ return rowGroupRowOffsetInFile + rowOffsetInCurrentRowGroup;
}
@Override |
codereview_java_data_3763 | progressBar.setVisibility(View.GONE);
}, error -> Log.e(TAG, Log.getStackTraceString(error)));
- if (UserPreferences.getFeedFilter() != UserPreferences.FEED_FILTER_NONE)
filterMsg.setVisibility(View.VISIBLE);
- else
filterMsg.setVisibility(View.GONE);
}
@Override
Please add braces. To see more checkstyle violations, please have a look at the output of the `static-analysis` CI task.
progressBar.setVisibility(View.GONE);
}, error -> Log.e(TAG, Log.getStackTraceString(error)));
+ if (UserPreferences.getFeedFilter() != UserPreferences.FEED_FILTER_NONE) {
filterMsg.setVisibility(View.VISIBLE);
+ } else {
filterMsg.setVisibility(View.GONE);
+ }
}
@Override |
codereview_java_data_3777 | private DefaultStreamService defaultStreamService;
@Mock
- private StreamDeploymentRepository stramDeploymentRepository;
@Before
public void setup() {
controller = new StreamDeploymentController(streamDefinitionRepository, deploymentIdRepository, appRegistry,
- appDeployer, metadataResolver, commonApplicationProperties, stramDeploymentRepository,
defaultStreamService);
}
`streamDeploymentRepository` is misspelled.
private DefaultStreamService defaultStreamService;
@Mock
+ private StreamDeploymentRepository streamDeploymentRepository;
@Before
public void setup() {
controller = new StreamDeploymentController(streamDefinitionRepository, deploymentIdRepository, appRegistry,
+ appDeployer, metadataResolver, commonApplicationProperties, streamDeploymentRepository,
defaultStreamService);
} |
codereview_java_data_3779 | final List<Address> committers =
IbftBlockHashing.recoverCommitterAddresses(header, ibftExtraData);
- if (!validateCommitters(committers, validatorProvider.getValidators())) {
- return false;
- }
- return true;
}
private boolean validateCommitters(
nit: this can be simplified to return validateCommitters(committers, validatorProvider.getValidators());
final List<Address> committers =
IbftBlockHashing.recoverCommitterAddresses(header, ibftExtraData);
+ return validateCommitters(committers, validatorProvider.getValidators());
}
private boolean validateCommitters( |
codereview_java_data_3781 | import static java.lang.String.format;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.assertThrows;
import org.flowable.common.engine.api.FlowableException;
import org.flowable.engine.impl.test.PluggableFlowableTestCase;
import org.flowable.engine.test.Deployment;
import org.flowable.task.api.Task;
import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.function.Executable;
/**
* @author Tijs Rademakers
Where as there are about 6 uses of `assertThrows` in the code base, the primary form used for this type of test is `org.assertj.core.api.Assertions.assertThatThrownBy;` with 1000+ uses. Please consider changing to the more common format. Thanks.
import static java.lang.String.format;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import org.assertj.core.api.ThrowableAssert.ThrowingCallable;
import org.flowable.common.engine.api.FlowableException;
import org.flowable.engine.impl.test.PluggableFlowableTestCase;
import org.flowable.engine.test.Deployment;
import org.flowable.task.api.Task;
import org.junit.jupiter.api.Test;
/**
* @author Tijs Rademakers |
codereview_java_data_3783 | return create(
new PantheonFactoryConfigurationBuilder()
.setName(name)
- .jsonRpcEnabled()
.setJsonRpcConfiguration(jsonRpcConfigWithAdmin())
.webSocketEnabled()
.setDiscoveryEnabled(false)
I think this is redundant.
return create(
new PantheonFactoryConfigurationBuilder()
.setName(name)
.setJsonRpcConfiguration(jsonRpcConfigWithAdmin())
.webSocketEnabled()
.setDiscoveryEnabled(false) |
codereview_java_data_3788 | }
TokenEntry cpdToken = new TokenEntry(token.getImage(),
filename,
- token.getBeginLine() + 1,
- token.getBeginColumn() + 1,
- token.getEndColumn() + 1);
tokenEntries.add(cpdToken);
}
} catch (Exception e) {
It's the getBeginLine implementation that should add `+1`, same for the other methods
}
TokenEntry cpdToken = new TokenEntry(token.getImage(),
filename,
+ token.getBeginLine(),
+ token.getBeginColumn(),
+ token.getEndColumn());
tokenEntries.add(cpdToken);
}
} catch (Exception e) { |
codereview_java_data_3792 | assertEntryPointIdExists("simpleFactList");
}
- @Disabled
public void getDatasourceType() {
final Optional<Class<?>> dataSourceType = ruleUnitDescr.getDatasourceType("nonexisting");
Assertions.assertThat(dataSourceType).isNotPresent();
Can't we fix those tests instead of disabling them?
assertEntryPointIdExists("simpleFactList");
}
+ @Test
public void getDatasourceType() {
final Optional<Class<?>> dataSourceType = ruleUnitDescr.getDatasourceType("nonexisting");
Assertions.assertThat(dataSourceType).isNotPresent(); |
codereview_java_data_3793 | package org.apache.accumulo.core.clientImpl.bulk;
import static org.junit.Assert.assertEquals;
import java.util.SortedMap;
import java.util.TreeMap;
@keith-turner Try adding this line. Looks like the checkstyle is expecting Assert methods to be static imports. import static org.junit.Assert.assertEquals;
package org.apache.accumulo.core.clientImpl.bulk;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
import java.util.SortedMap;
import java.util.TreeMap; |
codereview_java_data_3805 | }
}
- private K parseKore(Module mod) {
- try {
- return KoreToK.parseKoreToK(options.fileToParse(), idsToLabelsProvider.getKoreToKLabels(), mod.sortAttributesFor());
- } catch (ParseError parseError) {
- throw KEMException.criticalError("Parse error" );
- }
- }
-
public static K deserialize(String stringToParse, InputModes inputMode) {
switch (inputMode) {
case JSON:
I don't like that `KRead` has a `KastOptions` put into it. Can we instead make this function just take a string to parse, and assume that reading the file has been handled elsewhere? The main reason is that we intend to make KRun and KProve go through KRead as well (so that we can avoid parse-time by passing in JSON input or BINARY input, for example), and they do their own file-handling code.
}
}
public static K deserialize(String stringToParse, InputModes inputMode) {
switch (inputMode) {
case JSON: |
codereview_java_data_3817 | import org.kframework.definition.Production;
import org.kframework.definition.Terminal;
import org.kframework.parser.Constant;
import org.kframework.parser.SetsTransformerWithErrors;
import org.kframework.parser.Term;
import org.kframework.parser.TermCons;
This is not a sound way of generating names for variables, because the user could write a variable named `_Gen0` in their rule. You need to either change it back to naming the variable "_" so the anonymous variables pass will give it a fresh name, or else you need to collect all the variables mentioned in the sentence and increment the counter inside a loop until you find one that isn't already used.
import org.kframework.definition.Production;
import org.kframework.definition.Terminal;
import org.kframework.parser.Constant;
+import org.kframework.parser.SafeTransformer;
import org.kframework.parser.SetsTransformerWithErrors;
import org.kframework.parser.Term;
import org.kframework.parser.TermCons; |
codereview_java_data_3835 | private static final Logger LOGGER = LoggerFactory.getLogger(NotifyCenter.class);
- public static int RING_BUFFER_SIZE = 16384;
- public static int SHARE_BUFFER_SIZE = 1024;
private static final AtomicBoolean CLOSED = new AtomicBoolean(false);
No `static final` use camel naming. Your changes can't pass checkstyle
private static final Logger LOGGER = LoggerFactory.getLogger(NotifyCenter.class);
+ public static int ringBufferSize = 16384;
+ public static int shareBufferSize = 1024;
private static final AtomicBoolean CLOSED = new AtomicBoolean(false); |
codereview_java_data_3837 | c.tableOperations().setProperty(tableName, propertyName, description1);
// Loop through properties to make sure the new property is added to the list
- int count = 0;
- for (Entry<String,String> property : c.tableOperations().getPropertiesMap(tableName)
- .entrySet()) {
- if (property.getKey().equals(propertyName) && property.getValue().equals(description1))
- count++;
- }
assertEquals(count, 1);
// Set the property as something different
This is a place that could be cleaned up quite a bit with a stream: ```java long count = c.tableOperations().getPropertiesMap(tableName).entrySet().stream().filter(e -> e.getKey().equals(propertyName) && e.getValue().equals(description1)).count(); ```
c.tableOperations().setProperty(tableName, propertyName, description1);
// Loop through properties to make sure the new property is added to the list
+ long count = c.tableOperations().getConfiguration(tableName).entrySet().stream()
+ .filter(e -> e.getKey().equals(propertyName) && e.getValue().equals(description1))
+ .count();
assertEquals(count, 1);
// Set the property as something different |
codereview_java_data_3842 | public Constraint<Double> applyBolusConstraints(Constraint<Double> insulin) {
if (statusResult != null) {
insulin.setIfSmaller(statusResult.maximumBolusAmount, String.format(MainApp.gs(R.string.limitingbolus), statusResult.maximumBolusAmount, MainApp.gs(R.string.pumplimit)), this);
}
return insulin;
}
You should leave that line, but skip it if the insulin amount is 0.
public Constraint<Double> applyBolusConstraints(Constraint<Double> insulin) {
if (statusResult != null) {
insulin.setIfSmaller(statusResult.maximumBolusAmount, String.format(MainApp.gs(R.string.limitingbolus), statusResult.maximumBolusAmount, MainApp.gs(R.string.pumplimit)), this);
+
+ if(insulin.value() != 0)
+ insulin.setIfGreater(statusResult.minimumBolusAmount, String.format(MainApp.gs(R.string.limitingbolus), statusResult.maximumBolusAmount, MainApp.gs(R.string.pumplimit)), this);
}
return insulin;
} |
codereview_java_data_3852 | import org.apache.calcite.avatica.util.ByteString;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
The MySQL says that > This function requires MySQL to have been compiled with a compression library such as zlib Should we follow that ?
import org.apache.calcite.avatica.util.ByteString;
+import org.apache.commons.lang3.StringUtils;
+
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer; |
codereview_java_data_3861 | // Decode the identity header when loading a draft.
// See buildIdentityHeader(TextBody) for a detailed description of the composition of this blob.
Map<IdentityField, String> k9identity = new HashMap<IdentityField, String>();
- if (message.getHeader(K9.IDENTITY_HEADER).length > 0 && message.getHeader(K9.IDENTITY_HEADER)[0] != null) {
- k9identity = IdentityHeaderParser.parse(message.getHeader(K9.IDENTITY_HEADER)[0]);
}
Identity newIdentity = new Identity();
Can you please extract `message.getHeader(K9.IDENTITY_HEADER)` to a local variable.
// Decode the identity header when loading a draft.
// See buildIdentityHeader(TextBody) for a detailed description of the composition of this blob.
Map<IdentityField, String> k9identity = new HashMap<IdentityField, String>();
+ String[] identityHeaders = message.getHeader(K9.IDENTITY_HEADER);
+
+ if (identityHeaders.length > 0 && identityHeaders[0] != null) {
+ k9identity = IdentityHeaderParser.parse(identityHeaders[0]);
}
Identity newIdentity = new Identity(); |
codereview_java_data_3864 | this.cacheType = StringUtils.toUpperEnglish(
ci.removeProperty("CACHE_TYPE", Constants.CACHE_TYPE_DEFAULT));
this.ignoreCatalogs = ci.getProperty("IGNORE_CATALOGS",
- dbSettings.ignoreCatalogs ? "TRUE" : "FALSE").equals("TRUE");
openDatabase(traceLevelFile, traceLevelSystemOut, closeAtVmShutdown, ci);
}
You should use `getProperty(String, boolean)` here.
this.cacheType = StringUtils.toUpperEnglish(
ci.removeProperty("CACHE_TYPE", Constants.CACHE_TYPE_DEFAULT));
this.ignoreCatalogs = ci.getProperty("IGNORE_CATALOGS",
+ dbSettings.ignoreCatalogs);
openDatabase(traceLevelFile, traceLevelSystemOut, closeAtVmShutdown, ci);
} |
codereview_java_data_3867 | public CompletableFuture<Void> eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest,
Publisher<InputEventStream> requestStream, EventStreamOperationResponseHandler asyncResponseHandler) {
try {
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
Multiple instances of JsonOperationMetadata are created in this operation
public CompletableFuture<Void> eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest,
Publisher<InputEventStream> requestStream, EventStreamOperationResponseHandler asyncResponseHandler) {
try {
+ eventStreamOperationRequest = applySignerOverride(eventStreamOperationRequest, EventStreamAws4Signer.create());
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build(); |
codereview_java_data_3868 | import com.google.gson.JsonPrimitive;
import com.pinterest.secor.util.BackOffUtil;
import org.apache.hadoop.hive.common.type.HiveDecimal;
-import org.apache.hadoop.hive.ql.exec.vector.*;
import org.apache.orc.TypeDescription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Please list out the each import individually, don't use *
import com.google.gson.JsonPrimitive;
import com.pinterest.secor.util.BackOffUtil;
import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory; |
codereview_java_data_3873 | }
break;
default:
break;
}
} else {
Why did you remove this line "rowBuilder.set(i, s);"? We still want to set the value if the type is not numeric.
}
break;
default:
+ rowBuilder.set(i, s);
break;
}
} else { |
codereview_java_data_3879 | return true;
}
- @SuppressWarnings("ByteBufferBackingArray")
private Object convertPartitionValue(Type type, Object value) {
if (type.typeId() == Types.BinaryType.get().typeId()) {
ByteBuffer buffer = (ByteBuffer) value;
- return new DataByteArray(buffer.get(new byte[buffer.remaining()]).array());
}
return value;
I don't think this is correct. This calls `buffer.get(new byte[...])`, which returns the `ByteBuffer` that `get` was called on. It is no different than returning `new DataByteArray(buffer.array())`. Calling `get` shows that the intent was to read the bytes into a new array and pass that to create a `DataByteArray`. The correct implementation is this: ```java ByteBuffer buffer = (ByteBuffer) value; byte[] bytes = new byte[buffer.remaining()]; buffer.get(bytes); return new DataByteArray(bytes); ```
return true;
}
private Object convertPartitionValue(Type type, Object value) {
if (type.typeId() == Types.BinaryType.get().typeId()) {
ByteBuffer buffer = (ByteBuffer) value;
+ byte[] bytes = new byte[buffer.remaining()];
+ buffer.get(bytes);
+ // Return the input buffer back to its original position.
+ buffer.position(buffer.position() - bytes.length);
+ return new DataByteArray(bytes);
}
return value; |
codereview_java_data_3885 | }
@Override
- public <R> Try<R> then(CheckedFunction1<T, R> f) {
return flatMap(value -> Try.of(() -> f.apply(value)));
}
- @Override
- public Try<T> thenRun(CheckedConsumer<T> f) {
- Try<Void> result = flatMap(value -> Try.run(() -> f.accept(value)));
- return result.isSuccess() ? this : new Failure<T>(result.failed().get());
- }
-
@Override
public boolean equals(Object obj) {
return (obj == this) || (obj instanceof Success && Objects.equals(value, ((Success<?>) obj).value));
This implementation is a bit shorter - we do not need to wrap it in a flatMap because we know that we are already in the Success case: ``` java public Try<T> andThen(CheckedConsumer<? super T> consumer) { return Try.run(() -> consumer.accept(value)).flatMap(ignored -> this); } ```
}
@Override
+ public <R> Try<R> mapTry(CheckedFunction1<T, R> f) {
return flatMap(value -> Try.of(() -> f.apply(value)));
}
@Override
public boolean equals(Object obj) {
return (obj == this) || (obj instanceof Success && Objects.equals(value, ((Success<?>) obj).value)); |
codereview_java_data_3893 | */
@SafeVarargs
@SuppressWarnings("varargs")
- static <U> Arbitrary<U> fixed(U... values) {
- return forAll(Gen.choose(values));
}
/**
Hi @talios, finally I'm able to answer... Thanks - the code looks great. Instead of `fixed` and `forAll` I would name the methods `of(T...)` and `ofAll(Gen)` to follow the naming scheme of the collections. What do you think? We already have `gen.arbitrary()` but I think having `Arbitrary.ofAll(Gen)` is an intuitive place to look for a factory.
*/
@SafeVarargs
@SuppressWarnings("varargs")
+ static <U> Arbitrary<U> of(U... values) {
+ return ofAll(Gen.choose(values));
}
/** |
codereview_java_data_3895 | createLbCleanupRequest(requestId, matchingActiveTaskIds);
}
} else {
- if (matchingActiveTaskIds.iterator().hasNext()) {
delete(requestCleanup, matchingActiveTaskIds);
} else {
Optional<SingularityRequestHistory> maybeHistory = requestHistoryHelper.getLastHistory(requestId);
if (maybeHistory.isPresent() && maybeHistory.get().getRequest().isLoadBalanced() && configuration.isDeleteRemovedRequestsFromLoadBalancer()) {
createLbCleanupRequest(requestId, matchingActiveTaskIds);
- requestManager.deleted(maybeHistory.get().getRequest(), RequestHistoryType.DELETED, System.currentTimeMillis(), Optional.<String>absent(), Optional.<String>absent());
}
cleanupDeployState(requestCleanup);
}
Maybe `Iterables.isEmpty(matchingActiveTaskIds)` just for cleanliness/readability
createLbCleanupRequest(requestId, matchingActiveTaskIds);
}
} else {
+ if (!Iterables.isEmpty(matchingActiveTaskIds)) {
delete(requestCleanup, matchingActiveTaskIds);
} else {
Optional<SingularityRequestHistory> maybeHistory = requestHistoryHelper.getLastHistory(requestId);
if (maybeHistory.isPresent() && maybeHistory.get().getRequest().isLoadBalanced() && configuration.isDeleteRemovedRequestsFromLoadBalancer()) {
createLbCleanupRequest(requestId, matchingActiveTaskIds);
+ requestManager.markDeleted(maybeHistory.get().getRequest(), RequestHistoryType.DELETED, System.currentTimeMillis(), Optional.<String>absent(), Optional.<String>absent());
}
cleanupDeployState(requestCleanup);
} |
codereview_java_data_3901 | * @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see Pattern
*/
- public Seq<CharSeq> split(String regex, int limit) {
- final Seq<String> split = Array.wrap(toString().split(regex, limit));
return split.map(CharSeq::of);
}
this is the `3.0.0` branch
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see Pattern
*/
+ @Deprecated(/* Use splitSeq instead, will be removed in 3.0.0 */)
+ @GwtIncompatible
+ public CharSeq[] split(String regex, int limit) {
+ return splitSeq(regex, limit).toJavaArray(CharSeq.class);
+ }
+
+ public Seq<CharSeq> splitSeq(String regex, int limit) {
+ final Seq<String> split = Array.wrap(back.split(regex, limit));
return split.map(CharSeq::of);
} |
codereview_java_data_3905 | jobMetrics.delete(id);
JobResult jobResult = jobResults.get(id);
if (jobResult != null) {
- jobResult.destroy(instance);
jobResults.delete(id);
}
});
so observables are only cleared if there's more than 1000 of them? That doesn't sound good enough to me. There's also TTL of 7 days on the JobResults, but if it's cleared this way then the Ringbuffer will actually not be deleted, so there will be a leak.
jobMetrics.delete(id);
JobResult jobResult = jobResults.get(id);
if (jobResult != null) {
+ destroyObservables(jobResult.getOwnedObservables());
jobResults.delete(id);
}
}); |
codereview_java_data_3910 | *
* <ul>
* <li>{@link #existsUnique(Predicate)}</li>
* <li>{@link #isDistinct}</li>
* <li>{@link #isOrdered}</li>
- * <li>{@link #hasDefiniteSize()}</li>
* <li>{@link #isTraversableAgain()}</li>
* </ul>
*
Please move these below hasDefiniteSize()
*
* <ul>
* <li>{@link #existsUnique(Predicate)}</li>
+ * <li>{@link #hasDefiniteSize()}</li>
* <li>{@link #isDistinct}</li>
* <li>{@link #isOrdered}</li>
* <li>{@link #isTraversableAgain()}</li>
* </ul>
* |
codereview_java_data_3925 | @Override
protected String operation() {
- return DataOperations.APPEND;
}
@Override
This isn't an append because no data is added. Instead, use `DataOperations.REPLACE` to signal that the data in the table did not change.
@Override
protected String operation() {
+ return DataOperations.REPLACE;
}
@Override |
codereview_java_data_3926 | */
public static SQLException getJdbcSQLException(int errorCode)
{
- return getJdbcSQLException(errorCode, null);
}
/**
It should be `getJdbcSQLException(errorCode, (Throwable) null)` after renaming of methods, now this call is ambiguous.
*/
public static SQLException getJdbcSQLException(int errorCode)
{
+ return getJdbcSQLException(errorCode, (Throwable)null);
}
/** |
codereview_java_data_3937 | if (wasStored || store.getAutoCommitDelay() == 0) {
store.tryCommit();
} else {
- boolean empty = true;
- BitSet openTrans = openTransactions.get();
- for (int i = openTrans.nextSetBit(0); empty && i >= 0; i = openTrans.nextSetBit(i + 1)) {
- MVMap<Long, Object[]> undoLog = undoLogs[i];
- if (undoLog != null) {
- empty = undoLog.isEmpty();
- }
- }
- if (empty) {
// to avoid having to store the transaction log,
// if there is no open transaction,
// and if there have been many changes, store them now
Maybe replace it with ```Java if (undoLog != null && !undoLog.isEmpty()) { return; } ``` and remove the `empty` variable?
if (wasStored || store.getAutoCommitDelay() == 0) {
store.tryCommit();
} else {
+ if (isUndoEmpty()) {
// to avoid having to store the transaction log,
// if there is no open transaction,
// and if there have been many changes, store them now |
codereview_java_data_3939 | }
for (Tablet tablet : getOnlineTablets()) {
tablet.removeInUseLogs(candidates);
- if (candidates.size() == 0) {
break;
}
}
Should be `isEmpty()`?
}
for (Tablet tablet : getOnlineTablets()) {
tablet.removeInUseLogs(candidates);
+ if (candidates.isEmpty()) {
break;
}
} |
codereview_java_data_3945 | * @see Authentication
*/
public final class InMemoryOAuth2AuthorizedClientService implements OAuth2AuthorizedClientService {
private final ClientRegistrationRepository clientRegistrationRepository;
- private Map<OAuth2AuthorizedClientId, OAuth2AuthorizedClient> authorizedClients = new ConcurrentHashMap<>();
/**
* Constructs an {@code InMemoryOAuth2AuthorizedClientService} using the provided parameters.
If this constructor is called with a non-empty `Map<String, OAuth2AuthorizedClient> authorizedClients` and the `key(s)` are not correctly built as per `InMemoryOAuth2AuthorizedClientService.getIdentifier()` then a subsequent call to `loadAuthorizedClient()` would return `null` which would be in error. How can we ensure the `key(s)` in the provided `Map` are built correctly?
* @see Authentication
*/
public final class InMemoryOAuth2AuthorizedClientService implements OAuth2AuthorizedClientService {
+ private final Map<OAuth2AuthorizedClientId, OAuth2AuthorizedClient> authorizedClients = new ConcurrentHashMap<>();
private final ClientRegistrationRepository clientRegistrationRepository;
/**
* Constructs an {@code InMemoryOAuth2AuthorizedClientService} using the provided parameters. |
codereview_java_data_3952 | import tech.pegasys.pantheon.tests.acceptance.dsl.transaction.eth.EthTransactions;
public class ExpectBeneficiary implements Condition {
-
- private EthTransactions eth;
- private String beneficiary;
public ExpectBeneficiary(final EthTransactions eth, final PantheonNode node) {
this.eth = eth;
You could make these final, as they don't get changed
import tech.pegasys.pantheon.tests.acceptance.dsl.transaction.eth.EthTransactions;
public class ExpectBeneficiary implements Condition {
+ private final EthTransactions eth;
+ private final String beneficiary;
public ExpectBeneficiary(final EthTransactions eth, final PantheonNode node) {
this.eth = eth; |
codereview_java_data_3954 | public class Quotes {
/**
- * Convert strings with both quotes and ticks into: foo'"bar -> concat("foo'", '"', "bar")
*
* @param toEscape a text to escape quotes in, e.g. "f'oo"
* @return the same text with escaped quoted, e.g. "\"f'oo\""
This sentence doesn't make sense. Converts strings into what?
public class Quotes {
/**
+ * Convert strings with both quotes and ticks into a valid xpath component "concat(...)"
+ *
+ * For example, foo'"bar will be converted to concat("foo'", '"', "bar")
*
* @param toEscape a text to escape quotes in, e.g. "f'oo"
* @return the same text with escaped quoted, e.g. "\"f'oo\"" |
codereview_java_data_3955 | */
private boolean isRequiredToUploadAtTime(TopicPartition topicPartition) throws Exception{
final String topic = topicPartition.getTopic();
- final String topicFilter = mConfig.getKafkaTopicUploadAtMinuteMarkFilter();
- if (topicFilter == null || topicFilter.isEmpty()){ return false; }
- if (topic.matches(topicFilter)){
if (DateTime.now().minuteOfHour().get() == mConfig.getUploadMinuteMark()){
return true;
}
Is this because this is a slow volume topic? Otherwise modificationAgeSec would trigger the upload too.
*/
private boolean isRequiredToUploadAtTime(TopicPartition topicPartition) throws Exception{
final String topic = topicPartition.getTopic();
+ if (mTopicFilter == null || mTopicFilter.isEmpty()){
+ return false;
+ }
+ if (topic.matches(mTopicFilter)){
if (DateTime.now().minuteOfHour().get() == mConfig.getUploadMinuteMark()){
return true;
} |
codereview_java_data_3960 | throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
if (!exists(namespace))
throw new NamespaceNotFoundException(null, namespace, null);
- Map<String,String> copy = new TreeMap<>();
- this.getConfiguration(namespace).forEach(copy::put);
for (IteratorScope scope : scopes) {
String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
scope.name().toLowerCase(), name);
This could be: ```java Map<String,String> copy = Map.copyOf(this.getConfiguration(namespace)); ``` (so long as we aren't making changes to the copy, because this would make an immutable copy) There are a few such occurrences, if you want to change them.
throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
if (!exists(namespace))
throw new NamespaceNotFoundException(null, namespace, null);
+ Map<String,String> copy = Map.copyOf(this.getConfiguration(namespace));
for (IteratorScope scope : scopes) {
String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
scope.name().toLowerCase(), name); |
codereview_java_data_3964 | link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks",
linkType = BugPattern.LinkType.CUSTOM,
severity = BugPattern.SeverityLevel.WARNING,
- summary = "Calling address.getHostName may result in a DNS lookup which is a network request, making the "
+ "invocation significantly more expensive than expected depending on the environment.\n"
+ "This check is intended to be advisory - it's fine to @SuppressWarnings(\"ReverseDnsLookup\") "
+ "in certain cases, but is usually not recommended.")
```suggestion summary = "Calling address.getHostName may result in a reverse DNS lookup which is a network request, making the " ```
link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks",
linkType = BugPattern.LinkType.CUSTOM,
severity = BugPattern.SeverityLevel.WARNING,
+ summary = "Calling address.getHostName may result in a reverse DNS lookup which is a network request, making the "
+ "invocation significantly more expensive than expected depending on the environment.\n"
+ "This check is intended to be advisory - it's fine to @SuppressWarnings(\"ReverseDnsLookup\") "
+ "in certain cases, but is usually not recommended.") |
codereview_java_data_3965 | */
package com.alibaba.nacos.config.server.utils;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
Close the connection using the try-with-resource mechanism, like this ``` java try(ZipInputStream zipIn = new ZipInputStream(new ByteArrayInputStream(source))){ // your code } ```
*/
package com.alibaba.nacos.config.server.utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList; |
codereview_java_data_3973 | void loadDomainChangePublisher() {
String topicNames = System.getProperty(ZMS_PROP_DOMAIN_CHANGE_TOPIC_NAMES, "");
for (String topic : topicNames.split(",")) {
if (!topic.isEmpty()) {
if (domainChangePublishers == null) {
domainChangePublishers = new ArrayList<>();
}
- domainChangePublishers.add(createPublisher(topic.trim()));
}
}
}
instead of just calling topic.trim() on line 678, it's better that we trim it first, then check if it's empty, otherwise, we'll support values like: "message1, , message2" and we'll end up calling createPublisher with an empty string.
void loadDomainChangePublisher() {
String topicNames = System.getProperty(ZMS_PROP_DOMAIN_CHANGE_TOPIC_NAMES, "");
for (String topic : topicNames.split(",")) {
+ topic = topic.trim();
if (!topic.isEmpty()) {
if (domainChangePublishers == null) {
domainChangePublishers = new ArrayList<>();
}
+ ChangePublisher<DomainChangeMessage> publisher = createPublisher(topic);
+ if (publisher != null) {
+ domainChangePublishers.add(publisher);
+ }
}
}
} |
codereview_java_data_3974 | // for any given table. The logic here uses the setting of the first getItem in a table batch and then checks
// the rest are identical or throws an exception.
private KeysAndAttributes generateKeysAndAttributes(ReadBatch readBatch) {
- Collection<BatchableReadOperation> readOperations = readBatch.readOperations();
AtomicReference<Boolean> consistentRead = new AtomicReference<>();
AtomicBoolean firstRecord = new AtomicBoolean(true);
This can be rewritten to not suck anymore. It was only written this way due to a nuance in Java around raw type erasure, but now you've refactored it that no longer applies. We should just map it straight into requestItems.
// for any given table. The logic here uses the setting of the first getItem in a table batch and then checks
// the rest are identical or throws an exception.
private KeysAndAttributes generateKeysAndAttributes(ReadBatch readBatch) {
+ Collection<BatchableReadOperation> readOperations = readOperations(readBatch);
AtomicReference<Boolean> consistentRead = new AtomicReference<>();
AtomicBoolean firstRecord = new AtomicBoolean(true); |
codereview_java_data_3985 | *
* @param job
* Hadoop job instance to be configured
- * @param connectionInfo
* Connection information for Accumulo
* @since 2.0.0
*/
- public static void setConnectionInfo(JobConf job, ConnectionInfo connectionInfo) throws AccumuloSecurityException {
- ConnectionInfoImpl info = (ConnectionInfoImpl) connectionInfo;
setConnectorInfo(job, info.getPrincipal(), info.getAuthenticationToken());
- setZooKeeperInstance(job, info.getClientConfiguration());
}
/**
Another spot where the type should be the interface not the impl.
*
* @param job
* Hadoop job instance to be configured
+ * @param info
* Connection information for Accumulo
* @since 2.0.0
*/
+ public static void setConnectionInfo(JobConf job, ConnectionInfo info) throws AccumuloSecurityException {
setConnectorInfo(job, info.getPrincipal(), info.getAuthenticationToken());
+ setZooKeeperInstance(job, ConnectionInfoFactory.getClientConfiguration(info));
}
/** |
codereview_java_data_3986 | @Override
public boolean isValidAuthorizations(String user, List<ByteBuffer> auths) throws AccumuloSecurityException {
- if (auths.size() == 0) {
// avoid deserializing auths from ZK cache
return true;
}
You should call `.isEmpty()`
@Override
public boolean isValidAuthorizations(String user, List<ByteBuffer> auths) throws AccumuloSecurityException {
+ if (auths.isEmpty()) {
// avoid deserializing auths from ZK cache
return true;
} |
codereview_java_data_3989 | /**
- * Counts the operations matching the signature mask in this class.
*
* @param classNode The class on which to count
* @param mask The mask
*
- * @return The number of operations matching the signature mask
*/
protected int countMatchingFieldSigs(ASTAnyTypeDeclaration classNode, JavaFieldSigMask mask) {
int count = 0;
Copy/paste: Here it's about fields.
/**
+ * Counts the fields matching the signature mask in this class.
*
* @param classNode The class on which to count
* @param mask The mask
*
+ * @return The number of fields matching the signature mask
*/
protected int countMatchingFieldSigs(ASTAnyTypeDeclaration classNode, JavaFieldSigMask mask) {
int count = 0; |
codereview_java_data_3992 | proposals.clear();
}
- public ImmutableMap<Address, VoteType> getProposals() {
- return ImmutableMap.copyOf(proposals);
}
public Optional<VoteType> get(final Address address) {
Do we want to copy this or just return an immutable view of it e.g. `Collections.unmodifiableMap`?
proposals.clear();
}
+ public Map<Address, VoteType> getProposals() {
+ return Collections.unmodifiableMap(proposals);
}
public Optional<VoteType> get(final Address address) { |
codereview_java_data_3999 | }
@Provides
public AppDatabase provideAppDataBase() {
return Room.databaseBuilder(applicationContext, AppDatabase.class, "commons_room.db").build();
}
This should be `@Singleton`
}
@Provides
+ @Singleton
public AppDatabase provideAppDataBase() {
return Room.databaseBuilder(applicationContext, AppDatabase.class, "commons_room.db").build();
} |
codereview_java_data_4000 | return item;
}
- private void setEvaluatable(TableFilter join) {
- setEvaluatable(true);
- }
-
/**
* Set what plan item (index, cost, masks) to use.
*
function can now be inlined?
return item;
}
/**
* Set what plan item (index, cost, masks) to use.
* |
codereview_java_data_4011 | // org.jivesoftware.util.cache.CacheFactory.joinedCluster). This means that they now hold data that's
// available on all other cluster nodes. Data that's available on the local node needs to be added again.
restoreCacheContent();
}
@Override
Is it necessary to tell any listeners that previously available 'remote' sessions are no longer available?
// org.jivesoftware.util.cache.CacheFactory.joinedCluster). This means that they now hold data that's
// available on all other cluster nodes. Data that's available on the local node needs to be added again.
restoreCacheContent();
+
+ // It does not appear to be needed to invoke any kind of event listeners for the data that was gained by joining
+ // the cluster (eg: sessions connected to other cluster nodes, now suddenly available to the local cluster node):
+ // There are six caches in play here, but only the content of one of them goes accompanied by firing off event
+ // listeners (sessionInfoCache). However, when already running in a clustered environment, those events are
+ // never broadcasted over the cluster, so there shouldn't be a need to do so for all sessions that were
+ // gained/lost when joining or leaving a cluster either.
}
@Override |
codereview_java_data_4019 | {
Pair<Long, Long> messageAndThreadId;
- if (!message.getSyncContext().isPresent()) {
- messageAndThreadId = insertStandardTextMessage(masterSecret, envelope, message, smsMessageId);
- } else {
messageAndThreadId = insertSyncTextMessage(masterSecret, envelope, message, smsMessageId);
}
MessageNotifier.updateNotification(context, masterSecret, messageAndThreadId.second);
why the inverted condition?
{
Pair<Long, Long> messageAndThreadId;
+ if (message.getSyncContext().isPresent()) {
messageAndThreadId = insertSyncTextMessage(masterSecret, envelope, message, smsMessageId);
+ } else {
+ messageAndThreadId = insertStandardTextMessage(masterSecret, envelope, message, smsMessageId);
}
MessageNotifier.updateNotification(context, masterSecret, messageAndThreadId.second); |
codereview_java_data_4038 | pumpDescription.basalStep = 0.01d;
pumpDescription.basalMinimumRate = 0.02d;
- pumpDescription.isRefillingCapable = false;
//pumpDescription.storesCarbInfo = false; // uncomment when PumpDescription updated to include this
this.connector = Connector.get();
@jamorham Forgot to mention this earlier: this variable is oddly named: it's only `false` for MDI where priming/filling makes no sense for a pen. This flag (search for usages) controls whether the prime/filling action is available (on the actions tab) - which I assume is supported by the Insight.
pumpDescription.basalStep = 0.01d;
pumpDescription.basalMinimumRate = 0.02d;
+ pumpDescription.isRefillingCapable = true;
//pumpDescription.storesCarbInfo = false; // uncomment when PumpDescription updated to include this
this.connector = Connector.get(); |
codereview_java_data_4041 | if (tree.isLeaf()) {
return value;
} else {
- return "(" + value + " " + (tree.getChildren().map(Node::toLispString).mkString(" ")) + ")";
}
}
could you please separate computations from string concatenation?
if (tree.isLeaf()) {
return value;
} else {
+ final String children = tree.getChildren().map(Node::toLispString).mkString(" ");
+ return "(" + value + " " + children + ")";
}
} |
codereview_java_data_4043 | if (removeIt) {
return; // nothing to do
} else {
- throw new IllegalStateException();
}
}
NormalAnnotationExpr parentExpr = findAll.get(0);
What about something like ```suggestion throw new IllegalStateException("Impossible to find annotation " + parentName + " on method " + dmnMethod.toString()); ```
if (removeIt) {
return; // nothing to do
} else {
+ throw new IllegalStateException("Impossible to find annotation " + parentName + " on method " + dmnMethod.toString());
}
}
NormalAnnotationExpr parentExpr = findAll.get(0); |
codereview_java_data_4051 | @SuppressWarnings("unchecked")
default N setName(String name) {
return setName(Name.parse(name));
}
Do you hate static imports? :)
@SuppressWarnings("unchecked")
default N setName(String name) {
+ assertNonEmpty(name);
return setName(Name.parse(name));
} |
codereview_java_data_4054 | new ExtensionAttribute(ATTRIBUTE_FORM_FIELD_VALIDATION),
new ExtensionAttribute(ATTRIBUTE_TASK_SERVICE_EXTENSIONID),
new ExtensionAttribute(ATTRIBUTE_TASK_USER_SKIP_EXPRESSION),
- new ExtensionAttribute(ATTRIBUTE_TASK_USER_SKIP_EXPRESSION),
new ExtensionAttribute(ATTRIBUTE_TASK_ID_VARIABLE_NAME));
public UserTaskXMLConverter() {
Curious, why is `ATTRIBUTE_TASK_USER_SKIP_EXPRESSION` defined twice? Lines 60 and 61.
new ExtensionAttribute(ATTRIBUTE_FORM_FIELD_VALIDATION),
new ExtensionAttribute(ATTRIBUTE_TASK_SERVICE_EXTENSIONID),
new ExtensionAttribute(ATTRIBUTE_TASK_USER_SKIP_EXPRESSION),
new ExtensionAttribute(ATTRIBUTE_TASK_ID_VARIABLE_NAME));
public UserTaskXMLConverter() { |
codereview_java_data_4057 | }
private FileVisitResult callback(Path absolutePath, ParserConfiguration configuration, Callback callback) throws IOException {
- if (!Files.exists(absolutePath)) {
- return TERMINATE;
- }
Path localPath = root.relativize(absolutePath);
Log.trace("Parsing %s", localPath);
ParseResult<CompilationUnit> result = new JavaParser(configuration).parse(COMPILATION_UNIT, provider(absolutePath));
Should this happen?
}
private FileVisitResult callback(Path absolutePath, ParserConfiguration configuration, Callback callback) throws IOException {
Path localPath = root.relativize(absolutePath);
Log.trace("Parsing %s", localPath);
ParseResult<CompilationUnit> result = new JavaParser(configuration).parse(COMPILATION_UNIT, provider(absolutePath)); |
codereview_java_data_4065 | import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
-import java.util.Arrays;
-import java.util.Collections;
import java.util.Map;
import org.apache.iceberg.types.Types;
import org.apache.spark.SparkConf;
Did we set something in `keyMetadata`? There is some custom serialization logic to handle this field.
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Map;
import org.apache.iceberg.types.Types;
import org.apache.spark.SparkConf; |
codereview_java_data_4067 | public static final CalciteSystemProperty<Boolean> TOPDOWN_OPT =
booleanProperty("calcite.planner.topdown.opt", false);
- /**
- * Whether to enable index-based access for struct fields.
- *
- * <p>Note: the feature is experimental as it relies on field order which is JVM-dependent
- * (see CALCITE-2489).</p>
- */
- public static final CalciteSystemProperty<Boolean> ALLOW_FIELD_INDEX_ACCESS =
- booleanProperty("calcite.experimental.allow.field.index.access", false);
-
/**
* Whether to run integration tests.
*/
To make things more uniform with the properties above, I would rename it to: `calcite.enable.enumerable.fieldIndexAccess`. I added enumerable since it is strictly a property affecting the runtime not for parser, validator, etc.
public static final CalciteSystemProperty<Boolean> TOPDOWN_OPT =
booleanProperty("calcite.planner.topdown.opt", false);
/**
* Whether to run integration tests.
*/ |
codereview_java_data_4073 | */
public class MainnetTransactionValidator implements TransactionValidator {
- public static final BigInteger NO_CHAIN_ID = BigInteger.valueOf(-1);
-
public static MainnetTransactionValidator create() {
return new MainnetTransactionValidator(new FrontierGasCalculator(), false);
}
Shouldn't be using a sentinel value here - I suspect this constant can just go away and `Optional.empty()` used in its place.
*/
public class MainnetTransactionValidator implements TransactionValidator {
public static MainnetTransactionValidator create() {
return new MainnetTransactionValidator(new FrontierGasCalculator(), false);
} |
codereview_java_data_4074 | @Nonnull
List<Transform> upstream();
- boolean isLocalParallelismDetermined();
-
- void setLocalParallelismDetermined(boolean localParallelismDetermined);
-
void determineLocalParallelism(Context context);
void addToDag(Planner p);
We never use these methods.
@Nonnull
List<Transform> upstream();
void determineLocalParallelism(Context context);
void addToDag(Planner p); |
codereview_java_data_4076 | public void setHint(@NonNull String hint, @Nullable CharSequence subHint) {
this.hint = hint;
-
- if (subHint != null) {
- this.subHint = subHint;
- } else {
- this.subHint = null;
- }
if (this.subHint != null) {
super.setHint(new SpannableStringBuilder().append(ellipsizeToWidth(this.hint))
if else isn't necessary anymore, no?
public void setHint(@NonNull String hint, @Nullable CharSequence subHint) {
this.hint = hint;
+ this.subHint = subHint;
if (this.subHint != null) {
super.setHint(new SpannableStringBuilder().append(ellipsizeToWidth(this.hint)) |
codereview_java_data_4081 | javaConvention.getTargetCompatibility().toString())));
project.getPluginManager().apply(ScalaStylePlugin.class);
TaskCollection<ScalaStyleTask> scalaStyleTasks = project.getTasks().withType(ScalaStyleTask.class);
scalaStyleTasks
.configureEach(scalaStyleTask -> {
scalaStyleTask.setConfigLocation(project.getRootDir().toPath()
can you pull this up a line
javaConvention.getTargetCompatibility().toString())));
project.getPluginManager().apply(ScalaStylePlugin.class);
TaskCollection<ScalaStyleTask> scalaStyleTasks = project.getTasks().withType(ScalaStyleTask.class);
+ project.getTasks().named("check").configure(task -> task.dependsOn(scalaStyleTasks));
scalaStyleTasks
.configureEach(scalaStyleTask -> {
scalaStyleTask.setConfigLocation(project.getRootDir().toPath() |
codereview_java_data_4093 | import java.util.Collections;
import java.util.List;
import java.util.concurrent.ScheduledExecutorService;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.annotations.SdkInternalApi;
import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler;
I'm not sure if using the scheduled executor service to send blocking request is a good idea. I was initially thinking of using the existing async response completion pool, but looks like that only gets set in async client, so we should create a default executor service in each sync batch manager and close it when the batch manager gets closed.
import java.util.Collections;
import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.annotations.SdkInternalApi;
import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; |
codereview_java_data_4094 | PooledDataSource pooledDataSource = (PooledDataSource) standaloneInMemFormEngineConfiguration.getDataSource();
PoolState state = pooledDataSource.getPoolState();
- assertThat(state.getIdleConnectionCount()).isGreaterThan(0);
// then
// if the engine is closed
AssertJ also has `isPositive()`
PooledDataSource pooledDataSource = (PooledDataSource) standaloneInMemFormEngineConfiguration.getDataSource();
PoolState state = pooledDataSource.getPoolState();
+ assertThat(state.getIdleConnectionCount()).isPositive();
// then
// if the engine is closed |
codereview_java_data_4095 | RoleMember rm;
while (roleit.hasNext()) {
rm = roleit.next();
- if (rm != null && rm.getActive() != null && rm.getActive() == Boolean.FALSE) {
roleit.remove();
}
}
we don't really need the rm.getActive() != null part since we're checking for rm.getActive() == Boolean.FALSE
RoleMember rm;
while (roleit.hasNext()) {
rm = roleit.next();
+ if (rm != null && rm.getActive() == Boolean.FALSE) {
roleit.remove();
}
} |
codereview_java_data_4097 | @Option(
names = {"--nodes-whitelist"},
paramLabel = "<enode://id@host:port>",
- description =
- "Comma separated enode URLs for Permissioned networks. " + "Default is an empty list.",
split = ",",
- arity = "1..*"
)
private final Collection<String> nodesWhitelist = null;
It might be just me, but 'Default is an empty list' gives me a false impression that the default behaviour is not whitelisting anyone, therefore, not talking to any node. If the user doesn't use the 'nodes-whitelist' property, we won't do any permissioning. Do you think this can be misleading or is it just me? :P
@Option(
names = {"--nodes-whitelist"},
paramLabel = "<enode://id@host:port>",
+ description = "Comma separated enode URLs for permissioned networks. Default is an empty list.",
split = ",",
+ arity = "0..*"
)
private final Collection<String> nodesWhitelist = null; |
codereview_java_data_4106 | jet.newJobIfAbsent(p, config);
}
- private static void printResults(List<Long> top10numbers) {
- System.out.println("Top " + TOP + " random numbers observed since last print: ");
- for (int i = 0; i < top10numbers.size(); i++) {
- System.out.println(String.format("%d. %,d", i + 1, top10numbers.get(i)));
}
}
}
It might be better to say "Top N random numbers in the latest window"
jet.newJobIfAbsent(p, config);
}
+ private static void printResults(List<Long> topNumbers) {
+ StringBuilder sb = new StringBuilder(String.format("\nTop %d random numbers in the latest window: ", TOP));
+ for (int i = 0; i < topNumbers.size(); i++) {
+ sb.append(String.format("\n\t%d. %,d", i + 1, topNumbers.get(i)));
}
+ System.out.println(sb.toString());
}
} |
codereview_java_data_4109 | */
@XmlJavaTypeAdapter(JaxbAbstractIdSerializer.class)
public static class ID extends AbstractId {
- private static final long serialVersionUID = -155513612834787244L;
static final Cache<String,ID> cache = CacheBuilder.newBuilder().weakValues().build();
public static final ID METADATA = of("!0");
I don't think the serialVersionUID needs to change.
*/
@XmlJavaTypeAdapter(JaxbAbstractIdSerializer.class)
public static class ID extends AbstractId {
+ private static final long serialVersionUID = 7399913185860577809L;
static final Cache<String,ID> cache = CacheBuilder.newBuilder().weakValues().build();
public static final ID METADATA = of("!0"); |
codereview_java_data_4113 | normalizedMember + " from group: " + groupName, ctx.getApiName());
}
- // update our role and domain time-stamps, and invalidate local cache entry
con.updateGroupModTimestamp(domainName, groupName);
con.updateDomainModTimestamp(domainName);
// update our group and domain time-stamps, and invalidate local cache entry
normalizedMember + " from group: " + groupName, ctx.getApiName());
}
+ // update our group and domain time-stamps, and invalidate local cache entry
con.updateGroupModTimestamp(domainName, groupName);
con.updateDomainModTimestamp(domainName); |
codereview_java_data_4121 | }
String provider = ipAddressAttributes[0];
- String[] providerAddr = IpUtil.splitIpPortStr(provider);
- if (providerAddr.length != IpUtil.SPLIT_IP_PORT_RESULT_LENGTH) {
- // not ip:port string
return null;
}
Is the behavior the same when the provider address has no port, e.g. `11.11.11.11`?
}
String provider = ipAddressAttributes[0];
+ String[] providerAddr;
+ try {
+ providerAddr = IpUtil.splitIpPortStr(provider);
+ } catch (Exception ex) {
return null;
} |
codereview_java_data_4126 | public void testNoDataflowConfig() {
SpringApplication app = new SpringApplication(LocalTestNoDataFlowServer.class);
context = app.run(new String[] { "--server.port=0", "--spring.jpa.database=H2", "--spring.flyway.enabled=false" });
- // we still have deployer beans
assertThat(context.containsBean("appRegistry"), is(false));
}
}
Is this note a todo?
public void testNoDataflowConfig() {
SpringApplication app = new SpringApplication(LocalTestNoDataFlowServer.class);
context = app.run(new String[] { "--server.port=0", "--spring.jpa.database=H2", "--spring.flyway.enabled=false" });
assertThat(context.containsBean("appRegistry"), is(false));
}
} |
codereview_java_data_4134 | import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.io.CloseableIterable;
-class StaticTableScan extends BaseTableScan {
private final Function<StaticTableScan, DataTask> buildTask;
// Metadata table name that the buildTask that this StaticTableScan will return data for.
private final String scannedTableName;
Should this PR also add a similar update to `FilesTableScan`? It also looks like we may need to update `BaseAllMetadataTableScan`. I see that `AllManifestsTableScan` overrides `useSnapshot` and `asOfTime` but not the incremental methods that also don't work.
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.io.CloseableIterable;
+public class StaticTableScan extends BaseTableScan {
private final Function<StaticTableScan, DataTask> buildTask;
// Metadata table name that the buildTask that this StaticTableScan will return data for.
private final String scannedTableName; |
codereview_java_data_4136 | }
@ApiModelProperty(required=false, value="Additional artifacts to download for this run")
- public Optional<List<SingularityMesosArtifact>> getExtraArtifacts() {
return extraArtifacts;
}
Instead of passing Optional lists around, let's just default it to an empty list if the constructor arg is null. Same for the other objects that use this. Will save a lot of isPresent checks being thrown around.
}
@ApiModelProperty(required=false, value="Additional artifacts to download for this run")
+ public List<SingularityMesosArtifact> getExtraArtifacts() {
return extraArtifacts;
} |
codereview_java_data_4145 | import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.ClientContext;
import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.client.impl.Tables;
import org.apache.accumulo.core.conf.Property;
Should run a build without disabling the impsort plugin, just to make sure unused imports get removed.
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.client.impl.Tables;
import org.apache.accumulo.core.conf.Property; |
codereview_java_data_4146 | import io.openmessaging.rocketmq.config.ClientConfig;
import io.openmessaging.rocketmq.domain.ConsumeRequest;
import io.openmessaging.rocketmq.domain.NonStandardKeys;
-import io.openmessaging.rocketmq.utils.OMSUtil;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
It's not a good name.
import io.openmessaging.rocketmq.config.ClientConfig;
import io.openmessaging.rocketmq.domain.ConsumeRequest;
import io.openmessaging.rocketmq.domain.NonStandardKeys;
+import io.openmessaging.rocketmq.utils.OMSClientUtil;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List; |
codereview_java_data_4157 | private void showSnackBarWithRetry() {
progressBar.setVisibility(View.GONE);
- if (onClickListener == null) {
- onClickListener = view -> {
- setAchievements();
- };
- }
ViewUtil.showDismissibleSnackBar(findViewById(android.R.id.content),
- R.string.achievements_fetch_failed, R.string.retry, onClickListener);
}
/**
Just curious — why do you need to maintain a `View.OnClickListener` field and do a null check? The function below sets the click listener rather than adding one, so there's no need to treat it as a singleton.
private void showSnackBarWithRetry() {
progressBar.setVisibility(View.GONE);
ViewUtil.showDismissibleSnackBar(findViewById(android.R.id.content),
+ R.string.achievements_fetch_failed, R.string.retry, view -> setAchievements());
}
/** |
codereview_java_data_4162 | <R_NEW> AggregateOperation1<T, A, R_NEW> andThen(DistributedFunction<? super R, ? extends R_NEW> thenFn);
/**
- * Turns this aggregate operation into a collector which can be passed to
* {@link java.util.stream.Stream#collect(Collector)}.
*/
@Nonnull
"Adapts" might be better than "turns", it's adapter pattern
<R_NEW> AggregateOperation1<T, A, R_NEW> andThen(DistributedFunction<? super R, ? extends R_NEW> thenFn);
/**
+ * Adapts this aggregate operation to a collector which can be passed to
* {@link java.util.stream.Stream#collect(Collector)}.
*/
@Nonnull |
codereview_java_data_4163 | Object object = f.get(parentNode);
if (object == null)
continue;
- if (List.class.isAssignableFrom(object.getClass())) {
- List<?> l = (List<?>) object;
success &= l.remove(this);
} else if (object == this) {
f.set(parentNode, null);
Can you use Collection here?
Object object = f.get(parentNode);
if (object == null)
continue;
+ if (Collection.class.isAssignableFrom(object.getClass())) {
+ Collection<?> l = (Collection<?>) object;
success &= l.remove(this);
} else if (object == this) {
f.set(parentNode, null); |
codereview_java_data_4175 | Preconditions.checkArgument(repeatedElement.isRepetition(Type.Repetition.REPEATED),
"Invalid list: inner group is not repeated");
- Preconditions.checkArgument(repeatedElement.isPrimitive() || repeatedElement.asGroupType().getFieldCount() <= 1,
"Invalid list: repeated group is not a single field or primitive: %s", list);
visitor.beforeRepeatedElement(repeatedElement);
It would be good to wrap so the condition starts on the next line, since it is long now.
Preconditions.checkArgument(repeatedElement.isRepetition(Type.Repetition.REPEATED),
"Invalid list: inner group is not repeated");
+ Preconditions.checkArgument(
+ repeatedElement.isPrimitive() || repeatedElement.asGroupType().getFieldCount() <= 1,
"Invalid list: repeated group is not a single field or primitive: %s", list);
visitor.beforeRepeatedElement(repeatedElement); |
codereview_java_data_4180 | static void checkCompatibility(PartitionSpec spec, Schema schema) {
for (PartitionField field : spec.fields) {
Type sourceType = schema.findType(field.sourceId());
- ValidationException.check(sourceType != null,
- "Cannot find source column for partition field: %s", field);
ValidationException.check(sourceType.isPrimitiveType(),
"Cannot partition by non-primitive source field: %s", sourceType);
ValidationException.check(
Looks unrelated to Apache release?
static void checkCompatibility(PartitionSpec spec, Schema schema) {
for (PartitionField field : spec.fields) {
Type sourceType = schema.findType(field.sourceId());
ValidationException.check(sourceType.isPrimitiveType(),
"Cannot partition by non-primitive source field: %s", sourceType);
ValidationException.check( |
codereview_java_data_4184 | import javax.inject.Inject;
import javax.inject.Singleton;
@Singleton
public class UploadRepository {
Same with this class
import javax.inject.Inject;
import javax.inject.Singleton;
+/**
+ * The repository class for UploadActivity
+ */
@Singleton
public class UploadRepository { |
codereview_java_data_4186 | final Throwable originalErr = error;
executionService.schedule(() -> {
try {
- if (!tryRetry(partitions, entries, doneLatch, completionFuture)) {
completionFuture.completeExceptionally(originalErr);
- return;
}
- int value = doneLatch.decrementAndGet();
- assert value > 0 : "value=" + value;
} catch (Exception e) {
logger.severe("Exception during retry", e);
completionFuture.completeExceptionally(originalErr);
couldn't value be 0 here if the retry completes before the assert?
final Throwable originalErr = error;
executionService.schedule(() -> {
try {
+ // We should not handle pendingOps getting to 0 here, it will be increased again by
+ // the retry operations.
+ pendingOps.decrementAndGet();
+ if (!tryRetry(partitions, entries, pendingOps, completionFuture)) {
completionFuture.completeExceptionally(originalErr);
}
} catch (Exception e) {
logger.severe("Exception during retry", e);
completionFuture.completeExceptionally(originalErr); |
codereview_java_data_4187 | }
public static SingularityDeployKey fromPendingTask(SingularityPendingTask pendingTask) {
- return new SingularityDeployKey(pendingTask.getTaskId().getRequestId(), pendingTask.getTaskId().getDeployId());
}
public static SingularityDeployKey fromDeployMarker(SingularityDeployMarker deployMarker) {
consider using `Objects.hashCode(deployId, requestId)`
}
public static SingularityDeployKey fromPendingTask(SingularityPendingTask pendingTask) {
+ return new SingularityDeployKey(pendingTask.getPendingTaskId().getRequestId(), pendingTask.getPendingTaskId().getDeployId());
}
public static SingularityDeployKey fromDeployMarker(SingularityDeployMarker deployMarker) { |
codereview_java_data_4199 | return lst;
}
- @Override
- public List<Short> check(SystemEnvironment env, Mutation mutation) {
- context = env.getServerContext();
- return check((Environment) env, mutation);
- }
-
@Override
public List<Short> check(Environment env, Mutation mutation) {
ArrayList<Short> violations = null;
Could possibly not have a SystemConstraint and only have a SystemEnv. This class could just cast Env to SystemEnv.
return lst;
}
@Override
public List<Short> check(Environment env, Mutation mutation) {
+ context = ((SystemEnvironment)env).getServerContext();
ArrayList<Short> violations = null; |
codereview_java_data_4211 | return buff.toString();
}
if (table.isView() && ((TableView) table).isRecursive()) {
- buff.append(table.getSchema().getSQL()).append('.').append(table.getName());
} else {
buff.append(table.getSQL());
}
It looks like `buff.append(table.getSQL());` should be used unconditionally instead. This if-else statement is useless after such change.
return buff.toString();
}
if (table.isView() && ((TableView) table).isRecursive()) {
+ buff.append(table.getSchema().getSQL()).append('.').append(Parser.quoteIdentifier(table.getName()));
} else {
buff.append(table.getSQL());
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.