id stringlengths 24 28 | content stringlengths 121 2.08k |
|---|---|
codereview_python_data_1870 | sections = (max_end - max_start) / 3
idx = max_start + int(sections) + int(sections/2)
- return lrs[idx].item(), (lrs[idx].item(), losses[idx])
# Cell
def slide(lrs:list, losses:list, num_it:int, lr_diff:int=15, thresh:float=.005, adjust_value:float=1.):
I think this will fail if the suggestion isn't a tensor (which it might not be). Instead I think this should always work: ```suggestion return float(lrs[idx]), (float(lrs[idx]), losses[idx]) ```
sections = (max_end - max_start) / 3
idx = max_start + int(sections) + int(sections/2)
+ return float(lrs[idx]), (float(lrs[idx]), losses[idx])
# Cell
def slide(lrs:list, losses:list, num_it:int, lr_diff:int=15, thresh:float=.005, adjust_value:float=1.): |
codereview_python_data_1873 | template = config.val.url.searchengines[engine]
url = qurl_from_user_input(template.format(urllib.parse.quote(term)))
- if config.val.url.open_base_url and \
- term in config.val.url.searchengines.keys():
url = qurl_from_user_input(config.val.url.searchengines[term])
url.setPath(None)
url.setFragment(None)
No need for the `.keys()`, iterating over a dictionary gives you its keys (and thus you can also do `key in some_dict`). With that, it also fits on one line :wink:
template = config.val.url.searchengines[engine]
url = qurl_from_user_input(template.format(urllib.parse.quote(term)))
+ if config.val.url.open_base_url and term in config.val.url.searchengines:
url = qurl_from_user_input(config.val.url.searchengines[term])
url.setPath(None)
url.setFragment(None) |
codereview_python_data_1878 | module=module,
)
- objtype: Optional[so.Object] = schema.get_by_id(type_id, None)
created = objtype is None
if objtype is None:
components = list(components)
`get_by_id` also supports the `type` argument, please use that.
module=module,
)
+ objtype = schema.get_by_id(type_id, None, type=so.Object)
created = objtype is None
if objtype is None:
components = list(components) |
codereview_python_data_1894 | 'name': int(),
'hostname': int(),
'image': int(),
- 'pull': str(),
'registry': {
'url': int(),
'credentials': {
Can you change this to `'pull': init()`, this will force the validation to error, which down below the assert will need updating.
'name': int(),
'hostname': int(),
'image': int(),
+ 'pull': int(),
'registry': {
'url': int(),
'credentials': { |
codereview_python_data_1898 | from google.cloud.security.common.data_access import firewall_rule_dao
from google.cloud.security.common.data_access import folder_dao
from google.cloud.security.common.data_access import forwarding_rules_dao
-from google.cloud.security.common.data_access import ke_dao
from google.cloud.security.common.data_access import instance_dao
from google.cloud.security.common.data_access import instance_group_dao
from google.cloud.security.common.data_access import instance_group_manager_dao
from google.cloud.security.common.data_access import instance_template_dao
from google.cloud.security.common.data_access import organization_dao
from google.cloud.security.common.data_access import project_dao
from google.cloud.security.common.data_access import service_account_dao
The import is now out of order.
from google.cloud.security.common.data_access import firewall_rule_dao
from google.cloud.security.common.data_access import folder_dao
from google.cloud.security.common.data_access import forwarding_rules_dao
from google.cloud.security.common.data_access import instance_dao
from google.cloud.security.common.data_access import instance_group_dao
from google.cloud.security.common.data_access import instance_group_manager_dao
from google.cloud.security.common.data_access import instance_template_dao
+from google.cloud.security.common.data_access import ke_dao
from google.cloud.security.common.data_access import organization_dao
from google.cloud.security.common.data_access import project_dao
from google.cloud.security.common.data_access import service_account_dao |
codereview_python_data_1901 | wrap_func = _wrap_op_fn(op_class, wrapper_name)
setattr(module, wrapper_name, wrap_func)
if submodule:
- setattr(fn_module, '.'.join(submodule + [wrapper_name]), wrap_func)
from nvidia.dali.external_source import external_source
I don't think this is necessary - but this is: ```suggestion wrap_func.__module__ = module.__name__ ```
wrap_func = _wrap_op_fn(op_class, wrapper_name)
setattr(module, wrapper_name, wrap_func)
if submodule:
+ wrap_func.__module__ = module.__name__
from nvidia.dali.external_source import external_source |
codereview_python_data_1905 | except shellutil.CommandError as cmd_err:
if chk_err:
- msg = """Failed to eject dvd: ret={0}
- [stdout]
- {1}
-
- [stderr]
- {2}
- """.format(cmd_err.returncode, cmd_err.stdout, cmd_err.stderr)
raise OSUtilError(msg)
need to remove extra spaces from message
except shellutil.CommandError as cmd_err:
if chk_err:
+ msg = "Failed to eject dvd: ret={0}\n[stdout]\n{1}\n\n[stderr]\n{2}"\
+ .format(cmd_err.returncode, cmd_err.stdout, cmd_err.stderr)
raise OSUtilError(msg) |
codereview_python_data_1910 | for model in model_infos:
pwc_model_info = OrderedDict()
pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
# get metadata
memory = round(model['results']['memory'] / 1024, 1)
- epochs = model['epochs']
meta_data = OrderedDict()
meta_data['Training Memory (GB)'] = memory
meta_data['Epochs'] = epochs
The weight should be filled in automatically.
for model in model_infos:
pwc_model_info = OrderedDict()
pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
+ pwc_model_info['In Collection'] = 'Please fill in Collection name'
# get metadata
memory = round(model['results']['memory'] / 1024, 1)
+ epochs = get_real_epoch(model['config'])
meta_data = OrderedDict()
meta_data['Training Memory (GB)'] = memory
meta_data['Epochs'] = epochs |
codereview_python_data_1913 | # TODO: remove inplace=False
if isinstance(instance, (Seq, MutableSeq)):
instance = instance.reverse_complement(inplace=False)
- if isinstance(instance, (str, SeqRecord)):
instance = instance.reverse_complement()
instances.append(instance)
return instances
This looks wrong - what if the instance is none of the above? You won't attempt to take a reverse complement, nor raise an exception.
# TODO: remove inplace=False
if isinstance(instance, (Seq, MutableSeq)):
instance = instance.reverse_complement(inplace=False)
+ elif isinstance(instance, (str, SeqRecord)):
instance = instance.reverse_complement()
+ else:
+ raise RuntimeError("instance has unexpected type %s" % type(instance))
instances.append(instance)
return instances |
codereview_python_data_1914 | end=kwargs.pop('end', None),
trading_calendar=self.trading_calendar,
)
- self.capital_base = self.sim_params.capital_base
self.perf_tracker = None
# Pull in the environment's new AssetFinder for quick reference
Looks like this is only used for the `__repr__` now. Could we kill it and have that method reference `self.sim_params.capital_base` instead, so we have one less piece of state to keep track of?
end=kwargs.pop('end', None),
trading_calendar=self.trading_calendar,
)
self.perf_tracker = None
# Pull in the environment's new AssetFinder for quick reference |
codereview_python_data_1915 | self._push(CalciteSortNode(collations))
def _process_filter(self, op):
- self._maybe_add_projection(op)
-
condition = self._translate(op.condition)
self._push(CalciteFilterNode(condition))
With this projection, we cannot use filters by `rowid` which may be required since it can be used as an index. Applying a projection after the filter is a better option. We should add projection only when the filter is applied to a scan.
self._push(CalciteSortNode(collations))
def _process_filter(self, op):
condition = self._translate(op.condition)
self._push(CalciteFilterNode(condition))
+
+ if isinstance(self._input_node(0), CalciteScanNode):
+ # if filter was applied over scan, then we need additional
+ # projection to remove rowid column
+ self._add_projection(op.input[0]) |
codereview_python_data_1919 | name='pontoon.contributors.save_custom_homepage'),
# AJAX: Save preferred source locale
- url(r'^save-custom-preferred-source-locale/$', views.save_custom_preferred_source_locale,
- name='pontoon.contributors.save_custom_preferred_source_locale'),
-
- url(r'^user-preferred-source-locale/$', views.user_preferred_source_locale,
- name='pontoon.contributors.user_preferred_source_locale'),
]
Nit: You should omit `custom`.
name='pontoon.contributors.save_custom_homepage'),
# AJAX: Save preferred source locale
+ url(r'^save-preferred-source-locale/$', views.save_preferred_source_locale,
+ name='pontoon.contributors.save_preferred_source_locale'),
] |
codereview_python_data_1924 | def update_kinesis(method, path, data, headers, response=None, return_forward_info=False):
if return_forward_info:
- if os.environ['KINESIS_RETURN_ERRORS'] == 'True':
return 500
else:
return True
Instead of querying os.environ directly, could we have a config variable (in constants.py) that is initialized with os.environ? Something like ``` KINESIS_RETURN_ERRORS = os.environ.get('KINESIS_RETURN_ERRORS') == 'True' ``` Then we can just use that variable in the if statement: ``` if constants.KINESIS_RETURN_ERRORS: ... ```
def update_kinesis(method, path, data, headers, response=None, return_forward_info=False):
if return_forward_info:
+ if constants.KINESIS_RETURN_ERRORS:
return 500
else:
return True |
codereview_python_data_1926 | rdprop = rdprops["_MDAnalysis_%s" % prop]
mdaprop = getattr(mol2.atoms[idx], prop)
assert rdprop == mdaprop
-
-
-@pytest.mark.skipif(rdkit_installed == True, reason="test minimal dependency")
-class TestRequiresRDKit(object):
- def test_converter_requires_rdkit(self):
- u = mda.Universe(mol2_molecule)
- with pytest.raises(ImportError) as e:
- u.atoms.convert_to("RDKIT")
- assert "RDKit is required for the RDKitConverter" in str(e.value)
One other possibility is to move this up and use block_import to allow to fake a minimal dependency.
rdprop = rdprops["_MDAnalysis_%s" % prop]
mdaprop = getattr(mol2.atoms[idx], prop)
assert rdprop == mdaprop |
codereview_python_data_1929 | fmt = config.get('completion', 'timestamp-format')
if fmt is None:
- def fmt_atime(atime):
return ''
else:
def fmt_atime(atime):
Why define two functions at all, instead of just having one function and doing the `if fmt is None:` check in there?
fmt = config.get('completion', 'timestamp-format')
if fmt is None:
+ def fmt_atime(_atime):
return ''
else:
def fmt_atime(atime): |
codereview_python_data_1936 | import gettext
-from PyQt5.QtCore import QLocale
-
LOCALE_DIR = os.path.join(os.path.dirname(__file__), 'locale')
language = gettext.translation('electrum', LOCALE_DIR, fallback=True)
you must not assume that qt is available, in the core lib.
import gettext
LOCALE_DIR = os.path.join(os.path.dirname(__file__), 'locale')
language = gettext.translation('electrum', LOCALE_DIR, fallback=True) |
codereview_python_data_1939 | "engine": engine,
"squeeze": squeeze,
"skipfooter": skipfooter,
- "kwds": kwds,
}
return cls.from_pandas(pandas.read_excel(**kwargs))
@classmethod
I think we need to do an `update` instead of setting `kwds=kwds` here. It will treat `kwds` as a keyword if we do it this way. ```python kwargs = {...} kwargs.update(kwds) return cls.from_pandas(...) ```
"engine": engine,
"squeeze": squeeze,
"skipfooter": skipfooter,
}
+ kwargs.update(kwds)
return cls.from_pandas(pandas.read_excel(**kwargs))
@classmethod |
codereview_python_data_1940 | torch.cuda.synchronize()
time_backward += timer.since_last_check()
bar.update()
-print(f'\nCARAFE time forward: {(time_forward + 1e-3) * 1e3 / loop_num} '
- f'ms/iter | time backward: {(time_backward + 1e-3) * 1e3 / loop_num}'
- ' ms/iter')
time_naive_forward = 0
time_naive_backward = 0
Compute `(time_forward + 1e-3) * 1e3 / loop_num` ahead.
torch.cuda.synchronize()
time_backward += timer.since_last_check()
bar.update()
+forward_speed = (time_forward + 1e-3) * 1e3 / loop_num
+backward_speed = (time_backward + 1e-3) * 1e3 / loop_num
+print(f'\nCARAFE time forward: {forward_speed} '
+ f'ms/iter | time backward: {backward_speed} ms/iter')
time_naive_forward = 0
time_naive_backward = 0 |
codereview_python_data_1943 | locale = request.GET["locale"]
page_results_limit = int(request.GET.get("limit", 100))
page = int(request.GET.get("page", 1))
- except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
We should also catch a `ValueError` here in case `limit` or `page` aren't integers. Then we also don't need to catch `PageNotAnInteger` below.
locale = request.GET["locale"]
page_results_limit = int(request.GET.get("limit", 100))
page = int(request.GET.get("page", 1))
+ except (MultiValueDictKeyError, ValueError) as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400, |
codereview_python_data_1944 | if index is None:
self.tab_next()
return
- if index == 0:
index = self._count()
- if index < 0:
index = self._count() + index + 1
if 1 <= index <= self._count():
Not something directly related to your change, but I think it'd be cleaner to combine this into an `if`/`elif`/`elif` with the check above/below, as they're mutually exclusive - could you please do that?
if index is None:
self.tab_next()
return
+ elif index == 0:
index = self._count()
+ elif index < 0:
index = self._count() + index + 1
if 1 <= index <= self._count(): |
codereview_python_data_1948 | if not target_path:
raise TargetNotFoundError('Failed to find target ' + target_name)
- return engine_impl.reproduce(target_path, testcase_path, arguments, timeout)
class TestcaseRunner(object):
Why not leave this logic in the constructor? Isn't it better to fail earlier?
if not target_path:
raise TargetNotFoundError('Failed to find target ' + target_name)
+ return engine_impl.reproduce(target_path, testcase_path, list(arguments),
+ timeout)
class TestcaseRunner(object): |
codereview_python_data_1959 | ct.punct(">"))
def __getstate__(self):
- # type: () -> Dict[str, Any]
"""
Creates a basic representation of the instance, used in
conjunction with __setstate__() e.g. by pickle
You might use a stricter type for `__getstate__ `and `__setstate__` as `res` is `List[PacketField]`, listname is `str` and stats is `List[Packet]`.
ct.punct(">"))
def __getstate__(self):
+ # type: () -> Dict[str, Union[List[PacketField], List[Packet], str]]
"""
Creates a basic representation of the instance, used in
conjunction with __setstate__() e.g. by pickle |
codereview_python_data_1960 | if __name__ == '__main__':
- tf.enable_v2_behavior()
tf.test.main()
Shouldn't be required as we only test with TF2.
if __name__ == '__main__':
tf.test.main() |
codereview_python_data_1964 | for row in data:
curr_strategy = data_types.FuzzStrategyProbability()
curr_strategy.strategy_name = str(row['strategy'])
- curr_strategy.strategy_probability = float(row['bandit_weight'])
strategy_data.append(curr_strategy)
- ndb.delete_multi(
- data_types.FuzzStrategyProbability.query().fetch(keys_only=True))
ndb.put_multi(strategy_data)
Sorry to make you update this again, but could you try this with a list comprehension along the lines of: [entity.key for entity in ndb_utils.get_all_for_query(...)] If there are issues with that for whatever reason it's fine as-is. It looks like we do this in a few other places throughout the code without using get_all_for_query.
for row in data:
curr_strategy = data_types.FuzzStrategyProbability()
curr_strategy.strategy_name = str(row['strategy'])
+ curr_strategy.probability = float(row['bandit_weight'])
strategy_data.append(curr_strategy)
+ ndb.delete_multi([
+ entity.key for entity in ndb_utils.get_all_from_model(
+ data_types.FuzzStrategyProbability)
+ ])
ndb.put_multi(strategy_data) |
codereview_python_data_1965 | ) -> tf.Tensor:
"""Computes the (weighted) mean of elements across dimensions of a tensor.
"""
- return tf.divide(
- tf.reduce_sum(tf.multiply(weights, input_tensor), axis=axis, keepdims=keepdims),
- tf.reduce_sum(weights, axis=axis, keepdims=keepdims),
- )
@tf.keras.utils.register_keras_serializable(package="Addons")
Would it be possible to use multiple statements here for readability? Also, is there a difference between `tf.divide` and `/` in tensorflow? There are so many division operators, I have a hard time keeping track of them.
) -> tf.Tensor:
"""Computes the (weighted) mean of elements across dimensions of a tensor.
"""
+ if weights is None:
+ return tf.reduce_mean(input_tensor, axis=None, keepdims=False)
+
+ weighted_sum = tf.reduce_sum(weights * input_tensor, axis=axis, keepdims=keepdims)
+ sum_of_weights = tf.reduce_sum(weights, axis=axis, keepdims=keepdims)
+ average = weighted_sum / sum_of_weights
+ return average
@tf.keras.utils.register_keras_serializable(package="Addons") |
codereview_python_data_1969 | elif tag == "//":
if len(record.sequence) != scount:
raise ValueError(
- "The number of sequences specified in the record"
- " (%d) does not agree with the number of sequences found (%d)"
% (scount, len(record.sequence))
)
return record
We don't have any automated check for this, but I think we usually put the trailing space on end of the previous line, rather than a leading space on the continuation line.
elif tag == "//":
if len(record.sequence) != scount:
raise ValueError(
+ "The number of sequences specified in the record "
+ "(%d) does not agree with the number of sequences found (%d)"
% (scount, len(record.sequence))
)
return record |
codereview_python_data_1970 | self.request_log = []
self.keep_alive = True
self.session = None
def tearDown(self):
pass
In case of API testing there is whole bunch of "success" codes of 2xx. Maybe cover them all with assert2xx instead of just 200? Those who want specific code would go with assertStatusCode
self.request_log = []
self.keep_alive = True
self.session = None
+ self.default_address = None
+ self.path_prefix = None
def tearDown(self):
pass |
codereview_python_data_1972 | from django.db.models.functions import Concat
from django.conf import settings
-from pontoon.base.models import Entity, TranslatedResource, Translation
from pontoon.pretranslation.pretranslate import (
get_translations,
update_changed_instances,
I haven't tested this yet, but I wonder if this works as expected when called from `pontoon/sync/tasks.py`.
from django.db.models.functions import Concat
from django.conf import settings
+from pontoon.base.models import Project, Entity, TranslatedResource, Translation
from pontoon.pretranslation.pretranslate import (
get_translations,
update_changed_instances, |
codereview_python_data_1982 | import lightgbm as lgb
import numpy as np
-import pandas as pd
import pytest
from scipy import sparse
@jmoralez Please make `pandas` optional for basic tests like you did in sklearn tests: ``` pd = pytest.importorskip("pandas") ```
import lightgbm as lgb
import numpy as np
import pytest
from scipy import sparse |
codereview_python_data_1983 | class TxnAuthorAgreementHandlerV1(TxnAuthorAgreementHandler):
- def _update_txn_author_agreement(self, text, version, seq_no, txn_time, retired=False):
digest = StaticTAAHelper.taa_digest(text, version)
data = encode_state_value({
TXN_AUTHOR_AGREEMENT_TEXT: text,
Why is it implemented in node, instead of plenum?
class TxnAuthorAgreementHandlerV1(TxnAuthorAgreementHandler):
+ def _update_txn_author_agreement(self, digest, seq_no, txn_time, text, version, retired=False):
digest = StaticTAAHelper.taa_digest(text, version)
data = encode_state_value({
TXN_AUTHOR_AGREEMENT_TEXT: text, |
codereview_python_data_1985 | reduction='mean',
avg_factor=None,
class_weight=None,
- ignore_index=255):
"""Calculate the binary CrossEntropy loss.
Args:
255 is so confusing
reduction='mean',
avg_factor=None,
class_weight=None,
+ ignore_index=-100):
"""Calculate the binary CrossEntropy loss.
Args: |
codereview_python_data_1986 | cls: Type[NameT],
name: Union[SchemaName, str],
module: Optional[str] = None,
- ) -> Any:
if not name:
raise NameError('name must not be empty')
This change doesn't seem to be correct.
cls: Type[NameT],
name: Union[SchemaName, str],
module: Optional[str] = None,
+ ) -> NameT:
if not name:
raise NameError('name must not be empty') |
codereview_python_data_1991 | from graphite.node import LeafNode, BranchNode
from graphite.render.hashing import compactHash
from graphite.util import unpickle, logtime, is_local_interface
from graphite.finders.utils import BaseFinder
from graphite.readers.remote import RemoteReader
Shouldn't this be inside the loop?
from graphite.node import LeafNode, BranchNode
from graphite.render.hashing import compactHash
from graphite.util import unpickle, logtime, is_local_interface
+from graphite.future import FetchInProgress, wait_for_result
from graphite.finders.utils import BaseFinder
from graphite.readers.remote import RemoteReader |
codereview_python_data_1992 | self._proc.error.connect(self.on_proc_error)
editor = config.get('general', 'editor')
executable = editor[0]
- args = [arg.replace('{}', self._filename) if '{}' in arg else arg for arg in editor[1:]]
log.procs.debug("Calling \"{}\" with args {}".format(executable, args))
self._proc.start(executable, args)
I think you don't need the `... if '{}' in arg else arg` part - if the arg doesn't contain `{}`, `arg.replace('{}', ...)` will return the unchanged string anyways.
self._proc.error.connect(self.on_proc_error)
editor = config.get('general', 'editor')
executable = editor[0]
+ args = [arg.replace('{}', self._filename) for arg in editor[1:]]
log.procs.debug("Calling \"{}\" with args {}".format(executable, args))
self._proc.start(executable, args) |
codereview_python_data_2007 | low_quality_rate=stats["low_quality"] - stats["last_low_quality"],
no_match_rate=stats["no_match"] - stats["last_no_match"],
listens_per_sec=listens_per_sec,
- listens_matched_p=stats["listens_matched"] / stats["listen_count"] * 100.0,
legacy_index_date=datetime.date.fromtimestamp(self.legacy_listens_index_date).strftime("%Y-%m-%d"))
stats["last_exact_match"] = stats["exact_match"]
Calculation looks good but don't know what format etc grafana expects this to be in.
low_quality_rate=stats["low_quality"] - stats["last_low_quality"],
no_match_rate=stats["no_match"] - stats["last_no_match"],
listens_per_sec=listens_per_sec,
+ listens_matched_p=stats["listens_matched"] / (stats["listen_count"] or .000001) * 100.0,
legacy_index_date=datetime.date.fromtimestamp(self.legacy_listens_index_date).strftime("%Y-%m-%d"))
stats["last_exact_match"] = stats["exact_match"] |
codereview_python_data_2008 | self.get_url(method)
def get_url(self,method):
- self.master.prompt("Url:", "http://www.example.com/", self.new_request, method)
def new_request(self, url, method):
try:
Url -> URL (as in the flow editor)
self.get_url(method)
def get_url(self,method):
+ self.master.prompt("URL:", "http://www.example.com/", self.new_request, method)
def new_request(self, url, method):
try: |
codereview_python_data_2017 | text="URL: <b>{}</b>".format(
html.escape(url.toDisplayString())),
yes_action=functools.partial(QDesktopServices.openUrl, url),
- url=urlstr)
return True
elif (info.domain, info.error) in ignored_errors:
log.webview.debug("Ignored error on {}: {} (error domain: {}, "
You should re-stringify it here with `QUrl.FullyEncoded`.
text="URL: <b>{}</b>".format(
html.escape(url.toDisplayString())),
yes_action=functools.partial(QDesktopServices.openUrl, url),
+ url=info.url.toString(QUrl.FullyEncoded))
return True
elif (info.domain, info.error) in ignored_errors:
log.webview.debug("Ignored error on {}: {} (error domain: {}, " |
codereview_python_data_2021 | bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
- mask_iou_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
Move this argument to `MaskScoringRCNN`.
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None): |
codereview_python_data_2029 | entry.qualified_name = self.builtin_scope().qualify_name(name)
return entry
- def _is_package_scope_or_module(self):
- # Returns True for all ModuleScopes representing package or scopes representing
- # modules. Otherwise returns False.
- # Note: For package pkg_a Cython creates two modules scopes: pkg_a and pkg_a.__init__.
- # The main purpose of this helper method is to detect pkg_a ModuleScope.
- path = self.context.search_include_directories(self.qualified_name, suffix='.pyx')
- if not path:
- path = self.context.search_include_directories(self.qualified_name, suffix='.py')
- contains_init = os.path.basename(path) in ('__init__.pyx', '__init__.py') if path else False
- return self.is_package or not contains_init
-
def find_module(self, module_name, pos, relative_level=-1):
# Find a module in the import namespace, interpreting
# relative imports relative to this module's parent.
I wonder if it's even correct that we create two separate module scopes. And a good thing. It seems to me that the answer might be no.
entry.qualified_name = self.builtin_scope().qualify_name(name)
return entry
def find_module(self, module_name, pos, relative_level=-1):
# Find a module in the import namespace, interpreting
# relative imports relative to this module's parent. |
codereview_python_data_2040 | with ctx.new() as orderctx:
orderctx.expr_exposed = False
- # In OPDER BY we compile ir.Set as a subquery:
# SELECT SetRel.value FROM SetRel)
subq = relgen.set_as_subquery(
expr.expr, as_value=True, ctx=orderctx)
Typo fix opportunity: ```suggestion # In ORDER BY we compile ir.Set as a subquery: ```
with ctx.new() as orderctx:
orderctx.expr_exposed = False
+ # In ORDER BY we compile ir.Set as a subquery:
# SELECT SetRel.value FROM SetRel)
subq = relgen.set_as_subquery(
expr.expr, as_value=True, ctx=orderctx) |
codereview_python_data_2041 | urlstr = info.url.toDisplayString()
order = config.get('network', 'scheme-order')
- if info.url.scheme() in order and order[-1:][0] != info.url.scheme():
next_scheme = order[order.index(info.url.scheme()) + 1]
log.webview.info('Trying next scheme: {}'.format(next_scheme))
info.url.setScheme(next_scheme)
`[-1:][0]` is the same as `[-1]`
urlstr = info.url.toDisplayString()
order = config.get('network', 'scheme-order')
+ if info.url.scheme() in order and order[-1] != info.url.scheme():
next_scheme = order[order.index(info.url.scheme()) + 1]
log.webview.info('Trying next scheme: {}'.format(next_scheme))
info.url.setScheme(next_scheme) |
codereview_python_data_2046 | __tablename__ = violations_tablename
id = Column(Integer, primary_key=True)
- inventory_id = Column(String(256))
resource_id = Column(String(256), nullable=False)
resource_type = Column(String(256), nullable=False)
rule_name = Column(String(256))
rule_index = Column(Integer, default=0)
violation_type = Column(String(256), nullable=False)
data = Column(Text)
def __repr__(self):
"""String representation.
Should this be inventory_id or inventory_index_id? I think it's better to have this as a foreign key and reference back to the inventory/inventory_index table.
__tablename__ = violations_tablename
id = Column(Integer, primary_key=True)
+ inventory_index_id = Column(String(256))
resource_id = Column(String(256), nullable=False)
resource_type = Column(String(256), nullable=False)
+ full_name = Column(String(1024))
rule_name = Column(String(256))
rule_index = Column(Integer, default=0)
violation_type = Column(String(256), nullable=False)
data = Column(Text)
+ inventory_data = Column(Text)
def __repr__(self):
"""String representation. |
codereview_python_data_2057 | attr_dict.update(ndict)
else:
attr_dict = attr
- self.add_node(n, **attr_dict)
def remove_node(self, n):
"""Remove node n.
You are only testing for whether the node is hashable, but this code is supposed to check if the node is already in the DiGraph.
attr_dict.update(ndict)
else:
attr_dict = attr
+ self.add_node(n)
+ if attr_dict:
+ self._node[n].update(attr)
def remove_node(self, n):
"""Remove node n. |
codereview_python_data_2060 | request.setAttribute(QNetworkRequest.CacheLoadControlAttribute,
QNetworkRequest.AlwaysNetwork)
- if request.url().scheme().lower() != 'data':
- suggested_fn = (utils.sanitize_filename(title) + ".html" if title
- else urlutils.filename_from_url(request.url()))
else:
# We might be downloading a binary blob embedded on a page or even
# generated dynamically via javascript. We try to figure out a more
Oh, good job at finding `utils.sanitize_filename`! I compltely forgot about it
request.setAttribute(QNetworkRequest.CacheLoadControlAttribute,
QNetworkRequest.AlwaysNetwork)
+ if suggested_fn is not None:
+ pass
+ elif request.url().scheme().lower() != 'data':
+ suggested_fn = urlutils.filename_from_url(request.url())
else:
# We might be downloading a binary blob embedded on a page or even
# generated dynamically via javascript. We try to figure out a more |
codereview_python_data_2063 | for pipeline in pipelines:
pipeline.run()
- if notifier_configs.get('violation').get('cscc').get('enabled'):
- CsccPipeline().run(
- violations_as_dict,
- notifier_configs.get('violation').get('cscc').get('gcs_path'))
if __name__ == '__main__':
Do you think it's possible that the user only update the application and didn't update the configuration? This might throw null pointer exception.
for pipeline in pipelines:
pipeline.run()
+ if notifier_configs.get('violation'):
+ if notifier_configs.get('violation').get('cscc').get('enabled'):
+ CsccPipeline().run(
+ violations_as_dict,
+ notifier_configs.get('violation').get('cscc').get('gcs_path'))
if __name__ == '__main__': |
codereview_python_data_2069 | <h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
- <p>Details: cache-sea4460-SEA 1645545924 2021587045</p>
<hr>
<p>Varnish cache server</p>
</body>
One issue with this design is if I come up with a custom sampler (e.g., TimeSensitiveEdgeDataLoader), which uses the custom method (e.g., `sample_historical_neighbors()`) which I've added to my GraphStorage class (probably a child of DGLGraph, but perhaps not), the sampler would no longer be able to invoke the custom sampling function since it's not in this list. Could we decouple our GraphStorage and FeatureStorage, such both get passed into a dataloader (for backward compatibility allow passing in `feature_storage=None` if the GraphStorage object is also a FeatureStore-like object)?
<h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
+ <p>Details: cache-sea4483-SEA 1645545925 731129331</p>
<hr>
<p>Varnish cache server</p>
</body> |
codereview_python_data_2074 | import sys
-if sys.version_info[0] == 3 and sys.version_info[1] <= 5:
sys.exit('Please use python version 3.6 or higher')
# get the version
If `sys.version_info[0]` is `2`, then it won't generate the error. You can also use this simple construct: `sys.version_info < (3, 6)`. Other nice examples: ``` ~ -> python2 -c "import sys; print(sys.version_info<=(3, 6), sys.version_info)" (True, sys.version_info(major=2, minor=7, micro=14, releaselevel='final', serial=0)) ~ -> python3 -c "import sys; print(sys.version_info<=(3, 6), sys.version_info)" False sys.version_info(major=3, minor=6, micro=3, releaselevel='final', serial=0) ```
import sys
+if sys.version_info < (3, 6):
sys.exit('Please use python version 3.6 or higher')
# get the version |
codereview_python_data_2084 | self._save_goal_state()
self._update_host_plugin(new_goal_state.container_id, new_goal_state.role_config_name)
- except Exception as e: # pylint: disable=C0103
- raise ProtocolError("Error processing goal state: {0}".format(ustr(e)))
def try_update_goal_state(self):
"""
NIT: Missed pylint error
self._save_goal_state()
self._update_host_plugin(new_goal_state.container_id, new_goal_state.role_config_name)
+ except Exception as exception:
+ raise ProtocolError("Error processing goal state: {0}".format(ustr(exception)))
def try_update_goal_state(self):
""" |
codereview_python_data_2088 | label : [None | string]
Label for legend
- margins : Sequence of 2 numbers or None (default=None)
- The sequence contains horizontal and vertical axis margins. Adjust to avoid image being clipped.
Returns
-------
The description here should be modified to reflect the changes below, maybe something like: ```suggestion margins : float or 2-tuple, optional Sets the padding for axis autoscaling. This can be helpful in cases where nodes are clipped near the edges of an image - increasing the margin prevents clipping. Values should be in the range ``[0, 1]``. see :meth:`matplotlib.axes.Axes.margins` for details. The default is `None`, which uses the Matplotlib default. ```
label : [None | string]
Label for legend
+ margins : float or 2-tuple, optional
+ Sets the padding for axis autoscaling. This can be helpful in cases
+ where nodes are clipped near the edges of an image - increasing the
+ margin prevents clipping. Values should be in the range ``[0, 1]``.
+ see :meth:`matplotlib.axes.Axes.margins` for details. The default
+ is `None`, which uses the Matplotlib default.
Returns
------- |
codereview_python_data_2089 | else:
"""change non-float data to float data, need to copy"""
data = np.array(mat.reshape(mat.size), dtype=np.float32)
- ptr_data, type_ptr_data, new_data = c_float_array(data)
n_preds = self.__get_num_preds(num_iteration, mat.shape[0],
predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
@guolinke If we don't need `new_data` maybe it's better to name it like non-used variable: ``` ptr_data, type_ptr_data, _ = c_float_array(data) ``` ?
else:
"""change non-float data to float data, need to copy"""
data = np.array(mat.reshape(mat.size), dtype=np.float32)
+ ptr_data, type_ptr_data, _ = c_float_array(data)
n_preds = self.__get_num_preds(num_iteration, mat.shape[0],
predict_type)
preds = np.zeros(n_preds, dtype=np.float64) |
codereview_python_data_2097 | parsed = urlparse(url)
try:
- return (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
- pass
- return ('', '')
def to_native_string(string, encoding='ascii'):
I know I'm guilty of them, but I'm not the biggest fan of `except: pass` statements. Perhaps this could be rewritten as: ``` python try: auth = (unquote(parsed.username), unquote(parsed.password)) except (AttributeError, TypeError): auth = ('', '') return auth ``` But, you know, with the proper indentation. ;)
parsed = urlparse(url)
try:
+ auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
+ auth = ('', '')
+ else:
+ auth = ('', '')
+ return auth
def to_native_string(string, encoding='ascii'): |
codereview_python_data_2100 | mean = self.sum / self.count
total = self.squared_sum - self.sum * mean
raw_scores = 1 - (self.res / total)
- n = tf.cast(self.num_examples, dtype=tf.float32)
-
- num = tf.multiply(tf.subtract(1.0, raw_scores), tf.subtract(n, 1.0))
- den = tf.subtract(tf.subtract(n, self.num_preds), 1.0)
- scores = tf.subtract(1.0, tf.divide(num, den)) if self.penalize else raw_scores
if self.multioutput == "raw_values":
return scores
It is better that you move the code in an internal function that converts R2 in an adjusted R2 so that it is computed only if it is required the adjusted version.
mean = self.sum / self.count
total = self.squared_sum - self.sum * mean
raw_scores = 1 - (self.res / total)
+ scores = (
+ _calculate_adjr2(raw_scores, self.num_examples, self.num_preds)
+ if self.penalize
+ else raw_scores
+ )
if self.multioutput == "raw_values":
return scores |
codereview_python_data_2102 | elif name == 'motd':
emitter = dnf.automatic.emitter.MotdEmitter(system_name)
emitters.append(emitter)
- else:
- if name != 'None':
- assert False
return emitters
In think we should make confreader to convert None to real None
elif name == 'motd':
emitter = dnf.automatic.emitter.MotdEmitter(system_name)
emitters.append(emitter)
+ else:
+ assert False
return emitters |
codereview_python_data_2104 | @typechecked
def __init__(self,
units: int,
- projection: Union[int, str] = None,
use_bias: bool = False,
kernel_initializer: types.Initializer = "glorot_uniform",
recurrent_initializer: types.Initializer = "glorot_uniform",
```suggestion projection: Optional[int] = None, ```
@typechecked
def __init__(self,
units: int,
+ projection: Optional[int] = None,
use_bias: bool = False,
kernel_initializer: types.Initializer = "glorot_uniform",
recurrent_initializer: types.Initializer = "glorot_uniform", |
codereview_python_data_2118 | verify=True, cert=None, proxies=None, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses."""
- hist = [resp, ] # keep track of history; seed it with the original response
url = self.get_redirect_target(resp)
while url:
No need for the trailing comma in the list.
verify=True, cert=None, proxies=None, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses."""
+ hist = [resp] # keep track of history; seed it with the original response
url = self.get_redirect_target(resp)
while url: |
codereview_python_data_2129 | **Default Window Length:** 1
"""
window_length = 1
- missing_value = nan
def _validate(self):
super(PeerCount, self)._validate()
missing value of nan is the default for factors with dtype float64 (which is the default dtype for factors), so we shouldn't need this.
**Default Window Length:** 1
"""
window_length = 1
def _validate(self):
super(PeerCount, self)._validate() |
codereview_python_data_2137 | @mock.patch('pymongo.MongoClient.__init__')
@mock.patch('time.sleep')
def test_connection_error(mock_sleep, mock_client):
- from pymongo.errors import ConnectionFailure
from bigchaindb.backend import connect
# force the driver to trow ConnectionFailure
for code other than bigchaindb we can import at the following the PEP 8 style guide
@mock.patch('pymongo.MongoClient.__init__')
@mock.patch('time.sleep')
def test_connection_error(mock_sleep, mock_client):
from bigchaindb.backend import connect
# force the driver to trow ConnectionFailure |
codereview_python_data_2139 | def test_server_finishes_on_error(self):
"""the server thread exits even if an exception exits the context manager"""
server = Server.basic_response_server()
- try:
with server:
raise Exception()
- except Exception:
- pass
assert len(server.handler_results) == 0
Can we assert that the raise makes it out? `with pytest.raises()` would do that, rather than a `try...except`.
def test_server_finishes_on_error(self):
"""the server thread exits even if an exception exits the context manager"""
server = Server.basic_response_server()
+ with pytest.raises(Exception):
with server:
raise Exception()
assert len(server.handler_results) == 0 |
codereview_python_data_2140 | task_configuration_error('{}.{}'.format(outer, inner))
return None
- if config.evolve_captured:
- parser.error('"evolve_captured" has been removed in favor of the EvolveTask')
- return None
if not (config.location or config.location_cache):
parser.error("Needs either --use-location-cache or --location.")
Can we make this a deprecation warning that lets the bot keep running? I'd like to avoid breaking peoples' configs if possible.
task_configuration_error('{}.{}'.format(outer, inner))
return None
+ if "evolve_captured" in load:
+ logger.warning('The evolve_captured argument is no longer supported. Please use the EvolvePokemon task instead')
if not (config.location or config.location_cache):
parser.error("Needs either --use-location-cache or --location.") |
codereview_python_data_2143 | assert_almost_equal(Q.mean(), 0.0, decimal=1)
- def test_villin_folded(self):
# one folded, one unfolded
f = MDAnalysis.Universe(contacts_villin_folded)
You don't use the self argument. So please remove it and add the `staticmethod` decorator.
assert_almost_equal(Q.mean(), 0.0, decimal=1)
+ @staticmethod
+ def test_villin_folded():
# one folded, one unfolded
f = MDAnalysis.Universe(contacts_villin_folded) |
codereview_python_data_2144 | category_torch_type[category] = to_torch_type[np.dtype(category_tensors[category].dtype())]
if type(category_tensors[category]) is TensorGPU:
if not torch_gpu_device:
- torch.device('cuda', dev_id)
category_device[category] = torch_gpu_device
else:
category_device[category] = torch_cpu_device
```suggestion torch_gpu_device = torch.device('cuda', dev_id) ```
category_torch_type[category] = to_torch_type[np.dtype(category_tensors[category].dtype())]
if type(category_tensors[category]) is TensorGPU:
if not torch_gpu_device:
+ torch_gpu_device = torch.device('cuda', dev_id)
category_device[category] = torch_gpu_device
else:
category_device[category] = torch_cpu_device |
codereview_python_data_2151 | from .dist_tensor import DistTensor
from .partition import partition_graph, load_partition, load_partition_book
from .graph_partition_book import GraphPartitionBook, PartitionPolicy
-from .graph_partition_book import NodePartitionPolicy, EdgePartitionPolicy
from .sparse_emb import SparseAdagrad, DistEmbedding
from .rpc import *
If they are not exposed, remove it.
from .dist_tensor import DistTensor
from .partition import partition_graph, load_partition, load_partition_book
from .graph_partition_book import GraphPartitionBook, PartitionPolicy
from .sparse_emb import SparseAdagrad, DistEmbedding
from .rpc import * |
codereview_python_data_2152 | (initial_finished, initial_inputs) = sampler.initialize(input_tensors)
cell_input = initial_inputs
cell_state = cell.get_initial_state(...)
- for time_step in range(max_output_length):
cell_output, cell_state = cell(cell_input, cell_state)
sample_ids = sampler.sample(time_step, cell_output, cell_state)
(finished, cell_input, cell_state) = sampler.next_inputs(
- time_step, cell_output, cell_state, sample_ids)
if tf.reduce_all(finished):
break
```
Should we prefer using `tf.range` here?
(initial_finished, initial_inputs) = sampler.initialize(input_tensors)
cell_input = initial_inputs
cell_state = cell.get_initial_state(...)
+ for time_step in tf.range(max_output_length):
cell_output, cell_state = cell(cell_input, cell_state)
sample_ids = sampler.sample(time_step, cell_output, cell_state)
(finished, cell_input, cell_state) = sampler.next_inputs(
+ time_step, cell_output, cell_state, sample_ids)
if tf.reduce_all(finished):
break
``` |
codereview_python_data_2153 | def _when(self, entry, next_time_to_run, mktime=time.mktime):
adjust = self.adjust
- return (mktime(entry._default_now().timetuple()) +
(adjust(next_time_to_run) or 0))
def populate_heap(self, event_t=event_t, heapify=heapq.heapify):
This is a protected method. You should probably create instead a public method or getter in `ScheduleEntry`.
def _when(self, entry, next_time_to_run, mktime=time.mktime):
adjust = self.adjust
+ return (mktime(entry.default_now().timetuple()) +
(adjust(next_time_to_run) or 0))
def populate_heap(self, event_t=event_t, heapify=heapq.heapify): |
codereview_python_data_2155 | that accepts exactly one argument (:meth:`~nvidia.dali.types.SampleInfo` objects that
represent the index of the requested sample).
If batch is set to True, the ``source`` can be either a callable, an iterable or a generator function.
- Callable in batch mode must accept exactly one argument - an integer that represents the index of the
- batch within the epoch that the callable should return.
Irrespective of ``batch`` value, callables should produce requested sample or batch solely based on
the SampleInfo instance or index in batch, so that they can be run in parallel in a number of workers.
We didn't add any reference to BatchInfo in the External Source doc? I'm surprised that we missed it, I think it should be added, not sure if here.
that accepts exactly one argument (:meth:`~nvidia.dali.types.SampleInfo` objects that
represent the index of the requested sample).
If batch is set to True, the ``source`` can be either a callable, an iterable or a generator function.
+ Callable in batch mode must accept exactly one argument - either :meth:`~nvidia.dali.types.BatchInfo`
+ instance or an integer (see `batch_info`).
Irrespective of ``batch`` value, callables should produce requested sample or batch solely based on
the SampleInfo instance or index in batch, so that they can be run in parallel in a number of workers. |
codereview_python_data_2158 | self.service_account_key_file = kwargs.get('service_account_key_file')
self.vpc_host_project_id = kwargs.get('vpc_host_project_id')
self.vpc_host_network = kwargs.get('vpc_host_network') or 'default'
- self.vpc_host_subnetwork = kwargs.get('vpc_host_subnetwork') \
- or 'default'
self.config_filename = (kwargs.get('config') or
'forseti-setup-{}.cfg'.format(
self.datetimestamp))
We don't use the backslash for line continuation. Can you please try this? ``` self.vpc_host_subnetwork = ( kwargs.get('vpc_host_subnetwork') or 'default') ```
self.service_account_key_file = kwargs.get('service_account_key_file')
self.vpc_host_project_id = kwargs.get('vpc_host_project_id')
self.vpc_host_network = kwargs.get('vpc_host_network') or 'default'
+ self.vpc_host_subnetwork = (
+ kwargs.get('vpc_host_subnetwork') or 'default')
self.config_filename = (kwargs.get('config') or
'forseti-setup-{}.cfg'.format(
self.datetimestamp)) |
codereview_python_data_2164 | q.title = "Save file to:"
q.text = "Please enter a location for <b>{}</b>".format(
html.escape(url.toDisplayString()))
- q.url = url.toString(QUrl.RemoveUserInfo)
q.mode = usertypes.PromptMode.download
q.completed.connect(q.deleteLater)
q.default = _path_suggestion(suggested_filename)
`url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)` is probably more appropriate here. It's fine to keep the username here, and we should encode special characters for the yanked URL (like `:yank url` does). Same for all other places where you convert a URL to a string.
q.title = "Save file to:"
q.text = "Please enter a location for <b>{}</b>".format(
html.escape(url.toDisplayString()))
+ q.url = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
q.mode = usertypes.PromptMode.download
q.completed.connect(q.deleteLater)
q.default = _path_suggestion(suggested_filename) |
codereview_python_data_2165 | # sub-hook.
return hooks[0]
else:
- return super(DelegatingHooks, cls).__new__(cls, hooks)
-
- def __init__(self, hooks):
- self._hooks = hooks
# Implement all interface methods by delegating to corresponding methods on
# input hooks.
we don't want to forward `hooks` in py3, right?
# sub-hook.
return hooks[0]
else:
+ self = super(DelegatingHooks, cls).__new__(cls)
+ self._hooks = hooks
+ return self
# Implement all interface methods by delegating to corresponding methods on
# input hooks. |
codereview_python_data_2171 | class TestTransTable(object):
- Ridx = np.array([0, 0, 2, 2, 1, 1, 3, 3, 1, 2])
- Sidx = np.array([0, 1, 1, 0])
-
@pytest.fixture()
def tt(self):
- return TransTable(10, 4, 2, self.Ridx, self.Sidx)
def test_a2r(self, tt):
for aix, rix in zip(
these aren't used anywhere but the fixture, so can move these into there
class TestTransTable(object):
@pytest.fixture()
def tt(self):
+ Ridx = np.array([0, 0, 2, 2, 1, 1, 3, 3, 1, 2])
+ Sidx = np.array([0, 1, 1, 0])
+ return TransTable(10, 4, 2, Ridx, Sidx)
def test_a2r(self, tt):
for aix, rix in zip( |
codereview_python_data_2172 | return dh
@classmethod
- def from_store(cls, path, basename, key_size, passphrase: str = None):
ca_path = os.path.join(path, basename + "-ca.pem")
if not os.path.exists(ca_path):
key, ca = cls.create_store(path, basename, key_size)
Let's make this argument bytes right away and call `options.cert_passphrase.encode()` when calling it. The earlier we convert this, the more likely we are to not run into encoding issues. This also removes the slightly ugly `None if passphrase is None else`... at least here. :) ```suggestion def from_store(cls, path, basename, key_size, passphrase: typing.Optional[bytes] = None): ```
return dh
@classmethod
+ def from_store(cls, path, basename, key_size, passphrase: typing.Optional[bytes] = None):
ca_path = os.path.join(path, basename + "-ca.pem")
if not os.path.exists(ca_path):
key, ca = cls.create_store(path, basename, key_size) |
codereview_python_data_2173 | <h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
- <p>Details: cache-sea4421-SEA 1645545908 1547545002</p>
<hr>
<p>Varnish cache server</p>
</body>
This is a little bit strange. Shouldn't it transform a tensor to a graph?
<h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
+ <p>Details: cache-sea4477-SEA 1645545908 3705130302</p>
<hr>
<p>Varnish cache server</p>
</body> |
codereview_python_data_2174 | import cheese
def report_cheese(name):
- print("Found cheese: " + name.decode('utf-8'))
cheese.find(report_cheese)
It feels like the caller should do the decoding here.
import cheese
def report_cheese(name):
+ print("Found cheese: " + name)
cheese.find(report_cheese)
+ |
codereview_python_data_2175 | # ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
-"""Base IO classes optimized for pandas on Ray execution."""
from .io import (
ExperimentalPandasOnRayIO,
there's nothing about "experimental" in here
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
+"""Experimental Base IO classes optimized for pandas on Ray execution."""
from .io import (
ExperimentalPandasOnRayIO, |
codereview_python_data_2177 | # Cell
add_docs(TfmdLists,
setup="Transform setup with self",
- decode="From `Pipeline",
- show="From `Pipeline",
overlapping_splits="All splits that are in more than one split",
subset="New `TfmdLists` with same tfms that only includes items in `i`th split",
infer_idx="Finds the index where `self.tfms` can be applied to `x`, depending on the type of `x`",
Looks like two lines are missing closing backticks here
# Cell
add_docs(TfmdLists,
setup="Transform setup with self",
+ decode="From `Pipeline`",
+ show="From `Pipeline`",
overlapping_splits="All splits that are in more than one split",
subset="New `TfmdLists` with same tfms that only includes items in `i`th split",
infer_idx="Finds the index where `self.tfms` can be applied to `x`, depending on the type of `x`", |
codereview_python_data_2178 | }
if fulfillment.type_name == 'threshold-sha-256':
- subfulfillments = [
_fulfillment_to_details(cond['body'])
for cond in fulfillment.subconditions
]
return {
'type': 'threshold-sha-256',
'threshold': fulfillment.threshold,
- 'subfulfillments': subfulfillments,
}
raise UnsupportedTypeError(fulfillment.type_name)
@r-marques The current default limit for threshold is defined at depth 10 here. Any take on if we should limit this? And if so, to what depth?
}
if fulfillment.type_name == 'threshold-sha-256':
+ subconditions = [
_fulfillment_to_details(cond['body'])
for cond in fulfillment.subconditions
]
return {
'type': 'threshold-sha-256',
'threshold': fulfillment.threshold,
+ 'subconditions': subconditions,
}
raise UnsupportedTypeError(fulfillment.type_name) |
codereview_python_data_2183 | schema, new_scalar, catenate=False)
if needs_recreate:
- cond = dbops.EnumExists(type_name)
self.pgops.add(
- dbops.DropEnum(name=type_name, conditions=[cond]))
self.pgops.add(dbops.CreateEnum(
dbops.Enum(name=type_name, values=new_enum_values)))
Why is the condition necessary?
schema, new_scalar, catenate=False)
if needs_recreate:
self.pgops.add(
+ dbops.DropEnum(name=type_name))
self.pgops.add(dbops.CreateEnum(
dbops.Enum(name=type_name, values=new_enum_values))) |
codereview_python_data_2186 | return src_type.is_float and src_type.rank <= dst_type.rank
return False
-def best_match(arg_types, functions, pos=None, env=None, args=None,
- validate_types_fully=False):
"""
Given a list args of arguments and a list of functions, choose one
to call which seems to be the "best" fit for this list of arguments.
Hmm. Do we need this flag? I'd rather not add a "do your job" and a "really do your job" kind of behavioural distinction. Comparing argument types doesn't seem like a bad idea in general.
return src_type.is_float and src_type.rank <= dst_type.rank
return False
+def best_match(arg_types, functions, pos=None, env=None, args=None):
"""
Given a list args of arguments and a list of functions, choose one
to call which seems to be the "best" fit for this list of arguments. |
codereview_python_data_2187 | KeyError: 'foo'
"""
- def __init__(self, cache=None):
if cache is not None:
self._cache = cache
else:
self._cache = {}
- def get(self, key, dt, cleanup=basic_cleanup):
"""Get the value of a cached object.
Parameters
I would probably make this a parameter to `ExpiringCache` instead of to `get`.
KeyError: 'foo'
"""
+ def __init__(self, cache=None, cleanup=lambda value_to_clean: None):
if cache is not None:
self._cache = cache
else:
self._cache = {}
+ self.cleanup = cleanup
+
+ def get(self, key, dt):
"""Get the value of a cached object.
Parameters |
codereview_python_data_2189 | assert self._host is None
return
- if not utils.raises(ValueError, ipaddress.IPv6Address, parsed.netloc[1:-1]):
# Using QUrl parsing to minimize ipv6 addresses
url = QUrl()
- url.setHost(parsed.hostname)
self._host = url.host()
return
I think you could remove this, and instead raise a `ParseError` with `url.errorString()` if `url.isValid()` is false after setting the host. However, you should probably only do this `if parsed.netloc.startswith('['):` which kinda mirrors the Chromium code.
assert self._host is None
return
+ if parsed.netloc.startswith('['):
# Using QUrl parsing to minimize ipv6 addresses
url = QUrl()
+ url.setHost("[" + parsed.hostname + "]")
+ if url.host() == "":
+ raise ParseError("Invalid IPv6 URL"+parsed.hostname)
self._host = url.host()
return |
codereview_python_data_2210 | """
-from __future__ import print_function
-
-from Bio._py3k import _as_string
-
class SwissProtParserError(ValueError):
"""An error occurred while parsing a SwissProt file."""
This won't work yet: ``` ====================================================================== ERROR: test_AlignIO ---------------------------------------------------------------------- Traceback (most recent call last): File "run_tests.py", line 336, in runTest suite = loader.loadTestsFromName(name) File "C:\Py\lib\unittest\loader.py", line 91, in loadTestsFromName module = __import__('.'.join(parts_copy)) File "C:\projects\biopython\Tests\test_AlignIO.py", line 14, in <module> from Bio import SeqIO File "C:\projects\biopython\build\lib.win32-2.7\Bio\SeqIO\__init__.py", line 406, in <module> from . import SwissIO File "C:\projects\biopython\build\lib.win32-2.7\Bio\SeqIO\SwissIO.py", line 24, in <module> from Bio import SwissProt File "C:\projects\biopython\build\lib.win32-2.7\Bio\SwissProt\__init__.py", line 27 def __init__(self, *args, line=None): ^ SyntaxError: invalid syntax ====================================================================== ERROR: test_AlignIO_MauveIO ```
"""
class SwissProtParserError(ValueError):
"""An error occurred while parsing a SwissProt file.""" |
codereview_python_data_2211 | class CorpusTag(Model):
"""Corpus Tags for use in cross-pollination."""
tag = StringProperty()
- fuzz_target = StringProperty()
def coverage_information_date_to_string(date):
Nit: fuzz_target_name for consistency with other data types (like FuzzTargetJob).
class CorpusTag(Model):
"""Corpus Tags for use in cross-pollination."""
tag = StringProperty()
+ fuzz_target_name = StringProperty()
def coverage_information_date_to_string(date): |
codereview_python_data_2217 | device = d.split(':')[1]
break
break
- except OSError:
- pass
return device
def set_hostname_record(self, hostname):
Any value in logging? It isn't clear to me what would happen that we are now ignoring.
device = d.split(':')[1]
break
break
+ except OSError as oe:
+ logger.warn('Could not obtain device for IDE port {0}: {1}', port_id, ustr(oe))
return device
def set_hostname_record(self, hostname): |
codereview_python_data_2218 | @staticmethod
def apply_stealth(executor, code):
- if executor not in ['shellcode_amd64', 'shellcode_386']:
- options = dict(windows=lambda c: obfuscate_ps1(c),
- darwin=lambda c: obfuscate_bash(c),
- linux=lambda c: obfuscate_bash(c))
- return options[executor](code)
- return code
@staticmethod
def decode_bytes(s):
I don't like the hard-coded executor names here ^^
@staticmethod
def apply_stealth(executor, code):
+ options = dict(windows=lambda c: obfuscate_ps1(c),
+ darwin=lambda c: obfuscate_bash(c),
+ linux=lambda c: obfuscate_bash(c))
+ return options[executor](code)
@staticmethod
def decode_bytes(s): |
codereview_python_data_2219 | # Wait a maximum of MIN_SECONDS_ALLOWED_FOR_CELL_CHECK seconds before requesting nearby cells
if (seconds_since_last_check < self.MIN_SECONDS_ALLOWED_FOR_CELL_CHECK):
# Sleep a bit longer for the Pokemon to appear
self._log('Waiting for the Pokemon to appear...')
Check your indentation: the statement as it is will sleep unconditionally!
# Wait a maximum of MIN_SECONDS_ALLOWED_FOR_CELL_CHECK seconds before requesting nearby cells
if (seconds_since_last_check < self.MIN_SECONDS_ALLOWED_FOR_CELL_CHECK):
+ time.sleep(self.MIN_SECONDS_ALLOWED_FOR_CELL_CHECK - seconds_since_last_check)
# Sleep a bit longer for the Pokemon to appear
self._log('Waiting for the Pokemon to appear...') |
codereview_python_data_2223 | if context.client_context:
headers["X-Amz-Client-Context"] = context.client_context
- def event_serializer(o):
- if isinstance(o, datetime):
- return o.isoformat()
-
- data = json.dumps(event, default=event_serializer) if isinstance(event, dict) else str(event)
LOG.debug("Forwarding Lambda invocation to LAMBDA_FORWARD_URL: %s" % config.LAMBDA_FORWARD_URL)
result = safe_requests.post(url, data, headers=headers)
content = run_safe(lambda: to_str(result.content)) or result.content
nit: we could use `json_safe(..)` from `common.py` here: ``` data = json.dumps(json_safe(event)) ``` (It uses `"%Y-%m-%dT%H:%M:%SZ"` as the date format, if that works for us here).
if context.client_context:
headers["X-Amz-Client-Context"] = context.client_context
+ data = json.dumps(json_safe(event)) if isinstance(event, dict) else str(event)
LOG.debug("Forwarding Lambda invocation to LAMBDA_FORWARD_URL: %s" % config.LAMBDA_FORWARD_URL)
result = safe_requests.post(url, data, headers=headers)
content = run_safe(lambda: to_str(result.content)) or result.content |
codereview_python_data_2224 | Args:
index: The index of the tab to get a size hint for.
ellipsis: Whether to use ellipsis to calculate width
- instead of the tab's text.
- Forced to false for pinned tabs.
Return:
A QSize of the smallest tab size we can make.
"""
`false` -> `False` - and while you're at it, mind indenting this and the previous line a space more? :wink:
Args:
index: The index of the tab to get a size hint for.
ellipsis: Whether to use ellipsis to calculate width
+ instead of the tab's text.
+ Forced to False for pinned tabs.
Return:
A QSize of the smallest tab size we can make.
""" |
codereview_python_data_2228 | window = self._tabbed_browser.window()
if window.isFullScreen():
- window.setWindowState(window._state_before_fullscreen & ~Qt.WindowFullScreen)
else:
- window._state_before_fullscreen = window.windowState()
window.showFullScreen()
Make this a public attribute (remove the leading `_`) if you want to access it from the outside.
window = self._tabbed_browser.window()
if window.isFullScreen():
+ window.setWindowState(window.state_before_fullscreen & ~Qt.WindowFullScreen)
else:
+ window.state_before_fullscreen = window.windowState()
window.showFullScreen()
+ log.misc.debug('state before fullscreen: {}'
+ .format(debug.qflags_key(Qt, window.state_before_fullscreen))) |
codereview_python_data_2239 | elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise ValueError(
"Transforms should have rank 1 or 2, but got rank %d"
- % len(transform_or_transforms.get_shape()))
# Invert transformations
transforms = flat_transforms_to_matrices(transforms=transforms)
ditto ```suggestion raise ValueError( "transforms should have rank 1 or 2, but got rank %d" % len(transform_or_transforms.get_shape())) ```
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
+ transforms = transform_or_transforms
raise ValueError(
"Transforms should have rank 1 or 2, but got rank %d"
+ % len(transforms.get_shape()))
# Invert transformations
transforms = flat_transforms_to_matrices(transforms=transforms) |
codereview_python_data_2245 | return url
def check_forms_can_be_destroyed(self, tab):
# Check for user modified fields in a single tab
confirm_quit = config.get('ui', 'confirm-quit')
if tab.isModified() and 'forms' in confirm_quit:
You should add a docstring explaining what this does and what the tab argument and return value mean.
return url
def check_forms_can_be_destroyed(self, tab):
+ """If needed ask user for confirmation to close a tab
+
+ Args:
+ tab: The QWebView to be closed.
+
+ Return:
+ True if tab can be destroyed and false otherwise.
+ """
# Check for user modified fields in a single tab
confirm_quit = config.get('ui', 'confirm-quit')
if tab.isModified() and 'forms' in confirm_quit: |
codereview_python_data_2248 | shutil.copy(whl, docker_build_dir / whl.name)
subprocess.check_call([
"docker",
- "buildx"
"build",
"--platform linux/amd64,linux/arm64,darwin/amd64,darwin/arm64",
"--tag", be.docker_tag,
You've missed the comma separator here. Subprocess gladly issues the command without erroring out, since Python implicitly concatenates two adjacent string literals. The CI fails since it ran `docker buildxbuild` instead of the `docker buildx build` specified in the Docker docs.
shutil.copy(whl, docker_build_dir / whl.name)
subprocess.check_call([
"docker",
+ "buildx",
"build",
"--platform linux/amd64,linux/arm64,darwin/amd64,darwin/arm64",
"--tag", be.docker_tag, |
codereview_python_data_2252 | if k is None:
nodes = G
else:
- nodes = seed.sample(sorted(G.nodes()), k)
for s in nodes:
# single source shortest paths
if weight is None: # use BFS
Unfortunately, nodes are not sortable in general.Using `sorted` should be fine in tests where we know what the nodes are, but in the package code itself we should use `list`.
if k is None:
nodes = G
else:
+ nodes = seed.sample(list(G.nodes()), k)
for s in nodes:
# single source shortest paths
if weight is None: # use BFS |
codereview_python_data_2253 | fnames = test_utils.filter_files(data_dir, data_extension)
nfiles = len(fnames)
- for i in range(len(fnames), 10): # At leat 10 elements
fnames.append(fnames[-1])
nfiles = len(fnames)
_input_epoch = [
Could you add `TODO: remove` here? I'll remove these 3 lines with my `more_audio_data` PR
fnames = test_utils.filter_files(data_dir, data_extension)
nfiles = len(fnames)
+ # TODO(janton): Workaround for audio data (not enough samples)
+ # To be removed when more audio samples are added
+ for i in range(len(fnames), 10): # At least 10 elements
fnames.append(fnames[-1])
nfiles = len(fnames)
_input_epoch = [ |
codereview_python_data_2260 | def test_restarter_can_initialize_after_pool_restart(txnPoolNodeSet):
'''
- 1. Schedule restart after restart_timeout seconds
- 2. Add restart schedule message to ActionLog
- 3. Add start restart message to ActionLog
- 4. Check that Restarter can be create (emulate case after node restart).
'''
unow = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
restarted_node = txnPoolNodeSet[-1]
Seems like after test simplification this item is obsolete?
def test_restarter_can_initialize_after_pool_restart(txnPoolNodeSet):
'''
+ 1. Add restart schedule message to ActionLog
+ 2. Add start restart message to ActionLog
+ 3. Check that Restarter can be create (emulate case after node restart).
'''
unow = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
restarted_node = txnPoolNodeSet[-1] |
codereview_python_data_2266 | c = unique_atoms[mask]
positions[mask] = mdamath.make_whole(c, inplace=False)
# Apply reference shift if required:
if ref == 'com':
masses = c.masses
total_mass = masses.sum()
Could you check the indentation here. From the diff it looks like you removed the `if` statement but did not remove the indentation on the block.
c = unique_atoms[mask]
positions[mask] = mdamath.make_whole(c, inplace=False)
# Apply reference shift if required:
+ if reference is not None:
if ref == 'com':
masses = c.masses
total_mass = masses.sum() |
codereview_python_data_2271 | self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)
params['feature_fraction'] = 0.5
gbm2 = lgb.train(params, lgb_train,
- num_boost_round=25,
- valid_sets=lgb_eval,
- verbose_eval=False,
- evals_result=evals_result)
ret2 = log_loss(y_test, gbm2.predict(X_test))
self.assertNotEqual(ret, ret2)
Seems that valid set is not used in the further code.
self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)
params['feature_fraction'] = 0.5
gbm2 = lgb.train(params, lgb_train,
+ num_boost_round=25)
ret2 = log_loss(y_test, gbm2.predict(X_test))
self.assertNotEqual(ret, ret2) |
codereview_python_data_2275 | # if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
- if rescale and not isinstance(scale_factors[0], float):
- scale_factors = det_bboxes.new_tensor(scale_factors)
-
det_bboxes = det_bboxes[..., :4]
if rescale:
- det_bboxes *= scale_factors.unsqueeze(1)
batch_index = torch.arange(
det_bboxes.size(0), device=det_bboxes.device).float().view(
Put the logic of rescale together for a more clear logic. For example ```python det_bboxes = det_bboxes[..., :4] if rescale: if not isinstance(scale_factors[0], float): scale_factors = det_bboxes.new_tensor(scale_factors) det_bboxes = det_bboxes * scale_factor.unsqueeze(-1) ```
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
det_bboxes = det_bboxes[..., :4]
if rescale:
+ if not isinstance(scale_factors[0], float):
+ scale_factors = det_bboxes.new_tensor(scale_factors)
+ det_bboxes = det_bboxes * scale_factors.unsqueeze(1)
batch_index = torch.arange(
det_bboxes.size(0), device=det_bboxes.device).float().view( |
codereview_python_data_2281 | elif archive_type in (ArchiveType.TAR, ArchiveType.TAR_LZMA):
if archive_type == ArchiveType.TAR_LZMA:
- # Import lzma here so that if lzma installation fails (as it may on
- # Windows), other archives can still be opened.
- # TODO(metzman): Determine if this actually fails on Windows and move this
- # to the top of the file if it doesn't.
- from backports import lzma
-
lzma_file = lzma.LZMAFile(archive_path)
tar_archive = tarfile.open(fileobj=lzma_file)
else:
Remove this, as I am changing this in another CL; otherwise your CL will conflict.
elif archive_type in (ArchiveType.TAR, ArchiveType.TAR_LZMA):
if archive_type == ArchiveType.TAR_LZMA:
lzma_file = lzma.LZMAFile(archive_path)
tar_archive = tarfile.open(fileobj=lzma_file)
else: |
codereview_python_data_2289 | # Hidrophobicity
-# 1 Kyte & Doolittle index of hydrophobicity
# J. Mol. Biol. 157:105-132(1982).
# "KyteDoolittle"
kd = {"A": 1.8, "R": -4.5, "N": -3.5, "D": -3.5, "C": 2.5,
Are the numbers (1 to 27) meaningful? If you can avoid them, it will make any future additions easier. Also perhaps do these in key order?
# Hidrophobicity
+# Kyte & Doolittle index of hydrophobicity
# J. Mol. Biol. 157:105-132(1982).
# "KyteDoolittle"
kd = {"A": 1.8, "R": -4.5, "N": -3.5, "D": -3.5, "C": 2.5, |
codereview_python_data_2294 | # model settings
model = dict(
- type='MaskRCNN',
pretrained='open-mmlab://resnet101_caffe',
backbone=dict(
type='ResNet',
Add a new detector `MaskScoringRCNN`.
# model settings
model = dict(
+ type='MaskScoringRCNN',
pretrained='open-mmlab://resnet101_caffe',
backbone=dict(
type='ResNet', |
codereview_python_data_2307 | quit_texts.append("{} {} open.".format(
tab_count, "tab is" if tab_count == 1 else "tabs are"))
# Ask if pinned-tabs are open
- if 'pinned-tabs' in config.val.confirm_quit and has_pinned:
quit_texts.append("{} {} pinned.".format(
- tab_count, "tab is" if tab_count == 1 else "tabs are"))
# Ask if multiple downloads running
if 'downloads' in config.val.confirm_quit and download_count > 0:
quit_texts.append("{} {} running.".format(
`tab_count` is the total number of tabs, right? So if I have 1 out of 5 tabs pinned, this will print "5 tabs are pinned" instead of "1 tab is pinned".
quit_texts.append("{} {} open.".format(
tab_count, "tab is" if tab_count == 1 else "tabs are"))
# Ask if pinned-tabs are open
+ if 'pinned-tabs' in config.val.confirm_quit and any(pinned_values):
+ pinned_count = len(pinned_values)
quit_texts.append("{} {} pinned.".format(
+ pinned_count), "tab is" if pinned_count == 1 else "tabs are")
# Ask if multiple downloads running
if 'downloads' in config.val.confirm_quit and download_count > 0:
quit_texts.append("{} {} running.".format( |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.