ast_errors stringlengths 0 3.2k | d_id int64 44 121k | id int64 70 338k | n_whitespaces int64 3 14k | path stringlengths 8 134 | n_words int64 4 4.82k | n_identifiers int64 1 131 | random_cut stringlengths 16 15.8k | commit_message stringlengths 2 15.3k | fun_name stringlengths 1 84 | commit_id stringlengths 40 40 | repo stringlengths 3 28 | file_name stringlengths 5 79 | ast_levels int64 6 31 | nloc int64 1 548 | url stringlengths 31 59 | complexity int64 1 66 | token_counts int64 6 2.13k | n_ast_errors int64 0 28 | vocab_size int64 4 1.11k | n_ast_nodes int64 15 19.2k | language stringclasses 1
value | documentation dict | code stringlengths 101 62.2k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
13,236 | 63,296 | 207 | .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 141 | 30 | def replaceHTMLEntity(t):
return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form ``/* ... */``"
htmlComment =... | upd; format | replaceHTMLEntity | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | pyparsing.py | 21 | 2 | https://github.com/jindongwang/transferlearning.git | 1 | 15 | 0 | 91 | 347 | Python | {
"docstring": "Helper parser action to replace common HTML entities with their special characters(Deprecated) Predefined expression of 1 or more printable words or\nquoted strings, separated by commas.\n\nThis expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.\n",
"language": "en"... | def replaceHTMLEntity(t):
return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form ``/* ... */``"
htmlComment =... | |
1,072 | 6,800 | 81 | tests/integration_tests/utils.py | 29 | 19 | def read_csv_with_nan(path, nan_percent=0.0):
df = pd.read_csv(path)
if nan_percent > 0:
num_rows = len(df)
for col in df.columns:
for row in random.sampl | Adds regression tests for #2020 (#2021)
* fixes nans in dask df engine
* adds tests
* fixes with logs
* fixes
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* cleanup
* checking accuracy closeness
* investigates ray batcher dropping sample... | read_csv_with_nan | 9ae57a93ee4011c3d56cb0284ec07a861f3c88ff | ludwig | utils.py | 16 | 8 | https://github.com/ludwig-ai/ludwig.git | 4 | 76 | 0 | 24 | 116 | Python | {
"docstring": "Converts `nan_percent` of samples in each row of the CSV at `path` to NaNs.",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | def read_csv_with_nan(path, nan_percent=0.0):
df = pd.read_csv(path)
if nan_percent > 0:
num_rows = len(df)
for col in df.columns:
for row in random.sample(range(num_rows), int(round(nan_percent * num_rows))):
df[col].iloc[row] = np.nan
return df
| |
10,203 | 50,719 | 586 | modules/image/text_to_image/stable_diffusion/diffusers/models/resnet.py | 219 | 36 | def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1):
assert isinstance(factor, int) and factor >= 1
# Setup filter kernel.
if k is None:
k = [1] * factor
# setup kernel
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.... | Add stable diffusion module | _upsample_2d | a6790a651a12eb391060e533868bf0ba197f6f7e | PaddleHub | resnet.py | 18 | 33 | https://github.com/PaddlePaddle/PaddleHub.git | 6 | 434 | 0 | 112 | 670 | Python | {
"docstring": "Fused `upsample_2d()` followed by `Conv2d()`.\n\n Args:\n Padding is performed only once at the beginning, not between the operations. The fused op is considerably more\n efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitra... | def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1):
assert isinstance(factor, int) and factor >= 1
# Setup filter kernel.
if k is None:
k = [1] * factor
# setup kernel
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.... | |
79,255 | 267,980 | 44 | test/lib/ansible_test/_internal/docker_util.py | 12 | 8 | def get_network_names(self) -> t.Optional[t.List[str]]:
i | ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annota... | get_network_names | 3eb0485dd92c88cc92152d3656d94492db44b183 | ansible | docker_util.py | 8 | 5 | https://github.com/ansible/ansible.git | 2 | 34 | 0 | 11 | 55 | Python | {
"docstring": "Return a list of the network names the container is attached to.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def get_network_names(self) -> t.Optional[t.List[str]]:
if self.networks is None:
return None
return sorted(self.networks)
| |
14,413 | 67,035 | 16 | erpnext/projects/report/project_wise_stock_tracking/project_wise_stock_tracking.py | 31 | 12 | def get_delivered_items_cost():
dn_items = frappe.db.sql(
,
as_dict=1,
)
si_items = frappe.db.sql(
,
as_dict=1,
)
dn_item_map = {}
for item in dn_items:
dn_item_map.setdefault(item.project, item.amount)
for item in si_items: | style: format code with black | get_delivered_items_cost | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | project_wise_stock_tracking.py | 10 | 22 | https://github.com/frappe/erpnext.git | 3 | 74 | 0 | 19 | 116 | Python | {
"docstring": "select dn.project, sum(dn_item.base_net_amount) as amount\n\t\tfrom `tabDelivery Note` dn, `tabDelivery Note Item` dn_item\n\t\twhere dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project, '') != ''\n\t\tgroup by dn.projectselect si.project, sum(si_item.base_net_amount) as amount\n\t\tfr... | def get_delivered_items_cost():
dn_items = frappe.db.sql(
,
as_dict=1,
)
si_items = frappe.db.sql(
,
as_dict=1,
)
dn_item_map = {}
for item in dn_items:
dn_item_map.setdefault(item.project, item.amount)
for item in si_items:
dn_item_map.setdefault(item.project, item.amount)
return dn_item_map
| |
49,709 | 200,574 | 508 | sympy/tensor/tensor.py | 186 | 61 | def __new__(cls, *args, **kw_args):
is_canon_bp = kw_args.get('is_canon_bp', False)
args = list(map(_sympify, args))
free = [get_free_indices(arg) for arg in args]
free = set(itertools.chain(*free)) #flatten free
newargs = []
for arg in args:
dum_thi... | move dummy index deduping to TensMul.__new__
Also removed _eval_subs and _xreplace. All tests pass. | __new__ | 6c55ca197b0f795047d8f8ee0d871ab36600d560 | sympy | tensor.py | 13 | 37 | https://github.com/sympy/sympy.git | 10 | 348 | 0 | 107 | 648 | Python | {
"docstring": "\n If the internal dummy indices in one arg conflict with the free indices of the remaining args, we need to rename those internal dummy indices.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 25,
"vocab_size": 20
} | def __new__(cls, *args, **kw_args):
is_canon_bp = kw_args.get('is_canon_bp', False)
args = list(map(_sympify, args))
free = [get_free_indices(arg) for arg in args]
free = set(itertools.chain(*free)) #flatten free
newargs = []
for arg in args:
dum_thi... | |
19,308 | 96,402 | 178 | tests/sentry/incidents/test_action_handlers.py | 38 | 19 | def test_context_for_crash_rate_alert(self):
status = TriggerStatus.ACTIVE
incident = self.create_incident()
alert_rule = self.create_alert_rule(
aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"
)
alert_rule_trigger = self.cre... | fix(metric_alerts): Make sure critical triggers resolve properly when no action is set on a warning trigger (#31883)
### Problem
If we have an alert set up like:
- Warning: 50. Action: None
- Critical: 100. Action: Slack
Then if we go from critical -> warning state the slack resolve action will fail to fire.
... | test_context_for_crash_rate_alert | 146fba432a32568be7d0b884dae0c39a6c33a11f | sentry | test_action_handlers.py | 11 | 16 | https://github.com/getsentry/sentry.git | 1 | 76 | 0 | 32 | 124 | Python | {
"docstring": "\n Test that ensures the metric name for Crash rate alerts excludes the alias\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 12
} | def test_context_for_crash_rate_alert(self):
status = TriggerStatus.ACTIVE
incident = self.create_incident()
alert_rule = self.create_alert_rule(
aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"
)
alert_rule_trigger = self.cre... | |
51,471 | 206,293 | 302 | django/template/loaders/base.py | 49 | 13 | def get_template(self, template_name, skip=None):
tried = []
for origin in self.get_template_sources(template_name):
if skip is n | Refs #33476 -- Reformatted code with Black. | get_template | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | base.py | 14 | 19 | https://github.com/django/django.git | 5 | 98 | 0 | 43 | 155 | Python | {
"docstring": "\n Call self.get_template_sources() and return a Template object for\n the first template matching template_name. If skip is provided, ignore\n template origins in skip. This is used to avoid recursion during\n template extending.\n ",
"language": "en",
"n_whites... | def get_template(self, template_name, skip=None):
tried = []
for origin in self.get_template_sources(template_name):
if skip is not None and origin in skip:
tried.append((origin, "Skipped to avoid recursion"))
continue
try:
... | |
@keras_export("keras.optimizers.experimental.Optimizer", v1=[]) | 81,373 | 275,289 | 103 | keras/optimizers/optimizer_experimental/optimizer.py | 24 | 10 | def from_config(cls, config):
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = learning_rate_schedule.deserialize(
config["learning_rate"]
)
return cls( | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | from_config | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | optimizer.py | 14 | 7 | https://github.com/keras-team/keras.git | 3 | 44 | 1 | 21 | 104 | Python | {
"docstring": "Creates an optimizer from its config.\n\n This method is the reverse of `get_config`, capable of instantiating the\n same optimizer from the config dictionary.\n\n Args:\n config: A Python dictionary, typically the output of get_config.\n\n Returns:\n ... | def from_config(cls, config):
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = learning_rate_schedule.deserialize(
config["learning_rate"]
)
return cls(**config)
base_optimizer... |
7,726 | 42,764 | 238 | airflow/providers/amazon/aws/log/s3_task_handler.py | 92 | 19 | def close(self):
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
... | Light Refactor and Clean-up AWS Provider (#23907) | close | 595981c8ad3cfeb4ad7a4514d00060e978aa9d81 | airflow | s3_task_handler.py | 12 | 12 | https://github.com/apache/airflow.git | 4 | 93 | 0 | 68 | 158 | Python | {
"docstring": "Close and upload local log file to remote storage S3.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def close(self):
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
... | |
50,242 | 203,167 | 41 | tests/view_tests/views.py | 15 | 10 | def safestring_in_template_exception(request):
template = Template('{% extends "<script>alert(1);</script>" %}')
try:
template.render(Cont | Fixed #33461 -- Escaped template errors in the technical 500 debug page. | safestring_in_template_exception | c5c7a15b09368a58340d3a65ba9d1f1441e92dc8 | django | views.py | 13 | 6 | https://github.com/django/django.git | 2 | 37 | 0 | 15 | 67 | Python | {
"docstring": "\n Trigger an exception in the template machinery which causes a SafeString\n to be inserted as args[0] of the Exception.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 19,
"vocab_size": 18
} | def safestring_in_template_exception(request):
template = Template('{% extends "<script>alert(1);</script>" %}')
try:
template.render(Context())
except Exception:
return technical_500_response(request, *sys.exc_info())
| |
115,017 | 316,439 | 18 | tests/test_config_entries.py | 9 | 6 | async def test_unique_id_in_progress(hass, manager):
mock_integration(hass, Mo | Search/replace RESULT_TYPE_* by FlowResultType enum (#74642) | test_unique_id_in_progress | 7cd68381f1d4f58930ffd631dfbfc7159d459832 | core | test_config_entries.py | 10 | 17 | https://github.com/home-assistant/core.git | 1 | 127 | 0 | 9 | 45 | Python | {
"docstring": "Test that we abort if there is already a flow in progress with same unique id.",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 16
} | async def test_unique_id_in_progress(hass, manager):
mock_integration(hass, MockModule("comp"))
mock_entity_platform(hass, "config_flow.comp", None)
| |
73,285 | 250,140 | 2,689 | tests/storage/test_event_chain.py | 338 | 55 | def test_simple(self) -> None:
event_factory = self.hs.get_event_builder_factory()
bob = "@creator:test"
alice = "@alice:test"
room_id = "!room:test"
# Ensure that we have a rooms entry so that we generate the chain index.
self.get_success(
self.sto... | Require types in tests.storage. (#14646)
Adds missing type hints to `tests.storage` package
and does not allow untyped definitions. | test_simple | 3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b | synapse | test_event_chain.py | 17 | 175 | https://github.com/matrix-org/synapse.git | 3 | 808 | 0 | 148 | 1,276 | Python | {
"docstring": "Test that the example in `docs/auth_chain_difference_algorithm.md`\n works.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 7,
"vocab_size": 7
} | def test_simple(self) -> None:
event_factory = self.hs.get_event_builder_factory()
bob = "@creator:test"
alice = "@alice:test"
room_id = "!room:test"
# Ensure that we have a rooms entry so that we generate the chain index.
self.get_success(
self.sto... | |
94,400 | 295,382 | 273 | homeassistant/components/withings/common.py | 69 | 14 | async def _do_retry(self, func, attempts=3) -> Any:
# pylint: disable=no-self-use
exception = None
for attempt in range(1, attempts + 1):
_LOGGER.debug("Attempt %s of %s", attempt, attempts)
try:
return await func()
except Exception as... | Fix withings race condition for access token (#69107) | _do_retry | ccd5ada3414b8b51835a7a29b2e5c2a70464987f | core | common.py | 14 | 19 | https://github.com/home-assistant/core.git | 4 | 83 | 0 | 56 | 135 | Python | {
"docstring": "Retry a function call.\n\n Withings' API occasionally and incorrectly throws errors. Retrying the call tends to work.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 17,
"vocab_size": 17
} | async def _do_retry(self, func, attempts=3) -> Any:
# pylint: disable=no-self-use
exception = None
for attempt in range(1, attempts + 1):
_LOGGER.debug("Attempt %s of %s", attempt, attempts)
try:
return await func()
except Exception as... | |
55,526 | 218,881 | 343 | python3.10.4/Lib/lib2to3/refactor.py | 97 | 30 | def refactor_doctest(self, block, lineno, indent, filename):
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.... | add python 3.10.4 for windows | refactor_doctest | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | refactor.py | 17 | 20 | https://github.com/XX-net/XX-Net.git | 8 | 195 | 0 | 66 | 313 | Python | {
"docstring": "Refactors one doctest.\n\n A doctest is given as a block of lines, the first of which starts\n with \">>>\" (possibly indented), while the remaining lines start\n with \"...\" (identically indented).\n\n ",
"language": "en",
"n_whitespaces": 58,
"n_words": 30,
"voca... | def refactor_doctest(self, block, lineno, indent, filename):
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.... | |
25,567 | 115,819 | 41 | mindsdb/integrations/handlers/bigquery_handler/bigquery_handler.py | 13 | 8 | def get_tables(self, dataset_id) -> Response:
client = self.connect()
| Add handler | get_tables | cbe6767de6152a78348a8047244e5e3305b24e04 | mindsdb | bigquery_handler.py | 8 | 7 | https://github.com/mindsdb/mindsdb.git | 1 | 27 | 0 | 11 | 46 | Python | {
"docstring": "\n Get a list with all of the tabels in BigQuery\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def get_tables(self, dataset_id) -> Response:
client = self.connect()
result = client.list_tables(dataset_id)
return result
| |
77,401 | 262,841 | 130 | PyInstaller/depend/dylib.py | 76 | 17 | def mac_set_relative_dylib_deps(libname, distname):
from macholib import util
from macholib.MachO import MachO
# Ignore bootloader; otherwise PyInstaller fails with exception like
# 'ValueError: total_size > low_offset (288 > 0)'
if os.path.basename(libname) in _BOOTLOADER_FNAMES:
ret... | Fix typos (#6782) [skip ci] | mac_set_relative_dylib_deps | 1a7d704ffbabb433007e3ba04750c2f13ade48e5 | pyinstaller | dylib.py | 15 | 21 | https://github.com/pyinstaller/pyinstaller.git | 5 | 141 | 0 | 61 | 132 | Python | {
"docstring": "\n On Mac OS set relative paths to dynamic library dependencies of `libname`.\n\n Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH. There are known some issues with\n DYLD_LIBRARY_PATH. Relative paths is more flexible mechanism.\n\n Current location of dependent l... | def mac_set_relative_dylib_deps(libname, distname):
from macholib import util
from macholib.MachO import MachO
# Ignore bootloader; otherwise PyInstaller fails with exception like
# 'ValueError: total_size > low_offset (288 > 0)'
if os.path.basename(libname) in _BOOTLOADER_FNAMES:
ret... | |
81,643 | 276,381 | 90 | keras/testing_infra/test_utils.py | 23 | 9 | def get_v2_optimizer(name, **kwargs):
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | get_v2_optimizer | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | test_utils.py | 16 | 9 | https://github.com/keras-team/keras.git | 2 | 42 | 0 | 22 | 73 | Python | {
"docstring": "Get the v2 optimizer requested.\n\n This is only necessary until v2 are the default, as we are testing in Eager,\n and Eager + v1 optimizers fail tests. When we are in v2, the strings alone\n should be sufficient, and this mapping can theoretically be removed.\n\n Args:\n name: string... | def get_v2_optimizer(name, **kwargs):
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
raise ValueError(
"Could not find requested v2 optimizer: {}\nValid choices: {}".format(
name, list(_V2_OPTIMIZER_MAP.keys())
)
)
| |
55,366 | 218,525 | 197 | python3.10.4/Lib/ipaddress.py | 52 | 14 | def _prefix_from_ip_int(cls, ip_int):
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
| add python 3.10.4 for windows | _prefix_from_ip_int | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | ipaddress.py | 11 | 12 | https://github.com/XX-net/XX-Net.git | 2 | 74 | 0 | 40 | 120 | Python | {
"docstring": "Return prefix length from the bitwise netmask.\n\n Args:\n ip_int: An integer, the netmask in expanded bitwise format\n\n Returns:\n An integer, the prefix length.\n\n Raises:\n ValueError: If the input intermingles zeroes & ones\n ",
"lan... | def _prefix_from_ip_int(cls, ip_int):
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixle... | |
24,361 | 111,228 | 102 | spacy/pipeline/entity_linker.py | 21 | 11 | def batch_has_learnable_example(self, examples):
for eg in examples:
for ent in eg.predicted.ents:
candidates = list(self.get_candidates(self.kb, ent))
if candidates:
return True
return False
| Fix entity linker batching (#9669)
* Partial fix of entity linker batching
* Add import
* Better name
* Add `use_gold_ents` option, docs
* Change to v2, create stub v1, update docs etc.
* Fix error type
Honestly no idea what the right type to use here is.
ConfigValidationError seems wrong. Maybe a N... | batch_has_learnable_example | 91acc3ea75d219ad07ed2b106e7b8bdcb01516dd | spaCy | entity_linker.py | 15 | 7 | https://github.com/explosion/spaCy.git | 4 | 44 | 0 | 18 | 69 | Python | {
"docstring": "Check if a batch contains a learnable example.\n\n If one isn't present, then the update step needs to be skipped.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 20,
"vocab_size": 19
} | def batch_has_learnable_example(self, examples):
for eg in examples:
for ent in eg.predicted.ents:
candidates = list(self.get_candidates(self.kb, ent))
if candidates:
return True
return False
| |
17,165 | 81,164 | 56 | awx/main/tasks/callback.py | 17 | 8 | def get_delayed_update_fields(self):
self.extra_update_fields['emitted_events'] = self.event_ct
if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''):
self.delay_up | Delay update of artifacts and error fields until final job save (#11832)
* Delay update of artifacts until final job save
Save tracebacks from receptor module to callback object
Move receptor traceback check up to be more logical
Use new mock_me fixture to avoid DB call with me method
Update the special ru... | get_delayed_update_fields | 452744b67e02823879e722fe574984a2d760ed60 | awx | callback.py | 10 | 5 | https://github.com/ansible/awx.git | 2 | 42 | 0 | 17 | 76 | Python | {
"docstring": "Return finalized dict of all fields that should be saved along with the job status change",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 16
} | def get_delayed_update_fields(self):
self.extra_update_fields['emitted_events'] = self.event_ct
if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''):
self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE)
return sel... | |
15,508 | 70,442 | 89 | wagtail/search/tests/test_indexed_class.py | 20 | 7 | def get_checks_result(warning_id=None):
checks_result = checks.run_checks()
if warning_id:
return [
warning for warning in
| add check for correct search_fields on pages
- fixes #4940 | get_checks_result | d964675ee8fcb7ea58681ac8869733a86d58e4ec | wagtail | test_indexed_class.py | 11 | 7 | https://github.com/wagtail/wagtail.git | 4 | 34 | 0 | 15 | 56 | Python | {
"docstring": "Run Django checks on any with the 'search' tag used when registering the check",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | def get_checks_result(warning_id=None):
checks_result = checks.run_checks()
if warning_id:
return [
warning for warning in
checks_result if warning.id == warning_id]
return checks_result
| |
40,077 | 167,693 | 16 | pandas/core/config_init.py | 11 | 9 | def use_bottleneck_cb(key) -> None:
| TYP: return values in core/*.py (#47587)
* TYP: return values in core/*.py
* fix test
* to_html
* to_html part 2
* DataFrame.query
* more overloads
* fix query?
* increase stacklevel by one
* fix rename_axis
* and an overload for DataFrame.eval
* address comments
* fix typevar | use_bottleneck_cb | 9612375ca28ade056f15d4338f1bfde5d045c9fc | pandas | config_init.py | 9 | 3 | https://github.com/pandas-dev/pandas.git | 1 | 24 | 0 | 11 | 46 | Python | {
"docstring": "\n: bool\n Use the numexpr library to accelerate computation if it is installed,\n the default is True\n Valid values: False,True\n",
"language": "en",
"n_whitespaces": 28,
"n_words": 20,
"vocab_size": 18
} | def use_bottleneck_cb(key) -> None:
from pandas.core import nanops
nanops.set_use_bottleneck(cf.get_option(key))
use_numexpr_doc =
| |
39,578 | 164,647 | 92 | pandas/tests/io/test_stata.py | 19 | 10 | def test_repeated_column_labels(self, datapath):
# GH 13923, 2577 | TST: Don't use autouse fixture in test_stata (#45831) | test_repeated_column_labels | c055dc4e6be9fc1b68d873a1ace286322dadd5e1 | pandas | test_stata.py | 12 | 16 | https://github.com/pandas-dev/pandas.git | 1 | 40 | 0 | 19 | 73 | Python | {
"docstring": "\nValue labels for column ethnicsn are not unique. These cannot be converted to\npandas categoricals.\n\nEither read the file with `convert_categoricals` set to False or use the\nlow level interface in `StataReader` to separately read the values and the\nvalue_labels.\n\nThe repeated labels are:\\n-+\... | def test_repeated_column_labels(self, datapath):
# GH 13923, 25772
msg =
with pytest.raises(ValueError, match=msg):
read_stata(
datapath("io", "data", "stata", "stata15.dta"),
convert_categoricals=True,
)
| |
53,306 | 212,643 | 190 | PySimpleGUI.py | 70 | 17 | def string_width_in_pixels(cls, font, string):
# if no windows have been created (there is no hidden master root to rely on) then temporarily make a window so the meas | Enable Text class methods to be called prior to any windows being created: string_width_in_pixels, char_height_in_pixels, char_width_in_pixels. Removed destruction of hidden master root from popup_get_file & popup_get_folder (was old code) | string_width_in_pixels | acaae54a1ade24b2e55f7274ae4db747160a38db | PySimpleGUI | PySimpleGUI.py | 13 | 13 | https://github.com/PySimpleGUI/PySimpleGUI.git | 4 | 75 | 0 | 56 | 128 | Python | {
"docstring": "\n Get the with of the supplied string in pixels for the font being passed in.\n If an error occurs, 0 will be returned\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike... | def string_width_in_pixels(cls, font, string):
# if no windows have been created (there is no hidden master root to rely on) then temporarily make a window so the measurement can happen
if Window.NumOpenWindows == 0:
root = tk.Tk()
else:
root = None
siz... | |
81,601 | 276,243 | 41 | keras/saving/saving_utils.py | 19 | 7 | def model_call_inputs(model, keep_original_batch_size=False):
input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)
if input_specs is None:
return None, None
input_specs = | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | model_call_inputs | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | saving_utils.py | 10 | 6 | https://github.com/keras-team/keras.git | 2 | 38 | 0 | 14 | 63 | Python | {
"docstring": "Inspect model to get its input signature.\n\n The model's input signature is a list with a single (possibly-nested) object.\n This is due to the Keras-enforced restriction that tensor inputs must be\n passed in as the first argument.\n\n For example, a model with input {'feature1': <Tensor... | def model_call_inputs(model, keep_original_batch_size=False):
input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)
if input_specs is None:
return None, None
input_specs = _enforce_names_consistency(input_specs)
return input_specs
| |
42,862 | 178,927 | 595 | nuitka/plugins/standard/DataFileCollectorPlugin.py | 139 | 40 | def _getSubDirectoryFolders(self, module, sub_dirs):
module_dir = module.getCompileTimeDirectory()
file_list = []
data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs]
# Gather the full file list, probably makes no sense to include bytecode files
file_... | Plugins: Massive cleanup of data file handling
* Move data file handling out of standalone only, allowing support
for other modes as well.
* Attach logger and tags to data file objects. | _getSubDirectoryFolders | abfb99b0a05dd76d2ecc6ebc20732a271857c6c8 | Nuitka | DataFileCollectorPlugin.py | 14 | 42 | https://github.com/Nuitka/Nuitka.git | 8 | 232 | 0 | 95 | 376 | Python | {
"docstring": "Get dirnames in given subdirs of the module.\n\n Notes:\n All dirnames in folders below one of the sub_dirs are recursively\n retrieved and returned shortened to begin with the string of subdir.\n Args:\n module: module object\n sub_dirs: sub f... | def _getSubDirectoryFolders(self, module, sub_dirs):
module_dir = module.getCompileTimeDirectory()
file_list = []
data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs]
# Gather the full file list, probably makes no sense to include bytecode files
file_... | |
18,045 | 85,795 | 233 | tests/sentry/api/endpoints/test_organization_metric_data.py | 33 | 23 | def test_orderby_percentile_with_many_fields_one_entity_no_data(self):
for metric in [
TransactionMRI.MEASUREMENTS_FCP.value,
"transaction",
]:
perf_indexer_record(self.organization.id, metric)
response = self.get_success_response(
self.or... | feat(metrics): Support rate for derived metric [TET-129 TET-127] (#38792)
Adds support for operation `rate` to be able to compute performance
related metrics such as tpm, tps, epm, eps
This PR achieves this by:
- Defining rate as a derived operation that produces its own SnQL rather
than trying to compute the da... | test_orderby_percentile_with_many_fields_one_entity_no_data | 35ec251212b82e5d9468062a3ab5945d8e739002 | sentry | test_organization_metric_data.py | 14 | 20 | https://github.com/getsentry/sentry.git | 2 | 94 | 0 | 32 | 181 | Python | {
"docstring": "\n Test that ensures that when metrics data is available then an empty response is returned\n gracefully\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 14
} | def test_orderby_percentile_with_many_fields_one_entity_no_data(self):
for metric in [
TransactionMRI.MEASUREMENTS_FCP.value,
"transaction",
]:
perf_indexer_record(self.organization.id, metric)
response = self.get_success_response(
self.or... | |
20,973 | 101,563 | 133 | lib/training/preview_tk.py | 20 | 15 | def _add_save_button(self) -> None:
logger.debug("Adding save button")
button = tk.Button(self,
text="Save",
cursor="hand2",
command=lambda: self.save_var.set(True))
button.pack(side=tk.LEFT)
logger... | Training - Use custom preview pop-out | _add_save_button | 7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5 | faceswap | preview_tk.py | 13 | 9 | https://github.com/deepfakes/faceswap.git | 1 | 61 | 0 | 19 | 104 | Python | {
"docstring": " Add a save button for saving out original preview ",
"language": "en",
"n_whitespaces": 10,
"n_words": 9,
"vocab_size": 9
} | def _add_save_button(self) -> None:
logger.debug("Adding save button")
button = tk.Button(self,
text="Save",
cursor="hand2",
command=lambda: self.save_var.set(True))
button.pack(side=tk.LEFT)
logger... | |
10,804 | 53,433 | 145 | src/prefect/context.py | 38 | 11 | def temporary_environ_defaults(**kwargs):
old_env = os.environ.copy()
try:
for var in kwargs:
# TODO: Consider warning on conflicts
os.environ.setdefault(var, str(kwargs[var]))
yield {var: os.environ[var] for var in kwargs}
finally:
for var in kwargs:
... | Introduce basic profile context management | temporary_environ_defaults | a9e67e2311c1e4a056b9e740cc739360896aab92 | prefect | context.py | 16 | 12 | https://github.com/PrefectHQ/prefect.git | 6 | 92 | 0 | 29 | 147 | Python | {
"docstring": "\n Temporarily override default values in os.environ.\n\n Yields a dictionary of the key/value pairs matching the provided keys.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 17,
"vocab_size": 16
} | def temporary_environ_defaults(**kwargs):
old_env = os.environ.copy()
try:
for var in kwargs:
# TODO: Consider warning on conflicts
os.environ.setdefault(var, str(kwargs[var]))
yield {var: os.environ[var] for var in kwargs}
finally:
for var in kwargs:
... | |
18,790 | 91,706 | 264 | tests/sentry/api/endpoints/test_organization_metric_details.py | 53 | 22 | def test_same_entity_multiple_metric_ids_missing_data(self, mocked_derived_metrics):
mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2
_indexer_record(self.organization.id, "metric_foo_doe")
self.store_sessi | feat(metrics): make indexer more configurable (#35604)
This makes the sentry_metrics indexer more configurable in the following ways, to enable indexing on the ingest-performance-metrics topic:
- configurable input Kafka topic
- configurable output Kafka topic
- configurable model from which to pull index results... | test_same_entity_multiple_metric_ids_missing_data | 7f60db924ea37f34e0cfe6856777239e2a2ffe13 | sentry | test_organization_metric_details.py | 15 | 21 | https://github.com/getsentry/sentry.git | 1 | 97 | 0 | 43 | 169 | Python | {
"docstring": "\n Test when not requested metrics have data in the dataset\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def test_same_entity_multiple_metric_ids_missing_data(self, mocked_derived_metrics):
mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2
_indexer_record(self.organization.id, "metric_foo_doe")
self.store_session(
self.build_session(
project_id=self... | |
81,784 | 276,932 | 126 | keras/utils/kernelized_utils.py | 62 | 15 | def _align_matrices(x, y):
x_matrix = _to_matrix(x)
y_matrix = _to_matrix(y)
x_shape = x_matrix.shape
y_shape = y_matrix.shape
if y_shape[1] != x_shape[1]: # dimensions do | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _align_matrices | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | kernelized_utils.py | 13 | 13 | https://github.com/keras-team/keras.git | 2 | 104 | 0 | 51 | 176 | Python | {
"docstring": "Aligns x and y tensors to allow computations over pairs of their rows.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def _align_matrices(x, y):
x_matrix = _to_matrix(x)
y_matrix = _to_matrix(y)
x_shape = x_matrix.shape
y_shape = y_matrix.shape
if y_shape[1] != x_shape[1]: # dimensions do not match.
raise ValueError(
"The outermost dimensions of the input tensors should match. "
... | |
72,243 | 248,366 | 454 | tests/storage/test_events.py | 131 | 32 | def test_prune_gap_if_dummy_local(self):
body = self.helper.send(self.room_id, body="Test", tok=self.token)
body = self.helper.send_event(
self.room_id, type=EventTypes.Dummy, content={}, tok=self.token
)
local_message_event_id = body["event_id"]
self.asser... | Pull out less state when handling gaps mk2 (#12852) | test_prune_gap_if_dummy_local | b83bc5fab57b37f75a79d02213d6032c586fd36e | synapse | test_events.py | 13 | 27 | https://github.com/matrix-org/synapse.git | 1 | 191 | 0 | 96 | 320 | Python | {
"docstring": "Test that we don't drop extremities after a gap when the previous\n extremity is a local dummy event and points to local events.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 23,
"vocab_size": 21
} | def test_prune_gap_if_dummy_local(self):
body = self.helper.send(self.room_id, body="Test", tok=self.token)
body = self.helper.send_event(
self.room_id, type=EventTypes.Dummy, content={}, tok=self.token
)
local_message_event_id = body["event_id"]
self.asser... | |
79,929 | 269,147 | 64 | keras/saving/saved_model/save_impl.py | 27 | 9 | def _reset_layer_losses(parent_layer):
losses_dict = {}
for layer in utils.list_all_layers_and_sublayers(parent_layer):
losses_dict[layer] = {
'losses': layer._losses[:],
'eager_losses | Support Keras saving/loading for ShardedVariables with arbitrary partitions.
PiperOrigin-RevId: 439837516 | _reset_layer_losses | e61cbc52fd3b0170769c120e9b8dabc8c4205322 | keras | save_impl.py | 12 | 11 | https://github.com/keras-team/keras.git | 2 | 66 | 0 | 22 | 113 | Python | {
"docstring": "Resets losses of layer and its sublayers, and returns original losses.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | def _reset_layer_losses(parent_layer):
losses_dict = {}
for layer in utils.list_all_layers_and_sublayers(parent_layer):
losses_dict[layer] = {
'losses': layer._losses[:],
'eager_losses': layer._eager_losses[:]
}
with utils.no_automatic_dependency_tracking_scope(layer):
layer._lo... | |
51,815 | 206,971 | 438 | tests/admin_changelist/tests.py | 139 | 40 | def test_result_list_editable_html(self):
new_parent = Parent.objects.create(name="parent")
new_child = Child.objects.create(name="name", parent=new_parent)
request = self.factory.get("/child/")
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
# ... | Refs #33476 -- Reformatted code with Black. | test_result_list_editable_html | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 11 | 34 | https://github.com/django/django.git | 1 | 186 | 0 | 99 | 330 | Python | {
"docstring": "\n Regression tests for #11791: Inclusion tag result_list generates a\n table and this checks that the items are nested within the table\n element tags.\n Also a regression test for #13599, verifies that hidden fields\n when list_editable is enabled are rendered in a... | def test_result_list_editable_html(self):
new_parent = Parent.objects.create(name="parent")
new_child = Child.objects.create(name="name", parent=new_parent)
request = self.factory.get("/child/")
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
# ... | |
16,677 | 77,580 | 337 | wagtail/admin/tests/ui/test_tables.py | 51 | 27 | def test_title_column(self):
root_page = Page.objects.filter(depth=2).first()
blog = Site.objects.create(
| Allow passing arbitrary link attributes to TitleColumn | test_title_column | 5994cc43dfc5cc1ed891ab78eff3a3bcf56f6830 | wagtail | test_tables.py | 15 | 51 | https://github.com/wagtail/wagtail.git | 1 | 136 | 0 | 40 | 223 | Python | {
"docstring": "\n <table class=\"listing\">\n <thead>\n <tr><th>Hostname</th><th>Site name</th></tr>\n </thead>\n <tbody>\n <tr>\n <td class=\"title\">\n <div class=\"title-... | def test_title_column(self):
root_page = Page.objects.filter(depth=2).first()
blog = Site.objects.create(
hostname="blog.example.com", site_name="My blog", root_page=root_page
)
gallery = Site.objects.create(
hostname="gallery.example.com", site_name="My gallery",... | |
6,233 | 34,270 | 149 | src/transformers/models/realm/tokenization_realm.py | 32 | 11 | def _clean_text(self, text):
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append | Add FastTokenizer to REALM (#15211)
* Remove BertTokenizer abstraction
* Add FastTokenizer to REALM
* Fix config archive map
* Fix copies
* Update realm.mdx
* Apply suggestions from code review | _clean_text | 841d979190319098adc8101f9820a02ee3be4c8b | transformers | tokenization_realm.py | 12 | 11 | https://github.com/huggingface/transformers.git | 6 | 65 | 0 | 26 | 112 | Python | {
"docstring": "Performs invalid character removal and whitespace cleanup on text.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _clean_text(self, text):
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(cha... | |
84,941 | 284,723 | 258 | openbb_terminal/cryptocurrency/crypto_controller.py | 27 | 20 | def call_candle(self, other_args):
if self.symbol:
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="candle",
description=,
)
ns_parser = pa... | refactoring load, changed chart to candle (#1838)
* refactoring load, changed chart to candle
* updating load
* refactor done, missing tests
* fixed chart
* refactor
* linting
* tests failing
* fix minh issues
* auto completion for load
* linting
* Tests : cryptocurrency/controller ; remo... | call_candle | 0e03b9e9e41aaa61cdec5d674a9f2c64ab8d3394 | OpenBBTerminal | crypto_controller.py | 13 | 18 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 3 | 72 | 0 | 23 | 111 | Python | {
"docstring": "Process candle commandDisplay chart for loaded coin. You can specify currency vs which you want\n to show chart and also number of days to get data for.",
"language": "en",
"n_whitespaces": 41,
"n_words": 27,
"vocab_size": 25
} | def call_candle(self, other_args):
if self.symbol:
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="candle",
description=,
)
ns_parser = pa... | |
88,940 | 289,805 | 355 | tests/components/bayesian/test_binary_sensor.py | 72 | 14 | async def test_load_values_when_added_to_hass(hass):
config = {
"binary_sensor": {
| Bayesian - support `unique_id:` (#79879)
* support unique_id
* adds test for unique_ids | test_load_values_when_added_to_hass | fe7402375d2f899a7edd6ac326d2c1998b4c43da | core | test_binary_sensor.py | 14 | 33 | https://github.com/home-assistant/core.git | 1 | 183 | 0 | 55 | 319 | Python | {
"docstring": "Test that sensor initializes with observations of relevant entities.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | async def test_load_values_when_added_to_hass(hass):
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"unique_id": "3b4c9563-5e84-4167-8fe7-8f507e796d72",
"device_class": "connectivity",
"observations": [
... | |
9,308 | 47,940 | 157 | tests/providers/databricks/operators/test_databricks_sql.py | 24 | 16 | def test_copy_with_target_credential(self):
expression = "col1, col2"
op = DatabricksCopyIntoOperator(
file_location=COPY_FILE_LOCATION,
file_format='CSV',
table_name='test',
task_id=TASK_ID,
expression_list=expression,
storage_cred... | Update to the released version of DBSQL connector
Also added additional parameters for further customization of connection
if it's required | test_copy_with_target_credential | 6a3d6cc32b4e3922d259c889460fe82e0ebf3663 | airflow | test_databricks_sql.py | 12 | 18 | https://github.com/apache/airflow.git | 1 | 60 | 0 | 22 | 109 | Python | {
"docstring": "COPY INTO test WITH (CREDENTIAL abc)\nFROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') ))\nFILEFORMAT = CSV\n",
"language": "en",
"n_whitespaces": 17,
"n_words": 20,
"vocab_size": 16
} | def test_copy_with_target_credential(self):
expression = "col1, col2"
op = DatabricksCopyIntoOperator(
file_location=COPY_FILE_LOCATION,
file_format='CSV',
table_name='test',
task_id=TASK_ID,
expression_list=expression,
storage_cred... | |
12,219 | 60,607 | 189 | .venv/lib/python3.8/site-packages/pip/_internal/commands/debug.py | 58 | 12 | def show_actual_vendor_versions(vendor_txt_versions):
# type: (Dict[str, str]) -> None
for module_name, expected_version in vendor_txt_versions.items():
extra_message = ''
actual_version = get_vendor_version_from_module(module_name)
if not actual_version:
extra_message =... | upd; format | show_actual_vendor_versions | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | debug.py | 15 | 12 | https://github.com/jindongwang/transferlearning.git | 4 | 71 | 0 | 45 | 126 | Python | {
"docstring": "Log the actual version and print extra info if there is\n a conflict or if the actual version could not be imported.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 22,
"vocab_size": 18
} | def show_actual_vendor_versions(vendor_txt_versions):
# type: (Dict[str, str]) -> None
for module_name, expected_version in vendor_txt_versions.items():
extra_message = ''
actual_version = get_vendor_version_from_module(module_name)
if not actual_version:
extra_message =... | |
22,383 | 106,467 | 45 | youtube_dl/utils.py | 26 | 9 | def escape_rfc3986(s):
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf- | [utils] Ensure RFC3986 encoding result is unicode | escape_rfc3986 | 556862bc911bb54435b7b0b01451789b884b0390 | youtube-dl | utils.py | 11 | 4 | https://github.com/ytdl-org/youtube-dl.git | 3 | 45 | 0 | 26 | 75 | Python | {
"docstring": "Escape non-ASCII characters as suggested by RFC 3986",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986."""
    # Python 2's quote() wants a byte string, so encode unicode input first.
    needs_encoding = sys.version_info < (3, 0) and isinstance(s, compat_str)
    raw = s.encode('utf-8') if needs_encoding else s
    # ensure unicode: after quoting, the result can always be converted
    return compat_str(compat_urllib_parse.quote(raw, b"%/;:@&=+$,!~*'()?#[]"))
| |
83,534 | 281,122 | 158 | gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py | 24 | 18 | def call_social(self, other_args):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="social",
description=,
)
ns_parser = parse_known_args_and_warn(
parser, other_ar... | Crypto menu refactor (#1119)
* enabled some crypto commands in dd to be called independent of source loaded
* support for coin_map_df in all dd functions + load ta and plot chart refactor
* updated tests and removed coingecko scrapping where possible
* removed ref of command from hugo
* updated pycoingecko... | call_social | ea964109d654394cc0a5237e6ec5510ba6404097 | OpenBBTerminal | dd_controller.py | 11 | 15 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 2 | 63 | 0 | 21 | 100 | Python | {
"docstring": "Process social commandShows social media corresponding to loaded coin. You can find there name of\n telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord.",
"language": "en",
"n_whitespaces": 35,
"n_words": 25,
"vocab_size": 23
} | def call_social(self, other_args):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="social",
description=,
)
ns_parser = parse_known_args_and_warn(
parser, other_ar... | |
17,999 | 85,476 | 174 | tests/snuba/api/endpoints/test_project_event_details.py | 36 | 21 | def test_ignores_different_group(self):
url = reverse(
"sentry-api-0-project-event-details",
kwargs={
"event_id": self.next_transaction_event.event_id,
"project_slug": self.next_transaction_event.project.slug,
"organization_slug": ... | feat(perf issues): Return prev/next for transaction events (#38274)
* feat(perf issues): Return prev/next for transaction events | test_ignores_different_group | 6d7681529f68a87e41d4c11a4aa1e6732cb15ade | sentry | test_project_event_details.py | 15 | 14 | https://github.com/getsentry/sentry.git | 1 | 117 | 0 | 32 | 195 | Python | {
"docstring": "Test that a different group's events aren't attributed to the one that was passed",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | def test_ignores_different_group(self):
url = reverse(
"sentry-api-0-project-event-details",
kwargs={
"event_id": self.next_transaction_event.event_id,
"project_slug": self.next_transaction_event.project.slug,
"organization_slug": ... | |
49,706 | 200,571 | 258 | sympy/tensor/tensor.py | 67 | 26 | def _dedupe_indices(new, exclude, index_structure):
inds_self = set(exclude)
dums_new = set(get_dummy_indices(new))
conflicts = dums_new.intersection(inds_self)
if len(conflicts) == 0:
return None
inds_self.update(dums_new)
self_args_free =... | _dedupe_indices: convert to staticmethod
index_structure is now an additional argument | _dedupe_indices | b5f5ec455e7d003fa214a215475a3fa2407760cc | sympy | tensor.py | 13 | 26 | https://github.com/sympy/sympy.git | 6 | 150 | 0 | 45 | 242 | Python | {
"docstring": "\n exclude: set\n new: TensExpr\n index_structure: _IndexStructure (required to generate new dummy indices)\n\n If ``new`` has any dummy indices that are in ``exclude``, return a version\n of new with those indices replaced. If no replacements are needed,\n re... | def _dedupe_indices(new, exclude, index_structure):
inds_self = set(exclude)
dums_new = set(get_dummy_indices(new))
conflicts = dums_new.intersection(inds_self)
if len(conflicts) == 0:
return None
inds_self.update(dums_new)
self_args_free =... | |
23,916 | 110,082 | 711 | lib/matplotlib/spines.py | 174 | 39 | def get_window_extent(self, renderer=None):
# make sure the location is updated so that transforms etc are correct:
self._adjust_location()
bb = super().get_window_extent(renderer=renderer)
if self.axis is None or not self.axis.get_visible():
return bb
bboxes... | Fix issue with space allocated for single tick that should not be there
Co-authored-by: Antony Lee <anntzer.lee@gmail.com> | get_window_extent | 3804cdd8f1771065f9c8616c57357c2b190c3a05 | matplotlib | spines.py | 15 | 42 | https://github.com/matplotlib/matplotlib.git | 13 | 330 | 0 | 80 | 534 | Python | {
"docstring": "\n Return the window extent of the spines in display space, including\n padding for ticks (but not their labels)\n\n See Also\n --------\n matplotlib.axes.Axes.get_tightbbox\n matplotlib.axes.Axes.get_window_extent\n ",
"language": "en",
"n_whitespa... | def get_window_extent(self, renderer=None):
# make sure the location is updated so that transforms etc are correct:
self._adjust_location()
bb = super().get_window_extent(renderer=renderer)
if self.axis is None or not self.axis.get_visible():
return bb
bboxes... | |
48,985 | 198,533 | 157 | sympy/physics/continuum_mechanics/truss.py | 41 | 11 | def apply_support(self, location, type):
if location not in self._node_labels:
raise ValueError("Support must be added on a known node")
else:
self._supports[location] = type
if type == "pinned":
self._loads['R_'+str(location)+'_x']= []
... | remove_load method added along with other changes | apply_support | 99ede53223eafb56b2c2b4ab7b8a6764b628c9d9 | sympy | truss.py | 16 | 12 | https://github.com/sympy/sympy.git | 5 | 123 | 0 | 32 | 221 | Python | {
"docstring": "\n This method adds a pinned or roller support at a particular node\n\n Parameters\n ==========\n\n location: String or Symbol\n Label of the Node at which support is added.\n\n type: String\n Type of the support being provided at the node.\n\n ... | def apply_support(self, location, type):
if location not in self._node_labels:
raise ValueError("Support must be added on a known node")
else:
self._supports[location] = type
if type == "pinned":
self._loads['R_'+str(location)+'_x']= []
... | |
102,508 | 303,692 | 119 | homeassistant/components/xiaomi_miio/vacuum.py | 20 | 13 | def timers(self) -> list[dict[str, Any]]:
retu | Improve type hints in xiaomi_miio vacuum entities (#76563)
Co-authored-by: Teemu R. <tpr@iki.fi> | timers | 54fc17e10de0752c03d6b95153c3d8168f76ea44 | core | vacuum.py | 11 | 10 | https://github.com/home-assistant/core.git | 2 | 52 | 0 | 20 | 83 | Python | {
"docstring": "Get the list of added timers of the vacuum cleaner.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 8
def timers(self) -> list[dict[str, Any]]:
    """Get the list of added timers of the vacuum cleaner."""
    schedules: list[dict[str, Any]] = []
    # One entry per timer reported by the coordinator's device data.
    for timer in self.coordinator.data.timers:
        schedules.append(
            {
                "enabled": timer.enabled,
                "cron": timer.cron,
                "next_schedule": as_utc(timer.next_schedule),
            }
        )
    return schedules
| |
77,142 | 262,205 | 110 | TTS/tts/utils/visual.py | 51 | 27 | def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False):
old_fig_size = plt.rcParams["figure.figsize"]
if fig_size is not None:
plt.rcParams["figure.figsize"] = fig_size
fig, ax = plt.subplots()
x = np.array(range(len(chars)))
| Plot pitch over input characters | plot_avg_pitch | 5169d4eb32407ca0278046aaffc56ca6f9e9ef32 | TTS | visual.py | 12 | 26 | https://github.com/coqui-ai/TTS.git | 4 | 142 | 0 | 39 | 233 | Python | {
"docstring": "Plot pitch curves on top of the input characters.\n\n Args:\n pitch (np.array): Pitch values.\n chars (str): Characters to place to the x-axis.\n\n Shapes:\n pitch: :math:`(T,)`\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 25,
"vocab_size": 22
} | def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False):
old_fig_size = plt.rcParams["figure.figsize"]
if fig_size is not None:
plt.rcParams["figure.figsize"] = fig_size
fig, ax = plt.subplots()
x = np.array(range(len(chars)))
my_xticks = [c for c in chars]
plt.xtick... | |
48,304 | 197,047 | 402 | sympy/ntheory/generate.py | 170 | 23 | def composite(nth):
n = as_int(nth)
if n < 1:
raise ValueError("nth must be a positive integer; composite(1) == 4")
composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18]
if n <= 10:
return composite_arr[n - 1]
a, b = 4, sieve._list[-1]
if n <= b - primepi(b) - 1:
whi... | Refactored import ordering in functions | composite | e0dc14eca132f37c5f49369eb4051eae37c9b119 | sympy | generate.py | 15 | 36 | https://github.com/sympy/sympy.git | 12 | 250 | 0 | 76 | 389 | Python | {
"docstring": " Return the nth composite number, with the composite numbers indexed as\n composite(1) = 4, composite(2) = 6, etc....\n\n Examples\n ========\n\n >>> from sympy import composite\n >>> composite(36)\n 52\n >>> composite(1)\n 4\n >>> composi... | def composite(nth):
n = as_int(nth)
if n < 1:
raise ValueError("nth must be a positive integer; composite(1) == 4")
composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18]
if n <= 10:
return composite_arr[n - 1]
a, b = 4, sieve._list[-1]
if n <= b - primepi(b) - 1:
whi... | |
@set_module('numpy') | 38,645 | 160,496 | 122 | numpy/lib/twodim_base.py | 64 | 19 | def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
from numpy import histogramdd
if len(x) != len(y):
raise ValueError('x and y must have the same length.')
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges =... | DEP: Remove `normed=` keyword argument from histogroms
The normed keyword argument has been deprecated for a long time.
This removes it, replacing its position with the new density
argument. | histogram2d | 2215054472616df563faa4613734426c790d4217 | numpy | twodim_base.py | 11 | 13 | https://github.com/numpy/numpy.git | 5 | 114 | 1 | 50 | 184 | Python | {
"docstring": "\n Compute the bi-dimensional histogram of two data samples.\n\n Parameters\n ----------\n x : array_like, shape (N,)\n An array containing the x coordinates of the points to be\n histogrammed.\n y : array_like, shape (N,)\n An array containing the y coordinates of ... | def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
from numpy import histogramdd
if len(x) != len(y):
raise ValueError('x and y must have the same length.')
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges =... |
91,534 | 292,448 | 25 | homeassistant/components/dlna_dms/dms.py | 11 | 5 | def available(self) -> bool:
return self._device is not None and self._device.profile_device.available
| Add dlna_dms integration to support DLNA Digital Media Servers (#66437) | available | b19bf9b147f4321e89d1f7f01e68337f2102f460 | core | dms.py | 9 | 3 | https://github.com/home-assistant/core.git | 2 | 23 | 0 | 11 | 38 | Python | {
"docstring": "Device is available when we have a connection to it.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
def available(self) -> bool:
    """Device is available when we have a connection to it."""
    device = self._device
    if device is None:
        return False
    return device.profile_device.available
| |
80,431 | 270,328 | 215 | keras/distribute/distributed_training_utils_v1.py | 115 | 16 | def _get_input_from_iterator(iterator, model):
next_element = iterator.get_next()
# `len(nest.flatten(x))` is going to not count empty elements such as {}.
# len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is
# going to get flattened in `_prepare_feed_values` to work around t... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _get_input_from_iterator | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | distributed_training_utils_v1.py | 12 | 17 | https://github.com/keras-team/keras.git | 3 | 108 | 0 | 67 | 176 | Python | {
"docstring": "Get elements from the iterator and verify the input shape and type.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 10
} | def _get_input_from_iterator(iterator, model):
next_element = iterator.get_next()
# `len(nest.flatten(x))` is going to not count empty elements such as {}.
# len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is
# going to get flattened in `_prepare_feed_values` to work around t... | |
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) | 38,647 | 160,501 | 12 | numpy/core/multiarray.py | 7 | 6 | def inner(a, b):
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) | DIC: Misc RST reformatting.
This contains various RST reformatting.
One, moving `(C)` one line up, is specific to a bug in tree-sitter-rst
that mis parses this section. Another is adding one black line for a
similar reason where `..` is seen as section underline by
tree-sitter-rst.
This is some shuffling of section ... | inner | 84eeca630ec9c5bf580bc456035c87d8591c1389 | numpy | multiarray.py | 7 | 2 | https://github.com/numpy/numpy.git | 1 | 14 | 1 | 7 | 35 | Python | {
"docstring": "\n inner(a, b, /)\n\n Inner product of two arrays.\n\n Ordinary inner product of vectors for 1-D arrays (without complex\n conjugation), in higher dimensions a sum product over the last axes.\n\n Parameters\n ----------\n a, b : array_like\n If `a` and `b` are nonscalar, th... | def inner(a, b):
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) |
22,539 | 106,987 | 347 | lib/matplotlib/collections.py | 135 | 29 | def contains(self, mouseevent):
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
if not self.get_visible():
return False, {}
pickradius = (
float(self._picker)
if isinstance(self._picke... | Switch transOffset to offset_transform.
Note that most APIs *previously* already accepted *offset_transform* as
kwarg, due to the presence of the `set_offset_transform` setter. Prefer
that name (shortening it to `offset_trf` for local variables).
Backcompat for the old `transOffset` name is kept in most places by
in... | contains | c25cf96cfb7e6fc9ad75347cb2a32193c501e82c | matplotlib | collections.py | 12 | 19 | https://github.com/matplotlib/matplotlib.git | 6 | 148 | 0 | 89 | 231 | Python | {
"docstring": "\n Test whether the mouse event occurred in the collection.\n\n Returns ``bool, dict(ind=itemlist)``, where every item in itemlist\n contains the event.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 20,
"vocab_size": 17
} | def contains(self, mouseevent):
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
if not self.get_visible():
return False, {}
pickradius = (
float(self._picker)
if isinstance(self._picke... | |
1,949 | 10,849 | 103 | jina/orchestrate/deployments/__init__.py | 15 | 7 | def deployments(self) -> List[Dict]:
return [
{
'name': self.name,
| refactor: rename pod to deployment (#4230)
* refactor: rename pod to deployment
* style: fix overload and cli autocomplete
* fix: undo daemon mistake
* refactor: leftover cleanup
* fix: more test fixes
* fix: more fixes
* fix: more fixes
* fix: more fixes
* fix: more tests
* fix: fix more te... | deployments | 13edc16d806fb5d77a6849551178ccc75937f25f | jina | __init__.py | 9 | 12 | https://github.com/jina-ai/jina.git | 1 | 34 | 0 | 15 | 58 | Python | {
"docstring": "Get deployments of the deployment. The BaseDeployment just gives one deployment.\n\n :return: list of deployments\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 15,
"vocab_size": 12
def deployments(self) -> List[Dict]:
    """Get deployments of the deployment. The BaseDeployment just gives one deployment.

    :return: list of deployments
    """
    deployment_spec = {
        'name': self.name,
        'head_host': self.head_host,
        'head_port_in': self.head_port_in,
    }
    return [deployment_spec]
117,279 | 320,685 | 251 | qutebrowser/browser/webkit/http.py | 100 | 23 | def parse_content_disposition(reply):
is_inline = True
filename = None
content_disposition_header = b'Content-Disposition'
# First check if the Content-Disposition header has a filename
# attribute.
if reply.hasRawHeader(content_disposition_header):
# We use the unsafe variant of th... | Simplify some syntax
Found via pyupgrade | parse_content_disposition | bd8c940320b7d8476b422edd9c417703db64f603 | qutebrowser | http.py | 14 | 19 | https://github.com/qutebrowser/qutebrowser.git | 5 | 121 | 0 | 68 | 222 | Python | {
"docstring": "Parse a content_disposition header.\n\n Args:\n reply: The QNetworkReply to get a filename for.\n\n Return:\n A (is_inline, filename) tuple.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 18,
"vocab_size": 17
} | def parse_content_disposition(reply):
is_inline = True
filename = None
content_disposition_header = b'Content-Disposition'
# First check if the Content-Disposition header has a filename
# attribute.
if reply.hasRawHeader(content_disposition_header):
# We use the unsafe variant of th... | |
7,413 | 41,391 | 415 | seaborn/_core/properties.py | 124 | 32 | def _get_categorical_mapping(self, scale, data):
levels = categorical_order(data, scale.order)
n = len(levels)
values = scale.values
if isinstance(values, dict):
self._check_dict_entries(levels, values)
# TODO where to ensure that dict values have consis... | Transition mappings->properties, leaving a few loose ends | _get_categorical_mapping | a07ef69882ed76e09a0ed43d6f3ea33780c1b2be | seaborn | properties.py | 17 | 28 | https://github.com/mwaskom/seaborn.git | 8 | 184 | 0 | 89 | 311 | Python | {
"docstring": "Define mapping as lookup in list of discrete color values.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def _get_categorical_mapping(self, scale, data):
levels = categorical_order(data, scale.order)
n = len(levels)
values = scale.values
if isinstance(values, dict):
self._check_dict_entries(levels, values)
# TODO where to ensure that dict values have consis... | |
22,712 | 107,367 | 28 | lib/matplotlib/colorbar.py | 7 | 6 | def minorlocator(self, loc):
self._long_axis().set_minor_locator(loc) | MNT: make colorbars locators and formatters properties | minorlocator | 6010bb43ed01c48c7c403569dd210490b236a853 | matplotlib | colorbar.py | 9 | 3 | https://github.com/matplotlib/matplotlib.git | 1 | 23 | 0 | 7 | 40 | Python | {
"docstring": "\n Set minor locator being used for colorbar\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
def minorlocator(self, loc):
    """Set minor locator being used for colorbar."""
    # Push the locator onto the colorbar's long axis, then remember it.
    axis = self._long_axis()
    axis.set_minor_locator(loc)
    self._minorlocator = loc
| |
@pytest.mark.parametrize(
"time_zone", ["Europe/Berlin", "America/Chicago", "US/Hawaii", "UTC"]
) | 99,327 | 300,467 | 71 | tests/components/recorder/test_models.py | 30 | 16 | def test_process_datetime_to_timestamp_freeze_time(time_zone, hass):
hass.config.set_time_zone(time_zone)
utc_now = dt_util.utcnow()
with freeze_time(utc_now):
epoch = utc_now.timestamp()
assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch
now = dt_util.now()
... | Fix process_datetime_to_timestamp and add test coverage (#71755) | test_process_datetime_to_timestamp_freeze_time | 1d9fb4bca871f97109684419f0f9526a0c151f2d | core | test_models.py | 12 | 8 | https://github.com/home-assistant/core.git | 1 | 61 | 1 | 24 | 141 | Python | {
"docstring": "Test we can handle processing database datatimes to timestamps.\n\n This test freezes time to make sure everything matches.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 18,
"vocab_size": 17
} | def test_process_datetime_to_timestamp_freeze_time(time_zone, hass):
hass.config.set_time_zone(time_zone)
utc_now = dt_util.utcnow()
with freeze_time(utc_now):
epoch = utc_now.timestamp()
assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch
now = dt_util.now()
... |
21,127 | 101,723 | 306 | tools/alignments/jobs.py | 68 | 19 | def _spatially_filter(self) -> np.ndarray:
logger.debug("Spatially Filter")
assert self._shapes_model is not None
landmarks_norm = self._normalized["l | Alignments Tool - Typing, Documentation + Re-org | _spatially_filter | e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1 | faceswap | jobs.py | 12 | 21 | https://github.com/deepfakes/faceswap.git | 1 | 126 | 0 | 53 | 203 | Python | {
"docstring": " interpret the shapes using our shape model (project and reconstruct)\n\n Returns\n -------\n :class:`numpy.ndarray`\n The filtered landmarks in original coordinate space\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 20,
"vocab_size": 20
} | def _spatially_filter(self) -> np.ndarray:
logger.debug("Spatially Filter")
assert self._shapes_model is not None
landmarks_norm = self._normalized["landmarks"]
# Convert to matrix form
landmarks_norm_table = np.reshape(landmarks_norm, [68 * 2, landmarks_norm.shape[2]]).... | |
47,587 | 196,087 | 48 | sympy/combinatorics/free_groups.py | 15 | 7 | def letter_form(self):
return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j)
for i, j in se | Updated import locations | letter_form | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | free_groups.py | 14 | 3 | https://github.com/sympy/sympy.git | 3 | 44 | 0 | 14 | 69 | Python | {
"docstring": "\n The letter representation of a ``FreeGroupElement`` is a tuple\n of generator symbols, with each entry corresponding to a group\n generator. Inverses of the generators are represented by\n negative generator symbols.\n\n Examples\n ========\n\n >>> f... | def letter_form(self):
return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j)
for i, j in self.array_form]))
| |
34,858 | 150,868 | 26 | freqtrade/rpc/rpc.py | 12 | 6 | def _handle_default_message(self, type, data):
logger.debug(f"Received message from Leader | Refactoring, minor improvements, data provider improvements | _handle_default_message | 2b5f0678772bea0abaf4abe93efc55de43ea3e0e | freqtrade | rpc.py | 9 | 2 | https://github.com/freqtrade/freqtrade.git | 1 | 17 | 0 | 12 | 37 | Python | {
"docstring": "\n Default leader message handler, just logs it. We should never have to\n run this unless the leader sends us some weird message.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 22,
"vocab_size": 21
def _handle_default_message(self, type, data):
    """
    Default leader message handler, just logs it. We should never have to
    run this unless the leader sends us some weird message.
    """
    # Lazy %-style args: the message is only formatted when DEBUG
    # logging is actually enabled, instead of eagerly via an f-string.
    logger.debug("Received message from Leader of type %s: %s", type, data)
| |
24,885 | 113,318 | 170 | nni/nas/oneshot/pytorch/base_lightning.py | 37 | 14 | def export_probs(self) -> dict[str, Any]:
result = {}
for module in self.nas_modules:
try:
result.update(module.export_probs(memo=result))
except NotImplementedE | Enhancement of one-shot NAS (v2.9) (#5049) | export_probs | f77db747d07d5c90a3a9f70bb17f71d4573f329e | nni | base_lightning.py | 14 | 22 | https://github.com/microsoft/nni.git | 3 | 52 | 0 | 36 | 86 | Python | {
"docstring": "\n Export the probability of every choice in the search space got chosen.\n\n .. note:: If such method of some modules is not implemented, they will be simply ignored.\n\n Returns\n -------\n dict\n In most cases, keys are names of ``nas_modules`` suffixed... | def export_probs(self) -> dict[str, Any]:
result = {}
for module in self.nas_modules:
try:
result.update(module.export_probs(memo=result))
except NotImplementedError:
warnings.warn(
'Some super-modules you have used did... | |
@region_silo_endpoint | 18,323 | 87,624 | 636 | src/sentry/api/endpoints/organization_releases.py | 265 | 48 | def debounce_update_release_health_data(organization, project_ids):
# Figure out which projects need to get updates from the snuba.
should_update = {}
cache_keys = ["debounce-health:%d" % id for id in project_ids]
cache_data = cache.get_many(cache_keys)
for project_id, cache_key in zip(project_... | fix(releases): Use Release.is_valid_version on adding releases (#40930) | debounce_update_release_health_data | 0cfac5c8bd83bcc7b96f3294c41a96524b883786 | sentry | organization_releases.py | 16 | 36 | https://github.com/getsentry/sentry.git | 14 | 268 | 1 | 161 | 440 | Python | {
"docstring": "This causes a flush of snuba health data to the postgres tables once\n per minute for the given projects.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 19,
"vocab_size": 18
} | def debounce_update_release_health_data(organization, project_ids):
# Figure out which projects need to get updates from the snuba.
should_update = {}
cache_keys = ["debounce-health:%d" % id for id in project_ids]
cache_data = cache.get_many(cache_keys)
for project_id, cache_key in zip(project_... |
56,613 | 222,520 | 95 | python3.10.4/Lib/dis.py | 96 | 19 | def show_code(co, *, file=None):
print(code_info(co), file=file)
_Instruction = collections.namedtuple("_Instruction",
"opname opcode arg argval argrepr offset starts_line is_jump_target")
_Instruction.opname.__doc__ = "Human readable name for operation"
_Instruction.opcode.__doc__ = "Numeric co | add python 3.10.4 for windows | show_code | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | dis.py | 9 | 2 | https://github.com/XX-net/XX-Net.git | 1 | 23 | 0 | 67 | 171 | Python | {
"docstring": "Print details of methods, functions, or code to *file*.\n\n If *file* is not provided, the output is printed on stdout.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 20,
"vocab_size": 19
} | def show_code(co, *, file=None):
print(code_info(co), file=file)
_Instruction = collections.namedtuple("_Instruction",
"opname opcode arg argval argrepr offset starts_line is_jump_target")
_Instruction.opname.__doc__ = "Human readable name for operation"
_Instruction.opcode.__doc__ = "Numeric code for o... | |
@_copy_docstring_and_deprecators(matplotlib.image.imread) | 23,562 | 109,381 | 38 | lib/matplotlib/pyplot.py | 17 | 11 | def set_cmap(cmap):
cmap = colormaps[cmap]
rc('image', cmap=cmap.name)
im = gci()
if im is not None:
im.set_cmap(cmap)
| MNT: convert tests and internal usage way from using mpl.cm.get_cmap | set_cmap | a17f4f3bd63e3ca3754f96d7db4ce5197720589b | matplotlib | pyplot.py | 9 | 6 | https://github.com/matplotlib/matplotlib.git | 2 | 39 | 1 | 15 | 82 | Python | {
"docstring": "\n Set the default colormap, and applies it to the current image if any.\n\n Parameters\n ----------\n cmap : `~matplotlib.colors.Colormap` or str\n A colormap instance or the name of a registered colormap.\n\n See Also\n --------\n colormaps\n matplotlib.cm.register_cma... | def set_cmap(cmap):
cmap = colormaps[cmap]
rc('image', cmap=cmap.name)
im = gci()
if im is not None:
im.set_cmap(cmap)
@_copy_docstring_and_deprecators(matplotlib.image.imread) |
117,343 | 320,776 | 608 | qutebrowser/completion/completionwidget.py | 114 | 38 | def completion_item_focus(self, which, history=False):
if history:
if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or
not self._active):
if which == 'next':
self._cmd.command_history_next()
return
... | mypy: Upgrade to PyQt5-stubs 5.15.6.0
For some unknown reason, those new stubs cause a *lot* of things now to be
checked by mypy which formerly probably got skipped due to Any being implied
somewhere.
The stubs themselves mainly improved, with a couple of regressions too.
In total, there were some 337 (!) new mypy e... | completion_item_focus | a20bb67a878b2e68abf8268c1b0a27f018d01352 | qutebrowser | completionwidget.py | 18 | 41 | https://github.com/qutebrowser/qutebrowser.git | 14 | 292 | 0 | 78 | 494 | Python | {
"docstring": "Shift the focus of the completion menu to another item.\n\n Args:\n which: 'next', 'prev',\n 'next-category', 'prev-category',\n 'next-page', or 'prev-page'.\n history: Navigate through command history if no text was typed.\n ",
... | def completion_item_focus(self, which, history=False):
if history:
if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or
not self._active):
if which == 'next':
self._cmd.command_history_next()
return
... | |
20,679 | 101,259 | 575 | tools/manual/detected_faces.py | 65 | 58 | def _background_extract(self, output_folder, progress_queue):
_io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True),
loader=ImagesLoader(self._input_location, count=self._alignments.frames_count))
for frame_idx, (filename, image) in enumerate(_io["loader"].l... | lib.align updates:
- alignments.py
- Add typed dicts for imported alignments
- Explicitly check for presence of thumb value in alignments dict
- linting
- detected_face.py
- Typing
- Linting
- Legacy support for pre-aligned face
- Update dependencies to new property names | _background_extract | 5e73437be47f2410439a3c6716de96354e6a0c94 | faceswap | detected_faces.py | 18 | 24 | https://github.com/deepfakes/faceswap.git | 3 | 232 | 0 | 56 | 366 | Python | {
"docstring": " Perform the background extraction in a thread so GUI doesn't become unresponsive.\n\n Parameters\n ----------\n output_folder: str\n The location to save the output faces to\n progress_queue: :class:`queue.Queue`\n The queue to place incremental count... | def _background_extract(self, output_folder, progress_queue):
_io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True),
loader=ImagesLoader(self._input_location, count=self._alignments.frames_count))
for frame_idx, (filename, image) in enumerate(_io["loader"].l... | |
117,149 | 320,361 | 40 | src/paperless_tesseract/tests/test_checks.py | 12 | 8 | def test_multi_part_language(self, m):
m.return_value = ["chi_sim", "eng"]
msgs = check_default_language_available(None)
self.assertEqual(len | Fixes language code checks around two part languages | test_multi_part_language | 55ef0d4a1b62c3abe8500cad97ddeecf9f746b84 | paperless-ngx | test_checks.py | 9 | 4 | https://github.com/paperless-ngx/paperless-ngx.git | 1 | 34 | 0 | 11 | 59 | Python | {
"docstring": "\n GIVEN:\n - An OCR language which is multi part (ie chi-sim)\n - The language is correctly formatted\n WHEN:\n - Installed packages are checked\n THEN:\n - No errors are reported\n ",
"language": "en",
"n_whitespaces": 102,
... | def test_multi_part_language(self, m):
m.return_value = ["chi_sim", "eng"]
msgs = check_default_language_available(None)
self.assertEqual(len(msgs), 0)
| |
17,072 | 80,512 | 84 | awx/main/tasks/callback.py | 16 | 12 | def finished_callback(self, runner_obj):
| Decoupled callback functions from BaseTask Class
--- Removed all callback functions from 'jobs.py' and put them in a new file '/awx/main/tasks/callback.py'
--- Modified Unit tests unit moved
--- Moved 'update_model' from jobs.py to /awx/main/utils/update_model.py | finished_callback | 443bdc1234682dd0004bae372078512fcf37cce9 | awx | callback.py | 9 | 8 | https://github.com/ansible/awx.git | 1 | 50 | 0 | 16 | 85 | Python | {
"docstring": "\n Ansible runner callback triggered on finished run\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def finished_callback(self, runner_obj):
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
| |
@pytest.mark.parametrize(
"sensitive_fields",
[
{"NonExistingType": {}},
{"Product": {"nonExistingField"}},
{"Node": {"id"}},
],
) | 5,117 | 27,579 | 79 | saleor/webhook/observability/tests/test_obfuscation.py | 33 | 11 | def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory):
query =
result = {"data": "result"}
sensitive_fields = {" | Observability reporter (#9803)
* Initial commit
* Add observability celery beat task
* Add observability_reporter_task and observability_send_events
* Convert payload to camel case
* Add fakeredis to dev dependencies
* Add redis buffer tests
* Refactor buffer
* Update
* Optimize buffer
* Add... | test_anonymize_gql_operation_response_with_fragment_spread | 7ea7916c65357741c3911e307acb58d547a5e91a | saleor | test_obfuscation.py | 11 | 23 | https://github.com/saleor/saleor.git | 1 | 49 | 1 | 29 | 152 | Python | {
"docstring": "\n fragment ProductFragment on Product {\n id\n name\n }\n query products($first: Int){\n products(channel: \"channel-pln\", first:$first){\n edges{\n node{\n ... ProductFragment\n variants {\n variantName: name\n ... | def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory):
query =
result = {"data": "result"}
sensitive_fields = {"Product": {"name"}}
operation_result = gql_operation_factory(query, result=result)
anonymize_gql_operation_response(operation_result, sensitive_fields)
... |
@py_random_state(3) | 42,003 | 176,620 | 130 | networkx/algorithms/bipartite/generators.py | 74 | 23 | def complete_bipartite_graph(n1, n2, create_using=None):
G = nx.empty_g | Adjust the usage of nodes_or_number decorator (#5599)
* recorrect typo in decorators.py
* Update tests to show troubles in current code
* fix troubles with usage of nodes_or_number
* fix typo
* remove nodes_or_number where that makes sense
* Reinclude nodes_or_numbers and add some tests for nonstandard ... | complete_bipartite_graph | de1d00f20e0bc14f1cc911b3486e50225a8fa168 | networkx | generators.py | 10 | 15 | https://github.com/networkx/networkx.git | 8 | 148 | 1 | 54 | 250 | Python | {
"docstring": "Returns the complete bipartite graph `K_{n_1,n_2}`.\n\n The graph is composed of two partitions with nodes 0 to (n1 - 1)\n in the first and nodes n1 to (n1 + n2 - 1) in the second.\n Each node in the first is connected to each node in the second.\n\n Parameters\n ----------\n n1, n2 ... | def complete_bipartite_graph(n1, n2, create_using=None):
G = nx.empty_graph(0, create_using)
if G.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
n1, top = n1
n2, bottom = n2
if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral):
bottom ... |
56,088 | 220,698 | 130 | python3.10.4/Lib/asyncio/sslproto.py | 45 | 13 | def shutdown(self, callback=None):
if self._state == _UNWRAPPED:
raise RuntimeError('no security layer present')
if self._state == _SHUTDOWN:
raise RuntimeError('shutdown in progress')
assert self._state in (_WRAPPED, _DO_HANDSHAKE)
self._state = _SHUTDOW... | add python 3.10.4 for windows | shutdown | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | sslproto.py | 10 | 11 | https://github.com/XX-net/XX-Net.git | 4 | 79 | 0 | 31 | 130 | Python | {
"docstring": "Start the SSL shutdown sequence.\n\n Return a list of ssldata. A ssldata element is a list of buffers\n\n The optional *callback* argument can be used to install a callback that\n will be called when the shutdown is complete. The callback will be\n called without arguments.... | def shutdown(self, callback=None):
if self._state == _UNWRAPPED:
raise RuntimeError('no security layer present')
if self._state == _SHUTDOWN:
raise RuntimeError('shutdown in progress')
assert self._state in (_WRAPPED, _DO_HANDSHAKE)
self._state = _SHUTDOW... | |
54,359 | 216,053 | 154 | salt/cloud/clouds/proxmox.py | 46 | 18 | def preferred_ip(vm_, ips):
proto = config.get_cloud_config_value(
"protocol", vm_, __opts__, default="ipv4", search_global=False
)
family = socket.AF_INET
if proto == "ipv6":
family = socket.AF_INET6
for ip in ips:
ignore_ip = ignore_cidr(vm_, ip)
if ignore_ip:... | Add support for get IP-address from agent | preferred_ip | a5679caf65c7c79cd72841b6e5793b9b693744c9 | salt | proxmox.py | 11 | 17 | https://github.com/saltstack/salt.git | 5 | 78 | 0 | 36 | 128 | Python | {
"docstring": "\n Return either an 'ipv4' (default) or 'ipv6' address depending on 'protocol' option.\n The list of 'ipv4' IPs is filtered by ignore_cidr() to remove any unreachable private addresses.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 27,
"vocab_size": 26
} | def preferred_ip(vm_, ips):
proto = config.get_cloud_config_value(
"protocol", vm_, __opts__, default="ipv4", search_global=False
)
family = socket.AF_INET
if proto == "ipv6":
family = socket.AF_INET6
for ip in ips:
ignore_ip = ignore_cidr(vm_, ip)
if ignore_ip:... | |
14,866 | 68,785 | 17 | erpnext/accounts/doctype/account/account.py | 23 | 11 | def get_parent_account(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql(
% ("%s", searchfield, "%s", "%s", "%s"),
(filters["company"], "%%%s%%" % txt, page_len, start),
as_list=1,
| refactor: use db independent offset syntax (#31345)
* chore: use db independent offset syntax
* fix: typo
* style: reformat code to black spec
Co-authored-by: Ankush Menat <ankush@frappe.io> | get_parent_account | 00ef499739959630cd7cf97419fbb6ca59be05f2 | erpnext | account.py | 10 | 9 | https://github.com/frappe/erpnext.git | 1 | 56 | 0 | 18 | 85 | Python | {
"docstring": "select name from tabAccount\n\t\twhere is_group = 1 and docstatus != 2 and company = %s\n\t\tand %s like %s order by name limit %s offset %s",
"language": "en",
"n_whitespaces": 24,
"n_words": 27,
"vocab_size": 19
} | def get_parent_account(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql(
% ("%s", searchfield, "%s", "%s", "%s"),
(filters["company"], "%%%s%%" % txt, page_len, start),
as_list=1,
)
| |
81,783 | 276,930 | 30 | keras/utils/kernelized_utils.py | 19 | 13 | def exact_laplacian_kernel(x, y, stddev):
r
x_aligned, y_aligned = _align_matrices(x, y)
diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2)
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | exact_laplacian_kernel | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | kernelized_utils.py | 12 | 27 | https://github.com/keras-team/keras.git | 1 | 53 | 0 | 18 | 81 | Python | {
"docstring": "Computes exact Laplacian kernel value(s) for tensors x and y using stddev.\n\n The Laplacian kernel for vectors u, v is defined as follows:\n K(u, v) = exp(-||u-v|| / stddev)\n where the norm is the l1-norm. x, y can be either vectors or matrices. If they\n are vectors, they must have... | def exact_laplacian_kernel(x, y, stddev):
r
x_aligned, y_aligned = _align_matrices(x, y)
diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2)
return tf.exp(-diff_l1_norm / stddev)
| |
75,534 | 259,028 | 232 | sklearn/manifold/_isomap.py | 93 | 23 | def transform(self, X):
check_is_fitted(self)
if self.n_neighbors is not None:
distances, indices = self.nbrs_.kneighbors(X, return_dis | ENH Isomap supports radius-based neighbors (#19794)
Co-authored-by: Tom Dupré la Tour <tom.dupre-la-tour@m4x.org>
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
Co-authored-by: Julien Jerphanion <git@jjerphan.xyz> | transform | 71656844586f212324678804ace73f7a266deb00 | scikit-learn | _isomap.py | 13 | 14 | https://github.com/scikit-learn/scikit-learn.git | 3 | 140 | 0 | 75 | 213 | Python | {
"docstring": "Transform X.\n\n This is implemented by linking the points X into the graph of geodesic\n distances of the training data. First the `n_neighbors` nearest\n neighbors of X are found in the training data, and from these the\n shortest geodesic distances from each point in X t... | def transform(self, X):
check_is_fitted(self)
if self.n_neighbors is not None:
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
else:
distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)
# Create the graph of short... | |
46,283 | 189,989 | 212 | manim/mobject/svg/svg_mobject.py | 48 | 12 | def generate_config_style_dict(self) -> dict[str, str]:
keys_converting_dict = {
"fill": ("color", "fill_color"),
"fill-opacity": ("opacity", "fill_opacity"),
"stroke": ("color", "stroke_color"),
"stroke-opacity": ("opacity", "stroke_opacity"),
... | Ported improved implementation of :class:`.SVGMobject` from 3b1b/manim (#2898)
* port SVGMobject from 3b1b/manim
* added svgelements as dependency
* revert change of default values
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* set default stroke... | generate_config_style_dict | 309c9d41eb734ca85a7aea5533f88a6d4ee7c944 | manim | svg_mobject.py | 13 | 17 | https://github.com/ManimCommunity/manim.git | 4 | 104 | 0 | 40 | 180 | Python | {
"docstring": "Generate a dictionary holding the default style information.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def generate_config_style_dict(self) -> dict[str, str]:
keys_converting_dict = {
"fill": ("color", "fill_color"),
"fill-opacity": ("opacity", "fill_opacity"),
"stroke": ("color", "stroke_color"),
"stroke-opacity": ("opacity", "stroke_opacity"),
... | |
78,524 | 266,709 | 110 | test/lib/ansible_test/_internal/bootstrap.py | 22 | 13 | def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]]
return dict(
| ansible-test - Code cleanup and refactoring. (#77169)
* Remove unnecessary PyCharm ignores.
* Ignore intentional undefined attribute usage.
* Add missing type hints. Fix existing type hints.
* Fix docstrings and comments.
* Use function to register completion handler.
* Pass strings to display functions.
* Fix C... | get_variables | a06fa496d3f837cca3c437ab6e9858525633d147 | ansible | bootstrap.py | 10 | 9 | https://github.com/ansible/ansible.git | 2 | 56 | 0 | 22 | 87 | Python | {
"docstring": "The variables to template in the bootstrapping script.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]]
return dict(
bootstrap_type=self.bootstrap_type,
controller='yes' if self.controller else '',
python_versions=self.python_versions,
ssh_key_type=self.ssh_key.KEY_TYPE,
... | |
54,718 | 217,321 | 224 | python3.10.4/Lib/enum.py | 71 | 16 | def _create_pseudo_member_(cls, value):
pseudo_member = cls._value2member_map_.get(value, None)
if pseudo_member is None:
| add python 3.10.4 for windows | _create_pseudo_member_ | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | enum.py | 14 | 11 | https://github.com/XX-net/XX-Net.git | 3 | 83 | 0 | 54 | 136 | Python | {
"docstring": "\n Create a composite member iff value contains only members.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def _create_pseudo_member_(cls, value):
pseudo_member = cls._value2member_map_.get(value, None)
if pseudo_member is None:
# verify all bits are accounted for
_, extra_flags = _decompose(cls, value)
if extra_flags:
raise ValueError("%r is not a... | |
75,639 | 259,201 | 237 | sklearn/preprocessing/_encoders.py | 78 | 26 | def _identify_infrequent(self, category_count, n_samples, col_idx):
if isinstance(self.min_frequency, numbers.Integral):
infrequent_mask = category_count < self.min_frequency
elif isinstance(self.min_frequency, numbers.Real):
min_frequency_abs = n_samples * self.min_freq... | ENH Adds infrequent categories to OneHotEncoder (#16018)
* ENH Completely adds infrequent categories
* STY Linting
* STY Linting
* DOC Improves wording
* DOC Lint
* BUG Fixes
* CLN Address comments
* CLN Address comments
* DOC Uses math to description float min_frequency
* DOC Adds comment r... | _identify_infrequent | 7f0006c8aad1a09621ad19c3db19c3ff0555a183 | scikit-learn | _encoders.py | 13 | 16 | https://github.com/scikit-learn/scikit-learn.git | 6 | 146 | 0 | 56 | 227 | Python | {
"docstring": "Compute the infrequent indices.\n\n Parameters\n ----------\n category_count : ndarray of shape (n_cardinality,)\n Category counts.\n\n n_samples : int\n Number of samples.\n\n col_idx : int\n Index of the current category. Only used ... | def _identify_infrequent(self, category_count, n_samples, col_idx):
if isinstance(self.min_frequency, numbers.Integral):
infrequent_mask = category_count < self.min_frequency
elif isinstance(self.min_frequency, numbers.Real):
min_frequency_abs = n_samples * self.min_freq... | |
8,405 | 44,892 | 76 | airflow/providers/google/cloud/hooks/kubernetes_engine.py | 27 | 9 | def get_conn(self) -> container_v1.ClusterManagerClient:
if self._client is None:
credentials = self._get_credentials()
self._client = container_v1.ClusterManagerClient(credentials=credent | Extract ClientInfo to module level (#21554) | get_conn | 1b568d73e1dfb838a3a0446e3a6063b9f27f04b8 | airflow | kubernetes_engine.py | 11 | 10 | https://github.com/apache/airflow.git | 2 | 44 | 0 | 23 | 74 | Python | {
"docstring": "\n Returns ClusterManagerCLinet object.\n\n :rtype: google.cloud.container_v1.ClusterManagerClient\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 5,
"vocab_size": 5
} | def get_conn(self) -> container_v1.ClusterManagerClient:
if self._client is None:
credentials = self._get_credentials()
self._client = container_v1.ClusterManagerClient(credentials=credentials, client_info=CLIENT_INFO)
return self._client
# To preserve backward comp... | |
55,985 | 220,414 | 60 | python3.10.4/Lib/asyncio/coroutines.py | 28 | 15 | def iscoroutinefunction(func):
return (inspect.isco | add python 3.10.4 for windows | iscoroutinefunction | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | coroutines.py | 11 | 3 | https://github.com/XX-net/XX-Net.git | 2 | 26 | 0 | 26 | 81 | Python | {
"docstring": "Return True if func is a decorated coroutine function.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def iscoroutinefunction(func):
return (inspect.iscoroutinefunction(func) or
getattr(func, '_is_coroutine', None) is _is_coroutine)
# Prioritize native coroutine check to speed-up
# asyncio.iscoroutine.
_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,
collections.a... | |
115,428 | 316,852 | 49 | homeassistant/helpers/storage.py | 17 | 7 | async def async_load(self) -> _T | None:
if self._load_task is None:
self._load_task = self.hass.async_create_task(self._async_load())
return await self._load_task
| Make Store a generic class (#74617) | async_load | 16900dcef15bdb9016feabd12bfec94d61ed4df6 | core | storage.py | 12 | 12 | https://github.com/home-assistant/core.git | 2 | 38 | 0 | 14 | 65 | Python | {
"docstring": "Load data.\n\n If the expected version and minor version do not match the given versions, the\n migrate function will be invoked with migrate_func(version, minor_version, config).\n\n Will ensure that when a call comes in while another one is in progress,\n the second call ... | async def async_load(self) -> _T | None:
if self._load_task is None:
self._load_task = self.hass.async_create_task(self._async_load())
return await self._load_task
| |
108,897 | 310,210 | 81 | homeassistant/components/vera/lock.py | 28 | 13 | def extra_state_attributes(self) -> dict[str, Any] | None:
data = super().extra | Remove vera from mypy ignore list (#64474)
* Remove vera from mypy ignore list
* Fix pylint | extra_state_attributes | 03bf2cdd56eb9a0a9ed56d7afb700d5f7d9cf75e | core | lock.py | 10 | 13 | https://github.com/home-assistant/core.git | 3 | 63 | 0 | 22 | 101 | Python | {
"docstring": "Who unlocked the lock and did a low battery alert fire.\n\n Reports on the previous poll cycle.\n changed_by_name is a string like 'Bob'.\n low_battery is 1 if an alert fired, 0 otherwise.\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 32,
"vocab_size": 28
} | def extra_state_attributes(self) -> dict[str, Any] | None:
data = super().extra_state_attributes or {}
last_user = self.vera_device.get_last_user_alert()
if last_user is not None:
data[ATTR_LAST_USER_NAME] = last_user[1]
data[ATTR_LOW_BATTERY] = self.vera_device.ge... | |
52,421 | 208,620 | 153 | IPython/core/interactiveshell.py | 51 | 16 | def magic(self, arg_s):
warnings.warn(
"`magic(...)` is deprecated since IPython 0.13 (warning added in "
"8.1), use run_line_magic(magic_name, parameter_s).",
DeprecationWarning,
stacklevel=2,
)
# TODO: should we issue a loud deprecation ... | Update old deprecation | magic | e306c9d3f707de42b47a1e7c4c8034d6862fba5f | ipython | interactiveshell.py | 9 | 10 | https://github.com/ipython/ipython.git | 1 | 57 | 0 | 48 | 97 | Python | {
"docstring": "\n DEPRECATED\n\n Deprecated since IPython 0.13 (warning added in\n 8.1), use run_line_magic(magic_name, parameter_s).\n\n Call a magic function by name.\n\n Input: a string containing the name of the magic function to call and\n any additional arguments to be... | def magic(self, arg_s):
warnings.warn(
"`magic(...)` is deprecated since IPython 0.13 (warning added in "
"8.1), use run_line_magic(magic_name, parameter_s).",
DeprecationWarning,
stacklevel=2,
)
# TODO: should we issue a loud deprecation ... | |
12,211 | 60,572 | 49 | .venv/lib/python3.8/site-packages/pip/_internal/commands/__init__.py | 28 | 13 | def create_command(name, **kwargs):
# type: (str, **Any) -> Command
module_path, class_name, summary = commands_dict[name]
module = importlib.import_module(module_path)
command_class = getattr(module, class_name)
command = command_cl | upd; format | create_command | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | __init__.py | 9 | 6 | https://github.com/jindongwang/transferlearning.git | 1 | 52 | 0 | 24 | 82 | Python | {
"docstring": "\n Create an instance of the Command class with the given name.\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 11,
"vocab_size": 10
} | def create_command(name, **kwargs):
# type: (str, **Any) -> Command
module_path, class_name, summary = commands_dict[name]
module = importlib.import_module(module_path)
command_class = getattr(module, class_name)
command = command_class(name=name, summary=summary, **kwargs)
return command
... | |
29,316 | 130,580 | 34 | python/ray/data/dataset.py | 13 | 11 | def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]:
block_to_df = cached_remote_fn(_block_to_df)
return [block_to_df.remote(b | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | to_pandas_refs | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | dataset.py | 10 | 15 | https://github.com/ray-project/ray.git | 2 | 39 | 0 | 13 | 65 | Python | {
"docstring": "Convert this dataset into a distributed set of Pandas dataframes.\n\n This is only supported for datasets convertible to Arrow records.\n This function induces a copy of the data. For zero-copy access to the\n underlying data, consider using ``.to_arrow()`` or\n ``.get_inte... | def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]:
block_to_df = cached_remote_fn(_block_to_df)
return [block_to_df.remote(block) for block in self._blocks.get_blocks()]
| |
@array_function_dispatch(_hvdsplit_dispatcher) | 38,466 | 160,008 | 64 | numpy/lib/shape_base.py | 32 | 9 | def hsplit(ary, indices_or_sections):
if _nx.ndim(ary) == 0:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
if ary.ndim > 1:
return split(ary, indices_or_sections, 1)
else:
retur | DOC: Include special case in `hsplit` doc (#20974) | hsplit | 1cacb2ffb1113167a4995f4f4c183f9a8356c2f0 | numpy | shape_base.py | 10 | 7 | https://github.com/numpy/numpy.git | 3 | 50 | 1 | 28 | 89 | Python | {
"docstring": "\n Split an array into multiple sub-arrays horizontally (column-wise).\n\n Please refer to the `split` documentation. `hsplit` is equivalent\n to `split` with ``axis=1``, the array is always split along the second\n axis except for 1-D arrays, where it is split at ``axis=0``.\n\n See A... | def hsplit(ary, indices_or_sections):
if _nx.ndim(ary) == 0:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
if ary.ndim > 1:
return split(ary, indices_or_sections, 1)
else:
return split(ary, indices_or_sections, 0)
@array_function_dispatch(_hvdsplit_di... |
13,748 | 64,884 | 36 | erpnext/accounts/doctype/loyalty_program/test_loyalty_program.py | 59 | 27 | def get_points_earned(self):
def get_returned_amount():
returned_amount = frappe.db.sql(
,
self.name,
)
return abs(flt(returned_amount[0][0])) if returned_amount else 0
lp_details = get_loyalty_program_details_with_points(
self.customer,
company=self.company,
loyalty_program=self.loyalty_program,
... | style: format code with black | get_points_earned | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | test_loyalty_program.py | 14 | 18 | https://github.com/frappe/erpnext.git | 6 | 114 | 0 | 42 | 231 | Python | {
"docstring": "\n\t\t\tselect sum(grand_total)\n\t\t\tfrom `tabSales Invoice`\n\t\t\twhere docstatus=1 and is_return=1 and ifnull(return_against, '')=%s\n\t\t",
"language": "en",
"n_whitespaces": 9,
"n_words": 12,
"vocab_size": 11
} | def get_points_earned(self):
def get_returned_amount():
returned_amount = frappe.db.sql(
,
self.name,
)
return abs(flt(returned_amount[0][0])) if returned_amount else 0
lp_details = get_loyalty_program_details_with_points(
self.customer,
company=self.company,
loyalty_program=self.loyalty_program,
... | |
20,326 | 100,875 | 119 | lib/model/losses_plaid.py | 49 | 17 | def _get_kernel(self) -> plaidml.tile.Value:
coords = np.arange(self._filter_size, dtype="float32")
coords -= (self._filter_size - 1) / 2.
kernel = np.square(coords)
kernel *= -0.5 / np.square(self._filter_sigma)
kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel,... | SSIM Updates
- Standardize DSSIM Function
- Implement MSSIM function for AMD | _get_kernel | 04337e0c5efd442c1ce3e2da193dd8749f1e30d8 | faceswap | losses_plaid.py | 12 | 17 | https://github.com/deepfakes/faceswap.git | 1 | 143 | 0 | 33 | 214 | Python | {
"docstring": " Obtain the base kernel for performing depthwise convolution.\n\n Returns\n -------\n :class:`plaidml.tile.Value`\n The gaussian kernel based on selected size and sigma\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 20,
"vocab_size": 19
} | def _get_kernel(self) -> plaidml.tile.Value:
coords = np.arange(self._filter_size, dtype="float32")
coords -= (self._filter_size - 1) / 2.
kernel = np.square(coords)
kernel *= -0.5 / np.square(self._filter_sigma)
kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel,... | |
15,499 | 70,351 | 55 | wagtail/core/tests/test_blocks.py | 13 | 13 | def test_default_default(self):
block = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
self.assertEqual(list(block.get_default()), ['chocolate'])
block.set_name('test_shoppinglistblock')
| Implement a ListValue type for ListBlocks | test_default_default | 4a848bfb4e3ec1a84a3d36fda577c1ed784de498 | wagtail | test_blocks.py | 12 | 6 | https://github.com/wagtail/wagtail.git | 1 | 65 | 0 | 12 | 115 | Python | {
"docstring": "\n if no explicit 'default' is set on the ListBlock, it should fall back on\n a single instance of the child block in its default state.\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 25,
"vocab_size": 23
} | def test_default_default(self):
block = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
self.assertEqual(list(block.get_default()), ['chocolate'])
block.set_name('test_shoppinglistblock')
js_args = ListBlockAdapter().js_args(block)
self.assertEqual(js_args[2], ... | |
39,674 | 165,521 | 39 | pandas/io/formats/xml.py | 14 | 5 | def convert_empty_str_key(self) -> None:
| CLN/DOC: typos (#46328)
* fix typos
* fix typo
* fix typo
* fix typo | convert_empty_str_key | a72fa1b400234d3a05342f17c3c0b1e3993a6bd8 | pandas | xml.py | 11 | 9 | https://github.com/pandas-dev/pandas.git | 3 | 40 | 0 | 14 | 71 | Python | {
"docstring": "\n Replace zero-length string in `namespaces`.\n\n This method will replace '' with None to align to `lxml`\n requirement that empty string prefixes are not allowed.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 24,
"vocab_size": 22
} | def convert_empty_str_key(self) -> None:
if self.namespaces and "" in self.namespaces.keys():
self.namespaces[None] = self.namespaces.pop("", "default")
| |
16,399 | 75,353 | 149 | wagtail/images/tests/tests.py | 30 | 13 | def test_get_with_custom_key_using_default_key(self):
# Generate signature
signature = generate_signature(self.image.id, "fill-800x600")
# Get the image
response = self.client.get(
reverse(
"wagtailimages_serve_custom_key",
args=(sign... | Reformat with black | test_get_with_custom_key_using_default_key | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | tests.py | 15 | 10 | https://github.com/wagtail/wagtail.git | 1 | 58 | 0 | 24 | 100 | Python | {
"docstring": "\n Test that that the key can be changed on the view\n\n This tests that the default key no longer works when the key is changed on the view\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 28,
"vocab_size": 17
} | def test_get_with_custom_key_using_default_key(self):
# Generate signature
signature = generate_signature(self.image.id, "fill-800x600")
# Get the image
response = self.client.get(
reverse(
"wagtailimages_serve_custom_key",
args=(sign... | |
11,033 | 54,348 | 22 | tests/test_engine.py | 8 | 7 | async def test_timeouts_do_not_hide_crashes(self, flow_run, orion_client):
started = anyio.Event()
| Fix engine tests; move function to other crash handleres | test_timeouts_do_not_hide_crashes | 3e657b429b967fa532d2f97ed7e6809112db3107 | prefect | test_engine.py | 8 | 24 | https://github.com/PrefectHQ/prefect.git | 1 | 121 | 0 | 8 | 30 | Python | {
"docstring": "\n Since timeouts capture anyio cancellations, we want to ensure that something\n still ends up in a 'Crashed' state if it is cancelled independently from our\n timeout cancellation.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 27,
"vocab_size": 27
} | async def test_timeouts_do_not_hide_crashes(self, flow_run, orion_client):
started = anyio.Event()
| |
24,437 | 111,556 | 258 | spacy/tests/pipeline/test_pipe_methods.py | 72 | 17 | def test_enable_disable_conflict_with_config():
nlp = English()
nlp.add_pipe("tagger")
nlp.add_pipe("senter")
nlp.add_pipe("sentencizer")
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
# Expected to fail, as config and arguments conflict.
with | Simplify and clarify enable/disable behavior of spacy.load() (#11459)
* Change enable/disable behavior so that arguments take precedence over config options. Extend error message on conflict. Add warning message in case of overwriting config option with arguments.
* Fix tests in test_serialize_pipeline.py to reflec... | test_enable_disable_conflict_with_config | aea16719be04d4d6ab889cd20fe0e323b2c7ffee | spaCy | test_pipe_methods.py | 18 | 19 | https://github.com/explosion/spaCy.git | 1 | 127 | 0 | 49 | 235 | Python | {
"docstring": "Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_enable_disable_conflict_with_config():
nlp = English()
nlp.add_pipe("tagger")
nlp.add_pipe("senter")
nlp.add_pipe("sentencizer")
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
# Expected to fail, as config and arguments conflict.
with pytest.raises(ValueE... | |
@pytest.mark.asyncio | 28,233 | 126,689 | 51 | dashboard/modules/job/tests/test_job_manager.py | 23 | 13 | async def test_logs_streaming(job_manager):
stream_logs_script =
stream_logs_cmd = f'python -c "{stream_logs_script}"'
job_id = await job_manager.submit_job(entrypoint=stream_logs_cmd)
await async_wait_for_condition(
lambda: "STREAMED" i | Convert job_manager to be async (#27123)
Updates jobs api
Updates snapshot api
Updates state api
Increases jobs api version to 2
Signed-off-by: Alan Guo aguo@anyscale.com
Why are these changes needed?
follow-up for #25902 (comment) | test_logs_streaming | 326b5bd1acc6d3d00ab0546e4ae45da6bed501f7 | ray | test_job_manager.py | 12 | 13 | https://github.com/ray-project/ray.git | 1 | 44 | 1 | 20 | 95 | Python | {
"docstring": "Test that logs are streamed during the job, not just at the end.\nimport time\nprint('STREAMED')\nwhile True:\n time.sleep(1)\n",
"language": "en",
"n_whitespaces": 18,
"n_words": 19,
"vocab_size": 18
} | async def test_logs_streaming(job_manager):
stream_logs_script =
stream_logs_cmd = f'python -c "{stream_logs_script}"'
job_id = await job_manager.submit_job(entrypoint=stream_logs_cmd)
await async_wait_for_condition(
lambda: "STREAMED" in job_manager.get_job_logs(job_id)
)
job_... |
80,978 | 272,209 | 159 | keras/integration_test/gradient_checkpoint_test.py | 60 | 31 | def _train_no_recompute(n_steps):
img_dim, n_channels, batch_size = 256, 1, 4
x, y = _get_dummy_data(img_dim, n_channels, batch_size)
model = _get_big_cnn_model(
img_dim, n_channels, num_partitions=3, blocks_per_partition=2
)
optimizer = optimizers.SGD()
losses = []
tr_vars = mo... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _train_no_recompute | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | gradient_checkpoint_test.py | 12 | 18 | https://github.com/keras-team/keras.git | 2 | 123 | 0 | 46 | 195 | Python | {
"docstring": "Trains a single large model without gradient checkpointing.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def _train_no_recompute(n_steps):
img_dim, n_channels, batch_size = 256, 1, 4
x, y = _get_dummy_data(img_dim, n_channels, batch_size)
model = _get_big_cnn_model(
img_dim, n_channels, num_partitions=3, blocks_per_partition=2
)
optimizer = optimizers.SGD()
losses = []
tr_vars = mo... | |
80,457 | 270,367 | 44 | keras/distribute/distributed_training_utils_v1.py | 18 | 9 | def _make_replica_execution_function(model, mode):
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _make_replica_execution_function | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | distributed_training_utils_v1.py | 10 | 11 | https://github.com/keras-team/keras.git | 4 | 60 | 0 | 14 | 57 | Python | {
"docstring": "A single step of the distributed execution on a replica.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def _make_replica_execution_function(model, mode):
if mode == ModeKeys.TRAIN:
func = model.train_on_batch
elif mode == ModeKeys.TEST:
func = model.test_on_batch
else:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.