The rows below are records from a Python code dataset: one record per function, paired with its repository, commit metadata, docstring, and a set of size/complexity metrics. Each record carries the following columns (type and observed value or length range as reported by the preview):

| column | type | observed range |
|---|---|---|
| ast_errors | string | lengths 0–3.2k |
| d_id | int64 | 44–121k |
| id | int64 | 70–338k |
| n_whitespaces | int64 | 3–14k |
| path | string | lengths 8–134 |
| n_words | int64 | 4–4.82k |
| n_identifiers | int64 | 1–131 |
| random_cut | string | lengths 16–15.8k |
| commit_message | string | lengths 2–15.3k |
| fun_name | string | lengths 1–84 |
| commit_id | string | lengths 40–40 |
| repo | string | lengths 3–28 |
| file_name | string | lengths 5–79 |
| ast_levels | int64 | 6–31 |
| nloc | int64 | 1–548 |
| url | string | lengths 31–59 |
| complexity | int64 | 1–66 |
| token_counts | int64 | 6–2.13k |
| n_ast_errors | int64 | 0–28 |
| vocab_size | int64 | 4–1.11k |
| n_ast_nodes | int64 | 15–19.2k |
| language | string | 1 distinct value (Python) |
| documentation | dict | — |
| code | string | lengths 101–62.2k |
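For readers who want to work with these records rather than skim them, the sketch below shows one way to iterate over such a dump and pull out a few fields per record. It is a minimal illustration only: the file name `code_records.jsonl` and the JSON Lines layout are assumptions (the preview does not say how the data is stored), while the field names (`repo`, `fun_name`, `nloc`, `documentation`, and so on) come from the schema table above. The records themselves follow after the sketch.

```python
import json

# Hypothetical path: the preview above does not name the underlying file,
# so "code_records.jsonl" is only a placeholder for illustration.
DATA_PATH = "code_records.jsonl"


def iter_records(path):
    """Yield one record (dict) per line of a JSON Lines dump."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)


def summarize(record):
    """Pull out the fields most useful for browsing a record."""
    doc = record.get("documentation") or {}
    return {
        "repo": record.get("repo"),
        "fun_name": record.get("fun_name"),
        "file_name": record.get("file_name"),
        "nloc": record.get("nloc"),
        "complexity": record.get("complexity"),
        # first 80 chars of the docstring, if any
        "docstring": doc.get("docstring", "")[:80],
    }


if __name__ == "__main__":
    for i, rec in enumerate(iter_records(DATA_PATH)):
        print(summarize(rec))
        if i >= 4:  # preview only the first few records
            break
```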
38,865 | 161,042 | 50 | ppg2mel/train/solver.py | 13 | 13 | def progress(self, msg):
if self.paras.verbose:
sys.stdout.write("\033[K") # Clear line
print('[{}] {}'.format(human | Init ppg extractor and ppg2mel (#375)
* Init ppg extractor and ppg2mel
* add preprocess and training
* FIx known issues
* Update __init__.py
Allow to gen audio
* Fix length issue
* Fix bug of preparing fid
* Fix sample issues
* Add UI usage of PPG-vc | progress | b617a87ee40ab384767a27335313c2c65ee094ec | MockingBird | solver.py | 14 | 4 | https://github.com/babysor/MockingBird.git | 2 | 43 | 0 | 13 | 78 | Python | {
"docstring": " Verbose function for updating progress on stdout (do not include newline) ",
"language": "en",
"n_whitespaces": 12,
"n_words": 11,
"vocab_size": 11
} | def progress(self, msg):
if self.paras.verbose:
sys.stdout.write("\033[K") # Clear line
print('[{}] {}'.format(human_format(self.step), msg), end='\r')
| |
56,870 | 223,317 | 150 | python3.10.4/Lib/distutils/tests/test_sysconfig.py | 46 | 24 | def test_customize_compiler_before_get_config_vars(self):
# Issue #21923: test that a Distribution compiler
# instance can be called without an explicit call to
# get_config_vars().
with open(TESTFN, 'w') as f:
f.writelines(textwrap.dedent())
p = subprocess.Popen | add python 3.10.4 for windows | test_customize_compiler_before_get_config_vars | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | test_sysconfig.py | 12 | 15 | https://github.com/XX-net/XX-Net.git | 1 | 82 | 0 | 43 | 137 | Python | {
"docstring": "\\\n from distutils.core import Distribution\n config = Distribution().get_command_obj('config')\n # try_compile may pass or it may fail if no compiler\n # is found but it should not raise an exception.\n rc = config.try_compil... | def test_customize_compiler_before_get_config_vars(self):
# Issue #21923: test that a Distribution compiler
# instance can be called without an explicit call to
# get_config_vars().
with open(TESTFN, 'w') as f:
f.writelines(textwrap.dedent())
p = subprocess.Popen([str... | |
@pytest.fixture | 112,597 | 313,986 | 106 | tests/components/zha/test_siren.py | 15 | 10 | def siren_platform_only():
with patch(
"homeassistant.components.zha.PLATFORMS",
(
Platform.DEVICE_TRACKER,
Platform.NUMBER,
Platform.SENSOR,
Platform.SELECT,
| Speed up zha tests (#73627) | siren_platform_only | 4bc5d7bfed07c20d6f3438ab91c734a620505a33 | core | test_siren.py | 11 | 12 | https://github.com/home-assistant/core.git | 1 | 36 | 1 | 15 | 66 | Python | {
"docstring": "Only setup the siren and required base platforms to speed up tests.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def siren_platform_only():
with patch(
"homeassistant.components.zha.PLATFORMS",
(
Platform.DEVICE_TRACKER,
Platform.NUMBER,
Platform.SENSOR,
Platform.SELECT,
Platform.SIREN,
),
):
yield
@pytest.fixture |
72,113 | 248,119 | 191 | tests/server_notices/test_resource_limits_server_notices.py | 33 | 28 | def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self):
self._rlsn._auth.check_auth_blocking = Mock(
return_va | Prefer `make_awaitable` over `defer.succeed` in tests (#12505)
When configuring the return values of mocks, prefer awaitables from
`make_awaitable` over `defer.succeed`. `Deferred`s are only awaitable
once, so it is inappropriate for a mock to return the same `Deferred`
multiple times.
Also update `run_in_backgr... | test_maybe_send_server_notice_when_alerting_suppressed_room_blocked | 78b99de7c206b106340e12cdee0af9aa246bd5ad | synapse | test_resource_limits_server_notices.py | 14 | 18 | https://github.com/matrix-org/synapse.git | 1 | 122 | 0 | 24 | 197 | Python | {
"docstring": "\n When the room is already in a blocked state, test that when alerting\n is suppressed that the room is returned to an unblocked state.\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 24,
"vocab_size": 19
} | def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self):
self._rlsn._auth.check_auth_blocking = Mock(
return_value=make_awaitable(None),
side_effect=ResourceLimitError(
403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER
... | |
53,786 | 215,066 | 212 | tests/pytests/integration/modules/test_event.py | 79 | 25 | def test_send(event_listener, salt_master, salt_minion, salt_call_cli):
event_tag = random_string("salt/test/event/")
data = {"event.fire": "just test it!!!!"}
start_time = time.time()
ret = salt_call_cli.run(
"event.send",
event_tag,
data=data,
with_grains=True,
... | Fix salt-call event.send call with grains and pillar | test_send | 374723c3abedee9ea5a399b566b460497b3885f6 | salt | test_event.py | 12 | 27 | https://github.com/saltstack/salt.git | 2 | 183 | 0 | 57 | 309 | Python | {
"docstring": "\n Test sending an event to the master event bus\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 9,
"vocab_size": 8
} | def test_send(event_listener, salt_master, salt_minion, salt_call_cli):
event_tag = random_string("salt/test/event/")
data = {"event.fire": "just test it!!!!"}
start_time = time.time()
ret = salt_call_cli.run(
"event.send",
event_tag,
data=data,
with_grains=True,
... | |
26,740 | 119,997 | 79 | jax/_src/lax/control_flow.py | 41 | 15 | def _check_tree_and_avals(what, tree1, avals1, tree2, avals2):
if tree1 != tree2:
raise TypeError(
f"{what} must have same type structure, got {tree1} and {tree2}.")
if not all(_map(core.typematch, avals1, avals2)):
diff = tree_map(_show_diff, tree_unflatten(tree1, avals1),
tr... | Deprecate jax.tree_util.tree_multimap | _check_tree_and_avals | df1ceaeeb11efc7c5af1ad2dd102857128c23b26 | jax | control_flow.py | 12 | 8 | https://github.com/google/jax.git | 3 | 67 | 0 | 36 | 122 | Python | {
"docstring": "Raises TypeError if (tree1, avals1) does not match (tree2, avals2).\n\n Corresponding `tree` and `avals` must match in the sense that the number of\n leaves in `tree` must be equal to the length of `avals`. `what` will be\n prepended to details of the mismatch in TypeError.\n ",
"language": "en"... | def _check_tree_and_avals(what, tree1, avals1, tree2, avals2):
if tree1 != tree2:
raise TypeError(
f"{what} must have same type structure, got {tree1} and {tree2}.")
if not all(_map(core.typematch, avals1, avals2)):
diff = tree_map(_show_diff, tree_unflatten(tree1, avals1),
tr... | |
26,359 | 118,684 | 257 | lib/tests/streamlit/config_test.py | 70 | 26 | def test_load_global_local_flag_config(self):
global_config =
local_config =
global_config_path = "/mock/home/folder/.streamlit/config.toml"
local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml")
global_open = mock_open(read_data=global_config)
... | Report sharing removal (#4260)
The report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. T... | test_load_global_local_flag_config | dd9084523e365e637443ea351eaaaa25f52d8412 | streamlit | config_test.py | 13 | 31 | https://github.com/streamlit/streamlit.git | 1 | 163 | 0 | 58 | 292 | Python | {
"docstring": "Test that CLI flags have higher priority than both\n ~/.streamlit/config.toml and $CWD/.streamlit/config.toml at parse time.\n \n [theme]\n base = \"dark\"\n font = \"sans serif\"\n textColor = \"#FFFFFF\"\n \n [theme]\n base = \"light\"\n... | def test_load_global_local_flag_config(self):
global_config =
local_config =
global_config_path = "/mock/home/folder/.streamlit/config.toml"
local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml")
global_open = mock_open(read_data=global_config)
... | |
@keep_lazy_text | 51,670 | 206,738 | 52 | django/utils/text.py | 31 | 10 | def get_valid_filename(name):
s = str(name).strip().replace(" ", "_")
s = re.sub(r"(?u)[^-\w.]", "", s)
if s in {"", ".", ".."}:
raise SuspiciousFileOperation("Could not derive file name from '%s'" % name)
return s
@keep_lazy_text | Refs #33476 -- Reformatted code with Black. | get_valid_filename | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | text.py | 12 | 6 | https://github.com/django/django.git | 2 | 56 | 1 | 27 | 106 | Python | {
"docstring": "\n Return the given string converted to a string that can be used for a clean\n filename. Remove leading and trailing spaces; convert other spaces to\n underscores; and remove anything that is not an alphanumeric, dash,\n underscore, or dot.\n >>> get_valid_filename(\"john's portrait in... | def get_valid_filename(name):
s = str(name).strip().replace(" ", "_")
s = re.sub(r"(?u)[^-\w.]", "", s)
if s in {"", ".", ".."}:
raise SuspiciousFileOperation("Could not derive file name from '%s'" % name)
return s
@keep_lazy_text |
51,826 | 206,991 | 159 | tests/admin_changelist/tests.py | 47 | 33 | def test_no_duplicates_for_m2m_in_list_filter(self):
blues = Genre.objects.create(name="Blues")
band = Band.objects.create(name="B.B. King Review", nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, custom_site)
request = self.f... | Refs #33476 -- Reformatted code with Black. | test_no_duplicates_for_m2m_in_list_filter | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 12 | 14 | https://github.com/django/django.git | 1 | 144 | 0 | 40 | 238 | Python | {
"docstring": "\n Regression test for #13902: When using a ManyToMany in list_filter,\n results shouldn't appear more than once. Basic ManyToMany.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 18
} | def test_no_duplicates_for_m2m_in_list_filter(self):
blues = Genre.objects.create(name="Blues")
band = Band.objects.create(name="B.B. King Review", nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, custom_site)
request = self.f... | |
40,126 | 167,798 | 116 | pandas/core/ops/methods.py | 45 | 11 | def add_flex_arithmetic_methods(cls) -> None:
flex_arith_method, flex_comp_method = _get_method_wrappers(cls)
new_methods = _create_methods(cls, flex_arith_method, flex_comp_method)
new_methods.update(
{
"multiply": new_methods["mul"],
"subtract": new_methods["sub"],
... | TYP: more return annotations in core/ (#47618)
* TYP: more return annotations in core/
* from __future__ import annotations
* more __future__ | add_flex_arithmetic_methods | f65417656ba8c59438d832b6e2a431f78d40c21c | pandas | methods.py | 11 | 21 | https://github.com/pandas-dev/pandas.git | 2 | 80 | 0 | 40 | 138 | Python | {
"docstring": "\n Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)\n to the class.\n\n Parameters\n ----------\n cls : class\n flex methods will be defined and pinned to this class\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 29,
"vocab_size": 24
} | def add_flex_arithmetic_methods(cls) -> None:
flex_arith_method, flex_comp_method = _get_method_wrappers(cls)
new_methods = _create_methods(cls, flex_arith_method, flex_comp_method)
new_methods.update(
{
"multiply": new_methods["mul"],
"subtract": new_methods["sub"],
... | |
52,889 | 210,309 | 83 | deploy/python/action_utils.py | 19 | 9 | def get_collected_keypoint(self):
output = []
for tracker_id in self.id_to_pop:
output.append([tracker_id, self.keypoint_saver[tracker_id]])
del (self.keypoint_saver[tracker_id])
self.flag_to_pop = False
self.id_to | Pipeline with kpt and act (#5399)
* add keypoint infer and visualize into Pipeline
* add independent action model inference
* add action inference into pipeline, still in working
* test different display frames and normalization methods
* use bbox and scale normalization
* Remove debug info and Optimize... | get_collected_keypoint | 7018dad10757b6d414f1b00a547244bced596d68 | PaddleDetection | action_utils.py | 12 | 8 | https://github.com/PaddlePaddle/PaddleDetection.git | 2 | 55 | 0 | 17 | 88 | Python | {
"docstring": "\n Output (List): List of keypoint results for Action Recognition task, where \n the format of each element is [tracker_id, KeyPointSequence of tracker_id]\n ",
"language": "en",
"n_whitespaces": 67,
"n_words": 21,
"vocab_size": 19
} | def get_collected_keypoint(self):
output = []
for tracker_id in self.id_to_pop:
output.append([tracker_id, self.keypoint_saver[tracker_id]])
del (self.keypoint_saver[tracker_id])
self.flag_to_pop = False
self.id_to_pop.clear()
return output
| |
@frappe.whitelist() | 13,546 | 63,987 | 19 | erpnext/education/api.py | 28 | 16 | def get_course_schedule_events(start, end, filters=None):
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Course Schedule", filters)
data = frappe.db.sql(.format(conditions=conditions), {
"start": start,
| fix: from time and to time not updated in drag and drop action #29114
fix: from time and to time not updated in drag and drop action | get_course_schedule_events | 8b5827ed6db1041526b6440ca8e4fde19c646e1e | erpnext | api.py | 12 | 14 | https://github.com/frappe/erpnext.git | 1 | 69 | 1 | 26 | 123 | Python | {
"docstring": "Returns events for Course Schedule Calendar view rendering.\n\n\t:param start: Start date-time.\n\t:param end: End date-time.\n\t:param filters: Filters (JSON).\n\tselect name, course, color,\n\t\t\ttimestamp(schedule_date, from_time) as from_time,\n\t\t\ttimestamp(schedule_date, to_time) as to_time,\... | def get_course_schedule_events(start, end, filters=None):
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Course Schedule", filters)
data = frappe.db.sql(.format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return d... |
40,008 | 167,425 | 64 | pandas/io/json/_json.py | 21 | 11 | def check_keys_split(self, decoded) -> None:
bad_keys = set(decoded.keys()).difference(set(self._split_ | TYP: Return annotations for io/{formats,json} (#47516)
* TYP: Return annotations for io/{formats,json}
* flake8
* explicitly check whether width is None | check_keys_split | 734db4f1fde2566a02b3c7ff661a479b0a71633c | pandas | _json.py | 12 | 8 | https://github.com/pandas-dev/pandas.git | 2 | 47 | 0 | 20 | 85 | Python | {
"docstring": "\n Checks that dict has only the appropriate keys for orient='split'.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def check_keys_split(self, decoded) -> None:
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys_joined = ", ".join(bad_keys)
raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}")
| |
51,013 | 205,092 | 210 | django/db/backends/oracle/operations.py | 53 | 13 | def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value | Refs #33476 -- Reformatted code with Black. | adapt_datetimefield_value | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | operations.py | 14 | 13 | https://github.com/django/django.git | 5 | 66 | 0 | 42 | 112 | Python | {
"docstring": "\n Transform a datetime value to an object compatible with what is expected\n by the backend driver for datetime columns.\n\n If naive datetime is passed assumes that is in UTC. Normally Django\n models.DateTimeField makes sure that if USE_TZ is True passed datetime\n ... | def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, "resolve_expression"):
return value
# cx_Oracle doesn't support tz-aware datetimes
if timezone.is_aware... | |
54,764 | 217,412 | 493 | python3.10.4/Lib/fractions.py | 67 | 14 | def __pow__(a, b):
if isinstance(b, numbers.Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power,
| add python 3.10.4 for windows | __pow__ | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | fractions.py | 18 | 20 | https://github.com/XX-net/XX-Net.git | 5 | 132 | 0 | 39 | 208 | Python | {
"docstring": "a ** b\n\n If b is not an integer, the result will be a float or complex\n since roots are generally irrational. If b is an integer, the\n result will be rational.\n\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 32,
"vocab_size": 21
} | def __pow__(a, b):
if isinstance(b, numbers.Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power,
... | |
76,483 | 260,777 | 39 | sklearn/linear_model/_ridge.py | 11 | 7 | def fit(self, X, y, sample_weight=None):
self._validate_params()
super().fit(X, y, sa | MAINT Parameters validation for RidgeCV and RidgeClassifierCV (#24184)
Co-authored-by: jeremie du boisberranger <jeremiedbb@yahoo.fr>
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> | fit | d593606a8267a325d98b1e9a57de6b7b87442f55 | scikit-learn | _ridge.py | 9 | 4 | https://github.com/scikit-learn/scikit-learn.git | 1 | 35 | 0 | 10 | 55 | Python | {
"docstring": "Fit Ridge regression model with cv.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training data. If using GCV, will be cast to float64\n if necessary.\n\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n ... | def fit(self, X, y, sample_weight=None):
self._validate_params()
super().fit(X, y, sample_weight=sample_weight)
return self
| |
2,943 | 19,355 | 127 | PathPlanning/CubicSpline/cubic_spline_planner.py | 45 | 11 | def calc_position(self, x):
if x < self.x[0]:
return None
elif x > self.x[-1]:
| enhance cubic spline path doc (#698)
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path d... | calc_position | def289b723e9216830c2a7b2577cb31b55710167 | PythonRobotics | cubic_spline_planner.py | 12 | 10 | https://github.com/AtsushiSakai/PythonRobotics.git | 3 | 97 | 0 | 29 | 141 | Python | {
"docstring": "\n Calc `y` position for given `x`.\n\n if `x` is outside the data point's `x` range, return None.\n\n Returns\n -------\n y : float\n y position for given x.\n ",
"language": "en",
"n_whitespaces": 81,
"n_words": 27,
"vocab_size": 22
} | def calc_position(self, x):
if x < self.x[0]:
return None
elif x > self.x[-1]:
return None
i = self.__search_index(x)
dx = x - self.x[i]
position = self.a[i] + self.b[i] * dx + \
self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
... | |
85,928 | 286,612 | 373 | openbb_terminal/portfolio/attribution_model.py | 133 | 14 | def get_daily_sector_prices(start_date, end_date) -> dict:
# sector ticker information
sp500_tickers = {
"S&P 500 Materials (Sector)": "^SP500-15",
"S&P 500 Industrials (Sector)": "^SP500-20",
"S&P 500 Consumer Discretionary (Sector)": "^SP500-25",
"S&P 500 Consumer Staples ... | [IMPROVE] Fix Docstring formatting/Fix missing, incomplete type hints (#3412)
* Fixes
* Update stocks_helper.py
* update git-actions set-output to new format
* Update stocks_helper.py
* Update terminal_helper.py
* removed LineAnnotateDrawer from qa_view
* lint
* few changes
* updates
* sdk a... | get_daily_sector_prices | 59d8b36bb0467a1a99513b10e8b8471afaa56fd6 | OpenBBTerminal | attribution_model.py | 14 | 45 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 2 | 109 | 0 | 82 | 207 | Python | {
"docstring": "\n fetches daily sector prices for S&P500 for a fixed time period\n\n Parameters\n ----------\n start_date : str ('yyyy-mm-dd') or datetime.date\n start date for fetching data\n end_date : str ('yyyy-mm-dd') or datetime.date\n end date for fetching data\n\n Returns\n ... | def get_daily_sector_prices(start_date, end_date) -> dict:
# sector ticker information
sp500_tickers = {
"S&P 500 Materials (Sector)": "^SP500-15",
"S&P 500 Industrials (Sector)": "^SP500-20",
"S&P 500 Consumer Discretionary (Sector)": "^SP500-25",
"S&P 500 Consumer Staples ... | |
46,162 | 189,675 | 123 | manim/mobject/geometry/arc.py | 38 | 17 | def get_unpositioned_tip(self, tip_shape=None, tip_length=None):
from manim.mobject.geometry.tips import ArrowTriangleFilledTip
if tip_shape is None:
tip_shape = ArrowTriangleFilledTip
if tip_length is None:
tip_length = self.get_default_tip_length()
col... | Improved structure of the :mod:`.mobject` module (#2476)
* group graphing and update its references
* group text and update its references
* group opengl and update its references
* group three_d and update its references
* group geometry and update (most) references
* move some chaning.py + updater fil... | get_unpositioned_tip | e040bcacd38378386749db18aeba575b93f4ebca | manim | arc.py | 10 | 11 | https://github.com/ManimCommunity/manim.git | 3 | 83 | 0 | 27 | 134 | Python | {
"docstring": "\n Returns a tip that has been stylistically configured,\n but has not yet been given a position in space.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 15
} | def get_unpositioned_tip(self, tip_shape=None, tip_length=None):
from manim.mobject.geometry.tips import ArrowTriangleFilledTip
if tip_shape is None:
tip_shape = ArrowTriangleFilledTip
if tip_length is None:
tip_length = self.get_default_tip_length()
col... | |
92,372 | 293,309 | 84 | tests/components/todoist/test_calendar.py | 34 | 10 | def test_parse_due_date_without_timezone_uses_offset():
data: DueDate = {
"date": "2022-02-02T14:00:00",
"is_recurring": False,
"lang": "en",
"string": "Feb 2 2:00 PM",
"timezone": None,
}
actual = _parse_due_date(data, timezone_offset=-8)
assert dat | Fix todoist parsing due dates for calendar events (#65403) | test_parse_due_date_without_timezone_uses_offset | d302b0d14e9df9cc46e7e035a0d2be5290182b40 | core | test_calendar.py | 10 | 10 | https://github.com/home-assistant/core.git | 1 | 65 | 0 | 30 | 109 | Python | {
"docstring": "Test due date uses user local timezone offset when it has no timezone.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def test_parse_due_date_without_timezone_uses_offset():
data: DueDate = {
"date": "2022-02-02T14:00:00",
"is_recurring": False,
"lang": "en",
"string": "Feb 2 2:00 PM",
"timezone": None,
}
actual = _parse_due_date(data, timezone_offset=-8)
assert datetime(202... | |
@add_start_docstrings(
"""TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
HUBERT_START_DOCSTRING,
) | 5,924 | 32,426 | 69 | src/transformers/models/hubert/modeling_tf_hubert.py | 27 | 14 | def serving_output(self, output):
hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states e | Update serving code to enable `saved_model=True` (#18153)
* Add serving_output and serving methods to some vision models
* Add serving outputs for DeiT
* Don't convert hidden states - differing shapes
* Make saveable
* Fix up
* Make swin saveable
* Add in tests
* Fix funnel tests (can't convert to... | serving_output | 8e8384663d716d4b5a4f510070ff954fc0ba4a52 | transformers | modeling_tf_hubert.py | 10 | 6 | https://github.com/huggingface/transformers.git | 3 | 60 | 1 | 22 | 103 | Python | {
"docstring": "TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 14
} | def serving_output(self, output):
hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(
last_hidden_state=outpu... |
75,671 | 259,240 | 256 | sklearn/utils/_encode.py | 111 | 17 | def _unique_np(values, return_inverse=False, return_counts=False):
uniques = np.unique(
values, return_inverse=return_inverse, return_counts=return_counts
)
inverse, counts = None, None
if return_counts:
*uniques, counts = uniques
if return_inverse:
*uniques, inverse ... | ENH Adds infrequent categories to OneHotEncoder (#16018)
* ENH Completely adds infrequent categories
* STY Linting
* STY Linting
* DOC Improves wording
* DOC Lint
* BUG Fixes
* CLN Address comments
* CLN Address comments
* DOC Uses math to description float min_frequency
* DOC Adds comment r... | _unique_np | 7f0006c8aad1a09621ad19c3db19c3ff0555a183 | scikit-learn | _encode.py | 14 | 25 | https://github.com/scikit-learn/scikit-learn.git | 12 | 177 | 0 | 68 | 276 | Python | {
"docstring": "Helper function to find unique values for numpy arrays that correctly\n accounts for nans. See `_unique` documentation for details.",
"language": "en",
"n_whitespaces": 21,
"n_words": 19,
"vocab_size": 17
} | def _unique_np(values, return_inverse=False, return_counts=False):
uniques = np.unique(
values, return_inverse=return_inverse, return_counts=return_counts
)
inverse, counts = None, None
if return_counts:
*uniques, counts = uniques
if return_inverse:
*uniques, inverse ... | |
100,006 | 301,158 | 27 | homeassistant/components/logbook/processor.py | 6 | 6 | def switch_to_live(self) -> None:
self.logbook_run.event_cache.clear()
self.logbook_run.context_lookup.clear()
| Add live streaming logbook websocket endpoint (#72258)
Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io> | switch_to_live | 9c3f9491651f409e8b4d0d645115b55b14f06165 | core | processor.py | 9 | 7 | https://github.com/home-assistant/core.git | 1 | 26 | 0 | 6 | 46 | Python | {
"docstring": "Switch to live stream.\n\n Clear caches so we can reduce memory pressure.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 12,
"vocab_size": 12
} | def switch_to_live(self) -> None:
self.logbook_run.event_cache.clear()
self.logbook_run.context_lookup.clear()
| |
121,062 | 337,460 | 13 | src/accelerate/test_utils/testing.py | 7 | 5 | def require_comet_ml(test_case):
retu | Clean up tests + fix import (#330) | require_comet_ml | e5c17f36a8b5bf8b9478d416c4a80841a353fb19 | accelerate | testing.py | 10 | 2 | https://github.com/huggingface/accelerate.git | 1 | 20 | 0 | 7 | 37 | Python | {
"docstring": "\n Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 16,
"vocab_size": 15
} | def require_comet_ml(test_case):
return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
| |
7,303 | 39,972 | 65 | dash/_callback_context.py | 15 | 7 | def triggered_id(self):
component_id = None
if self.triggered:
prop_id = self.triggered_prop_ids.first()
| added docstrings | triggered_id | 67f56d09d70e77701d2ae9a002aa330202da118b | dash | _callback_context.py | 11 | 6 | https://github.com/plotly/dash.git | 2 | 33 | 0 | 11 | 56 | Python | {
"docstring": "\n Returns the component id (str or dict) of the Input component that triggered the callback.\n\n Note - use `triggered_prop_ids` if you need both the component id and the prop that triggered the callback or if\n multiple Inputs triggered the callback.\n\n Example usage:\n ... | def triggered_id(self):
component_id = None
if self.triggered:
prop_id = self.triggered_prop_ids.first()
component_id = self.triggered_prop_ids[prop_id]
return component_id
| |
75,878 | 259,703 | 228 | sklearn/decomposition/_nmf.py | 80 | 28 | def _solve_W(self, X, H, max_iter):
avg = np.sqrt(X.mean() / self._n_components)
W = np.full((X.shape[0], self._n_components), avg | FEA Online implementation of non-negative matrix factorization (#16948)
Co-authored-by: Tom Dupré la Tour <tom.dupre-la-tour@m4x.org>
Co-authored-by: jeremie du boisberranger <jeremiedbb@yahoo.fr>
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@use... | _solve_W | 69132ebbd39f070590ca01813340b5b12c0d02ab | scikit-learn | _nmf.py | 12 | 14 | https://github.com/scikit-learn/scikit-learn.git | 4 | 148 | 0 | 62 | 224 | Python | {
"docstring": "Minimize the objective function w.r.t W.\n\n Update W with H being fixed, until convergence. This is the heart\n of `transform` but it's also used during `fit` when doing fresh restarts.\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 30,
"vocab_size": 29
} | def _solve_W(self, X, H, max_iter):
avg = np.sqrt(X.mean() / self._n_components)
W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)
W_buffer = W.copy()
# Get scaled regularization terms. Done for each minibatch to take into account
# variable sizes of min... | |
53,006 | 211,032 | 74 | deploy/pptracking/python/mot/tracker/ocsort_tracker.py | 49 | 12 | def convert_bbox_to_z(bbox):
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x = bbox[0] + w / 2.
y = bbox[1] + h / 2.
s = w * h # scale is just area
| [MOT] Add OC_SORT tracker (#6272)
* add ocsort tracker
* add ocsort deploy
* merge develop
* fix ocsort tracker codes
* fix doc, test=document_fix
* fix doc, test=document_fix | convert_bbox_to_z | c84153a355d9855fe55cf51d203b8b24e7d884e5 | PaddleDetection | ocsort_tracker.py | 10 | 8 | https://github.com/PaddlePaddle/PaddleDetection.git | 1 | 91 | 0 | 31 | 133 | Python | {
"docstring": "\n Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form\n [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is\n the aspect ratio\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 34,
"vocab_size": 22
} | def convert_bbox_to_z(bbox):
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x = bbox[0] + w / 2.
y = bbox[1] + h / 2.
s = w * h # scale is just area
r = w / float(h + 1e-6)
return np.array([x, y, s, r]).reshape((4, 1))
| |
4,786 | 24,770 | 126 | ppstructure/recovery/table_process.py | 82 | 6 | def remove_whitespace(string, leading=False, trailing=False):
# Remove any leading new line characters along with any surrounding white space
if leading:
string = re.sub(r'^\s*\n+\s*', '', string)
# Remove an | update recovery (#7259)
* update recovery
* update recovery
* update recovery
* update recovery
* update recovery | remove_whitespace | b7d99acd2e06945c789312cda70d60b7c8a5b0d0 | PaddleOCR | table_process.py | 11 | 7 | https://github.com/PaddlePaddle/PaddleOCR.git | 3 | 71 | 0 | 50 | 136 | Python | {
"docstring": "Remove white space from a string.\n Args:\n string(str): The string to remove white space from.\n leading(bool, optional): Remove leading new lines when True.\n trailing(bool, optional): Remove trailing new lines when False.\n Returns:\n str: The input string with new... | def remove_whitespace(string, leading=False, trailing=False):
# Remove any leading new line characters along with any surrounding white space
if leading:
string = re.sub(r'^\s*\n+\s*', '', string)
# Remove any trailing new line characters along with any surrounding white space
if trailing:... | |
117,347 | 320,780 | 92 | qutebrowser/completion/completionwidget.py | 21 | 12 | def selectionChanged(self, selected, deselected):
if not self._active:
return
super().selectionChanged(selected, deselected)
| mypy: Upgrade to PyQt5-stubs 5.15.6.0
For some unknown reason, those new stubs cause a *lot* of things now to be
checked by mypy which formerly probably got skipped due to Any being implied
somewhere.
The stubs themselves mainly improved, with a couple of regressions too.
In total, there were some 337 (!) new mypy e... | selectionChanged | a20bb67a878b2e68abf8268c1b0a27f018d01352 | qutebrowser | completionwidget.py | 12 | 9 | https://github.com/qutebrowser/qutebrowser.git | 3 | 65 | 0 | 17 | 108 | Python | {
"docstring": "Extend selectionChanged to call completers selection_changed.",
"language": "en",
"n_whitespaces": 5,
"n_words": 6,
"vocab_size": 6
} | def selectionChanged(self, selected, deselected):
if not self._active:
return
super().selectionChanged(selected, deselected)
indexes = selected.indexes()
if not indexes:
return
data = str(self._model().data(indexes[0]))
self.selection_chan... | |
48,862 | 198,300 | 72 | sympy/physics/vector/vector.py | 26 | 10 | def __mul__(self, other):
newlist = [v for v in self.args]
other = sympif | Use sympify less | __mul__ | 2a1afca9477eb781f16d5d6b63fa37abed7740a3 | sympy | vector.py | 12 | 6 | https://github.com/sympy/sympy.git | 3 | 64 | 0 | 21 | 97 | Python | {
"docstring": "Multiplies the Vector by a sympifyable expression.\n\n Parameters\n ==========\n\n other : Sympifyable\n The scalar to multiply this Vector with\n\n Examples\n ========\n\n >>> from sympy.physics.vector import ReferenceFrame\n >>> from sympy ... | def __mul__(self, other):
newlist = [v for v in self.args]
other = sympify(other)
for i, v in enumerate(newlist):
newlist[i] = (other * newlist[i][0], newlist[i][1])
return Vector(newlist)
| |
33,631 | 146,201 | 20 | python/ray/serve/application.py | 6 | 7 | def to_dict(self) -> Dict:
return serve_application_to_schema(self._deployments.values()).dict()
| [serve] Implement Serve Application object (#22917)
The concept of a Serve Application, a data structure containing all information needed to deploy Serve on a Ray cluster, has surfaced during recent design discussions. This change introduces a formal Application data structure and refactors existing code to use it. | to_dict | 1100c982223757f697a410a0d0c3d8bf3ff9c805 | ray | application.py | 12 | 10 | https://github.com/ray-project/ray.git | 1 | 23 | 0 | 6 | 41 | Python | {
"docstring": "Returns this Application's deployments as a dictionary.\n\n This dictionary adheres to the Serve REST API schema. It can be deployed\n via the Serve REST API.\n\n Returns:\n Dict: The Application's deployments formatted in a dictionary.\n ",
"language": "en",
... | def to_dict(self) -> Dict:
return serve_application_to_schema(self._deployments.values()).dict()
| |
14,740 | 68,207 | 117 | erpnext/hr/doctype/shift_assignment/shift_assignment.py | 149 | 25 | def get_shift_details(shift_type_name, for_timestamp=None):
if not shift_type_name:
return None
if not for_timestamp:
for_timestamp = now_datetime()
shift_type = frappe.get_doc('Shift Type', shift_type_name)
shift_actual_start = shift_type.start_time - timedelta(minutes=shift_type.begin_check_in_before_shift... | refactor: handle shifts spanning over 2 different days | get_shift_details | 62e72752dce92792166f9b734c2306adb4b41147 | erpnext | shift_assignment.py | 17 | 28 | https://github.com/frappe/erpnext.git | 6 | 282 | 0 | 75 | 460 | Python | {
"docstring": "Returns Shift Details which contain some additional information as described below.\n\t'shift_details' contains the following keys:\n\t 'shift_type' - Object of DocType Shift Type,\n\t 'start_datetime' - Date and Time of shift start on given date,\n\t 'end_datetime' - Date and Tim... | def get_shift_details(shift_type_name, for_timestamp=None):
if not shift_type_name:
return None
if not for_timestamp:
for_timestamp = now_datetime()
shift_type = frappe.get_doc('Shift Type', shift_type_name)
shift_actual_start = shift_type.start_time - timedelta(minutes=shift_type.begin_check_in_before_shift... | |
23,212 | 108,485 | 116 | lib/matplotlib/testing/__init__.py | 35 | 22 | def subprocess_run_helper(func, *args, timeout, extra_env=None):
target = func.__name__
module = func.__module__
proc = subprocess.run(
[sys.executable,
"-c",
f"from {module} import {target}; {target}()",
*args],
env={**os.environ, "SOURCE_DATE_EPOCH": "0", **... | Tweak subprocess_run_helper.
On general grounds, an API like
`subprocess_run_helper(func, *args, timeout, **extra_env)`
is problematic because it prevents one from passing an environment
variable called "timeout".
Instead, pass the extra environment variables as a dict, without
unpacking.
(Technically this has been ... | subprocess_run_helper | 031093e6f05496f55616a1fa2f39e573fea02828 | matplotlib | __init__.py | 14 | 14 | https://github.com/matplotlib/matplotlib.git | 2 | 92 | 0 | 32 | 151 | Python | {
"docstring": "\n Run a function in a sub-process.\n\n Parameters\n ----------\n func : function\n The function to be run. It must be in a module that is importable.\n *args : str\n Any additional command line arguments to be passed in\n the first argument to ``subprocess.run``.\... | def subprocess_run_helper(func, *args, timeout, extra_env=None):
target = func.__name__
module = func.__module__
proc = subprocess.run(
[sys.executable,
"-c",
f"from {module} import {target}; {target}()",
*args],
env={**os.environ, "SOURCE_DATE_EPOCH": "0", **... | |
26,297 | 118,567 | 29 | lib/streamlit/server/server.py | 8 | 7 | def add_preheated_app_session(self) -> None:
session = self._create_or_reuse_app_session(ws=None)
session.handle_rerun_script_request(is_preheat=True)
| Rename and refactor `Report` machinery (#4141)
This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app". | add_preheated_app_session | 704eab3478cf69847825b23dabf15813a8ac9fa2 | streamlit | server.py | 9 | 8 | https://github.com/streamlit/streamlit.git | 1 | 26 | 0 | 8 | 45 | Python | {
"docstring": "Register a fake browser with the server and run the script.\n\n This is used to start running the user's script even before the first\n browser connects.\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 26,
"vocab_size": 22
} | def add_preheated_app_session(self) -> None:
session = self._create_or_reuse_app_session(ws=None)
session.handle_rerun_script_request(is_preheat=True)
| |
43,599 | 181,815 | 312 | tpot/base.py | 88 | 17 | def score(self, testing_features, testing_target):
if self.fitted_pipeline_ is None:
raise RuntimeError(
"A pipeline has not yet been optimized. Please call fit() first."
)
testing_feature | Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | score | 388616b6247ca4ea8de4e2f340d6206aee523541 | tpot | base.py | 11 | 22 | https://github.com/EpistasisLab/tpot.git | 4 | 105 | 0 | 64 | 168 | Python | {
"docstring": "Return the score on the given testing data using the user-specified scoring function.\n\n Parameters\n ----------\n testing_features: array-like {n_samples, n_features}\n Feature matrix of the testing set\n testing_target: array-like {n_samples}\n List... | def score(self, testing_features, testing_target):
if self.fitted_pipeline_ is None:
raise RuntimeError(
"A pipeline has not yet been optimized. Please call fit() first."
)
testing_features, testing_target = self._check_dataset(
testing_featu... | |
89,038 | 289,912 | 85 | tests/util/test_unit_system.py | 24 | 24 | def test_as_dict():
expected = {
LENGTH: UnitOfLength.KILOMETERS,
WIND_SPEED: UnitOfSpeed.METERS_PER_SECOND,
TEMPERATURE: UnitOfTemperature.CELSIUS,
VOLUME: UnitOfVolume.LITERS,
MASS: U | Use unit enums in unit utilities (#81030) | test_as_dict | 2a2e097e174204e3710161898b4302e1bceca1e5 | core | test_unit_system.py | 9 | 11 | https://github.com/home-assistant/core.git | 1 | 59 | 0 | 23 | 88 | Python | {
"docstring": "Test that the as_dict() method returns the expected dictionary.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | def test_as_dict():
expected = {
LENGTH: UnitOfLength.KILOMETERS,
WIND_SPEED: UnitOfSpeed.METERS_PER_SECOND,
TEMPERATURE: UnitOfTemperature.CELSIUS,
VOLUME: UnitOfVolume.LITERS,
MASS: UnitOfMass.GRAMS,
PRESSURE: UnitOfPressure.PA,
ACCUMULATED_PRECIPITATIO... | |
19,870 | 100,384 | 120 | plugins/train/model/_base.py | 32 | 13 | def _get_inputs(self):
logger.debug("Getting inputs")
if len(self.input_shape) == 3:
input_shapes = [self.input_shape, self.input_shape]
else:
input_shapes = self.in | Update code to support Tensorflow versions up to 2.8 (#1213)
* Update maximum tf version in setup + requirements
* - bump max version of tf version in launcher
- standardise tf version check
* update keras get_custom_objects for tf>2.6
* bugfix: force black text in GUI file dialogs (linux)
* dssim loss -... | _get_inputs | c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf | faceswap | _base.py | 12 | 10 | https://github.com/deepfakes/faceswap.git | 3 | 80 | 0 | 28 | 137 | Python | {
"docstring": " Obtain the standardized inputs for the model.\n\n The inputs will be returned for the \"A\" and \"B\" sides in the shape as defined by\n :attr:`input_shape`.\n\n Returns\n -------\n list\n A list of :class:`keras.layers.Input` tensors. This will be a list... | def _get_inputs(self):
logger.debug("Getting inputs")
if len(self.input_shape) == 3:
input_shapes = [self.input_shape, self.input_shape]
else:
input_shapes = self.input_shape
inputs = [Input(shape=shape, name=f"face_in_{side}")
for side,... | |
79,311 | 268,037 | 59 | test/lib/ansible_test/_internal/python_requirements.py | 33 | 15 | def collect_units_install() -> t.List[PipInstall]:
requirements_paths = [] # type: t.List[t.Tuple[str, str]]
constraints_paths = [] # type: t.List[t.Tuple[str, str]]
path = os.path.join(data_context().content.unit_path, 'requirements.txt')
requirements_paths.append((data_context().content.root, ... | ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annota... | collect_units_install | 3eb0485dd92c88cc92152d3656d94492db44b183 | ansible | python_requirements.py | 12 | 9 | https://github.com/ansible/ansible.git | 1 | 95 | 0 | 22 | 158 | Python | {
"docstring": "Return details necessary for the specified units pip install(s).",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def collect_units_install() -> t.List[PipInstall]:
requirements_paths = [] # type: t.List[t.Tuple[str, str]]
constraints_paths = [] # type: t.List[t.Tuple[str, str]]
path = os.path.join(data_context().content.unit_path, 'requirements.txt')
requirements_paths.append((data_context().content.root, ... | |
@add_start_docstrings(
"""YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks.""",
YOSO_START_DOCSTRING,
) | 6,289 | 34,527 | 90 | src/transformers/models/yoso/modeling_yoso.py | 37 | 13 | def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
| Add YOSO (#15091)
* Add cookiecutter files
* Add cuda kernels and cpp files
* Update modeling_yoso.py
* Add .h files
* Update configuration_yoso.py
* Updates
* Remove tokenizer
* Code quality
* Update modeling_yoso.py
* Update modeling_yoso.py
* Fix failing test
* Update modeling_yoso.... | forward | 99a2771189321c826ff55d161a7cfedadd4023c7 | transformers | modeling_yoso.py | 10 | 8 | https://github.com/huggingface/transformers.git | 1 | 67 | 1 | 25 | 120 | Python | {
"docstring": "YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks.",
"language": "en",
"n_whitespaces": 25,
"n_words": 23,
"vocab_size": 21
} | def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
,
YOSO_S... |
51,270 | 205,896 | 181 | django/db/models/sql/subqueries.py | 39 | 18 | def delete_batch(self, pk_list, using):
# number of objects deleted
num_deleted = 0
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.clear_where()
self.add_filter(
f"{field.attname}__in",
... | Refs #33476 -- Reformatted code with Black. | delete_batch | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | subqueries.py | 13 | 13 | https://github.com/django/django.git | 2 | 83 | 0 | 34 | 136 | Python | {
"docstring": "\n Set up and execute delete queries for all the objects in pk_list.\n\n More than one physical query may be executed if there are a\n lot of values in pk_list.\n ",
"language": "en",
"n_whitespaces": 58,
"n_words": 29,
"vocab_size": 27
} | def delete_batch(self, pk_list, using):
# number of objects deleted
num_deleted = 0
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.clear_where()
self.add_filter(
f"{field.attname}__in",
... | |
79,234 | 267,959 | 18 | test/lib/ansible_test/_internal/coverage_util.py | 8 | 3 | def generate_ansible_coverage_config() -> str:
coverage_config =
return coverage_config
| ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annota... | generate_ansible_coverage_config | 3eb0485dd92c88cc92152d3656d94492db44b183 | ansible | coverage_util.py | 7 | 18 | https://github.com/ansible/ansible.git | 1 | 12 | 0 | 7 | 25 | Python | {
"docstring": "Generate code coverage configuration for Ansible tests.\n[run]\nbranch = True\nconcurrency = multiprocessing\nparallel = True\n\nomit =\n */python*/dist-packages/*\n */python*/site-packages/*\n */python*/distutils/*\n */pyshared/*\n */pytest\n */AnsiballZ_*.py\n */test/results/*\n... | def generate_ansible_coverage_config() -> str:
coverage_config =
return coverage_config
| |
19,555 | 98,271 | 68 | src/sentry/models/organizationmember.py | 14 | 8 | def get_allowed_roles_to_invite(self):
return [
r
for r in organization_roles.get_all()
if r.priority <= organization_roles.get(self.role).priority
]
| feat(access): Implement team-based role management and access (#33387)
Introduce team-based roles in parallel to existing, organization-based
roles. Split the levels into their own objects, accessible through the
parent RoleManager object. Map org roles onto the corresponding minimum
team roles, which each member w... | get_allowed_roles_to_invite | b7dee7f2457a911bea343d20f2119e691bb153ce | sentry | organizationmember.py | 12 | 6 | https://github.com/getsentry/sentry.git | 3 | 33 | 0 | 13 | 54 | Python | {
"docstring": "\n Return a list of roles which that member could invite\n Must check if member member has member:admin first before checking\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 18
} | def get_allowed_roles_to_invite(self):
return [
r
for r in organization_roles.get_all()
if r.priority <= organization_roles.get(self.role).priority
]
| |
24,601 | 112,159 | 21 | nni/retiarii/oneshot/pytorch/supermodule/base.py | 7 | 6 | def search_space_spec(self) -> Dict[str, ParameterSpec]:
raise NotImplementedError()
| Valuechoice oneshot lightning (#4602) | search_space_spec | 14d2966b9e91ae16dcc39de8f41017a75cec8ff9 | nni | base.py | 7 | 10 | https://github.com/microsoft/nni.git | 1 | 17 | 0 | 7 | 29 | Python | {
"docstring": "\n Space specification (sample points).\n Mapping from spec name to ParameterSpec. The names in choices should be in the same format of export.\n\n For example: ::\n\n {\"layer1\": ParameterSpec(values=[\"conv\", \"pool\"])}\n ",
"language": "en",
"n_whitespa... | def search_space_spec(self) -> Dict[str, ParameterSpec]:
raise NotImplementedError()
| |
71,346 | 246,790 | 557 | tests/rest/admin/test_room.py | 132 | 34 | def test_context_as_admin(self) -> None:
# Create a room. We're not part of it.
user_id = self.register_user("test", "test")
user_tok = self.login("test", "test")
room_id = self.helper.create_room_as(user_id, tok=user_tok)
# Populate the room with events.
event... | Replace assertEquals and friends with non-deprecated versions. (#12092) | test_context_as_admin | 02d708568b476f2f7716000b35c0adfa4cbd31b3 | synapse | test_room.py | 15 | 39 | https://github.com/matrix-org/synapse.git | 8 | 261 | 0 | 89 | 438 | Python | {
"docstring": "\n Test that, as admin, we can find the context of an event without having joined the room.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 17,
"vocab_size": 16
} | def test_context_as_admin(self) -> None:
# Create a room. We're not part of it.
user_id = self.register_user("test", "test")
user_tok = self.login("test", "test")
room_id = self.helper.create_room_as(user_id, tok=user_tok)
# Populate the room with events.
event... | |
36,910 | 157,370 | 23 | ldm/models/diffusion/dpm_solver/dpm_solver.py | 9 | 7 | def marginal_std(self, t):
| release more models | marginal_std | ca86da3a30c4e080d4db8c25fca73de843663cb4 | stablediffusion | dpm_solver.py | 13 | 2 | https://github.com/Stability-AI/stablediffusion.git | 1 | 31 | 0 | 9 | 48 | Python | {
"docstring": "\n Compute sigma_t of a given continuous-time label t in [0, T].\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 11
} | def marginal_std(self, t):
return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
| |
16,745 | 78,245 | 108 | wagtail/contrib/settings/models.py | 34 | 14 | def for_request(cls, request):
attr_name = cls.get_cache_attr_name()
if hasattr(request, attr_name):
return getattr(request, attr_name)
site = Site.find_for_request(request)
site_settings = cls.for_site(site)
# to allow mo | Add generic settings to compliment site-specific settings (#8327) | for_request | d967eccef28ce47f60d26be1c28f2d83a25f40b0 | wagtail | models.py | 9 | 9 | https://github.com/wagtail/wagtail.git | 2 | 61 | 0 | 29 | 98 | Python | {
"docstring": "\n Get or create an instance of this model for the request,\n and cache the result on the request for faster repeat access.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 22,
"vocab_size": 19
} | def for_request(cls, request):
attr_name = cls.get_cache_attr_name()
if hasattr(request, attr_name):
return getattr(request, attr_name)
site = Site.find_for_request(request)
site_settings = cls.for_site(site)
# to allow more efficient page url generation
... | |
45,473 | 186,377 | 518 | certbot-apache/certbot_apache/_internal/configurator.py | 108 | 31 | def _enable_ocsp_stapling(self, ssl_vhost, unused_options):
min_apache_ver = (2, 3, 3)
if self.get_version() < min_apache_ver:
raise errors.PluginError(
"Unable to set OCSP directives.\n"
"Apache version is below 2.3.3.")
if "socache_shmcb_mo... | Various clean-ups in certbot-apache. Use f-strings. (#9132)
* Various clean-ups in certbot-apache. Use f-strings.
* Smaller tweaks | _enable_ocsp_stapling | eeca208c8f57304590ac1af80b496e61021aaa45 | certbot | configurator.py | 12 | 26 | https://github.com/certbot/certbot.git | 5 | 182 | 0 | 89 | 311 | Python | {
"docstring": "Enables OCSP Stapling\n\n In OCSP, each client (e.g. browser) would have to query the\n OCSP Responder to validate that the site certificate was not revoked.\n\n Enabling OCSP Stapling, would allow the web-server to query the OCSP\n Responder, and staple its response to the... | def _enable_ocsp_stapling(self, ssl_vhost, unused_options):
min_apache_ver = (2, 3, 3)
if self.get_version() < min_apache_ver:
raise errors.PluginError(
"Unable to set OCSP directives.\n"
"Apache version is below 2.3.3.")
if "socache_shmcb_mo... | |
14,494 | 67,319 | 33 | erpnext/regional/united_arab_emirates/utils.py | 44 | 14 | def make_regional_gl_entries(gl_entries, doc):
country = frappe.get_cached_value("Company", doc.company, "country")
| style: format code with black | make_regional_gl_entries | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | utils.py | 12 | 11 | https://github.com/frappe/erpnext.git | 5 | 81 | 0 | 35 | 137 | Python | {
"docstring": "Hooked to make_regional_gl_entries in Purchase Invoice.It appends the region specific general ledger entries to the list of GL Entries.",
"language": "en",
"n_whitespaces": 18,
"n_words": 19,
"vocab_size": 17
} | def make_regional_gl_entries(gl_entries, doc):
country = frappe.get_cached_value("Company", doc.company, "country")
if country != "United Arab Emirates":
return gl_entries
if doc.reverse_charge == "Y":
tax_accounts = get_tax_accounts(doc.company)
for tax in doc.get("taxes"):
if tax.category not in ("Tot... | |
1,387 | 8,266 | 75 | tests/integration_tests/utils.py | 23 | 8 | def private_param(param):
return pytest.param(
*param,
marks=pytest.mark.skipif(
not _run_private_tests,
reason="Skipping: this test is marked private, set RUN_PRIVATE=1 in your environment to run",
),
)
| Fixed issue when uploading output directory artifacts to remote filesystems (#2598) | private_param | d4dcff26dd9f25b3eb43c4e74a56af93879eeab2 | ludwig | utils.py | 12 | 8 | https://github.com/ludwig-ai/ludwig.git | 1 | 32 | 0 | 23 | 53 | Python | {
"docstring": "Wrap param to mark it as private, meaning it requires credentials to run.\n\n Private tests are skipped by default. Set the RUN_PRIVATE environment variable to a truth value to run them.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 31,
"vocab_size": 27
} | def private_param(param):
return pytest.param(
*param,
marks=pytest.mark.skipif(
not _run_private_tests,
reason="Skipping: this test is marked private, set RUN_PRIVATE=1 in your environment to run",
),
)
| |
@not_implemented_for("undirected") | 42,296 | 177,161 | 172 | networkx/algorithms/d_separation.py | 108 | 28 | def minimal_d_separator(G, u, v):
if not nx.is_directed_acyclic_graph(G):
raise nx.NetworkXError("graph should be direct | [ENH] Find and verify a minimal D-separating set in DAG (#5898)
* Ran black
* Add unit tests
* Rename and fix citation
* Black
* Fix unite tests
* Isort
* Add algo description
* Update networkx/algorithms/tests/test_d_separation.py
* Update networkx/algorithms/traversal/breadth_first_search.py
... | minimal_d_separator | df9a128f4171d95671e5d9f5460970cc4bf8e3b3 | networkx | d_separation.py | 12 | 15 | https://github.com/networkx/networkx.git | 4 | 152 | 1 | 77 | 254 | Python | {
"docstring": "Compute a minimal d-separating set between 'u' and 'v'.\n\n A d-separating set in a DAG is a set of nodes that blocks all paths\n between the two nodes, 'u' and 'v'. This function\n constructs a d-separating set that is \"minimal\", meaning it is the smallest\n d-separating set for 'u' and... | def minimal_d_separator(G, u, v):
if not nx.is_directed_acyclic_graph(G):
raise nx.NetworkXError("graph should be directed acyclic")
union_uv = {u, v}
if any(n not in G.nodes for n in union_uv):
raise nx.NodeNotFound("one or more specified nodes not found in the graph")
# first c... |
46,101 | 189,502 | 440 | manim/mobject/svg/text_mobject.py | 50 | 12 | def _change_alignment_for_a_line(self, alignment, line_no):
self.lines[1][line_no] = alignment
if self.lines[1][line_no] == "center":
self[line_no].move_to(
np.array([self.get_center()[0], self[line_no].get_center()[1], 0]),
)
elif self.lines[1][l... | Hide more private methods from the docs. (#2468)
* hide privs from text_mobject.py
* hide privs from tex_mobject.py
* hide privs from code_mobject.py
* hide privs from svg_mobject.py
* remove SVGPath and utils from __init__.py
* don't import string_to_numbers
* hide privs from geometry.py
* hide p... | _change_alignment_for_a_line | 902e7eb4f0147b5882a613b67467e38a1d47f01e | manim | text_mobject.py | 17 | 26 | https://github.com/ManimCommunity/manim.git | 4 | 196 | 0 | 30 | 294 | Python | {
"docstring": "Function to change one line's alignment to a specific value.\n\n Parameters\n ----------\n alignment : :class:`str`\n Defines the alignment of paragraph. Possible values are \"left\", \"right\", \"center\".\n line_no : :class:`int`\n Defines the line n... | def _change_alignment_for_a_line(self, alignment, line_no):
self.lines[1][line_no] = alignment
if self.lines[1][line_no] == "center":
self[line_no].move_to(
np.array([self.get_center()[0], self[line_no].get_center()[1], 0]),
)
elif self.lines[1][l... | |
72,907 | 249,425 | 189 | tests/handlers/test_room_member.py | 51 | 22 | def test_rejoin_forgotten_by_user(self) -> None:
self.helper.join(self.room_id, user=self. | Fix that user cannot `/forget` rooms after the last member has left (#13546) | test_rejoin_forgotten_by_user | 682dfcfc0db05d9c99b7615d950997535df4d533 | synapse | test_room_member.py | 12 | 18 | https://github.com/matrix-org/synapse.git | 1 | 170 | 0 | 35 | 265 | Python | {
"docstring": "Test that a user that has forgotten a room can do a re-join.\n The room was not forgotten from the local server.\n One local user is still member of the room.",
"language": "en",
"n_whitespaces": 44,
"n_words": 31,
"vocab_size": 23
} | def test_rejoin_forgotten_by_user(self) -> None:
self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
self.get_success(self.handler.forget(self.alice_ID, self.room_id))
self.assertTrue(
... | |
50,601 | 203,990 | 53 | django/contrib/gis/gdal/libgdal.py | 35 | 10 | def std_call(func):
if os.name == "nt":
return lwingdal[func]
else:
return lgdal[func]
# #### Version-information functions. ####
# | Refs #33476 -- Reformatted code with Black. | std_call | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | libgdal.py | 9 | 5 | https://github.com/django/django.git | 2 | 25 | 0 | 30 | 77 | Python | {
"docstring": "\n Return the correct STDCALL function for certain OSR routines on Win32\n platforms.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 12,
"vocab_size": 12
} | def std_call(func):
if os.name == "nt":
return lwingdal[func]
else:
return lgdal[func]
# #### Version-information functions. ####
# Return GDAL library version information with the given key.
_version_info = std_call("GDALVersionInfo")
_version_info.argtypes = [c_char_p]
_version_info.re... | |
51,581 | 206,594 | 85 | django/utils/datastructures.py | 21 | 10 | def __getitem__(self, key):
use_func = key.startswith(self.prefix)
if use_func:
| Refs #33476 -- Reformatted code with Black. | __getitem__ | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | datastructures.py | 13 | 8 | https://github.com/django/django.git | 3 | 55 | 0 | 15 | 91 | Python | {
"docstring": "\n Retrieve the real value after stripping the prefix string (if\n present). If the prefix is present, pass the value through self.func\n before returning, otherwise return the raw value.\n ",
"language": "en",
"n_whitespaces": 57,
"n_words": 28,
"vocab_size": 22
} | def __getitem__(self, key):
use_func = key.startswith(self.prefix)
if use_func:
key = key[len(self.prefix) :]
value = super().__getitem__(key)
if use_func:
return self.func(value)
return value
| |
11,418 | 55,900 | 71 | tests/test_client.py | 35 | 8 | def not_enough_open_files() -> bool:
try:
import resource
except ImportError:
# resource limits is not a concept on all systems, notably Windows
return False
soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
return soft | Skip running the more intense client tests when the ulimit of files is too low (PrefectHQ/orion#1905)
On some systems the ulimit for open files is set excruciatingly low, especially
the default settings of 256 for macOS. We can skip the threading tests on
systems with no enough open files.
Co-authored-by: Michae... | not_enough_open_files | 84d0f8a18f6a413fc70b78e4ccbef67372d05075 | prefect | test_client.py | 9 | 11 | https://github.com/PrefectHQ/prefect.git | 3 | 36 | 0 | 30 | 63 | Python | {
"docstring": "\n The current process does not currently allow enough open files for this test.\n You can increase the number of open files with `ulimit -n 512`.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 25,
"vocab_size": 23
} | def not_enough_open_files() -> bool:
try:
import resource
except ImportError:
# resource limits is not a concept on all systems, notably Windows
return False
soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
return soft_limit < 512 or hard_limit < 512
| |
77,722 | 264,432 | 273 | netbox/netbox/tables/tables.py | 79 | 22 | def configure(self, request):
# Save ordering preference
if request.user.is_authenticated:
table_name = self.__class__.__name__
if self.prefixed_order_by_field in request.GET:
# If an ordering has been specified as a query parameter, save it as the
... | Move configure_table() logic to NetBoxTable.configure() | configure | 23a80770e1e96c0351cb4ead62ebf294f639845a | netbox | tables.py | 13 | 13 | https://github.com/netbox-community/netbox.git | 4 | 107 | 0 | 57 | 185 | Python | {
"docstring": "\n Configure the table for a specific request context. This performs pagination and records\n the user's preferred ordering logic.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 17
} | def configure(self, request):
# Save ordering preference
if request.user.is_authenticated:
table_name = self.__class__.__name__
if self.prefixed_order_by_field in request.GET:
# If an ordering has been specified as a query parameter, save it as the
... | |
@profiler.annotate_function | 27,280 | 122,960 | 115 | jax/interpreters/pxla.py | 75 | 19 | def _shard_arg(arg, devices, arg_indices):
if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices:
# The shard_arg_handlers allow an extensible set of types to be sharded, but
# inline handling for ShardedDeviceArray as a special case for performance
# NOTE: we compare indices instead of... | Simplify Array's shard_arg_handler by merging pmap and pjit/xmap paths
PiperOrigin-RevId: 497991966 | _shard_arg | 1fc9197c79af89ef292dc69d508ed1569f62c4f0 | jax | pxla.py | 12 | 9 | https://github.com/google/jax.git | 5 | 81 | 1 | 64 | 134 | Python | {
"docstring": "Returns a list of size len(devices) containing per-device buffers.\n\n For the C++ pmap path, we fallback to Python (this function) to shard\n arguments that are not supported by the C++ `ShardArg`.\n\n Arrgs:\n arg: The Python argument.\n devices: The list of devices to shard over.\n arg_... | def _shard_arg(arg, devices, arg_indices):
if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices:
# The shard_arg_handlers allow an extensible set of types to be sharded, but
# inline handling for ShardedDeviceArray as a special case for performance
# NOTE: we compare indices instead of... |
12,082 | 60,304 | 135 | code/deep/BJMMD/caffe/python/caffe/test/test_coord_map.py | 51 | 23 | def test_rect(self):
n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
ax_5x5, a_5x5, b_5x5 = coord_map_... | Balanced joint maximum mean discrepancy for deep transfer learning | test_rect | cc4d0564756ca067516f71718a3d135996525909 | transferlearning | test_coord_map.py | 10 | 12 | https://github.com/jindongwang/transferlearning.git | 1 | 168 | 0 | 45 | 245 | Python | {
"docstring": "\n Anisotropic mapping is equivalent to its isotropic parts.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | def test_rect(self):
n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
ax_5x5, a_5x5, b_5x5 = coord_map_... | |
16,034 | 73,512 | 506 | wagtail/contrib/settings/tests/test_model.py | 102 | 17 | def test_get_page_url_when_for_settings_fetched_via_for_site(self):
self._create_importantpages_object()
settings = ImportantPages.for_site(self.default_site)
# Force site root paths query beforehand
self.default_site.root_page._get_site_root_paths()
for page_fk_field... | Reformat with black | test_get_page_url_when_for_settings_fetched_via_for_site | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_model.py | 16 | 20 | https://github.com/wagtail/wagtail.git | 2 | 115 | 0 | 74 | 201 | Python | {
"docstring": "ImportantPages.for_site() cannot make the settings object\n request-aware, so things are a little less efficient, and the\n URLs returned will not be site-relative",
"language": "en",
"n_whitespaces": 35,
"n_words": 22,
"vocab_size": 21
} | def test_get_page_url_when_for_settings_fetched_via_for_site(self):
self._create_importantpages_object()
settings = ImportantPages.for_site(self.default_site)
# Force site root paths query beforehand
self.default_site.root_page._get_site_root_paths()
for page_fk_field... | |
72,260 | 248,389 | 1,066 | tests/federation/test_federation_sender.py | 119 | 28 | def test_send_receipts_with_backoff(self):
mock_send_transaction = (
self.hs.get_federation_transport_client().send_transaction
)
mock_send_transaction.return_value = make_awaitable({})
sender = self.hs.get_federation_sender()
receipt = ReadReceipt(
... | Additional constants for EDU types. (#12884)
Instead of hard-coding strings in many places. | test_send_receipts_with_backoff | c52abc1cfdd9e5480cdb4a03d626fe61cacc6573 | synapse | test_federation_sender.py | 21 | 63 | https://github.com/matrix-org/synapse.git | 1 | 296 | 0 | 57 | 519 | Python | {
"docstring": "Send two receipts in quick succession; the second should be flushed, but\n only after 20ms",
"language": "en",
"n_whitespaces": 21,
"n_words": 15,
"vocab_size": 15
} | def test_send_receipts_with_backoff(self):
mock_send_transaction = (
self.hs.get_federation_transport_client().send_transaction
)
mock_send_transaction.return_value = make_awaitable({})
sender = self.hs.get_federation_sender()
receipt = ReadReceipt(
... | |
18,094 | 86,265 | 3,597 | src/sentry/lang/javascript/processor.py | 857 | 76 | def process_frame(self, processable_frame, processing_task):
frame = processable_frame.frame
token = None
cache = self.cache
sourcemaps = self.sourcemaps
all_errors = []
sourcemap_applied = False
# can't demangle if there's no filename or line number pr... | ref(processor): Use symbolic-sourcemapcache for JavaScript Sourcemap processing (#38551)
This PR attempts to replace the currently used `rust-sourcemap` crate
and it's symbolic python bindings, with `symbolic-sourcemapcache` crate.
It makes the whole processing pipeline easier to maintain, as it pushes
some work ... | process_frame | ae9c0d8a33d509d9719a5a03e06c9797741877e9 | sentry | processor.py | 21 | 145 | https://github.com/getsentry/sentry.git | 51 | 953 | 0 | 421 | 1,670 | Python | {
"docstring": "\n Attempt to demangle the given frame.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 6,
"vocab_size": 6
} | def process_frame(self, processable_frame, processing_task):
frame = processable_frame.frame
token = None
cache = self.cache
sourcemaps = self.sourcemaps
all_errors = []
sourcemap_applied = False
# can't demangle if there's no filename or line number pr... | |
78,250 | 265,949 | 229 | netbox/utilities/forms/utils.py | 95 | 14 | def validate_csv(headers, fields, required_fields):
# Validate provided column headers
is_update = False
for field, to_field in headers.items():
if field == "id":
is_update = True
continue
if field not in fields:
raise forms.Vali | 7961 CSV bulk update (#10715)
* 7961 add csv bulk update
* temp checkin - blocked
* 7961 bugfix and cleanup
* 7961 change to id, add docs
* 7961 add tests cases
* 7961 fix does not exist validation error
* 7961 fix does not exist validation error
* 7961 update tests
* 7961 update tests
* 796... | validate_csv | cb815ede60ab298ca13907d523126380f50a8023 | netbox | utils.py | 15 | 16 | https://github.com/netbox-community/netbox.git | 11 | 118 | 0 | 59 | 212 | Python | {
"docstring": "\n Validate that parsed csv data conforms to the object's available fields. Raise validation errors\n if parsed csv data contains invalid headers or does not contain required headers.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 27,
"vocab_size": 24
} | def validate_csv(headers, fields, required_fields):
# Validate provided column headers
is_update = False
for field, to_field in headers.items():
if field == "id":
is_update = True
continue
if field not in fields:
raise forms.ValidationError(f'Unexpect... | |
@public | 49,299 | 199,621 | 37 | sympy/polys/appellseqs.py | 21 | 9 | def bernoulli_poly(n, x=None, polys=False):
return appell_poly(n, [[1], [1, QQ(-1,2)]], QQ(1,2),
lambda p, i: p * QQ(1<<(i-1), 1-(1<<i)), QQ, x, polys)
@public | Initial definition of Appell sequences | bernoulli_poly | e875bdb804b0285e4a9bd8de0158436e792c03cb | sympy | appellseqs.py | 14 | 3 | https://github.com/sympy/sympy.git | 1 | 78 | 1 | 21 | 111 | Python | {
"docstring": "Generates the Bernoulli polynomial of degree `n` in `x`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ",
"language": "en",
"n_whitesp... | def bernoulli_poly(n, x=None, polys=False):
return appell_poly(n, [[1], [1, QQ(-1,2)]], QQ(1,2),
lambda p, i: p * QQ(1<<(i-1), 1-(1<<i)), QQ, x, polys)
@public |
34,465 | 149,634 | 149 | tests/data/test_btanalysis.py | 91 | 35 | def test_calculate_max_drawdown_abs(values, relative, result, result_rel):
dates = [Arrow(2020, 1, 1).shift(days=i) for i in range(len(values))]
df = DataFrame(zip(values, dates), columns=['profit | Improve test for max_drawdown calculations | test_calculate_max_drawdown_abs | 9bc6bbe472f58bbec82d741ab916d66c52b2978a | freqtrade | test_btanalysis.py | 12 | 14 | https://github.com/freqtrade/freqtrade.git | 2 | 152 | 0 | 69 | 236 | Python | {
"docstring": "\n Test case from issue https://github.com/freqtrade/freqtrade/issues/6655\n [1000, 500, 1000, 11000, 10000] # absolute results\n [1000, 50%, 0%, 0%, ~9%] # Relative drawdowns\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 21,
"vocab_size": 18
} | def test_calculate_max_drawdown_abs(values, relative, result, result_rel):
dates = [Arrow(2020, 1, 1).shift(days=i) for i in range(len(values))]
df = DataFrame(zip(values, dates), columns=['profit_abs', 'open_date'])
# sort by profit and reset index
df = df.sort_values('profit_abs').reset_index(dr... | |
29,749 | 132,413 | 130 | python/ray/tune/tests/test_checkpoint_manager.py | 32 | 15 | def testOnCheckpointUnavailableAttribute(self):
checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1)
no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {})
with patch.object(logger, "error") as log_error_mock:
checkpoint_manager.on_checkpoint(no_attr_... | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | testOnCheckpointUnavailableAttribute | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_checkpoint_manager.py | 11 | 9 | https://github.com/ray-project/ray.git | 1 | 62 | 0 | 30 | 106 | Python | {
"docstring": "\n Tests that an error is logged when the associated result of the\n checkpoint has no checkpoint score attribute.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 16
} | def testOnCheckpointUnavailableAttribute(self):
checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1)
no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {})
with patch.object(logger, "error") as log_error_mock:
checkpoint_manager.on_checkpoint(no_attr_... | |
@pytest.fixture | 5,090 | 27,097 | 28 | saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py | 10 | 8 | def subscription_app_status_changed_webhook(subscription_webhook):
return subscription_webhook(
APP_STATUS_CHANGED_SUBSCRIPTION_QUERY,
WebhookEventAsyncType.APP_STATUS_CHANGED,
)
CATEGORY_CREATED_SUBSCRIPTION_QUERY =
@pytest.fixture | New events related to apps changes. (#9698)
* New events related to apps changes.
* Schema update after rebase
* CHANGELOG.md update
* New events description fix
* Missing app event added to CHANGELOG.md | subscription_app_status_changed_webhook | b5e414c98a1535d287721c859994424cf0eea081 | saleor | fixtures.py | 8 | 5 | https://github.com/saleor/saleor.git | 1 | 15 | 1 | 10 | 37 | Python | {
"docstring": "\n subscription{\n event{\n ...on CategoryCreated{\n category{\n id\n }\n }\n }\n }\n",
"language": "en",
"n_whitespaces": 69,
"n_words": 10,
"vocab_size": 7
} | def subscription_app_status_changed_webhook(subscription_webhook):
return subscription_webhook(
APP_STATUS_CHANGED_SUBSCRIPTION_QUERY,
WebhookEventAsyncType.APP_STATUS_CHANGED,
)
CATEGORY_CREATED_SUBSCRIPTION_QUERY =
@pytest.fixture |
35,617 | 153,801 | 196 | modin/core/dataframe/pandas/dataframe/dataframe.py | 49 | 23 | def binary_op(self, op, right_frame, join_type="outer"):
left_parts, right_parts, joined_index, row_lengths = self._copartition(
0, right_frame, join_type, sort=True
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
| PERF-#4493: Use partition size caches more in Modin dataframe. (#4495)
Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com>
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: mvashishtha <mahesh@ponder.io> | binary_op | cca9468648521e9317de1cb69cf8e6b1d5292d21 | modin | dataframe.py | 11 | 16 | https://github.com/modin-project/modin.git | 1 | 104 | 0 | 40 | 149 | Python | {
"docstring": "\n Perform an operation that requires joining with another Modin DataFrame.\n\n Parameters\n ----------\n op : callable\n Function to apply after the join.\n right_frame : PandasDataframe\n Modin DataFrame to join with.\n join_type : str,... | def binary_op(self, op, right_frame, join_type="outer"):
left_parts, right_parts, joined_index, row_lengths = self._copartition(
0, right_frame, join_type, sort=True
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._par... | |
@contextlib.contextmanager | 23,107 | 108,226 | 109 | lib/matplotlib/__init__.py | 42 | 15 | def rc_file(fname, *, use_default_template=True):
# Deprecation warnings were already handled in rc_params_from_file, no need
# to reemit them here.
with _api.suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rc_from_file = rc_params_from_file(
... | Fix removed cross-references | rc_file | 7c6c5f6215b40a27cfefb7bf21246299fd9b3a1e | matplotlib | __init__.py | 12 | 7 | https://github.com/matplotlib/matplotlib.git | 3 | 58 | 1 | 37 | 103 | Python | {
"docstring": "\n Update `.rcParams` from file.\n\n Style-blacklisted `.rcParams` (defined in\n ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated.\n\n Parameters\n ----------\n fname : str or path-like\n A file with Matplotlib rc settings.\n\n use_default_template : bool\n ... | def rc_file(fname, *, use_default_template=True):
# Deprecation warnings were already handled in rc_params_from_file, no need
# to reemit them here.
with _api.suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rc_from_file = rc_params_from_file(
... |
32,000 | 140,529 | 18 | python/ray/util/collective/collective_group/gloo_util.py | 9 | 7 | def create_gloo_context(rank, world_size):
context = pygloo.rendezvous.Context(rank, world_size)
return context
| Clean up docstyle in python modules and add LINT rule (#25272) | create_gloo_context | 905258dbc19753c81039f993477e7ab027960729 | ray | gloo_util.py | 9 | 3 | https://github.com/ray-project/ray.git | 1 | 22 | 0 | 8 | 36 | Python | {
"docstring": "Create a GLOO context using GLOO APIs.\n\n Args:\n rank: the rank of this process.\n world_size: the number of processes of this collective group.\n\n Returns:\n context (pygloo.Context): a GLOO context.\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 29,
"... | def create_gloo_context(rank, world_size):
context = pygloo.rendezvous.Context(rank, world_size)
return context
| |
6,763 | 37,307 | 35 | src/transformers/testing_utils.py | 12 | 5 | def require_bitsandbytes(test_case):
if not is_bitsandbytes | Add support for bitsandbytes (#15622)
* Add initial BNB integration
* fixup! Add initial BNB integration
* Add bnb test decorator
* Update Adamw8bit option name
* Use the full bnb package name
* Overide bnb for all embedding layers
* Fix package name
* Formatting
* Remove unnecessary import
... | require_bitsandbytes | 3104036e7f1a3cd6e07a69d648c3597de32f72fe | transformers | testing_utils.py | 11 | 5 | https://github.com/huggingface/transformers.git | 2 | 26 | 0 | 11 | 49 | Python | {
"docstring": "\n Decorator for bits and bytes (bnb) dependency\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | def require_bitsandbytes(test_case):
if not is_bitsandbytes_available():
return unittest.skip("test requires bnb")(test_case)
else:
return test_case
| |
85,374 | 285,675 | 75 | openbb_terminal/api.py | 22 | 17 | def copy_func(f) -> Callable:
| Next release : reports on steroids (#2349)
* fix gov tests
* refactor insider
* new virtual path extraction
* removed some symbol default params as they're considered critical
* little adjustments
* portfolio refactor
* merge API factory
* add helpers, stocks, crypto, forex
* minor forex change... | copy_func | 72b0a9f1ee8b91ad9fd9e76d80d2ccab51ee6d21 | OpenBBTerminal | api.py | 10 | 21 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 1 | 60 | 0 | 18 | 91 | Python | {
"docstring": "Copies the contents and attributes of the entered function. Based on https://stackoverflow.com/a/13503277\n Parameters\n ----------\n f: Callable\n Function to be copied\n Returns\n -------\n g: Callable\n New function\n ",
"language": "en",
"n_whitespaces": 61,
... | def copy_func(f) -> Callable:
g = types.FunctionType(
f.__code__,
f.__globals__,
name=f.__name__,
argdefs=f.__defaults__,
closure=f.__closure__,
)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
| |
3,185 | 20,032 | 463 | pipenv/patched/notpip/_vendor/distlib/markers.py | 123 | 19 | def evaluate(self, expr, context):
if isinstance(expr, string_types):
if expr[0] in '\'"':
result = expr[1:-1]
else:
if expr not in context:
raise SyntaxError('unknown variable: %s' % expr)
result = context[expr... | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | evaluate | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | markers.py | 16 | 28 | https://github.com/pypa/pipenv.git | 12 | 233 | 0 | 73 | 395 | Python | {
"docstring": "\n Evaluate a marker expression returned by the :func:`parse_requirement`\n function in the specified context.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 13,
"vocab_size": 12
} | def evaluate(self, expr, context):
if isinstance(expr, string_types):
if expr[0] in '\'"':
result = expr[1:-1]
else:
if expr not in context:
raise SyntaxError('unknown variable: %s' % expr)
result = context[expr... | |
@keras_export("keras.__internal__.backend.track_variable", v1=[]) | 80,151 | 269,522 | 29 | keras/backend.py | 11 | 9 | def track_tf_optimizer(tf_optimizer):
if tf.executing_eagerly():
return
optimizers = _GRAPH_TF_OPTIMIZERS[None]
optimiz | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | track_tf_optimizer | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | backend.py | 8 | 5 | https://github.com/keras-team/keras.git | 2 | 26 | 1 | 11 | 64 | Python | {
"docstring": "Tracks the given TF optimizer for initialization of its variables.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def track_tf_optimizer(tf_optimizer):
if tf.executing_eagerly():
return
optimizers = _GRAPH_TF_OPTIMIZERS[None]
optimizers.add(tf_optimizer)
@keras_export("keras.__internal__.backend.track_variable", v1=[]) |
47,370 | 195,687 | 582 | sympy/polys/numberfields/galoisgroups.py | 247 | 49 | def _galois_group_degree_5(T, max_tries=30, randomize=False):
r
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.named_groups import (
CyclicGroup, DihedralGroup, AlternatingGroup, SymmetricGroup
| Add a `galois_group()` function | _galois_group_degree_5 | d3c0fc825c4a80904a1fb9a2092137c3d9e0c3fe | sympy | galoisgroups.py | 16 | 60 | https://github.com/sympy/sympy.git | 10 | 556 | 0 | 159 | 820 | Python | {
"docstring": "\n Compute the Galois group of a polynomial of degree 5, following Alg 6.3.9\n of Cohen.\n\n References\n ==========\n\n .. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*.\n\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 28,
"vocab_size": 26
} | def _galois_group_degree_5(T, max_tries=30, randomize=False):
r
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.named_groups import (
CyclicGroup, DihedralGroup, AlternatingGroup, SymmetricGroup
)
# The ideas here are all the same as in the degree-4 method.... | |
55,922 | 220,126 | 44 | python3.10.4/Lib/argparse.py | 16 | 10 | def error(self, message):
se | add python 3.10.4 for windows | error | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | argparse.py | 11 | 4 | https://github.com/XX-net/XX-Net.git | 1 | 42 | 0 | 16 | 74 | Python | {
"docstring": "error(message: string)\n\n Prints a usage message incorporating the message to stderr and\n exits.\n\n If you override this in a subclass, it should not return -- it\n should either exit or raise an exception.\n ",
"language": "en",
"n_whitespaces": 68,
"n_word... | def error(self, message):
self.print_usage(_sys.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
| |
@keras_export("keras.applications.regnet.decode_predictions") | 80,066 | 269,418 | 15 | keras/applications/regnet.py | 9 | 4 | def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
return x
@keras_export("k | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | preprocess_input | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | regnet.py | 7 | 2 | https://github.com/keras-team/keras.git | 1 | 12 | 1 | 9 | 33 | Python | {
"docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the regnet model\n implementation. Users are no longer required to call this method to normalize\n the input data. This method does nothing and only kept as a placeholder to\n align the API su... | def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
return x
@keras_export("keras.applications.regnet.decode_predictions") |
16,323 | 74,807 | 214 | wagtail/documents/tests/test_admin_views.py | 46 | 24 | def test_edit_post(self):
# Send request
response = self.client.post(
reverse("wagtaildocs:edit_multiple", args=(self.doc.id,)),
{
"doc-%d-%s" % (self.doc.id, field): data
for field, data in self.edit_post_data.items()
},
... | Reformat with black | test_edit_post | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_admin_views.py | 14 | 17 | https://github.com/wagtail/wagtail.git | 2 | 147 | 0 | 38 | 246 | Python | {
"docstring": "\n This tests that a POST request to the edit view edits the document\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 12
} | def test_edit_post(self):
# Send request
response = self.client.post(
reverse("wagtaildocs:edit_multiple", args=(self.doc.id,)),
{
"doc-%d-%s" % (self.doc.id, field): data
for field, data in self.edit_post_data.items()
},
... | |
117,439 | 320,926 | 26 | tests/unit/mainwindow/test_messageview.py | 10 | 13 | def test_message_hiding(qtbot, view):
with qtbot.wait_signal(view._clear_timer.timeout):
view.show_message(message.M | Add a MessageInfo data class
Preparation for #7246 | test_message_hiding | 5616a99eff34f7074641d1391ed77d6b4b743529 | qutebrowser | test_messageview.py | 13 | 4 | https://github.com/qutebrowser/qutebrowser.git | 1 | 42 | 0 | 10 | 72 | Python | {
"docstring": "Messages should be hidden after the timer times out.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_message_hiding(qtbot, view):
with qtbot.wait_signal(view._clear_timer.timeout):
view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
assert not view._messages
| |
71,034 | 246,133 | 283 | tests/rest/client/test_profile.py | 48 | 19 | def test_avatar_allowed_mime_type_per_room(self):
self._setup_local_files(
{
"good": {"mimetype": "image/png"},
"bad": {"mimetype": "application/octet-stream"},
}
)
room_id = self.helper.create_room_as(tok=self.owner_tok)
... | Configurable limits on avatars (#11846)
Only allow files which file size and content types match configured
limits to be set as avatar.
Most of the inspiration from the non-test code comes from matrix-org/synapse-dinsic#19 | test_avatar_allowed_mime_type_per_room | bf60da1a60096fac5fb778b732ff2214862ac808 | synapse | test_profile.py | 12 | 25 | https://github.com/matrix-org/synapse.git | 1 | 150 | 0 | 32 | 276 | Python | {
"docstring": "Tests that the MIME type whitelist for avatars is enforced when updating a\n per-room profile.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 15,
"vocab_size": 15
} | def test_avatar_allowed_mime_type_per_room(self):
self._setup_local_files(
{
"good": {"mimetype": "image/png"},
"bad": {"mimetype": "application/octet-stream"},
}
)
room_id = self.helper.create_room_as(tok=self.owner_tok)
... | |
79,292 | 268,018 | 20 | test/lib/ansible_test/_internal/host_profiles.py | 6 | 7 | def container_name(self) -> t.Optional[str]:
return self.state.get('container_na | ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annota... | container_name | 3eb0485dd92c88cc92152d3656d94492db44b183 | ansible | host_profiles.py | 8 | 3 | https://github.com/ansible/ansible.git | 1 | 22 | 0 | 6 | 39 | Python | {
"docstring": "Return the stored container name, if any, otherwise None.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def container_name(self) -> t.Optional[str]:
return self.state.get('container_name')
| |
75,702 | 259,303 | 148 | sklearn/metrics/_scorer.py | 37 | 10 | def get_scorer(scoring):
if isinstance(scoring, str):
try:
scorer = copy.deepcopy(_SCORERS[scoring])
| API get_scorer returns a copy and introduce get_scorer_names (#22866) | get_scorer | 7dc97a378ecbfa056dd9cfa9d1ef4c07d2d0cc1f | scikit-learn | _scorer.py | 15 | 13 | https://github.com/scikit-learn/scikit-learn.git | 3 | 46 | 0 | 30 | 83 | Python | {
"docstring": "Get a scorer from string.\n\n Read more in the :ref:`User Guide <scoring_parameter>`.\n :func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names\n of all available scorers.\n\n Parameters\n ----------\n scoring : str or callable\n Scoring method as string. I... | def get_scorer(scoring):
if isinstance(scoring, str):
try:
scorer = copy.deepcopy(_SCORERS[scoring])
except KeyError:
raise ValueError(
"%r is not a valid scoring value. "
"Use sklearn.metrics.get_scorer_names() "
"to get v... | |
75,239 | 258,445 | 1,034 | sklearn/discriminant_analysis.py | 249 | 50 | def fit(self, X, y):
X, y = self._validate_data(
X, y, ensure_min_samples=2, dtype=[np.float64, np.float32]
)
self.classes_ = unique_labels(y)
n_samples, _ = X.shape
n_classes = len(self.classes_)
if n_samples == n_classes:
raise ValueErr... | ENH Adds get_feature_names_out for discriminant_analysis (#22120) | fit | 5c675183d81d71e7e670bb32cf869afb99b513af | scikit-learn | discriminant_analysis.py | 14 | 68 | https://github.com/scikit-learn/scikit-learn.git | 13 | 437 | 0 | 151 | 696 | Python | {
"docstring": "Fit the Linear Discriminant Analysis model.\n\n .. versionchanged:: 0.19\n *store_covariance* has been moved to main constructor.\n\n .. versionchanged:: 0.19\n *tol* has been moved to main constructor.\n\n Parameters\n ----------\n X ... | def fit(self, X, y):
X, y = self._validate_data(
X, y, ensure_min_samples=2, dtype=[np.float64, np.float32]
)
self.classes_ = unique_labels(y)
n_samples, _ = X.shape
n_classes = len(self.classes_)
if n_samples == n_classes:
raise ValueErr... | |
17,646 | 83,277 | 59 | zerver/webhooks/bitbucket3/tests.py | 20 | 5 | def test_pr_opened_with_multiple_reviewers(self) -> None:
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
expected_message =
self.check_webhook(
"pull_request_opened_with_multiple_reviewers", expected_topic, expected_message
)
| docs: Fix many spelling mistakes.
Signed-off-by: Anders Kaseorg <anders@zulip.com> | test_pr_opened_with_multiple_reviewers | b0ce4f1bce8031881addecb1e86073483517f392 | zulip | tests.py | 8 | 6 | https://github.com/zulip/zulip.git | 1 | 23 | 0 | 18 | 46 | Python | {
"docstring": "[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.... | def test_pr_opened_with_multiple_reviewers(self) -> None:
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
expected_message =
self.check_webhook(
"pull_request_opened_with_multiple_reviewers", expected_topic, expected_message
)
| |
7,284 | 39,911 | 16 | dash/_grouping.py | 10 | 7 | def make_grouping_by_key(schema, source, default=None):
| extended ctx.arg_grouping and changed it to AttributeDict | make_grouping_by_key | d19f04c9529d624a8d8f9d02f047c4e972f9d4db | dash | _grouping.py | 10 | 2 | https://github.com/plotly/dash.git | 1 | 29 | 0 | 10 | 45 | Python | {
"docstring": "\n Create a grouping from a schema by using the schema's scalar values to look up\n items in the provided source object.\n\n :param schema: A grouping of potential keys in source\n :param source: Dict-like object to use to look up scalar grouping value using\n scalar grouping values... | def make_grouping_by_key(schema, source, default=None):
return map_grouping(lambda s: source.get(s, default), schema)
| |
47,778 | 196,278 | 39 | sympy/geometry/point.py | 18 | 11 | def taxicab_distance(self, p):
s, p = Point._normalize_dimension(self, Point(p))
return Add(*(abs(a - b) for a, b in zip(s, p)))
| Updated import locations | taxicab_distance | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | point.py | 12 | 3 | https://github.com/sympy/sympy.git | 2 | 47 | 0 | 18 | 74 | Python | {
"docstring": "The Taxicab Distance from self to point p.\n\n Returns the sum of the horizontal and vertical distances to point p.\n\n Parameters\n ==========\n\n p : Point\n\n Returns\n =======\n\n taxicab_distance : The sum of the horizontal\n and vertical di... | def taxicab_distance(self, p):
s, p = Point._normalize_dimension(self, Point(p))
return Add(*(abs(a - b) for a, b in zip(s, p)))
| |
79,750 | 268,884 | 21 | keras/metrics/metrics.py | 17 | 8 | def cosine_similarity(y_true, y_pred, axis=-1):
y_true = tf.linalg.l2_normalize(y_true, axis=axis)
y_pred = tf.linalg.l2_normalize(y_pred, | Refactor disparate metrics-related files into a single metrics folder.
Further work may be needed to split up the long file with individual metric definitions. However having a single file per metric may be too granular. TBD.
PiperOrigin-RevId: 425248502 | cosine_similarity | b4dca51d0558e788f62a96d1009a07f773a202f4 | keras | metrics.py | 9 | 4 | https://github.com/keras-team/keras.git | 1 | 54 | 0 | 13 | 83 | Python | {
"docstring": "Computes the cosine similarity between labels and predictions.\n\n Args:\n y_true: The ground truth values.\n y_pred: The prediction values.\n axis: (Optional) Defaults to -1. The dimension along which the cosine\n similarity is computed.\n\n Returns:\n Cosine similarity value.\n "... | def cosine_similarity(y_true, y_pred, axis=-1):
y_true = tf.linalg.l2_normalize(y_true, axis=axis)
y_pred = tf.linalg.l2_normalize(y_pred, axis=axis)
return tf.reduce_sum(y_true * y_pred, axis=axis)
| |
22,523 | 106,953 | 215 | lib/matplotlib/transforms.py | 110 | 19 | def rotate(self, theta):
a = math.cos(theta)
b = math.sin(theta)
| Micro-optimize rotation transform.
The following test script shows a ~3x speedup.
```python
import math, numpy as np
mtx = np.array([[.1, .2, .3], [.4, .5, .6], [0, 0, 1]])
theta = np.pi / 4
def rotate(mtx, theta):
a = math.cos(theta)
b = math.sin(theta)
rotate_mtx = np.array([[a, -b, 0.0], [b, a, 0.0],... | rotate | ff120cdc5aef1d609913678b1ac8c26e6f30691e | matplotlib | transforms.py | 8 | 13 | https://github.com/matplotlib/matplotlib.git | 1 | 143 | 0 | 53 | 214 | Python | {
"docstring": "\n Add a rotation (in radians) to this transform in place.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n ",
"language": "en",
"n_whitespaces": 64,
"n_wo... | def rotate(self, theta):
a = math.cos(theta)
b = math.sin(theta)
mtx = self._mtx
# Operating and assigning one scalar at a time is much faster.
(xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()
# mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx
mtx[0, 0] = a * xx - b ... | |
85,834 | 286,499 | 1,328 | openbb_terminal/parent_classes.py | 198 | 59 | def call_load(self, other_args):
parser = argparse.ArgumentParser(
add_help=False,
| Sdk dates (#3354)
* example changes in slopes
* change lettering size and side bar capitalization
* revert back to Fira
* start automatic website generation
* this was autogen
* add examples to slopes model
* generate slopes doc
* change to _index.md
* allow italic formatting
* fix regex
... | call_load | 46141766d7250671b7bc75872e2034afe4938374 | OpenBBTerminal | parent_classes.py | 16 | 99 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 7 | 486 | 0 | 141 | 791 | Python | {
"docstring": "Process load command.Load crypto currency to perform analysis on.\n Yahoo Finance is used as default source.\n Other sources can be used such as 'ccxt' or 'cg' with --source.\n If you select 'ccxt', you can then select any exchange with --exchange.\n You can... | def call_load(self, other_args):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="load",
description=,
)
parser.add_argument(
"-c",
"--coin",
... | |
@register.filter('json') | 77,734 | 264,448 | 123 | netbox/utilities/templatetags/builtins/filters.py | 72 | 19 | def render_markdown(value):
schemes = '|'.join(get_config().ALLOWED_URL_SCHEMES)
# Strip HTML tags
value = strip_tags(value)
# Sanitize Markdown links
pattern = fr'\[([^\]]+)\]\((?!({schemes})).*:(.+)\)'
value = re.sub(pattern, '[\\1](\\3)', value, flags=re.IGNORECASE)
# Sanitize Mar... | Closes #8600: Document built-in template tags & filters | render_markdown | 7c105019d8ae9205051c302e7499b33a455f9176 | netbox | filters.py | 12 | 11 | https://github.com/netbox-community/netbox.git | 2 | 98 | 1 | 50 | 198 | Python | {
"docstring": "\n Render a string as Markdown. This filter is invoked as \"markdown\":\n\n {{ md_source_text|markdown }}\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 14,
"vocab_size": 13
} | def render_markdown(value):
schemes = '|'.join(get_config().ALLOWED_URL_SCHEMES)
# Strip HTML tags
value = strip_tags(value)
# Sanitize Markdown links
pattern = fr'\[([^\]]+)\]\((?!({schemes})).*:(.+)\)'
value = re.sub(pattern, '[\\1](\\3)', value, flags=re.IGNORECASE)
# Sanitize Mar... |
42,454 | 177,588 | 174 | label_studio/projects/functions/next_task.py | 62 | 22 | def _try_breadth_first(tasks, user):
tasks = tasks.annotate(annotations_count=Count( | feat: DEV-469: Skip queue (#1693)
* DEV-469 Skip queue project setting
* DEV-469 review fixes
* Merge migrations (DEV-469)
* Update requirements-test.txt
* Update requirements-test.txt
* Update test_exception.py
* Revert "Update test_exception.py"
This reverts commit b9c686c9bacaf298bafe3a207352cc... | _try_breadth_first | 074af782e6f351c711f18d8ad6a05aa4f632339c | label-studio | next_task.py | 16 | 17 | https://github.com/heartexlabs/label-studio.git | 3 | 104 | 0 | 49 | 174 | Python | {
"docstring": "Try to find tasks with maximum amount of annotations, since we are trying to label tasks as fast as possible\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 20,
"vocab_size": 17
} | def _try_breadth_first(tasks, user):
tasks = tasks.annotate(annotations_count=Count('annotations'))
max_annotations_count = tasks.aggregate(Max('annotations_count'))['annotations_count__max']
if max_annotations_count == 0:
# there is no any labeled tasks found
return
# find any ta... | |
33,074 | 143,838 | 405 | rllib/policy/tests/test_rnn_sequencing.py | 66 | 27 | def test_pad_batch_dynamic_max(self):
view_requirements = {
"state_in_0": ViewRequirement(
"state_out_0",
shift=[-1],
used_for_training=False,
used_for_compute_actions=True,
batch_repeat_value=1,
)
... | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | test_pad_batch_dynamic_max | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_rnn_sequencing.py | 13 | 33 | https://github.com/ray-project/ray.git | 1 | 190 | 0 | 49 | 299 | Python | {
"docstring": "Test pad_batch_to_sequences_of_same_size when dynamic_max = True",
"language": "en",
"n_whitespaces": 5,
"n_words": 6,
"vocab_size": 6
} | def test_pad_batch_dynamic_max(self):
view_requirements = {
"state_in_0": ViewRequirement(
"state_out_0",
shift=[-1],
used_for_training=False,
used_for_compute_actions=True,
batch_repeat_value=1,
)
... | |
45,950 | 188,975 | 171 | psutil/_pslinux.py | 61 | 31 | def sensors_fans():
ret = collections.defaultdict(list)
basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
if not basenames:
# CentOS has an intermediate /device directory:
# https://github.com/giampaolo/psutil/issues/971
basenames = glob.glob('/sys/class/hwmon/hwmon*/devic... | [Linux] cat/bcat utils refactoring (#2053) | sensors_fans | 46cb6c212a870b36bd0af17c48dd29f53468734b | psutil | _pslinux.py | 16 | 16 | https://github.com/giampaolo/psutil.git | 5 | 143 | 0 | 48 | 245 | Python | {
"docstring": "Return hardware fans info (for CPU and other peripherals) as a\n dict including hardware label and current speed.\n\n Implementation notes:\n - /sys/class/hwmon looks like the most recent interface to\n retrieve this info, and this implementation relies on it\n only (old distros wil... | def sensors_fans():
ret = collections.defaultdict(list)
basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
if not basenames:
# CentOS has an intermediate /device directory:
# https://github.com/giampaolo/psutil/issues/971
basenames = glob.glob('/sys/class/hwmon/hwmon*/devic... | |
55,257 | 218,360 | 31 | python3.10.4/Lib/importlib/util.py | 10 | 6 | def factory(cls, loader):
cls.__check_eager_loader(loader)
return lambda *args, ** | add python 3.10.4 for windows | factory | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | util.py | 11 | 3 | https://github.com/XX-net/XX-Net.git | 1 | 33 | 0 | 10 | 55 | Python | {
"docstring": "Construct a callable which returns the eager loader made lazy.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def factory(cls, loader):
cls.__check_eager_loader(loader)
return lambda *args, **kwargs: cls(loader(*args, **kwargs))
| |
16,053 | 73,586 | 197 | wagtail/contrib/table_block/tests.py | 40 | 10 | def test_table_block_caption_render(self):
value = {
"table_caption": "caption",
"first_row_is_table_header": False,
"first_col_is_header": Fals | Reformat with black | test_table_block_caption_render | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | tests.py | 11 | 25 | https://github.com/wagtail/wagtail.git | 1 | 83 | 0 | 31 | 140 | Python | {
"docstring": "\n Test a generic render with caption.\n \n <table>\n <caption>caption</caption>\n <tbody>\n <tr><td>Test 1</td><td>Test 2</td><td>Test 3</td></tr>\n <tr><td></td><td></td><td></td></tr>\n <... | def test_table_block_caption_render(self):
value = {
"table_caption": "caption",
"first_row_is_table_header": False,
"first_col_is_header": False,
"data": [
["Test 1", "Test 2", "Test 3"],
[None, None, None],
... | |
472 | 3,409 | 59 | airbyte-integrations/connectors/source-salesforce/unit_tests/unit_test.py | 15 | 1 | def stream_config_without_start_date(): | Source Salesforce: Deprecate API Type parameter (#9302)
* use BULK for the first sync, REST for incremental sync
* if stream contains compound data or/and base64 use always REST
* fix get stream state from connector state
* fix integration test
* refactor catalog name
* format code
* refactor unit te... | stream_config_without_start_date | 0a3713a5a52995dc0dc205d8edfd097bf625899f | airbyte | unit_test.py | 8 | 8 | https://github.com/airbytehq/airbyte.git | 1 | 28 | 0 | 15 | 58 | Python | {
"docstring": "Generates streams settings for REST logic without start_date",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def stream_config_without_start_date():
return {
"client_id": "fake_client_id",
"client_secret": "fake_client_secret",
"refresh_token": "fake_refresh_token",
"is_sandbox": False,
"wait_timeout": 15,
}
| |
35,023 | 151,476 | 132 | freqtrade/rpc/api_server/ws/channel.py | 25 | 8 | async def relay(self):
while True:
message = await self.queue.get()
try:
await self.send(message)
self.queue.task_done()
except RuntimeError:
# The connection was closed, just exit t | refactor broadcasting to a queue per client | relay | 3e8d8fd1b08e28f8ec231de9ee3be57a539b266e | freqtrade | channel.py | 12 | 8 | https://github.com/freqtrade/freqtrade.git | 3 | 39 | 0 | 24 | 72 | Python | {
"docstring": "\n Relay messages from the channel's queue and send them out. This is started\n as a task.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 16
} | async def relay(self):
while True:
message = await self.queue.get()
try:
await self.send(message)
self.queue.task_done()
except RuntimeError:
# The connection was closed, just exit the task
return
| |
9,909 | 49,786 | 98 | modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/gaussian_diffusion.py | 33 | 12 | def q_sample(self, x_start, t, noise=None):
if noise is None:
# noise = th.randn_like(x_start)
noise = paddle.randn(x_start.shape, x_start.dtype)
assert noise.shape == x_start.shape
return (_extract_into_tensor(self.sqrt_alphas_cumpr | add disco_diffusion_cnclip_vitb16 module | q_sample | f4d6e64cdc132ae868699a0ba442f4ab1d304a14 | PaddleHub | gaussian_diffusion.py | 11 | 6 | https://github.com/PaddlePaddle/PaddleHub.git | 2 | 73 | 0 | 26 | 109 | Python | {
"docstring": "\n Diffuse the data for a given number of diffusion steps.\n\n In other words, sample from q(x_t | x_0).\n\n :param x_start: the initial data batch.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :param noise: if specified, the split-o... | def q_sample(self, x_start, t, noise=None):
if noise is None:
# noise = th.randn_like(x_start)
noise = paddle.randn(x_start.shape, x_start.dtype)
assert noise.shape == x_start.shape
return (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_star... | |
25,222 | 114,587 | 47 | mindsdb/integrations/postgres_handler/postgres_handler.py | 19 | 5 | def get_views(self):
query = f"SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')"
result = self.run_native_query(q | Get tables, views, describe | get_views | 7e3da9157508a5eb38dbfabbd7f08ba8fa6c5a88 | mindsdb | postgres_handler.py | 8 | 4 | https://github.com/mindsdb/mindsdb.git | 1 | 20 | 0 | 17 | 36 | Python | {
"docstring": "\n List all views in PostgreSQL without the system views information_schema and pg_catalog\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | def get_views(self):
query = f"SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')"
result = self.run_native_query(query)
return result
| |
50,640 | 204,139 | 1,068 | django/contrib/gis/utils/layermapping.py | 274 | 36 | def verify_ogr_field(self, ogr_field, model_field):
if isinstance(ogr_field, OFTString) and isinstance(
model_field, (models.CharField, models.TextField)
):
if self.encoding and ogr_field.value is not None:
# The encoding for OGR data sources may be speci... | Refs #33476 -- Reformatted code with Black. | verify_ogr_field | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | layermapping.py | 16 | 53 | https://github.com/django/django.git | 16 | 278 | 0 | 155 | 451 | Python | {
"docstring": "\n Verify if the OGR Field contents are acceptable to the model field. If\n they are, return the verified value, otherwise raise an exception.\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 23,
"vocab_size": 21
} | def verify_ogr_field(self, ogr_field, model_field):
if isinstance(ogr_field, OFTString) and isinstance(
model_field, (models.CharField, models.TextField)
):
if self.encoding and ogr_field.value is not None:
# The encoding for OGR data sources may be speci... | |
2,741 | 13,717 | 73 | jina/serve/streamer.py | 19 | 8 | def get_streamer():
if 'JINA_STREAMER_ARGS' in os.environ:
args_dict = json.loads(os.environ['JINA_STREAMER_ARGS'])
return GatewayStreamer(**args_dict)
| feat: add get_streamer helper and inject streamer info (#5472) | get_streamer | b36e6bdb1f5d02a4c5af3131f3a07d7b4ccddced | jina | streamer.py | 12 | 6 | https://github.com/jina-ai/jina.git | 2 | 38 | 0 | 19 | 71 | Python | {
"docstring": "\n Return a streamer object based on the current environment context.\n The streamer object is contructed using runtime arguments stored in the `JINA_STREAMER_ARGS` environment variable.\n If this method is used outside a Jina context (process not controlled/orchestrated by jina),... | def get_streamer():
if 'JINA_STREAMER_ARGS' in os.environ:
args_dict = json.loads(os.environ['JINA_STREAMER_ARGS'])
return GatewayStreamer(**args_dict)
else:
raise OSError('JINA_STREAMER_ARGS environment variable is not set')
|