ast_errors stringlengths 0 3.2k | d_id int64 44 121k | id int64 70 338k | n_whitespaces int64 3 14k | path stringlengths 8 134 | n_words int64 4 4.82k | n_identifiers int64 1 131 | random_cut stringlengths 16 15.8k | commit_message stringlengths 2 15.3k | fun_name stringlengths 1 84 | commit_id stringlengths 40 40 | repo stringlengths 3 28 | file_name stringlengths 5 79 | ast_levels int64 6 31 | nloc int64 1 548 | url stringlengths 31 59 | complexity int64 1 66 | token_counts int64 6 2.13k | n_ast_errors int64 0 28 | vocab_size int64 4 1.11k | n_ast_nodes int64 15 19.2k | language stringclasses 1
value | documentation dict | code stringlengths 101 62.2k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56,295 | 221,252 | 39 | python3.10.4/Lib/calendar.py | 18 | 10 | def monthdays2calendar(self, year, month):
days = list(self.itermonthdays2(year, month))
r | add python 3.10.4 for windows | monthdays2calendar | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | calendar.py | 11 | 3 | https://github.com/XX-net/XX-Net.git | 2 | 48 | 0 | 18 | 72 | Python | {
"docstring": "\n Return a matrix representing a month's calendar.\n Each row represents a week; week entries are\n (day number, weekday number) tuples. Day numbers outside this month\n are zero.\n ",
"language": "en",
"n_whitespaces": 63,
"n_words": 27,
"vocab_size": 24
} | def monthdays2calendar(self, year, month):
days = list(self.itermonthdays2(year, month))
return [ days[i:i+7] for i in range(0, len(days), 7) ]
| |
28,163 | 126,397 | 55 | python/ray/serve/drivers.py | 23 | 8 | async def predict_with_route(self, route_path, *args, **kwargs):
if route_path not in self.dags:
raise RayServeExc | [Serve] Support Multiple DAG Entrypoints in DAGDriver (#26573) | predict_with_route | 410fe1b5ec9e798d6e7ffbb5844e258d08e323b3 | ray | drivers.py | 11 | 4 | https://github.com/ray-project/ray.git | 2 | 45 | 0 | 21 | 76 | Python | {
"docstring": "Perform inference directly without HTTP for multi dags.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | async def predict_with_route(self, route_path, *args, **kwargs):
if route_path not in self.dags:
raise RayServeException(f"{route_path} does not exist in dags routes")
return await self.dags[route_path].remote(*args, **kwargs)
| |
28,813 | 128,796 | 133 | python/ray/tune/experiment/trial.py | 23 | 9 | def should_recover(self):
return (
self.num_failures < self.max_failures
or self.max_failures < 0
or (
self.num_failures == self.max_failures
and self.num_restore_failures
< int(os.environ.get("TUNE_RESTORE_RETRY_NUM", ... | [tune] Add retry logic for restoring trials. (#29086)
This is an advanced setting. Consider the following scenario: Due to scheduling glitches, sometimes a restoring
trial may be scheduled onto a dying node. By setting this env var to a positive number, the trial can be restored
several times and hopefully one of th... | should_recover | f1882f90cf2d91f5d802b7dffd41db5e306d6e6c | ray | trial.py | 15 | 10 | https://github.com/ray-project/ray.git | 4 | 50 | 0 | 15 | 81 | Python | {
"docstring": "Returns whether the trial qualifies for retrying.\n\n This is if the trial has not failed more than max_failures. Note this\n may return true even when there is no checkpoint, either because\n `self.checkpoint_freq` is `0` or because the trial failed before\n a checkpoint h... | def should_recover(self):
return (
self.num_failures < self.max_failures
or self.max_failures < 0
or (
self.num_failures == self.max_failures
and self.num_restore_failures
< int(os.environ.get("TUNE_RESTORE_RETRY_NUM", ... | |
16,239 | 74,216 | 75 | wagtail/core/tests/test_locale_model.py | 15 | 14 | def test_change_root_page_locale_on_locale_deletion(self):
# change 'real' pages first
Page.objects.filter(depth__gt=1).update(
locale=Locale.objects.get(language_code="fr")
)
self.assertEqual(Page.get_first_root_node().locale.language_code, "en")
Locale.obje... | Reformat with black | test_change_root_page_locale_on_locale_deletion | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_locale_model.py | 12 | 7 | https://github.com/wagtail/wagtail.git | 1 | 78 | 0 | 14 | 136 | Python | {
"docstring": "\n On deleting the locale used for the root page (but no 'real' pages), the\n root page should be reassigned to a new locale (the default one, if possible)\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 28,
"vocab_size": 23
} | def test_change_root_page_locale_on_locale_deletion(self):
# change 'real' pages first
Page.objects.filter(depth__gt=1).update(
locale=Locale.objects.get(language_code="fr")
)
self.assertEqual(Page.get_first_root_node().locale.language_code, "en")
Locale.obje... | |
3,880 | 21,496 | 1,320 | pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py | 468 | 47 | def _proc_pax(self, tarfile):
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
... | Vendor in pip 22.1.2 | _proc_pax | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | tarfile.py | 15 | 52 | https://github.com/pypa/pipenv.git | 16 | 387 | 0 | 249 | 669 | Python | {
"docstring": "Process an extended or global header as described in\n POSIX.1-2008.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 10,
"vocab_size": 10
} | def _proc_pax(self, tarfile):
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
... | |
4,585 | 23,379 | 134 | ppocr/modeling/backbones/rec_efficientb3_pren.py | 22 | 9 | def get_global_params():
GlobalParams = namedtuple('GlobalParams', [
'drop_connect_rate', 'width_coefficient', 'depth_coefficient',
'depth_divisor', 'image_size'
])
global_params = GlobalParams(
drop_connect_rate=0.3,
width_coefficient=1.2... | [Feature] Add PREN Scene Text Recognition Model(Accepted in CVPR2021) (#5563)
* [Feature] add PREN scene text recognition model
* [Patch] Optimize yml File
* [Patch] Save Label/Pred Preprocess Time Cost
* [BugFix] Modify Shape Conversion to Fit for Inference Model Exportion
* [Patch] ?
* [Patch] ?
* ... | get_global_params | 6e607a0fa1cefbf0388dac86c84debf4781cec48 | PaddleOCR | rec_efficientb3_pren.py | 10 | 12 | https://github.com/PaddlePaddle/PaddleOCR.git | 1 | 55 | 0 | 20 | 83 | Python | {
"docstring": "\n The fllowing are efficientnetb3's arch superparams, but to fit for scene \n text recognition task, the resolution(image_size) here is changed \n from 300 to 64.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 23,
"vocab_size": 22
} | def get_global_params():
GlobalParams = namedtuple('GlobalParams', [
'drop_connect_rate', 'width_coefficient', 'depth_coefficient',
'depth_divisor', 'image_size'
])
global_params = GlobalParams(
drop_connect_rate=0.3,
width_coefficient=1.2... | |
23,349 | 108,809 | 31 | lib/matplotlib/path.py | 10 | 8 | def _create_closed(cls, vertices):
v = _to_unmasked_float_array(vertices)
| Add a helper to generate closed paths.
Instead of having to manually append an unused vertex that corresponds
to the CLOSEPATH code, add a _make_closed helper (private for now) which
does that for us. | _create_closed | e994b58e49bcd98334b220d74540005f62af918d | matplotlib | path.py | 12 | 3 | https://github.com/matplotlib/matplotlib.git | 1 | 36 | 0 | 10 | 57 | Python | {
"docstring": "\n Create a closed polygonal path going through *vertices*.\n\n Unlike ``Path(..., closed=True)``, *vertices* should **not** end with\n an entry for the CLOSEPATH; this entry is added by `._create_closed`.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 27,
"... | def _create_closed(cls, vertices):
v = _to_unmasked_float_array(vertices)
return cls(np.concatenate([v, v[:1]]), closed=True)
| |
@pytest.mark.parametrize("loss", ALL_LOSSES)
@pytest.mark.parametrize("sample_weight", [None, "range"])
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
@pytest.mark.parametrize("order", ("C", "F")) | 75,678 | 259,259 | 483 | sklearn/_loss/tests/test_loss.py | 93 | 34 | def test_predict_proba(loss, global_random_seed):
n_samples = 20
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=n_samples,
y_bound=(-100, 100),
raw_bound=(-5, 5),
seed=global_random_seed,
)
if hasattr(loss, "predict_proba"):
... | TST ensure that sklearn/_loss/tests/test_loss.py is seed insensitive (#22847)
Co-authored-by: Christian Lorentzen <lorentzen.ch@gmail.com> | test_predict_proba | 751c5cd05ff545c20ad0b09ac491c07f31e4cd56 | scikit-learn | test_loss.py | 14 | 38 | https://github.com/scikit-learn/scikit-learn.git | 4 | 248 | 1 | 62 | 453 | Python | {
"docstring": "Test that predict_proba and gradient_proba work as expected.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def test_predict_proba(loss, global_random_seed):
n_samples = 20
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=n_samples,
y_bound=(-100, 100),
raw_bound=(-5, 5),
seed=global_random_seed,
)
if hasattr(loss, "predict_proba"):
... |
13,932 | 65,555 | 4 | erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py | 6 | 4 | def get_late_shipments(scorecard):
return get | style: format code with black | get_late_shipments | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | supplier_scorecard_variable.py | 8 | 2 | https://github.com/frappe/erpnext.git | 1 | 16 | 0 | 6 | 29 | Python | {
"docstring": "Gets the number of late shipments (counting each item) in the period (based on Purchase Receipts vs POs)",
"language": "en",
"n_whitespaces": 17,
"n_words": 18,
"vocab_size": 17
} | def get_late_shipments(scorecard):
return get_total_shipments(scorecard) - get_on_time_shipments(scorecard)
| |
1,150 | 7,172 | 121 | ludwig/models/base.py | 38 | 16 | def eval_loss(self, targets, predictions):
eval_loss = 0
for of_name, of_obj in self.outp | feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027) | eval_loss | aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173 | ludwig | base.py | 12 | 10 | https://github.com/ludwig-ai/ludwig.git | 3 | 82 | 0 | 29 | 132 | Python | {
"docstring": "Computes all evaluation losses for the model given targets and predictions.\n\n Args:\n targets: A dictionary of target names to target tensors.\n predictions: A dictionary of output names to output tensors.\n\n Returns:\n A tuple of loss values for eval ... | def eval_loss(self, targets, predictions):
eval_loss = 0
for of_name, of_obj in self.output_features.items():
of_eval_loss = of_obj.eval_loss(targets[of_name], predictions[of_name])
eval_loss += of_obj.loss["weight"] * of_eval_loss
additional_loss = 0
ad... | |
35,404 | 153,455 | 135 | modin/db_conn.py | 40 | 6 | def partition_query(self, query, limit, offset):
return (
(
f"SELECT * FROM ({query}) AS _ ORDER BY(SELECT NULL)"
+ f" OFFSET {offset} ROWS FETCH NEXT {limit} ROWS ONLY"
)
if self._dialect_is_microsoft_sql()
else f"SELECT *... | FEAT-#979: Enable reading from SQL server. (#4279)
Co-authored-by: eavidan <eran.avidan@intel.com>
Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com>
Signed-off-by: mvashishtha <mahesh@ponder.io> | partition_query | 2d40797b2b700d81d4db4a4cd023d563edf6431f | modin | db_conn.py | 11 | 9 | https://github.com/modin-project/modin.git | 2 | 31 | 0 | 31 | 73 | Python | {
"docstring": "\n Get a query that partitions the original `query`.\n\n Parameters\n ----------\n query : str\n The SQL query to get a partition.\n limit : int\n The size of the partition.\n offset : int\n Where the partition begins.\n\n ... | def partition_query(self, query, limit, offset):
return (
(
f"SELECT * FROM ({query}) AS _ ORDER BY(SELECT NULL)"
+ f" OFFSET {offset} ROWS FETCH NEXT {limit} ROWS ONLY"
)
if self._dialect_is_microsoft_sql()
else f"SELECT *... | |
51,869 | 207,118 | 339 | tests/admin_filters/tests.py | 122 | 24 | def test_parameter_ends_with__in__or__isnull(self):
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get("/", {"decade__in": "the 90s"})
request.user = self.alf... | Refs #33476 -- Reformatted code with Black. | test_parameter_ends_with__in__or__isnull | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 11 | 25 | https://github.com/django/django.git | 1 | 284 | 0 | 52 | 475 | Python | {
"docstring": "\n A SimpleListFilter's parameter name is not mistaken for a model field\n if it ends with '__isnull' or '__in' (#17091).\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 19
} | def test_parameter_ends_with__in__or__isnull(self):
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get("/", {"decade__in": "the 90s"})
request.user = self.alf... | |
55,384 | 218,553 | 39 | python3.10.4/Lib/ipaddress.py | 16 | 5 | def v4_int_to_packed(address):
try:
return address.to_bytes(4, 'big')
except OverflowError:
raise ValueError("Address negative or too large for I | add python 3.10.4 for windows | v4_int_to_packed | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | ipaddress.py | 11 | 5 | https://github.com/XX-net/XX-Net.git | 2 | 25 | 0 | 16 | 47 | Python | {
"docstring": "Represent an address as 4 packed bytes in network (big-endian) order.\n\n Args:\n address: An integer representation of an IPv4 IP address.\n\n Returns:\n The integer address packed as 4 bytes in network (big-endian) order.\n\n Raises:\n ValueError: If the integer is nega... | def v4_int_to_packed(address):
try:
return address.to_bytes(4, 'big')
except OverflowError:
raise ValueError("Address negative or too large for IPv4")
| |
71,768 | 247,600 | 88 | tests/handlers/test_directory.py | 12 | 14 | def test_delete_alias_not_allowed(self) -> None:
self._create_alias(self.admin_user)
self.get_failure(
self.handler.delete_association(
create_requester(self.test_us | Add type hints to some tests/handlers files. (#12224) | test_delete_alias_not_allowed | 5dd949bee6158a8b651db9f2ae417a62c8184bfd | synapse | test_directory.py | 12 | 9 | https://github.com/matrix-org/synapse.git | 1 | 47 | 0 | 12 | 75 | Python | {
"docstring": "A user that doesn't meet the expected guidelines cannot delete an alias.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def test_delete_alias_not_allowed(self) -> None:
self._create_alias(self.admin_user)
self.get_failure(
self.handler.delete_association(
create_requester(self.test_user), self.room_alias
),
synapse.api.errors.AuthError,
)
| |
48,923 | 198,414 | 3,401 | sympy/integrals/trigonometry.py | 909 | 41 | def trigintegrate(f, x, conds='piecewise'):
pat, a, n, m = _pat_sincos(x)
f = f.rewrite('sincos')
M = f.match(pat)
if M is None:
return
n, m = M[n], M[m]
if n.is_zero and m.is_zero:
return x
zz = x if n.is_zero else S.Zero
a = M[a]
if n.is_odd or m.is_odd:
... | Improve loop performance | trigintegrate | e94a7b45d7b033ccbd57395dca28b654f875c54c | sympy | trigonometry.py | 24 | 78 | https://github.com/sympy/sympy.git | 30 | 829 | 0 | 266 | 1,394 | Python | {
"docstring": "\n Integrate f = Mul(trig) over x.\n\n Examples\n ========\n\n >>> from sympy import sin, cos, tan, sec\n >>> from sympy.integrals.trigonometry import trigintegrate\n >>> from sympy.abc import x\n\n >>> trigintegrate(sin(x)*cos(x), x)\n sin(x)**2/2\n\n >>> trigintegrate(sin(... | def trigintegrate(f, x, conds='piecewise'):
pat, a, n, m = _pat_sincos(x)
f = f.rewrite('sincos')
M = f.match(pat)
if M is None:
return
n, m = M[n], M[m]
if n.is_zero and m.is_zero:
return x
zz = x if n.is_zero else S.Zero
a = M[a]
if n.is_odd or m.is_odd:
... | |
77,585 | 264,064 | 23 | PyInstaller/utils/hooks/win32.py | 11 | 9 | def get_pywin32_module_file_attribute(module_name):
from PyInstaller.utils.win32 import winutils
module = w | hookutils: win32: port to PyInstaller.isolated framework | get_pywin32_module_file_attribute | b87832b35dc1866c81ecaf7e502afe48a4e50a82 | pyinstaller | win32.py | 8 | 4 | https://github.com/pyinstaller/pyinstaller.git | 1 | 26 | 0 | 11 | 43 | Python | {
"docstring": "\n Get the absolute path of the PyWin32 DLL specific to the PyWin32 module with the passed name.\n\n On import, each PyWin32 module:\n\n * Imports a DLL specific to that module.\n * Overwrites the values of all module attributes with values specific to that DLL. This includes that module's... | def get_pywin32_module_file_attribute(module_name):
from PyInstaller.utils.win32 import winutils
module = winutils.import_pywin32_module(module_name)
return module.__file__
| |
19,490 | 97,856 | 32 | src/sentry/pipeline/base.py | 11 | 6 | def render_warning(self, message):
context = {"error": message}
return render_to_response("sentry/pipeline-provider-error.html", context, self.request)
| ref(py): Split up large file (#32862)
Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com> | render_warning | d246d2b6d3e014270941209e54f2f12e09ad9a81 | sentry | base.py | 9 | 3 | https://github.com/getsentry/sentry.git | 1 | 26 | 0 | 11 | 45 | Python | {
"docstring": "For situations when we want to display an error without triggering an issue",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 12
} | def render_warning(self, message):
context = {"error": message}
return render_to_response("sentry/pipeline-provider-error.html", context, self.request)
| |
71,570 | 247,283 | 284 | tests/rest/client/test_rooms.py | 66 | 20 | def test_context_filter_not_labels(self) -> None:
event_id = self._send_labelled_messages_in_room()
channel = self.make_request(
"GET",
"/rooms/%s/context/%s?filter=%s"
% (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)),
access_token=... | Add type hints to `tests/rest/client` (#12108)
* Add type hints to `tests/rest/client`
* newsfile
* fix imports
* add `test_account.py`
* Remove one type hint in `test_report_event.py`
* change `on_create_room` to `async`
* update new functions in `test_third_party_rules.py`
* Add `test_filter.py`... | test_context_filter_not_labels | 2ffaf30803f93273a4d8a65c9e6c3110c8433488 | synapse | test_rooms.py | 13 | 27 | https://github.com/matrix-org/synapse.git | 3 | 189 | 0 | 47 | 302 | Python | {
"docstring": "Test that we can filter by the absence of a label on a /context request.",
"language": "en",
"n_whitespaces": 14,
"n_words": 15,
"vocab_size": 14
} | def test_context_filter_not_labels(self) -> None:
event_id = self._send_labelled_messages_in_room()
channel = self.make_request(
"GET",
"/rooms/%s/context/%s?filter=%s"
% (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)),
access_token=... | |
42,311 | 177,245 | 189 | networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py | 53 | 11 | def _consistent_PT(u, v, graph_params, state_params):
G1, G2 = graph_params.G1, graph_params.G2
mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping
for neighbor in G1[u]:
if neighbor in mapping:
if G1.number_of_edges(u, neighbor) != G2.number_of_edges(
... | Preliminary VF2++ Implementation (#5788)
* Preliminary implementation of the candidate node pair ordering of VF2++
* Removed unused lines of code
* Added todos
* Added demo and pseudocode for VF2++
* Pointed out a problem with the pseudocode
* Initialisation of the VF2++ basis structure
* Initialise ... | _consistent_PT | bffcd74649fb95a57fb834846eb3c7d9693c55b8 | networkx | feasibility.py | 13 | 16 | https://github.com/networkx/networkx.git | 7 | 110 | 0 | 32 | 164 | Python | {
"docstring": "Checks the consistency of extending the mapping using the current node pair.\n\n Parameters\n ----------\n u, v: Graph node\n The two candidate nodes being examined.\n\n graph_params: namedtuple\n Contains all the Graph-related parameters:\n\n G1,G2: NetworkX Graph or ... | def _consistent_PT(u, v, graph_params, state_params):
G1, G2 = graph_params.G1, graph_params.G2
mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping
for neighbor in G1[u]:
if neighbor in mapping:
if G1.number_of_edges(u, neighbor) != G2.number_of_edges(
... | |
14,662 | 67,919 | 13 | erpnext/stock/report/stock_balance/stock_balance.py | 23 | 13 | def get_variant_values_for(items):
attribute_map = {}
for attr in frappe.db.sql(
% ", ".join(["%s"] * len(items)),
tuple(items),
as_dict=1,
):
attribute_map.setdefault(attr["parent"], {})
attribute_map[attr["parent"]]. | style: format code with black | get_variant_values_for | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | stock_balance.py | 13 | 13 | https://github.com/frappe/erpnext.git | 2 | 82 | 0 | 22 | 140 | Python | {
"docstring": "Returns variant values for items.select parent, attribute, attribute_value\n\t\tfrom `tabItem Variant Attribute` where parent in (%s)\n\t\t",
"language": "en",
"n_whitespaces": 14,
"n_words": 16,
"vocab_size": 16
} | def get_variant_values_for(items):
attribute_map = {}
for attr in frappe.db.sql(
% ", ".join(["%s"] * len(items)),
tuple(items),
as_dict=1,
):
attribute_map.setdefault(attr["parent"], {})
attribute_map[attr["parent"]].update({attr["attribute"]: attr["attribute_value"]})
return attribute_map
| |
17,037 | 80,233 | 216 | wagtail/snippets/tests/test_locking.py | 63 | 17 | def test_edit_post_locked_by_self(self):
# Lock the snippet
self.lock_snippet(self.user)
# Try to edit the snippet
response = self.client.post(
self.get_url("edit"),
{"text": "Edited while locked"},
follow=True,
)
| Add tests for locking snippets | test_edit_post_locked_by_self | 10dbbddaf35607e4257f50dd960520a1268dd225 | wagtail | test_locking.py | 11 | 14 | https://github.com/wagtail/wagtail.git | 1 | 77 | 0 | 45 | 142 | Python | {
"docstring": "A user can edit a snippet that is locked by themselves.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_edit_post_locked_by_self(self):
# Lock the snippet
self.lock_snippet(self.user)
# Try to edit the snippet
response = self.client.post(
self.get_url("edit"),
{"text": "Edited while locked"},
follow=True,
)
self.refresh... | |
1,143 | 7,165 | 64 | ludwig/models/base.py | 18 | 13 | def update_metrics(self, targets, predictions):
| feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027) | update_metrics | aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173 | ludwig | base.py | 10 | 6 | https://github.com/ludwig-ai/ludwig.git | 2 | 65 | 0 | 18 | 101 | Python | {
"docstring": "Updates the model's metrics given targets and predictions.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def update_metrics(self, targets, predictions):
for of_name, of_obj in self.output_features.items():
of_obj.update_metrics(targets[of_name], predictions[of_name])
eval_loss, additional_losses = self.eval_loss(targets, predictions)
self.eval_loss_metric.update(eval_loss)
... | |
22,772 | 107,483 | 111 | lib/matplotlib/axis.py | 31 | 8 | def tick_right(self):
label = True
if 'label1On' in self._m | DOC: More cleanup axes -> Axes | tick_right | f156db08eee54d285ab0fb4e031e48d078ba6aa3 | matplotlib | axis.py | 12 | 7 | https://github.com/matplotlib/matplotlib.git | 3 | 51 | 0 | 26 | 94 | Python | {
"docstring": "\n Move ticks and ticklabels (if present) to the right of the Axes.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | def tick_right(self):
label = True
if 'label1On' in self._major_tick_kw:
label = (self._major_tick_kw['label1On']
or self._major_tick_kw['label2On'])
self.set_ticks_position('right')
# if labels were turned off before this was called
# le... | |
117,451 | 320,942 | 23 | tests/unit/mainwindow/test_messageview.py | 11 | 8 | def test_show_message_twice(view, info1, info2, count):
view.show_message(info1)
view.show_message(info2)
assert len(view._messages) == count
| Only replace the exact same message
If we have a error message followed by an info message with the same text, they
should both be shown, not replaced automatically. | test_show_message_twice | 676e01677183825d19107d3b2fbf1bb2c0684ede | qutebrowser | test_messageview.py | 9 | 4 | https://github.com/qutebrowser/qutebrowser.git | 1 | 33 | 0 | 11 | 53 | Python | {
"docstring": "Show the exact same message twice -> only one should be shown.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def test_show_message_twice(view, info1, info2, count):
view.show_message(info1)
view.show_message(info2)
assert len(view._messages) == count
| |
70,858 | 245,712 | 275 | mmdet/models/task_modules/assigners/iou2d_calculator.py | 94 | 17 | def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
bboxes1 = get_box_tensor(bboxes1)
bboxes2 = get_box_tensor(bboxes2)
assert bboxes1.size(-1) in [0, 4, 5]
assert bboxes2.size(-1) in [0, 4, 5]
if bboxes2.size(-1) == 5:
bboxes2 = bboxes2[..., ... | [Refactor] Refactor anchor head and base head with boxlist (#8625)
* Refactor anchor head
* Update
* Update
* Update
* Add a series of boxes tools
* Fix box type to support n x box_dim boxes
* revert box type changes
* Add docstring
* refactor retina_head
* Update
* Update
* Fix commen... | __call__ | d915740fa8228cf57741b27d9e5d66e358456b8e | mmdetection | iou2d_calculator.py | 12 | 17 | https://github.com/open-mmlab/mmdetection.git | 6 | 183 | 0 | 54 | 279 | Python | {
"docstring": "Calculate IoU between 2D bboxes.\n\n Args:\n bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n in <x1, y1, x2, y2> format, or shape (m, 5) in <x1, y1, x2,\n y2, score> format.\n bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have ... | def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
bboxes1 = get_box_tensor(bboxes1)
bboxes2 = get_box_tensor(bboxes2)
assert bboxes1.size(-1) in [0, 4, 5]
assert bboxes2.size(-1) in [0, 4, 5]
if bboxes2.size(-1) == 5:
bboxes2 = bboxes2[..., ... | |
21,037 | 101,629 | 20 | tools/sort/sort_methods_aligned.py | 6 | 6 | def binning(self) -> List[List[str]]:
return self._binning_linear_threshold(multiplier=100)
| Overhaul sort:
- Standardize image data reading and writing
- Optimize loading (just one pass required)
- Make all sort groups binnable (to greater or lesser results)
- Add sort by pitch
- Deprecate multiple options
- linting, docs + locales | binning | 98d01760e469fd2108eed8d0b0a1ba6297c3177c | faceswap | sort_methods_aligned.py | 8 | 9 | https://github.com/deepfakes/faceswap.git | 1 | 23 | 0 | 6 | 38 | Python | {
"docstring": " Create bins to split linearly from the lowest to the highest sample value\n\n Returns\n -------\n list\n List of bins of filenames\n ",
"language": "en",
"n_whitespaces": 61,
"n_words": 21,
"vocab_size": 17
} | def binning(self) -> List[List[str]]:
return self._binning_linear_threshold(multiplier=100)
| |
54,062 | 215,636 | 21 | salt/transport/base.py | 7 | 6 | def connect(self, publish_port, connect_callback=None, disconnect_callback=None):
raise NotImpleme | Add NotImplimentedError to stubs | connect | 8683fed190f0ac807ab3f87e0e66808f7dbc130c | salt | base.py | 6 | 2 | https://github.com/saltstack/salt.git | 1 | 18 | 0 | 7 | 28 | Python | {
"docstring": "\n Create a network connection to the the PublishServer or broker.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 9
} | def connect(self, publish_port, connect_callback=None, disconnect_callback=None):
raise NotImplementedError
| |
47,586 | 196,086 | 79 | sympy/combinatorics/free_groups.py | 18 | 6 | def contains(self, g):
if not isinstance(g, FreeGroupElement):
return False
elif self != g.group:
return False
else:
ret | Updated import locations | contains | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | free_groups.py | 8 | 7 | https://github.com/sympy/sympy.git | 3 | 32 | 0 | 15 | 53 | Python | {
"docstring": "Tests if Free Group element ``g`` belong to self, ``G``.\n\n In mathematical terms any linear combination of generators\n of a Free Group is contained in it.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> f, x, y, z = free_gro... | def contains(self, g):
if not isinstance(g, FreeGroupElement):
return False
elif self != g.group:
return False
else:
return True
| |
14,170 | 66,339 | 16 | erpnext/loan_management/doctype/loan_security_unpledge/loan_security_unpledge.py | 34 | 14 | def get_pledged_security_qty(loan):
current_pledges = {}
unpledges = frappe._dict(
frappe.db.sql(
,
(loan),
)
)
pledges = frappe._dict(
frappe.db.sql(
,
(loan),
)
)
for security, qt | style: format code with black | get_pledged_security_qty | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | loan_security_unpledge.py | 11 | 32 | https://github.com/frappe/erpnext.git | 2 | 85 | 0 | 24 | 131 | Python | {
"docstring": "\n\t\tSELECT u.loan_security, sum(u.qty) as qty\n\t\tFROM `tabLoan Security Unpledge` up, `tabUnpledge` u\n\t\tWHERE up.loan = %s\n\t\tAND u.parent = up.name\n\t\tAND up.status = 'Approved'\n\t\tGROUP BY u.loan_security\n\t\n\t\tSELECT p.loan_security, sum(p.qty) as qty\n\t\tFROM `tabLoan Security Ple... | def get_pledged_security_qty(loan):
current_pledges = {}
unpledges = frappe._dict(
frappe.db.sql(
,
(loan),
)
)
pledges = frappe._dict(
frappe.db.sql(
,
(loan),
)
)
for security, qty in pledges.items():
current_pledges.setdefault(security, qty)
current_pledges[security] -= unpledges.ge... | |
51,234 | 205,838 | 436 | django/db/models/sql/compiler.py | 101 | 25 | def get_select(self):
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += ... | Refs #33476 -- Reformatted code with Black. | get_select | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | compiler.py | 12 | 49 | https://github.com/django/django.git | 11 | 311 | 0 | 65 | 327 | Python | {
"docstring": "\n Return three values:\n - a list of 3-tuples of (expression, (sql, params), alias)\n - a klass_info structure,\n - a dictionary of annotations\n\n The (sql, params) is what the expression will produce, and alias is the\n \"AS alias\" for the column (possibly... | def get_select(self):
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += ... | |
77,467 | 263,850 | 81 | PyInstaller/depend/imphookapi.py | 20 | 7 | def set_module_collection_mode(self, name, mode):
if name is None:
name = self.__name__
if mode is None:
self._module_collection_mode.pop(name)
else:
| building & hooks: implement module collection mode setting
Implement a mechanism for controlling the collection mode of
modules and packages, with granularity ranging from top-level
packages to individual sub-modules. Therefore, the hooks can
now specify whether the hooked package should be collected as
byte-compiled ... | set_module_collection_mode | 5b2ab7067ba954bd7950a79ed31e5ee177ff3f43 | pyinstaller | imphookapi.py | 11 | 7 | https://github.com/pyinstaller/pyinstaller.git | 3 | 43 | 0 | 14 | 70 | Python | {
"docstring": "\"\n Set the package/module collection mode for the specified module\n name. If `name` is `None`, the hooked module/package name is used.\n Valid values for `mode` are: `'pyc'`, `'py'`, and `None`.\n ",
"language": "en",
"n_whitespaces": 58,
"n_words": 30,
"vocab_si... | def set_module_collection_mode(self, name, mode):
if name is None:
name = self.__name__
if mode is None:
self._module_collection_mode.pop(name)
else:
self._module_collection_mode[name] = mode
| |
56,817 | 222,933 | 55 | python3.10.4/Lib/distutils/file_util.py | 18 | 8 | def write_file (filename, contents):
f = open(filename, "w")
try:
for line in contents:
f.writ | add python 3.10.4 for windows | write_file | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | file_util.py | 13 | 7 | https://github.com/XX-net/XX-Net.git | 3 | 38 | 0 | 18 | 69 | Python | {
"docstring": "Create a file with the specified name and write 'contents' (a\n sequence of strings without line terminators) to it.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 19,
"vocab_size": 19
} | def write_file (filename, contents):
f = open(filename, "w")
try:
for line in contents:
f.write(line + "\n")
finally:
f.close()
| |
11,792 | 58,650 | 41 | tests/orion/schemas/test_core.py | 13 | 8 | async def test_flow_run_policy_is_backwards_compatible(self):
empty_new_policy = schemas.core.FlowRunPolicy()
# should not raise an error
self.OldFlowRu | Revert breaking schema change (#6521)
* Revert breaking schema change
* Add new retry properties on policies; deprecate prior ones
* Add tests for schema compat
* Use root_validator to populate properties from deprecated | test_flow_run_policy_is_backwards_compatible | 82c78fe8b65117dc5fe89365acb62e7aa902f8ba | prefect | test_core.py | 10 | 3 | https://github.com/PrefectHQ/prefect.git | 1 | 26 | 0 | 13 | 49 | Python | {
"docstring": "\n In version 2.1.1 and prior, the FlowRunPolicy schema required two properties,\n `max_retries` and `retry_delay_seconds`. These properties are deprecated.\n\n This test ensures old clients can load new FlowRunPolicySchemas. It can be removed\n when the corresponding prope... | async def test_flow_run_policy_is_backwards_compatible(self):
empty_new_policy = schemas.core.FlowRunPolicy()
# should not raise an error
self.OldFlowRunPolicy(**empty_new_policy.dict())
| |
44,041 | 183,062 | 33 | tests/css/test_help_text.py | 18 | 5 | def test_help_text_examples_are_contextualized():
rendered_inline = render(spacing_invalid_value("padding", "inline"))
assert "widget.styles.padding" in rendered_inline
rendered_css = render(spacing_invalid_value("padding", "css"))
assert "padding:" in rendered_css
| Testing for help text | test_help_text_examples_are_contextualized | 91783b7c1e06a45e93fd89dbdb6aa3d1a9c2e990 | textual | test_help_text.py | 11 | 5 | https://github.com/Textualize/textual.git | 1 | 35 | 0 | 12 | 70 | Python | {
"docstring": "Ensure that if the user is using CSS, they see CSS-specific examples\n and if they're using inline styles they see inline-specific examples.",
"language": "en",
"n_whitespaces": 24,
"n_words": 22,
"vocab_size": 18
} | def test_help_text_examples_are_contextualized():
rendered_inline = render(spacing_invalid_value("padding", "inline"))
assert "widget.styles.padding" in rendered_inline
rendered_css = render(spacing_invalid_value("padding", "css"))
assert "padding:" in rendered_css
| |
@pytest.mark.parametrize("patch", [True, False])
@pytest.mark.parametrize("connection_strategy", ["eager", "lazy"]) | 73,885 | 251,912 | 245 | test/mitmproxy/proxy/layers/test_modes.py | 80 | 30 | def test_reverse_proxy(tctx, keep_host_header):
server = Placeholder(Server)
tctx.options.mode = "reverse:http://localhost:8000"
tctx.options.connection_strategy = "lazy"
tctx.options.keep_host_header = keep_host_header
assert (
Playbook(modes.ReverseProxy(tctx), hooks=False)
>>... | make it black! | test_reverse_proxy | b3587b52b25077f68116b9852b041d33e7fc6601 | mitmproxy | test_modes.py | 18 | 25 | https://github.com/mitmproxy/mitmproxy.git | 2 | 160 | 1 | 58 | 319 | Python | {
"docstring": "Test mitmproxy in reverse proxy mode.\n\n - make sure that we connect to the right host\n - make sure that we respect keep_host_header\n - make sure that we include non-standard ports in the host header (#4280)\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 36,
"vocab_size"... | def test_reverse_proxy(tctx, keep_host_header):
server = Placeholder(Server)
tctx.options.mode = "reverse:http://localhost:8000"
tctx.options.connection_strategy = "lazy"
tctx.options.keep_host_header = keep_host_header
assert (
Playbook(modes.ReverseProxy(tctx), hooks=False)
>>... |
50,337 | 203,366 | 226 | django/contrib/admin/checks.py | 36 | 16 | def _check_filter_horizontal(self, obj):
if not isinstance(obj.filter_horizontal, (list, tuple)):
return must_be(
"a list or tuple", option="filter_horizontal", obj=obj, id="admin.E018"
)
else:
return list(
chain.from_iterable(... | Refs #33476 -- Reformatted code with Black. | _check_filter_horizontal | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | checks.py | 16 | 14 | https://github.com/django/django.git | 3 | 74 | 0 | 32 | 119 | Python | {
"docstring": "Check that filter_horizontal is a sequence of field names.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _check_filter_horizontal(self, obj):
if not isinstance(obj.filter_horizontal, (list, tuple)):
return must_be(
"a list or tuple", option="filter_horizontal", obj=obj, id="admin.E018"
)
else:
return list(
chain.from_iterable(... | |
4,523 | 23,163 | 49 | ppocr/data/imaug/fce_aug.py | 24 | 9 | def poly_intersection(poly_det, poly_gt):
assert isinstance(poly_det, plg.Polygon)
assert isinstance(poly_gt, plg.Polygon)
poly_inter = poly_det & poly_gt
if len(poly_inter) == 0:
return 0, poly_in | add fcenet | poly_intersection | 9f62b610dea6161627200ed85d92e19b1923279a | PaddleOCR | fce_aug.py | 8 | 7 | https://github.com/PaddlePaddle/PaddleOCR.git | 2 | 51 | 0 | 19 | 81 | Python | {
"docstring": "Calculate the intersection area between two polygon.\n\n Args:\n poly_det (Polygon): A polygon predicted by detector.\n poly_gt (Polygon): A gt polygon.\n\n Returns:\n intersection_area (float): The intersection area between two polygons.\n ",
"language": "en",
def poly_intersection(poly_det, poly_gt):
    """Calculate the intersection between two polygons.

    Args:
        poly_det (Polygon): A polygon predicted by a detector.
        poly_gt (Polygon): A ground-truth polygon.

    Returns:
        tuple: ``(intersection_area, intersection_polygon)``; the area is
        ``0`` when the polygons do not overlap.
    """
    assert isinstance(poly_det, plg.Polygon)
    assert isinstance(poly_gt, plg.Polygon)

    overlap = poly_det & poly_gt
    # A Polygon with no contours means the inputs do not intersect.
    if len(overlap) != 0:
        return overlap.area(), overlap
    return 0, overlap
| |
73,544 | 250,781 | 26 | mitmproxy/dns.py | 12 | 10 | def size(self) -> int:
return sum(len(x.data) for x in [*self.answers, *self.authorities, *self.additionals])
| [dns] first commit | size | 8c700ec6e45fc69379eec230da1bd840854ac20e | mitmproxy | dns.py | 11 | 3 | https://github.com/mitmproxy/mitmproxy.git | 2 | 37 | 0 | 12 | 59 | Python | {
"docstring": "Returns the cumulative data size of all resource record sections.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
def size(self) -> int:
    """Return the cumulative data size of all resource record sections."""
    total = 0
    # Walk every resource record in the answer, authority and
    # additional sections and add up the raw data lengths.
    for section in (self.answers, self.authorities, self.additionals):
        for record in section:
            total += len(record.data)
    return total
| |
28,234 | 126,691 | 340 | dashboard/modules/snapshot/snapshot_head.py | 49 | 18 | async def get_job_submission_info(self):
jobs = {}
fetched_jobs = await self._job_info_client.get_all_jobs()
for (
job_submission_id,
job_info,
) in fetched_jobs.items():
if job_info is not None:
entry = {
... | Convert job_manager to be async (#27123)
Updates jobs api
Updates snapshot api
Updates state api
Increases jobs api version to 2
Signed-off-by: Alan Guo aguo@anyscale.com
Why are these changes needed?
follow-up for #25902 (comment) | get_job_submission_info | 326b5bd1acc6d3d00ab0546e4ae45da6bed501f7 | ray | snapshot_head.py | 13 | 21 | https://github.com/ray-project/ray.git | 3 | 104 | 0 | 43 | 173 | Python | {
"docstring": "Info for Ray job submission. Here a job can have 0 or many drivers.",
"language": "en",
"n_whitespaces": 14,
"n_words": 14,
"vocab_size": 13
} | async def get_job_submission_info(self):
jobs = {}
fetched_jobs = await self._job_info_client.get_all_jobs()
for (
job_submission_id,
job_info,
) in fetched_jobs.items():
if job_info is not None:
entry = {
... | |
48,473 | 197,330 | 156 | sympy/ntheory/qs.py | 65 | 11 | def _gen_sieve_array(M, factor_base):
sieve_array = | Remove abbreviations in documentation | _gen_sieve_array | 65be461082dda54c8748922f9c29a19af1279fe1 | sympy | qs.py | 13 | 12 | https://github.com/sympy/sympy.git | 6 | 112 | 0 | 45 | 171 | Python | {
"docstring": "Sieve Stage of the Quadratic Sieve. For every prime in the factor_base\n that does not divide the coefficient `a` we add log_p over the sieve_array\n such that ``-M <= soln1 + i*p <= M`` and ``-M <= soln2 + i*p <= M`` where `i`\n is an integer. When p = 2 then log_p is only added using\n ... | def _gen_sieve_array(M, factor_base):
sieve_array = [0]*(2*M + 1)
for factor in factor_base:
if factor.soln1 is None: #The prime does not divides a
continue
for idx in range((M + factor.soln1) % factor.prime, 2*M, factor.prime):
sieve_array[idx] += factor.log_p
... | |
1,906 | 10,748 | 122 | setup.py | 46 | 9 | def rescue_docarray():
try:
import docarray as docarray
__docarray_version__ = docarray.__version__
except AttributeError:
# Being here means docarray is not installed correctly, attempt to reinstall it
# as recommended by pip https://pip.pypa.io/en/latest/user_guide/#usin... | fix: rescue docarray in setup (#4203) | rescue_docarray | 1f2c86359246e00eae7cba081d9e952cb64c9aea | jina | setup.py | 12 | 10 | https://github.com/jina-ai/jina.git | 2 | 59 | 0 | 39 | 110 | Python | {
"docstring": "Upgrading from 2.x to 3.x is broken (https://github.com/jina-ai/jina/issues/4194)\n This function checks if docarray is broken and if so attempts to rescue it\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 22,
"vocab_size": 18
} | def rescue_docarray():
try:
import docarray as docarray
__docarray_version__ = docarray.__version__
except AttributeError:
# Being here means docarray is not installed correctly, attempt to reinstall it
# as recommended by pip https://pip.pypa.io/en/latest/user_guide/#usin... | |
75,313 | 258,597 | 161 | sklearn/kernel_approximation.py | 55 | 15 | def transform(self, X):
msg = (
"%(name)s is not fitted. Call f | DOC Fix docstring for AdditiveChi2Sampler (#22138) | transform | ff85a34c95a9d8de13805be55f1a72f1b7ee2a42 | scikit-learn | kernel_approximation.py | 10 | 11 | https://github.com/scikit-learn/scikit-learn.git | 2 | 68 | 0 | 47 | 118 | Python | {
"docstring": "Apply approximate feature map to X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n ... | def transform(self, X):
msg = (
"%(name)s is not fitted. Call fit to set the parameters before"
" calling transform"
)
check_is_fitted(self, msg=msg)
X = self._validate_data(X, accept_sparse="csr", reset=False)
check_non_negative(X, "X in Additiv... | |
12,561 | 61,417 | 53 | .venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py | 14 | 4 | def get_repository_root(cls, location):
# type: (str) -> Optional[str]
if cls.is_repository_directory(location):
return location
return None
| upd; format | get_repository_root | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | versioncontrol.py | 7 | 4 | https://github.com/jindongwang/transferlearning.git | 2 | 20 | 0 | 13 | 35 | Python | {
"docstring": "\n Return the \"root\" (top-level) directory controlled by the vcs,\n or `None` if the directory is not in any.\n\n It is meant to be overridden to implement smarter detection\n mechanisms for specific vcs.\n\n This can do more than is_repository_directory() alone. F... | def get_repository_root(cls, location):
# type: (str) -> Optional[str]
if cls.is_repository_directory(location):
return location
return None
| |
517 | 3,695 | 41 | airbyte-integrations/connectors/source-hubspot/unit_tests/test_client.py | 26 | 17 | def test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client(oauth_config, configured_catalog):
source = SourceHubspot()
all_records = list(source.read(logger, config=oauth_config, catalog=configured_catalog, state=None))
records = [record for record in all_records if reco | Source Hubspot: fix "quotes" key error exception (#10055)
* check if stream exists in source
* check if stream exists in source, added comment
* test skipping reading quotes stream
* format code
* airbyte-cdk version
* added __init__.py to unit_tests
* fix importing airbyte models
* bump the versi... | test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client | b22efc03a18c5545c12cf8a0462dea7505aec410 | airbyte | test_client.py | 11 | 5 | https://github.com/airbytehq/airbyte.git | 3 | 56 | 0 | 22 | 87 | Python | {
"docstring": "\n If 'quotes' stream is not in the client, it should skip it.\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 12,
"vocab_size": 12
} | def test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client(oauth_config, configured_catalog):
source = SourceHubspot()
all_records = list(source.read(logger, config=oauth_config, catalog=configured_catalog, state=None))
records = [record for record in all_records if record.type == Type.R... | |
29,221 | 130,300 | 553 | python/ray/_private/utils.py | 226 | 17 | def get_conda_env_dir(env_name):
conda_prefix = os.environ.get("CONDA_PREFIX")
if conda_prefix is None:
# The caller is neither in a conda env or in (base) env. This is rare
# because by default, new terminals start in (base), but we can still
# support this case.
conda_exe... | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | get_conda_env_dir | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | utils.py | 14 | 26 | https://github.com/ray-project/ray.git | 6 | 142 | 0 | 130 | 268 | Python | {
"docstring": "Find and validate the conda directory for a given conda environment.\n\n For example, given the environment name `tf1`, this function checks\n the existence of the corresponding conda directory, e.g.\n `/Users/scaly/anaconda3/envs/tf1`, and returns it.\n ",
"language": "en",
"n_whitesp... | def get_conda_env_dir(env_name):
conda_prefix = os.environ.get("CONDA_PREFIX")
if conda_prefix is None:
# The caller is neither in a conda env or in (base) env. This is rare
# because by default, new terminals start in (base), but we can still
# support this case.
conda_exe... | |
110,215 | 311,550 | 147 | tests/components/homekit_controller/test_switch.py | 39 | 14 | async def test_switch_change_outlet_state(hass, utcnow):
helper = await setup_test_component(hass, create_switch_service)
await hass.services.async_call(
"switch", "turn_on", {"entity_id": "switch.testdevice"}, blocking=True
)
helper.async_assert_service_values(
ServicesTypes.OUTLE... | Improve homekit_controller tests (#65266) | test_switch_change_outlet_state | 58b8c30221a6f6e5acbbe98b7e3298b03fb741f5 | core | test_switch.py | 11 | 20 | https://github.com/home-assistant/core.git | 1 | 95 | 0 | 24 | 158 | Python | {
"docstring": "Test that we can turn a HomeKit outlet on and off again.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | async def test_switch_change_outlet_state(hass, utcnow):
helper = await setup_test_component(hass, create_switch_service)
await hass.services.async_call(
"switch", "turn_on", {"entity_id": "switch.testdevice"}, blocking=True
)
helper.async_assert_service_values(
ServicesTypes.OUTLE... | |
22,743 | 107,428 | 460 | lib/matplotlib/axis.py | 111 | 30 | def _update_label_position(self, renderer):
if not self._autolabelpos:
return
# get bounding boxes for this axis and any siblings
# that have been set by `fig.align_xlabels()`
bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)
x, y = self.la... | FIX: use window_extent instead | _update_label_position | c0a78bdff86d7b02b8a23d373b64c72297f935d5 | matplotlib | axis.py | 16 | 27 | https://github.com/matplotlib/matplotlib.git | 5 | 191 | 0 | 65 | 315 | Python | {
"docstring": "\n Update the label position based on the bounding box enclosing\n all the ticklabels and axis spine\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 14
} | def _update_label_position(self, renderer):
if not self._autolabelpos:
return
# get bounding boxes for this axis and any siblings
# that have been set by `fig.align_xlabels()`
bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)
x, y = self.la... | |
4,547 | 23,225 | 47 | ppocr/postprocess/fce_postprocess.py | 26 | 12 | def poly_union(poly_det, poly_gt):
assert isinstance(poly_det, plg.Polygon)
assert isinstance(poly_gt, plg.Polygon)
area_det = poly_det.area()
area_gt = poly_gt.area()
area_inters, _ = poly_intersection(poly_det, poly_gt)
return area_det + area_gt - area_inters
| add fcenet | poly_union | 9f62b610dea6161627200ed85d92e19b1923279a | PaddleOCR | fce_postprocess.py | 8 | 7 | https://github.com/PaddlePaddle/PaddleOCR.git | 1 | 56 | 0 | 20 | 89 | Python | {
"docstring": "Calculate the union area between two polygon.\n\n Args:\n poly_det (Polygon): A polygon predicted by detector.\n poly_gt (Polygon): A gt polygon.\n\n Returns:\n union_area (float): The union area between two polygons.\n ",
"language": "en",
"n_whitespaces": 59,
def poly_union(poly_det, poly_gt):
    """Calculate the union area between two polygons.

    Args:
        poly_det (Polygon): A polygon predicted by a detector.
        poly_gt (Polygon): A ground-truth polygon.

    Returns:
        float: The union area of the two polygons.
    """
    assert isinstance(poly_det, plg.Polygon)
    assert isinstance(poly_gt, plg.Polygon)

    # Inclusion-exclusion: |A u B| = |A| + |B| - |A n B|.
    intersection_area, _ = poly_intersection(poly_det, poly_gt)
    return poly_det.area() + poly_gt.area() - intersection_area
| |
73,483 | 250,506 | 198 | tests/config/test_tls.py | 48 | 24 | def test_whitelist_idna_result(self) -> None:
config: JsonDict = {
"federation_certificate_verification_whitelist": [
"example.com",
"*.xn--eckwd4c7c.xn--zckzah",
]
}
t = TestConfig()
t.tls.read_ | Add missing type hints to tests.config. (#14681) | test_whitelist_idna_result | 3aeca2588b79111a48a6083c88efc4d68a2cea19 | synapse | test_tls.py | 11 | 19 | https://github.com/matrix-org/synapse.git | 1 | 110 | 0 | 38 | 187 | Python | {
"docstring": "\n The federation certificate whitelist will match on IDNA encoded names.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def test_whitelist_idna_result(self) -> None:
config: JsonDict = {
"federation_certificate_verification_whitelist": [
"example.com",
"*.xn--eckwd4c7c.xn--zckzah",
]
}
t = TestConfig()
t.tls.read_config(config, config_dir_pa... | |
70,340 | 244,348 | 41 | mmdet/models/dense_heads/dense_test_mixins.py | 13 | 7 | def simple_test_rpn(self, x, img_metas):
rpn_outs = self(x)
proposal_list = self.get_results(*rpn_outs, img_metas=img_metas)
r | [Refactor] Refactor dense head outputs to InstanceResults. | simple_test_rpn | 9a3bf7660e6ced54672741095f96df07919f9ba7 | mmdetection | dense_test_mixins.py | 9 | 4 | https://github.com/open-mmlab/mmdetection.git | 1 | 31 | 0 | 11 | 50 | Python | {
def simple_test_rpn(self, x, img_metas):
    """Test without augmentation, only for ``RPNHead`` and its variants,
    e.g. ``GARPNHead``.

    Args:
        x (tuple[Tensor]): Features from the upstream network.
        img_metas (list[dict]): Meta info of each image.

    Returns:
        Proposal results as produced by ``get_results``.
    """
    outs = self(x)
    return self.get_results(*outs, img_metas=img_metas)
| |
22,775 | 107,486 | 250 | lib/matplotlib/axis.py | 85 | 25 | def _get_tick_boxes_siblings(self, renderer):
# Get the Grouper keeping track of x or y label groups for this figure.
axis_names = [
name for name, axis in self.axes._get_axis_map().items()
if name in self.figure._align_label_groups and axis is self]
if len(axis_... | DOC: More cleanup axes -> Axes | _get_tick_boxes_siblings | f156db08eee54d285ab0fb4e031e48d078ba6aa3 | matplotlib | axis.py | 13 | 17 | https://github.com/matplotlib/matplotlib.git | 6 | 133 | 0 | 64 | 219 | Python | {
"docstring": "\n Get the bounding boxes for this `.axis` and its siblings\n as set by `.Figure.align_xlabels` or `.Figure.align_ylabels`.\n\n By default it just gets bboxes for self.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 24,
"vocab_size": 23
} | def _get_tick_boxes_siblings(self, renderer):
# Get the Grouper keeping track of x or y label groups for this figure.
axis_names = [
name for name, axis in self.axes._get_axis_map().items()
if name in self.figure._align_label_groups and axis is self]
if len(axis_... | |
81,746 | 276,832 | 20 | keras/utils/generic_utils.py | 10 | 3 | def default(method):
method._is_de | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | default | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | generic_utils.py | 7 | 3 | https://github.com/keras-team/keras.git | 1 | 13 | 0 | 10 | 25 | Python | {
"docstring": "Decorates a method to detect overrides in subclasses.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
def default(method):
    """Decorate a method to mark it as a default implementation.

    The marker lets callers detect whether a subclass overrode the method
    (an override will not carry the attribute).
    """
    # pylint: disable=protected-access
    method._is_default = True
    return method
| |
26,270 | 118,528 | 38 | lib/tests/streamlit/camera_input_test.py | 10 | 9 | def test_help_tooltip(self):
st.camera_input("the label", help="help_label")
c = self.get_delta_from_queue().new_element.camera_input
self.assert | Feature/camera image input (#4038)
* Camera_input widget
Co-authored-by: willhuang1997 <willhuang1997@gmail.com>
Co-authored-by: Henrikh Kantuni <henrikh.kantuni@gmail.com>
Co-authored-by: William Huang <whuang@streamlit.io>
Co-authored-by: Vincent Donato <vincent@streamlit.io> | test_help_tooltip | 33855278eaf8599b2bec1ddefa5eebb592e55e25 | streamlit | camera_input_test.py | 10 | 4 | https://github.com/streamlit/streamlit.git | 1 | 37 | 0 | 10 | 67 | Python | {
"docstring": "Test that it can be called using a string for type parameter.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
def test_help_tooltip(self):
    """The ``help`` kwarg is forwarded to the camera_input element proto."""
    st.camera_input("the label", help="help_label")
    element = self.get_delta_from_queue().new_element.camera_input
    self.assertEqual(element.help, "help_label")
| |
107,643 | 308,917 | 253 | tests/components/nest/test_camera_sdm.py | 96 | 33 | async def test_multiple_event_images(hass, auth):
subscriber = await async_setup_camera(hass, DEVICE_TRAITS, auth=auth)
assert len(hass.states.async_all()) == 1
assert hass.states.get("camera.my_camera")
event_timestamp = utcnow()
await subscriber.async_receive_event(
make_motion_event... | Delete nest event image fetching and use same APIs as media player (#62789) | test_multiple_event_images | 4203e1b0640e16fbda55672c5be089431face880 | core | test_camera_sdm.py | 12 | 28 | https://github.com/home-assistant/core.git | 1 | 183 | 0 | 63 | 301 | Python | {
"docstring": "Test fallback for an event event image that has been cleaned up on expiration.",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | async def test_multiple_event_images(hass, auth):
subscriber = await async_setup_camera(hass, DEVICE_TRAITS, auth=auth)
assert len(hass.states.async_all()) == 1
assert hass.states.get("camera.my_camera")
event_timestamp = utcnow()
await subscriber.async_receive_event(
make_motion_event... | |
72,668 | 249,161 | 399 | tests/rest/admin/test_room.py | 87 | 26 | def test_delete_same_room_twice(self) -> None:
body = {"new_room_user_id": self.admin_user}
# first call to delete room
# and do not wait for finish the task
first_channel = self.make_request(
"DELETE",
self.url.encode("ascii"),
content=body... | Use literals in place of `HTTPStatus` constants in tests (#13469) | test_delete_same_room_twice | c97042f7eef3748e17c90e48a4122389a89c4735 | synapse | test_room.py | 11 | 32 | https://github.com/matrix-org/synapse.git | 1 | 176 | 0 | 61 | 290 | Python | {
"docstring": "Test that the call for delete a room at second time gives an exception.",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 14
} | def test_delete_same_room_twice(self) -> None:
body = {"new_room_user_id": self.admin_user}
# first call to delete room
# and do not wait for finish the task
first_channel = self.make_request(
"DELETE",
self.url.encode("ascii"),
content=body... | |
110,313 | 311,654 | 261 | homeassistant/components/august/__init__.py | 44 | 13 | async def _async_refresh_device_detail_by_ids(self, device_ids_list):
for device_id in device_ids_list:
try:
await self._async_refresh_device_detail_by_id(device_id)
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out call... | Improve reliability of august setup with recent api changes (#65314) | _async_refresh_device_detail_by_ids | fab9c4aa20b4c2549691d0aa5066798a0259e803 | core | __init__.py | 13 | 15 | https://github.com/home-assistant/core.git | 4 | 58 | 0 | 33 | 96 | Python | {
"docstring": "Refresh each device in sequence.\n\n This used to be a gather but it was less reliable with august's\n recent api changes.\n\n The august api has been timing out for some devices so\n we want the ones that it isn't timing out for to keep working.\n ",
"language": "... | async def _async_refresh_device_detail_by_ids(self, device_ids_list):
for device_id in device_ids_list:
try:
await self._async_refresh_device_detail_by_id(device_id)
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out call... | |
6,841 | 37,631 | 146 | src/transformers/models/yolos/feature_extraction_yolos.py | 86 | 33 | def masks_to_boxes(masks):
if masks.size == 0:
return np.zeros((0, 4))
h, w = masks.shape[-2:]
y = np.arange(0, h, dtype=np.float32)
x = np.arange(0, w, dtype=np.float32)
# see https://github.com/pytorch/pytorch/issues/50276
y, x = np.meshgrid(y, x, indexing="ij")
x_mask = ma... | Add YOLOS (#16848)
* First draft
* Add YolosForObjectDetection
* Make forward pass work
* Add mid position embeddings
* Add interpolation of position encodings
* Add expected values
* Add YOLOS to tests
* Add integration test
* Support tiny model as well
* Support all models in conversion sc... | masks_to_boxes | 1ac698744c4dbdf1495d303246d08ffacdf4f5b8 | transformers | feature_extraction_yolos.py | 14 | 18 | https://github.com/huggingface/transformers.git | 2 | 289 | 0 | 56 | 442 | Python | {
"docstring": "\n Compute the bounding boxes around the provided panoptic segmentation masks.\n\n The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.\n\n Returns a [N, 4] tensor, with the boxes in corner (xyxy) format.\n ",
"language": "en",
"n_w... | def masks_to_boxes(masks):
if masks.size == 0:
return np.zeros((0, 4))
h, w = masks.shape[-2:]
y = np.arange(0, h, dtype=np.float32)
x = np.arange(0, w, dtype=np.float32)
# see https://github.com/pytorch/pytorch/issues/50276
y, x = np.meshgrid(y, x, indexing="ij")
x_mask = ma... | |
@_wraps(np.roots, lax_description="""\
Unlike the numpy version of this function, the JAX version returns the roots in
a complex array regardless of the values of the roots. Additionally, the jax
version of this function adds the ``strip_zeros`` function which must be set to
False for the function to be compatible with... | 27,011 | 121,009 | 73 | jax/_src/numpy/polynomial.py | 68 | 18 | def _roots_with_zeros(p, num_leading_zeros):
# Avoid lapack errors when p is all zero
p = _where(len(p) == num_leading_zeros, 1.0, p)
# Roll any leading zeros to the end & compute the roots
roots = _roots_no_zeros(roll(p, -num_leading_zeros))
# Sort zero roots to the end.
roots = lax.sort_key_val(roots == 0... | jnp.roots: better support for computation under JIT | _roots_with_zeros | f6476f7a03f8390627c1a8e2a2ec8702d8a320e5 | jax | polynomial.py | 11 | 5 | https://github.com/google/jax.git | 1 | 80 | 1 | 51 | 147 | Python | {
"docstring": "\\\nUnlike the numpy version of this function, the JAX version returns the roots in\na complex array regardless of the values of the roots. Additionally, the jax\nversion of this function adds the ``strip_zeros`` function which must be set to\nFalse for the function to be compatible with JIT and other... | def _roots_with_zeros(p, num_leading_zeros):
# Avoid lapack errors when p is all zero
p = _where(len(p) == num_leading_zeros, 1.0, p)
# Roll any leading zeros to the end & compute the roots
roots = _roots_no_zeros(roll(p, -num_leading_zeros))
# Sort zero roots to the end.
roots = lax.sort_key_val(roots == 0... |
2,178 | 12,072 | 85 | jina/orchestrate/flow/base.py | 16 | 9 | def port_monitoring(self) -> int:
if GATEWAY_NAME in self._deployment_nodes:
return self[GATEWAY_NAME].args.port_monitoring
else:
return self._common_kwargs.get(
'port_monitoring', __default_port_monitoring__
)
| feat: expose prometheus metrics (#4526) | port_monitoring | 8dc2999a588c46deca60b3f0d5c1b6278a6e165c | jina | base.py | 11 | 10 | https://github.com/jina-ai/jina.git | 2 | 37 | 0 | 15 | 62 | Python | {
"docstring": "Return if the monitoring is enabled\n .. # noqa: DAR201\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 10,
"vocab_size": 10
def port_monitoring(self) -> int:
    """Return the port used for monitoring.

    .. # noqa: DAR201
    """
    if GATEWAY_NAME not in self._deployment_nodes:
        # No gateway deployment yet: fall back to the flow-level kwarg
        # (or the library default when it was never set).
        return self._common_kwargs.get(
            'port_monitoring', __default_port_monitoring__
        )
    return self[GATEWAY_NAME].args.port_monitoring
| |
51,655 | 206,720 | 391 | django/utils/module_loading.py | 117 | 18 | def autodiscover_modules(*args, **kwargs):
from django.apps import apps
register_to = kwargs.get("register_to")
for app_config in apps.get_app_configs():
for module_to_search in args:
# Attempt to import the app's module.
try:
if register_to:
... | Refs #33476 -- Reformatted code with Black. | autodiscover_modules | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | module_loading.py | 17 | 14 | https://github.com/django/django.git | 7 | 87 | 0 | 78 | 152 | Python | {
"docstring": "\n Auto-discover INSTALLED_APPS modules and fail silently when\n not present. This forces an import on them to register any admin bits they\n may want.\n\n You may provide a register_to keyword parameter as a way to access a\n registry. This register_to object must have a _registry inst... | def autodiscover_modules(*args, **kwargs):
from django.apps import apps
register_to = kwargs.get("register_to")
for app_config in apps.get_app_configs():
for module_to_search in args:
# Attempt to import the app's module.
try:
if register_to:
... | |
56,137 | 220,829 | 13 | python3.10.4/Lib/asyncio/tasks.py | 7 | 4 | def ensure_future(coro_or_future, *, loop=None):
return _ensure_future(coro_or_future, loop=loop)
| add python 3.10.4 for windows | ensure_future | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | tasks.py | 8 | 2 | https://github.com/XX-net/XX-Net.git | 1 | 21 | 0 | 7 | 34 | Python | {
"docstring": "Wrap a coroutine or an awaitable in a future.\n\n If the argument is a Future, it is returned directly.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 19,
"vocab_size": 16
def ensure_future(coro_or_future, *, loop=None):
    """Wrap a coroutine or an awaitable in a future.

    If the argument is a Future, it is returned directly.
    """
    return _ensure_future(coro_or_future, loop=loop)
| |
69,846 | 242,362 | 101 | src/PIL/Image.py | 25 | 9 | def getpalette(self, rawmode="RGB"):
self.load()
try:
mode = self.im.getpalettemode()
except ValueError:
return None # no palette
if rawmode is None:
rawmode = mode
| Allow rawmode None to return the palette in the current mode | getpalette | 6be87277f71948bc7e4b945c46660cac3e5ce919 | Pillow | Image.py | 11 | 9 | https://github.com/python-pillow/Pillow.git | 3 | 53 | 0 | 21 | 91 | Python | {
"docstring": "\n Returns the image palette as a list.\n\n :param rawmode: The mode in which to return the palette. ``None`` will\n return the palette in its current mode.\n :returns: A list of color values [r, g, b, ...], or None if the\n image has no palette.\n ",
def getpalette(self, rawmode="RGB"):
    """Return the image palette as a list.

    :param rawmode: The mode in which to return the palette. ``None`` will
       return the palette in its current mode.
    :returns: A list of color values [r, g, b, ...], or None if the
       image has no palette.
    """
    self.load()
    try:
        palette_mode = self.im.getpalettemode()
    except ValueError:
        return None  # no palette attached to the core image
    target_rawmode = palette_mode if rawmode is None else rawmode
    return list(self.im.getpalette(palette_mode, target_rawmode))
| |
2,630 | 13,404 | 33 | jina/types/request/data.py | 8 | 6 | def last_executor(self):
if len(self.proto_wo | feat: pass `docs_map` to Executor (#5366) | last_executor | ad96553b064b9c17d626f6fcb78e4a45987be2c3 | jina | data.py | 11 | 3 | https://github.com/jina-ai/jina.git | 2 | 30 | 0 | 8 | 50 | Python | {
"docstring": "\n Returns the name of the last Executor that has processed this Request\n\n :return: the name of the last Executor that processed this Request\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 23,
"vocab_size": 12
def last_executor(self):
    """Return the name of the last Executor that processed this Request.

    :return: the executor name, or ``None`` when no route was recorded
    """
    routes = self.proto_wo_data.routes
    if not routes:
        return None
    return routes[-1].executor
| |
81,689 | 276,584 | 256 | keras/tests/temporal_sample_weights_correctness_test.py | 58 | 18 | def custom_generator_multi_io_temporal(self, sample_weights=None):
batch_size = 3
num_samples = 3
iteration = 0
while True:
batch_index = iteration * batch_size % num_samples
iteration += 1
start = batch_index
end = start + batch_s... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | custom_generator_multi_io_temporal | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | temporal_sample_weights_correctness_test.py | 15 | 18 | https://github.com/keras-team/keras.git | 3 | 116 | 0 | 39 | 180 | Python | {
"docstring": "Generator for getting data for temporal multi io model.\n\n Args:\n sample_weights: List of sample_weights.\n\n Yields:\n Tuple of inputs, label, sample weights data.\n ",
"language": "en",
"n_whitespaces": 61,
"n_words": 22,
"vocab_size": 20
} | def custom_generator_multi_io_temporal(self, sample_weights=None):
batch_size = 3
num_samples = 3
iteration = 0
while True:
batch_index = iteration * batch_size % num_samples
iteration += 1
start = batch_index
end = start + batch_s... | |
78,920 | 267,484 | 379 | test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py | 61 | 18 | def _just_docs(self):
try:
for child in self.ast.body:
if not isinstance(child, as | ansible-test - Allow docstring in docs-only module | _just_docs | 5b3557f8ba5c176eb7d2de21b3a4da3dcab3bada | ansible | main.py | 17 | 16 | https://github.com/ansible/ansible.git | 11 | 107 | 0 | 45 | 171 | Python | {
"docstring": "Module can contain just docs and from __future__ boilerplate\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 9,
"vocab_size": 9
} | def _just_docs(self):
try:
for child in self.ast.body:
if not isinstance(child, ast.Assign):
# allow string constant expressions (these are docstrings)
if isinstance(child, ast.Expr) and isinstance(child.value, ast.Constant) and isinst... | |
17,587 | 83,043 | 40 | zerver/openapi/openapi.py | 12 | 5 | def response_validator(self) -> RequestValidator:
self.check_reload()
assert s | openapi: Use openapi_core ResponseValidator to validate responses.
Signed-off-by: Anders Kaseorg <anders@zulip.com> | response_validator | 031f4596ab1737a237b1c099e792fe627a937ff7 | zulip | openapi.py | 7 | 8 | https://github.com/zulip/zulip.git | 1 | 24 | 0 | 11 | 41 | Python | {
"docstring": "Reload the OpenAPI file if it has been modified after the last time\n it was read, and then return the openapi_core validator object. Similar\n to preceding functions. Used for proper access to OpenAPI objects.\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 34,
"vo... | def response_validator(self) -> RequestValidator:
self.check_reload()
assert self._response_validator is not None
return self._response_validator
| |
@_wraps(np.polyfit, lax_description=_POLYFIT_DOC)
@partial(jit, static_argnames=('deg', 'rcond', 'full', 'cov')) | 26,694 | 119,828 | 164 | jax/_src/numpy/polynomial.py | 116 | 25 | def roots(p, *, strip_zeros=True):
# ported from https://github.com/numpy/numpy/blob/v1.17.0/numpy/lib/polynomial.py#L168-L251
p = atleast_1d(p)
if p.ndim != 1:
raise Valu | lax_numpy: move poly functions into numpy.polynomial | roots | 603bb3c5ca288674579211e64fa47c6b2b0fb7a6 | jax | polynomial.py | 15 | 20 | https://github.com/google/jax.git | 6 | 133 | 1 | 74 | 274 | Python | {
"docstring": "\\\nUnlike NumPy's implementation of polyfit, :py:func:`jax.numpy.polyfit` will not warn on rank reduction, which indicates an ill conditioned matrix\nAlso, it works best on rcond <= 10e-3 values.\n",
"language": "en",
"n_whitespaces": 25,
"n_words": 28,
"vocab_size": 27
} | def roots(p, *, strip_zeros=True):
# ported from https://github.com/numpy/numpy/blob/v1.17.0/numpy/lib/polynomial.py#L168-L251
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# strip_zeros=False is unsafe because leading zeros aren't removed
if not strip_zeros:
if ... |
28,957 | 129,465 | 31 | python/ray/tune/trainable.py | 10 | 10 | def _storage_path(self, local_path):
rel_local_path = | [tune] only sync up and sync down checkpoint folder for cloud checkpoint. (#21658)
By default, ~/ray_results/exp_name/trial_name/checkpoint_name.
Instead of the whole trial checkpoint (~/ray_results/exp_name/trial_name/) directory.
Stuff like progress.csv, result.json, params.pkl, params.json, events.out etc are com... | _storage_path | 0abcd5eea529fc84c4398620f2808087e4d8c6b6 | ray | trainable.py | 9 | 3 | https://github.com/ray-project/ray.git | 1 | 35 | 0 | 10 | 55 | Python | {
"docstring": "Converts a `local_path` to be based off of\n `self.remote_checkpoint_dir`.",
"language": "en",
"n_whitespaces": 15,
"n_words": 9,
"vocab_size": 9
} | def _storage_path(self, local_path):
rel_local_path = os.path.relpath(local_path, self.logdir)
return os.path.join(self.remote_checkpoint_dir, rel_local_path)
| |
1,588 | 9,373 | 22 | reconstruction/ostec/external/stylegan2/dnnlib/submission/run_context.py | 8 | 5 | def get_time_since_last_update(self) -> float:
return time.time() - self.last_upda | initialize ostec | get_time_since_last_update | 7375ee364e0df2a417f92593e09557f1b2a3575a | insightface | run_context.py | 8 | 3 | https://github.com/deepinsight/insightface.git | 1 | 18 | 0 | 8 | 32 | Python | {
"docstring": "How much time has passed since the last call to update.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def get_time_since_last_update(self) -> float:
return time.time() - self.last_update_time
| |
1,643 | 9,622 | 1,136 | reconstruction/ostec/external/graphonomy/FaceHairMask/deeplab_xception.py | 192 | 19 | def train_fixbn(self, mode=True, freeze_bn=True, freeze_bn_affine=False):
r
super(DeepLabv3_plus, self).train(mode)
if freeze_bn:
print("Freezing Mean/Var of BatchNorm2D.")
if freeze_bn_affine:
print("Freezing Weight/Bias of BatchNorm2D.")
if freez... | Graphonomy Face/Hair Segmentation added | train_fixbn | 2e5d23ee0e7fc1fdd7ad2e615fd651655aeb0f5b | insightface | deeplab_xception.py | 16 | 23 | https://github.com/deepinsight/insightface.git | 7 | 90 | 0 | 35 | 188 | Python | {
"docstring": "Sets the module in training mode.\n\n This has any effect only on certain modules. See documentations of\n particular modules for details of their behaviors in training/evaluation\n mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,\n etc.\n\n Ret... | def train_fixbn(self, mode=True, freeze_bn=True, freeze_bn_affine=False):
r
super(DeepLabv3_plus, self).train(mode)
if freeze_bn:
print("Freezing Mean/Var of BatchNorm2D.")
if freeze_bn_affine:
print("Freezing Weight/Bias of BatchNorm2D.")
if freez... | |
70,831 | 245,586 | 91 | tests/test_models/test_backbones/test_resnet.py | 34 | 14 | def assert_params_all_zeros(module) -> bool:
weight_data = module.weight.data
is_weight_zero = weight_data.allclose(
weight_data.new_zeros(weight_data.size()))
if hasattr(module, 'bias') and module.bias is not None:
bias_data = module.bias.data
is_bias_zero = bias_data.allclose... | [Fix] Fix UT and remove delete mmcv ops. (#8623)
* Remove get_root_logger
* Fix UT
* Update | assert_params_all_zeros | 73a12e6508d4ba0331b84b1313027a511ba26fe3 | mmdetection | test_resnet.py | 14 | 19 | https://github.com/open-mmlab/mmdetection.git | 4 | 80 | 0 | 26 | 133 | Python | {
"docstring": "Check if the parameters of the module is all zeros.\n\n Args:\n module (nn.Module): The module to be checked.\n\n Returns:\n bool: Whether the parameters of the module is all zeros.\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 29,
"vocab_size": 18
} | def assert_params_all_zeros(module) -> bool:
weight_data = module.weight.data
is_weight_zero = weight_data.allclose(
weight_data.new_zeros(weight_data.size()))
if hasattr(module, 'bias') and module.bias is not None:
bias_data = module.bias.data
is_bias_zero = bias_data.allclose... | |
52,741 | 209,587 | 188 | scapy/contrib/automotive/scanner/executor.py | 40 | 16 | def cleanup_state(self):
# type: () -> None
for f in self.cleanup_functions:
if not callable(f):
continue
try:
if not f(self.socket, self.configuration):
log_automotive.info(
| Add Automotive Logger for all debug outputs of the automotive layer | cleanup_state | 495b21f2867e48286767085c8cf2918e4092e9dc | scapy | executor.py | 15 | 11 | https://github.com/secdev/scapy.git | 5 | 73 | 0 | 38 | 123 | Python | {
"docstring": "\n Executes all collected cleanup functions from a traversed path\n :return: None\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 11,
"vocab_size": 11
} | def cleanup_state(self):
# type: () -> None
for f in self.cleanup_functions:
if not callable(f):
continue
try:
if not f(self.socket, self.configuration):
log_automotive.info(
"Cleanup function %s... | |
15,998 | 73,261 | 226 | wagtail/contrib/modeladmin/tests/test_simple_modeladmin.py | 55 | 17 | def test_model_with_two_tabbed_panels_only(self):
Publisher.settings_panels = [FieldPanel("name")]
Publisher.promote_panels = [FieldPanel("headquartered_in")]
warning_1 = checks.Warning(
"Publisher.promo | Reformat with black | test_model_with_two_tabbed_panels_only | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_simple_modeladmin.py | 10 | 26 | https://github.com/wagtail/wagtail.git | 1 | 102 | 0 | 37 | 178 | Python | {
"docstring": "Ensure that Publisher uses `panels` instead of `promote_panels`\\\nor set up an `edit_handler` if you want a tabbed editing interface.\nThere are no default tabs on non-Page models so there will be no\\\n Promote tab for the promote_panels to render in.Ensure that Publisher uses `panels` instead of `s... | def test_model_with_two_tabbed_panels_only(self):
Publisher.settings_panels = [FieldPanel("name")]
Publisher.promote_panels = [FieldPanel("headquartered_in")]
warning_1 = checks.Warning(
"Publisher.promote_panels will have no effect on modeladmin editing",
hint=,
... | |
81,181 | 274,158 | 25 | keras/layers/serialization.py | 9 | 7 | def get_builtin_layer(class_name):
if not hasattr(LOCAL, "ALL_OBJECTS"):
populate_deserializable_objects()
return L | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | get_builtin_layer | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | serialization.py | 9 | 4 | https://github.com/keras-team/keras.git | 2 | 27 | 0 | 9 | 49 | Python | {
"docstring": "Returns class if `class_name` is registered, else returns None.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def get_builtin_layer(class_name):
if not hasattr(LOCAL, "ALL_OBJECTS"):
populate_deserializable_objects()
return LOCAL.ALL_OBJECTS.get(class_name)
| |
47,617 | 196,117 | 491 | sympy/combinatorics/perm_groups.py | 90 | 28 | def abelian_invariants(self):
if self.is_trivial:
return []
gns = self.generators
inv = []
G = self
H = G.derived_subgroup()
Hgens = H.generators
for p in primefactors(G.order()):
ranks = []
while True:
... | Updated import locations | abelian_invariants | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | perm_groups.py | 16 | 31 | https://github.com/sympy/sympy.git | 11 | 181 | 0 | 53 | 300 | Python | {
"docstring": "\n Returns the abelian invariants for the given group.\n Let ``G`` be a nontrivial finite abelian group. Then G is isomorphic to\n the direct product of finitely many nontrivial cyclic groups of\n prime-power order.\n\n Explanation\n ===========\n\n The... | def abelian_invariants(self):
if self.is_trivial:
return []
gns = self.generators
inv = []
G = self
H = G.derived_subgroup()
Hgens = H.generators
for p in primefactors(G.order()):
ranks = []
while True:
... | |
80,751 | 271,364 | 652 | keras/engine/functional_utils.py | 292 | 35 | def clone_graph_nodes(inputs, outputs):
nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs)
cloned_inputs = []
cloned_outputs = []
# We not only need to create copies of Nodes (mimic the calls), also need to
# clone keras_tensors to avoid the override of _keras_history attached o... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | clone_graph_nodes | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | functional_utils.py | 14 | 34 | https://github.com/keras-team/keras.git | 5 | 221 | 0 | 161 | 364 | Python | {
"docstring": "Clone the `Node` between the inputs and output tensors.\n\n This function is used to create a new functional model from any intermediate\n keras tensors. The clone of the nodes mimic the behavior of reconstructing the\n functional graph network by re-executing all the __call__ methods. The cl... | def clone_graph_nodes(inputs, outputs):
nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs)
cloned_inputs = []
cloned_outputs = []
# We not only need to create copies of Nodes (mimic the calls), also need to
# clone keras_tensors to avoid the override of _keras_history attached o... | |
49,276 | 199,471 | 83 | sympy/physics/mechanics/rigidbody.py | 26 | 14 | def parallel_axis(self, point, frame=None):
# circular import issue
from sympy.physics.mechanics.functions import inertia_of_point_mass
if frame is None:
frame = self.frame
return self.central_inertia.express(frame) + inertia_of_point_mass(
sel | Add optional frame argument to parallel axis method | parallel_axis | 801e149d69d5f88919a735f8b55b6024f97c6950 | sympy | rigidbody.py | 11 | 6 | https://github.com/sympy/sympy.git | 2 | 59 | 0 | 24 | 90 | Python | {
"docstring": "Returns the inertia dyadic of the body with respect to another\n point.\n\n Parameters\n ==========\n\n point : sympy.physics.vector.Point\n The point to express the inertia dyadic about.\n frame : sympy.physics.vector.ReferenceFrame\n The refer... | def parallel_axis(self, point, frame=None):
# circular import issue
from sympy.physics.mechanics.functions import inertia_of_point_mass
if frame is None:
frame = self.frame
return self.central_inertia.express(frame) + inertia_of_point_mass(
self.mass, sel... | |
75,568 | 259,103 | 38 | sklearn/utils/tests/test_class_weight.py | 26 | 5 | def test_class_weight_does_not_contains_more_classses():
tree = DecisionTreeClassifier(class_weight={0: 1, 1: 10, 2: 20})
# Does not raise
tree.fit([[0, 0, 1], [1, 0, 1], [1, 2, 0]], [0, 0, 1])
| FIX Support extra class_weights in compute_class_weight (#22595) | test_class_weight_does_not_contains_more_classses | 3605c140af992b6ac52f04f1689c58509cc0b5b2 | scikit-learn | test_class_weight.py | 11 | 3 | https://github.com/scikit-learn/scikit-learn.git | 1 | 63 | 0 | 22 | 89 | Python | {
"docstring": "Check that class_weight can contain more labels than in y.\n\n Non-regression test for #22413\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 14,
"vocab_size": 14
} | def test_class_weight_does_not_contains_more_classses():
tree = DecisionTreeClassifier(class_weight={0: 1, 1: 10, 2: 20})
# Does not raise
tree.fit([[0, 0, 1], [1, 0, 1], [1, 2, 0]], [0, 0, 1])
| |
78,532 | 266,720 | 429 | test/lib/ansible_test/_internal/cli/argparsing/parsers.py | 89 | 28 | def parse(self, state): # type: (ParserState) -> str
if state.mode == ParserMode.PARSE:
path = AnyParser(). | ansible-test - Code cleanup and refactoring. (#77169)
* Remove unnecessary PyCharm ignores.
* Ignore intentional undefined attribute usage.
* Add missing type hints. Fix existing type hints.
* Fix docstrings and comments.
* Use function to register completion handler.
* Pass strings to display functions.
* Fix C... | parse | a06fa496d3f837cca3c437ab6e9858525633d147 | ansible | parsers.py | 22 | 21 | https://github.com/ansible/ansible.git | 9 | 145 | 0 | 65 | 285 | Python | {
"docstring": "Parse the input from the given state and return the result.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 9
} | def parse(self, state): # type: (ParserState) -> str
if state.mode == ParserMode.PARSE:
path = AnyParser().parse(state)
if not os.path.isfile(path):
raise ParserError(f'Not a file: {path}')
else:
path = ''
with state.delimit(PAT... | |
43,751 | 182,133 | 94 | src/textual/view.py | 31 | 4 | def layout(self) -> Layout:
# self.log("I", self._inline_styles)
# self.log("C", self._css_styles)
# self.log("S", self.styles)
assert self.s | implement inline styles | layout | c90cdd4ec8a10c689fee83a6a71e025393dcb38d | textual | view.py | 7 | 7 | https://github.com/Textualize/textual.git | 1 | 20 | 0 | 21 | 46 | Python | {
"docstring": "Convenience property for accessing ``self.styles.layout``.\n\n Returns: The Layout associated with this view\n Convenience property setter for setting ``view.styles.layout``.\n # Args:\n # new_value:\n\n # Returns:\n # None\n # ",
"language": ... | def layout(self) -> Layout:
# self.log("I", self._inline_styles)
# self.log("C", self._css_styles)
# self.log("S", self.styles)
assert self.styles.layout
return self.styles.layout
# @layout.setter
# def layout(self, new_value: Layout) -> None:
#
# ... | |
53,965 | 215,425 | 355 | salt/transport/rabbitmq.py | 64 | 15 | def timeout_message(self, message):
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
... | Start to add base class defs | timeout_message | ab4803984bce4a4de7cc10910e7310c4babf557e | salt | rabbitmq.py | 15 | 19 | https://github.com/saltstack/salt.git | 3 | 96 | 0 | 54 | 152 | Python | {
"docstring": "\n Handle a message timeout by removing it from the sending queue\n and informing the caller\n\n :raises: SaltReqTimeoutError\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 17,
"vocab_size": 16
} | def timeout_message(self, message):
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
... | |
768 | 5,410 | 467 | airbyte-integrations/connectors/source-freshdesk/unit_tests/test_300_page.py | 152 | 22 | def test_not_all_records(self, requests_mock, authenticator, config, responses):
expected_output = [
{"id": 1, "updated_at": "2018-01-02T00:00:00Z"},
{"id": 2, "updated_at": "2018-02-02T00:00:00Z"},
{"id": 2, "updated_at": "2018-02-02T00:00:00Z"}, # duplicate
... | 🎉 Source Freshdesk: Migrated to latest CDK (#12334) | test_not_all_records | 9d1cd42ff9f3118e2312ea9c94ad647f1baaad73 | airbyte | test_300_page.py | 13 | 27 | https://github.com/airbytehq/airbyte.git | 2 | 201 | 0 | 90 | 360 | Python | {
"docstring": "\n TEST 1 - not all records are retrieved\n\n During test1 the tickets_stream changes the state of parameters on page: 2,\n by updating the params:\n `params[\"order_by\"] = \"updated_at\"`\n `params[\"updated_since\"] = last_record`\n continues to fetch recor... | def test_not_all_records(self, requests_mock, authenticator, config, responses):
expected_output = [
{"id": 1, "updated_at": "2018-01-02T00:00:00Z"},
{"id": 2, "updated_at": "2018-02-02T00:00:00Z"},
{"id": 2, "updated_at": "2018-02-02T00:00:00Z"}, # duplicate
... | |
50,844 | 204,704 | 29 | django/core/management/commands/test.py | 8 | 6 | def run_from_argv(self, argv):
s | Refs #33476 -- Reformatted code with Black. | run_from_argv | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | test.py | 9 | 3 | https://github.com/django/django.git | 1 | 26 | 0 | 8 | 46 | Python | {
"docstring": "\n Pre-parse the command line to extract the value of the --testrunner\n option. This allows a test runner to define additional command line\n arguments.\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 23,
"vocab_size": 18
} | def run_from_argv(self, argv):
self.test_runner = get_command_line_option(argv, "--testrunner")
super().run_from_argv(argv)
| |
18,999 | 93,612 | 346 | tests/sentry/integrations/slack/notifications/test_new_processing_issues.py | 57 | 20 | def test_new_processing_issue(self, mock_func):
notification = NewProcessingIssuesActivityNotification(
Activity(
project=self.project,
user=self.user,
type=ActivityType.NEW_PROCESSING_ISSUES,
data={
"issue... | fix(slack): Fix broken url formatting (#36976)
Fix the URL format, it should be `<url|text>`. | test_new_processing_issue | e4c6ad69c22692e2999baa26d8bf8f44947cd1c1 | sentry | test_new_processing_issues.py | 15 | 27 | https://github.com/getsentry/sentry.git | 1 | 95 | 0 | 45 | 208 | Python | {
"docstring": "\n Test that a Slack message is sent with the expected payload when an issue is held back in reprocessing\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 19,
"vocab_size": 18
} | def test_new_processing_issue(self, mock_func):
notification = NewProcessingIssuesActivityNotification(
Activity(
project=self.project,
user=self.user,
type=ActivityType.NEW_PROCESSING_ISSUES,
data={
"issue... | |
37,541 | 158,401 | 20 | d2l/torch.py | 11 | 9 | def load_array(data_arrays, batch_size, is_train=True):
dataset = data.TensorDataset(*data_arrays)
| [PaddlePaddle] Merge master into Paddle branch (#1186)
* change 15.2 title in chinese version (#1109)
change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘
* 修改部分语义表述 (#1105)
* Update r0.17.5 (#1120)
* Bump versions in installation
* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)
* line 313: "b... | load_array | b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2 | d2l-zh | torch.py | 9 | 3 | https://github.com/d2l-ai/d2l-zh.git | 1 | 34 | 0 | 10 | 53 | Python | {
"docstring": "Construct a PyTorch data iterator.\n\n Defined in :numref:`sec_linear_concise`",
"language": "en",
"n_whitespaces": 10,
"n_words": 8,
"vocab_size": 8
} | def load_array(data_arrays, batch_size, is_train=True):
dataset = data.TensorDataset(*data_arrays)
return data.DataLoader(dataset, batch_size, shuffle=is_train)
| |
4,172 | 22,092 | 56 | pipenv/patched/pip/_vendor/requests/models.py | 13 | 7 | def is_permanent_redirect(self):
| Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | is_permanent_redirect | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | models.py | 9 | 5 | https://github.com/pypa/pipenv.git | 2 | 27 | 0 | 12 | 45 | Python | {
"docstring": "True if this Response one of the permanent versions of redirect.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | def is_permanent_redirect(self):
return "location" in self.headers and self.status_code in (
codes.moved_permanently,
codes.permanent_redirect,
)
| |
82,535 | 278,431 | 329 | keras/utils/generic_utils.py | 102 | 7 | def _estimate_step_duration(self, current, now):
if current:
# there are a few special scenarios here:
# 1) somebody is calling the progress bar without ever supplying
# step 1
# 2) somebody is calling the p | resolve line-too-long in utils | _estimate_step_duration | 80ee2fa4e1db2dda14370110830db82be3eb97b7 | keras | generic_utils.py | 15 | 13 | https://github.com/keras-team/keras.git | 5 | 69 | 0 | 68 | 116 | Python | {
"docstring": "Estimate the duration of a single step.\n\n Given the step number `current` and the corresponding time `now` this\n function returns an estimate for how long a single step takes. If this\n is called before one step has been completed (i.e. `current == 0`) then\n zero is giv... | def _estimate_step_duration(self, current, now):
if current:
# there are a few special scenarios here:
# 1) somebody is calling the progress bar without ever supplying
# step 1
# 2) somebody is calling the progress bar and supplies step one
... | |
35,202 | 152,956 | 91 | modin/core/dataframe/pandas/dataframe/dataframe.py | 34 | 10 | def _propagate_index_objs(self, axis=None):
self._filter_empties()
if axis is None or axis == 0:
cum_row_lengths = np.cumsum([0] + self._row_lengths)
if axis is None or axis == 1:
cum_col_widths = np.cumsum([0] + self._column | FEAT-#3111: Ensure relabeling Modin Frame does not lose partition shape (#3662)
Co-authored-by: Devin Petersohn <devin.petersohn@gmail.com>
Signed-off-by: Naren Krishna <naren@ponder.io> | _propagate_index_objs | 3c740dbfcdd69ddc3ab45a42be996e5c61104342 | modin | dataframe.py | 12 | 64 | https://github.com/modin-project/modin.git | 15 | 373 | 0 | 20 | 107 | Python | {
"docstring": "\n Synchronize labels by applying the index object for specific `axis` to the `self._partitions` lazily.\n\n Adds `set_axis` function to call-queue of each partition from `self._partitions`\n to apply new axis.\n\n Parameters\n ----------\n axis : int, default... | def _propagate_index_objs(self, axis=None):
self._filter_empties()
if axis is None or axis == 0:
cum_row_lengths = np.cumsum([0] + self._row_lengths)
if axis is None or axis == 1:
cum_col_widths = np.cumsum([0] + self._column_widths)
if axis is None:
| |
71,784 | 247,618 | 1,107 | tests/handlers/test_e2e_keys.py | 114 | 23 | def test_query_devices_remote_no_sync(self) -> None:
remote_user_id = "@test:other"
local_user_id = "@test:test"
remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY"
remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ"
self.hs.get_feder... | Add type hints to some tests/handlers files. (#12224) | test_query_devices_remote_no_sync | 5dd949bee6158a8b651db9f2ae417a62c8184bfd | synapse | test_e2e_keys.py | 21 | 66 | https://github.com/matrix-org/synapse.git | 1 | 244 | 0 | 52 | 423 | Python | {
"docstring": "Tests that querying keys for a remote user that we don't share a room\n with returns the cross signing keys correctly.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 21,
"vocab_size": 18
} | def test_query_devices_remote_no_sync(self) -> None:
remote_user_id = "@test:other"
local_user_id = "@test:test"
remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY"
remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ"
self.hs.get_feder... | |
48,956 | 198,477 | 38 | sympy/core/mul.py | 17 | 6 | def _matches_get_other_nodes(dictionary, nodes, node_ind):
ind_node = nodes[node_ind]
return [ind for ind in dictionary if nodes[ind] == ind_node]
| Code cleanup | _matches_get_other_nodes | 9d58006fc0a23afcba38f641c9472917c436428a | sympy | mul.py | 9 | 3 | https://github.com/sympy/sympy.git | 3 | 31 | 0 | 17 | 47 | Python | {
"docstring": "Find other wildcards that may have already been matched.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _matches_get_other_nodes(dictionary, nodes, node_ind):
ind_node = nodes[node_ind]
return [ind for ind in dictionary if nodes[ind] == ind_node]
| |
6,372 | 35,025 | 121 | src/transformers/processing_utils.py | 37 | 11 | def save_pretrained(self, save_directory):
for attribute_name in self.attributes:
attribute = getattr(self, attribute_name)
# Include the processor class in the attribute config so this processor can then be reloaded with the
# `AutoProcessor` API.
if has... | PoC for a ProcessorMixin class (#15549)
* PoC for a ProcessorMixin class
* Documentation
* Apply suggestions from code review
Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
Co-authored-by: Suraj Patil <surajp815@gmail.com>
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail... | save_pretrained | b5c6fdecf0cab6ffe22bee2ca5b8474afba0d813 | transformers | processing_utils.py | 13 | 6 | https://github.com/huggingface/transformers.git | 3 | 47 | 0 | 31 | 79 | Python | {
"docstring": "\n Saves the attributes of this processor (feature extractor, tokenizer...) in the specified directory so that it\n can be reloaded using the [`~ProcessorMixin.from_pretrained`] method.\n\n <Tip>\n\n This class method is simply calling [`~feature_extraction_utils.FeatureExt... | def save_pretrained(self, save_directory):
for attribute_name in self.attributes:
attribute = getattr(self, attribute_name)
# Include the processor class in the attribute config so this processor can then be reloaded with the
# `AutoProcessor` API.
if has... | |
5,284 | 30,033 | 102 | saleor/permission/models.py | 24 | 9 | def _user_has_module_perms(user, app_label):
for backend in auth.get_backends():
if not hasattr(backend, "has_module_perms"):
continue
try:
if backend.has_module_perms(user, app_label | Move PermissionsMixin from django auth | _user_has_module_perms | d5ef58653803075849a6a13177e7a6e604aa2f60 | saleor | models.py | 11 | 10 | https://github.com/saleor/saleor.git | 5 | 48 | 0 | 19 | 80 | Python | {
"docstring": "Backend can raise `PermissionDenied` to short-circuit permission checking.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def _user_has_module_perms(user, app_label):
for backend in auth.get_backends():
if not hasattr(backend, "has_module_perms"):
continue
try:
if backend.has_module_perms(user, app_label):
return True
except PermissionDenied:
return False... | |
@frappe.whitelist() | 14,578 | 67,615 | 13 | erpnext/stock/doctype/delivery_trip/delivery_trip.py | 26 | 11 | def get_default_address(out, name):
shipping_addresses = frappe.db.sql(
,
(name),
as_dict=1,
)
if shipping_addresses:
for out.shipping_address in shipping_addresses:
if out.shipping_address.is_shipping_address:
return out.shipping_address
out.shipping_address = shipping_addresses[0]
return out.... | style: format code with black | get_default_address | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | delivery_trip.py | 12 | 21 | https://github.com/frappe/erpnext.git | 4 | 59 | 1 | 19 | 101 | Python | {
"docstring": "\n\t\t\tSELECT parent,\n\t\t\t\t(SELECT is_shipping_address FROM tabAddress a WHERE a.name=dl.parent) AS is_shipping_address\n\t\t\tFROM\n\t\t\t\t`tabDynamic Link` dl\n\t\t\tWHERE\n\t\t\t\tdl.link_doctype=\"Customer\"\n\t\t\t\tAND dl.link_name=%s\n\t\t\t\tAND dl.parenttype = \"Address\"\n\t\t",
"lan... | def get_default_address(out, name):
shipping_addresses = frappe.db.sql(
,
(name),
as_dict=1,
)
if shipping_addresses:
for out.shipping_address in shipping_addresses:
if out.shipping_address.is_shipping_address:
return out.shipping_address
out.shipping_address = shipping_addresses[0]
return out.... |
29,007 | 129,707 | 56 | rllib/utils/__init__.py | 30 | 8 | def force_list(elements=None, to_tuple=False):
ctor = list
if to_tuple is True:
ctor = tuple
return ctor() if elements is None else ctor(elements) \
if type(el | [RLlib] Make `policies_to_train` more flexible via callable option. (#20735) | force_list | 371fbb17e4120f569e0b6c5efde9a00a097f438e | ray | __init__.py | 10 | 6 | https://github.com/ray-project/ray.git | 4 | 57 | 0 | 24 | 87 | Python | {
"docstring": "\n Makes sure `elements` is returned as a list, whether `elements` is a single\n item, already a list, or a tuple.\n\n Args:\n elements (Optional[any]): The inputs as single item, list, or tuple to\n be converted into a list/tuple. If None, returns empty list/tuple.\n ... | def force_list(elements=None, to_tuple=False):
ctor = list
if to_tuple is True:
ctor = tuple
return ctor() if elements is None else ctor(elements) \
if type(elements) in [list, set, tuple] else ctor([elements])
| |
36,905 | 157,365 | 467 | ldm/models/diffusion/dpm_solver/dpm_solver.py | 159 | 17 | def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
if order == 3:
K = steps // 3 + 1
if steps % 3 == 0:
orders = [3, ] * (K - 2) + [2, 1]
elif steps % 3 == 1:
orders = [3, ] * (K - 1) +... | release more models | get_orders_and_timesteps_for_singlestep_solver | ca86da3a30c4e080d4db8c25fca73de843663cb4 | stablediffusion | dpm_solver.py | 18 | 27 | https://github.com/Stability-AI/stablediffusion.git | 8 | 228 | 0 | 66 | 359 | Python | {
"docstring": "\n Get the order of each step for sampling by the singlestep DPM-Solver.\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-f... | def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
if order == 3:
K = steps // 3 + 1
if steps % 3 == 0:
orders = [3, ] * (K - 2) + [2, 1]
elif steps % 3 == 1:
orders = [3, ] * (K - 1) +... | |
38,693 | 160,675 | 247 | numpy/core/tests/test_multiarray.py | 129 | 27 | def _aligned_zeros(shape, dtype=float, order="C", align=None):
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array ali... | MAINT: Simplify element setting and use it for filling
This slightly modifies the behaviour of `arr.fill()` to be
`arr.fill(scalar)`, i.e. match `arr1d[0] = scalar`, rather than
`arr.fill(np.asarray(scalar))`, which subtely different!
(Note that object was already special cased to have the scalar
logic.)
Otherwise, `... | _aligned_zeros | ac624d012cc0c3f90da4593e7bb8d9d335fa9696 | numpy | test_multiarray.py | 12 | 22 | https://github.com/numpy/numpy.git | 7 | 204 | 0 | 86 | 323 | Python | {
"docstring": "\n Allocate a new ndarray with aligned memory.\n\n The ndarray is guaranteed *not* aligned to twice the requested alignment.\n Eg, if align=4, guarantees it is not aligned to 8. If align=None uses\n dtype.alignment.",
"language": "en",
"n_whitespaces": 44,
"n_words": 32,
"vocab_siz... | def _aligned_zeros(shape, dtype=float, order="C", align=None):
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array ali... | |
@keras_export('keras.utils.array_to_img',
'keras.preprocessing.image.array_to_img') | 79,774 | 268,942 | 374 | keras/preprocessing/image.py | 228 | 36 | def smart_resize(x, size, interpolation='bilinear'):
if len(size) != 2:
raise ValueError('Expected `size` to be a tuple of 2 integers, '
f'but got: {size}.')
img = tf.convert_to_tensor(x)
if img.shape.rank is not None:
if img.shape.rank < 3 or img.shape.rank > 4:
raise ValueE... | Copy image utils from keras_preprocessing directly into core keras
This is not new code, we are just moving these utilities directly
into keras from keras-preprocessing.
For the library code, just fixed linting errors.
For the test code, had to do more major changes to port from pytest, but
hopefully any errors have ... | smart_resize | 373ad97c72ed1ac4b6898e85b2cfd7b016e4b469 | keras | image.py | 15 | 43 | https://github.com/keras-team/keras.git | 11 | 404 | 1 | 124 | 661 | Python | {
"docstring": "Resize images to a target size without aspect ratio distortion.\n\n TensorFlow image datasets typically yield images that have each a different\n size. However, these images need to be batched before they can be\n processed by Keras layers. To be batched, images need to share the same height\n and... | def smart_resize(x, size, interpolation='bilinear'):
if len(size) != 2:
raise ValueError('Expected `size` to be a tuple of 2 integers, '
f'but got: {size}.')
img = tf.convert_to_tensor(x)
if img.shape.rank is not None:
if img.shape.rank < 3 or img.shape.rank > 4:
raise ValueE... |
120,987 | 337,266 | 68 | src/accelerate/utils.py | 25 | 13 | def gather(tensor):
if AcceleratorState().distributed_type == DistributedType.TPU:
return _tpu_gather(tensor, name="accelerate.utils.gather")
elif AcceleratorState().distribut | Basic fixes for DeepSpeed (#264) | gather | bbccd2c3fbaa93ed5984e22fc8bf66eb13fdb82b | accelerate | utils.py | 11 | 9 | https://github.com/huggingface/accelerate.git | 4 | 68 | 0 | 18 | 113 | Python | {
"docstring": "\n Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.\n\n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n The data to gather.\n\n Returns:\n The same data structure as :obj:`tensor` with all tensors sent to... | def gather(tensor):
if AcceleratorState().distributed_type == DistributedType.TPU:
return _tpu_gather(tensor, name="accelerate.utils.gather")
elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:
return _gpu_gather(tensor)
elif AcceleratorSt... | |
6,038 | 32,946 | 91 | src/transformers/trainer_utils.py | 45 | 11 | def speed_metrics(split, start_time, num_samples=None, num_steps=None):
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if num_samples is not None:
samples_per_second = num_samples / runtime
result[f"{split}_samples_per_second"] = round(samples_per_se... | Fix docstrings with last version of hf-doc-builder styler (#18581)
* Fix docstrings with last version of hf-doc-builder styler
* Remove empty Parameter block | speed_metrics | c23cbdff4c097d3f3039999827a675cf8f06a32e | transformers | trainer_utils.py | 11 | 10 | https://github.com/huggingface/transformers.git | 3 | 86 | 0 | 29 | 145 | Python | {
"docstring": "\n Measure and return speed performance metrics.\n\n This function requires a time snapshot `start_time` before the operation to be measured starts and this function\n should be run immediately after the operation to be measured has completed.\n\n Args:\n - split: name to prefix metric ... | def speed_metrics(split, start_time, num_samples=None, num_steps=None):
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if num_samples is not None:
samples_per_second = num_samples / runtime
result[f"{split}_samples_per_second"] = round(samples_per_se... | |
40,017 | 167,440 | 70 | pandas/io/date_converters.py | 27 | 16 | def parse_date_fields(year_col, month_col, day_col) -> npt.NDArray[np.object_]:
warnings.warn(
, # noqa: E501
FutureWarning,
stacklevel=find_stack_level(),
)
year_col = _maybe_cast(year_col)
| TYP: more return annotations for io/* (#47524)
* TYP: more return annotations for io/*
* import future | parse_date_fields | e48c9c3973286e257f6da1966c91806d86b917e0 | pandas | date_converters.py | 10 | 19 | https://github.com/pandas-dev/pandas.git | 1 | 63 | 0 | 23 | 100 | Python | {
"docstring": "\n Parse columns with years, months and days into a single date column.\n\n .. deprecated:: 1.2\n \n Use pd.to_datetime({\"year\": year_col, \"month\": month_col, \"day\": day_col}) instead to get a Pandas Series.\n Use ser = pd.to_datetime({\"year\": year_col, \"month\": month_... | def parse_date_fields(year_col, month_col, day_col) -> npt.NDArray[np.object_]:
warnings.warn(
, # noqa: E501
FutureWarning,
stacklevel=find_stack_level(),
)
year_col = _maybe_cast(year_col)
month_col = _maybe_cast(month_col)
day_col = _maybe_cast(day_col)
return p... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.