| Column | Type | Length / value range |
|---|---|---|
| ast_errors | string | 0-3.2k chars |
| d_id | int64 | 44-121k |
| id | int64 | 70-338k |
| n_whitespaces | int64 | 3-14k |
| path | string | 8-134 chars |
| n_words | int64 | 4-4.82k |
| n_identifiers | int64 | 1-131 |
| random_cut | string | 16-15.8k chars |
| commit_message | string | 2-15.3k chars |
| fun_name | string | 1-84 chars |
| commit_id | string | 40 chars (git commit SHA) |
| repo | string | 3-28 chars |
| file_name | string | 5-79 chars |
| ast_levels | int64 | 6-31 |
| nloc | int64 | 1-548 |
| url | string | 31-59 chars |
| complexity | int64 | 1-66 |
| token_counts | int64 | 6-2.13k |
| n_ast_errors | int64 | 0-28 |
| vocab_size | int64 | 4-1.11k |
| n_ast_nodes | int64 | 15-19.2k |
| language | string | 1 class (all records: Python) |
| documentation | dict | docstring, language, n_whitespaces, n_words, vocab_size |
| code | string | 101-62.2k chars |
18,905 | 92,382 | 121 | src/sentry/sentry_metrics/indexer/base.py | 33 | 13 | def get_mapped_key_strings_to_ints(self) -> MutableMapping[str, int]:
cache_ | feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380] (#36263)
* feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380]
The postgres string indexer now is able to rate limit writes using four
sentry options. If that happens, `None` is returned in place of an
integer,... | get_mapped_key_strings_to_ints | c4cc0467974bcfb2b3c95120bd19c337aa977183 | sentry | base.py | 13 | 18 | https://github.com/getsentry/sentry.git | 4 | 66 | 0 | 26 | 111 | Python | {
"docstring": "\n Return the results, but formatted as the following:\n {\n \"1:a\": 10,\n \"1:b\": 11,\n \"1:c\", 12,\n \"2:e\": 13\n }\n This is for when we use indexer_cache.set_many()\n ",
"language": "en",
... | def get_mapped_key_strings_to_ints(self) -> MutableMapping[str, int]:
cache_key_results: MutableMapping[str, int] = {}
for org_id, result_dict in self.results.items():
for string, id in result_dict.items():
key = f"{org_id}:{string}"
if id is not None... | |
56,992 | 223,585 | 71 | python3.10.4/Lib/email/_header_value_parser.py | 29 | 12 | def get_atext(value):
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, | add python 3.10.4 for windows | get_atext | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _header_value_parser.py | 12 | 10 | https://github.com/XX-net/XX-Net.git | 2 | 61 | 0 | 23 | 106 | Python | {
"docstring": "atext = <matches _atext_matcher>\n\n We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to\n the token's defects list if we find non-atext characters.\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 24,
"vocab_size": 24
} | def get_atext(value):
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, valu... | |
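The `get_atext` parser above peels one run of atext characters off the front of a header value and returns the token together with the unconsumed remainder. A minimal usage sketch against CPython's private `email._header_value_parser` module (a private stdlib API, shown for illustration only; the exact split point depends on the module's ATOM_ENDS set):

```python
from email._header_value_parser import get_atext

# '@' is an atom-end character, so parsing stops there.
token, rest = get_atext("user@example.com")
print(token, type(token).__name__)  # user ValueTerminal
print(rest)                         # @example.com
```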
3,328 | 20,336 | 20 | pipenv/patched/notpip/_vendor/pygments/formatters/img.py | 6 | 5 | def _get_linenumber_pos(self, lineno):
retur | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | _get_linenumber_pos | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | img.py | 8 | 2 | https://github.com/pypa/pipenv.git | 1 | 21 | 0 | 6 | 34 | Python | {
"docstring": "\n Get the actual position for the start of a line number.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 10
} | def _get_linenumber_pos(self, lineno):
return (self.image_pad, self._get_line_y(lineno))
| |
15,780 | 71,919 | 1,056 | wagtail/admin/tests/test_contentstate.py | 111 | 10 | def test_image_inside_paragraph(self):
| Reformat with black | test_image_inside_paragraph | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_contentstate.py | 16 | 52 | https://github.com/wagtail/wagtail.git | 1 | 181 | 0 | 72 | 347 | Python | {
"docstring": "\n <p>before <embed embedtype=\"image\" alt=\"an image\" id=\"1\" format=\"left\" /> after</p>\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 9,
"vocab_size": 9
} | def test_image_inside_paragraph(self):
# In Draftail's data model, images are block-level elements and therefore
# split up preceding / following text into their own paragraphs
converter = ContentstateConverter(features=["image"])
result = json.loads(
converter.from_database_... | |
76,515 | 260,816 | 216 | sklearn/svm/_bounds.py | 93 | 26 | def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
if loss not in ("squared_hinge", "log"):
raise ValueError('loss type not in ("squared_hinge", "log")')
X = check_array(X, accept_sparse="csc")
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label... | DOC Ensures that l1_min_c passes numpydoc validation (#24134) | l1_min_c | 6d16698dd8ba4407e5c3c588d7b5e6a5257eddc9 | scikit-learn | _bounds.py | 16 | 21 | https://github.com/scikit-learn/scikit-learn.git | 5 | 176 | 0 | 70 | 276 | Python | {
"docstring": "Return the lowest bound for C.\n\n The lower bound for C is computed such that for C in (l1_min_C, infinity)\n the model is guaranteed not to be empty. This applies to l1 penalized\n classifiers, such as LinearSVC with penalty='l1' and\n linear_model.LogisticRegression with penalty='l1'.\n... | def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
if loss not in ("squared_hinge", "log"):
raise ValueError('loss type not in ("squared_hinge", "log")')
X = check_array(X, accept_sparse="csc")
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label... | |
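As a quick usage sketch, the bound returned by `l1_min_c` is typically used to anchor the start of a regularization path; the grid multipliers below are arbitrary:

```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.svm import l1_min_c

X, y = load_iris(return_X_y=True)

# Below this C, every coefficient of an L1-penalized model is exactly zero.
c_min = l1_min_c(X, y, loss="log")
for C in c_min * np.logspace(0, 3, 4):
    clf = LogisticRegression(penalty="l1", solver="liblinear", C=C).fit(X, y)
```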
7,856 | 43,184 | 89 | airflow/migrations/versions/0111_2_3_3_add_indexes_for_cascade_deletes.py | 32 | 10 | def _mysql_tables_where_indexes_already_present(conn):
to_check = [
('xcom', 'idx_xcom_task_instance'),
('task_reschedule', 'idx_task_reschedule_dag_run'),
| Add indexes for CASCADE deletes for task_instance (#24488)
When we add foreign keys with ON DELETE CASCADE, and we delete rows in the foreign table, the database needs to join back to the referencing table. If there's no suitable index, then it can be slow to perform the deletes. | _mysql_tables_where_indexes_already_present | 677c42227c08f705142f298ab88915f133cd94e5 | airflow | 0111_2_3_3_add_indexes_for_cascade_deletes.py | 13 | 11 | https://github.com/apache/airflow.git | 3 | 61 | 0 | 29 | 115 | Python | {
"docstring": "\n If user downgraded and is upgrading again, we have to check for existing\n indexes on mysql because we can't (and don't) drop them as part of the\n downgrade.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 28,
"vocab_size": 27
} | def _mysql_tables_where_indexes_already_present(conn):
to_check = [
('xcom', 'idx_xcom_task_instance'),
('task_reschedule', 'idx_task_reschedule_dag_run'),
('task_fail', 'idx_task_fail_task_instance'),
]
tables = set()
for tbl, idx in to_check:
if conn.execute(f"show... | |
70,986 | 246,075 | 548 | tests/http/test_webclient.py | 103 | 36 | def test_webclient_resolves_with_client_resource(self):
for resource_name_order_list in [
["webclient", "client"],
["client", "webclient"],
]:
# Create a dictionary from path regex -> resource
resource_dict: Dict[str, Resource] = {}
| Add a regression test for using both webclient and client resources simultaneously (#11765) | test_webclient_resolves_with_client_resource | 121b9e2475f4d7b3bca50d81732f07db80b2264f | synapse | test_webclient.py | 18 | 30 | https://github.com/matrix-org/synapse.git | 3 | 150 | 0 | 79 | 241 | Python | {
"docstring": "\n Tests that both client and webclient resources can be accessed simultaneously.\n\n This is a regression test created in response to https://github.com/matrix-org/synapse/issues/11763.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 21,
"vocab_size": 21
} | def test_webclient_resolves_with_client_resource(self):
for resource_name_order_list in [
["webclient", "client"],
["client", "webclient"],
]:
# Create a dictionary from path regex -> resource
resource_dict: Dict[str, Resource] = {}
f... | |
33,921 | 147,365 | 65 | python/ray/cloudpickle/cloudpickle.py | 32 | 10 | def unregister_pickle_by_value(module):
if not isinstance(module, types.ModuleType):
raise ValueError(f"Input should be a module object, got {str(module)} instead")
if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
| [docs] fix doctests and activate CI (#23418) | unregister_pickle_by_value | 60054995e65304fb14e6d0ab69bdec07aa9389fe | ray | cloudpickle.py | 13 | 7 | https://github.com/ray-project/ray.git | 3 | 47 | 0 | 28 | 92 | Python | {
"docstring": "Unregister that the input module should be pickled by value.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def unregister_pickle_by_value(module):
if not isinstance(module, types.ModuleType):
raise ValueError(f"Input should be a module object, got {str(module)} instead")
if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
raise ValueError(f"{module} is not registered for pickle by value")
el... | |
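For context, `unregister_pickle_by_value` is the public counterpart of cloudpickle's `register_pickle_by_value`; a minimal round trip might look like the sketch below, where `my_local_helpers` is a hypothetical module that is not installed on the remote workers:

```python
import cloudpickle
import my_local_helpers  # hypothetical local-only module

# Ship the module's code inside the pickle instead of importing it remotely.
cloudpickle.register_pickle_by_value(my_local_helpers)
payload = cloudpickle.dumps(my_local_helpers.transform)  # hypothetical function

# Restore the default pickle-by-reference behaviour once done.
cloudpickle.unregister_pickle_by_value(my_local_helpers)
```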
16,362 | 75,130 | 155 | wagtail/images/tests/test_admin_views.py | 30 | 19 | def test_get_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.a | Reformat with black | test_get_bad_permissions | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_admin_views.py | 14 | 12 | https://github.com/wagtail/wagtail.git | 1 | 78 | 0 | 24 | 135 | Python | {
"docstring": "\n This tests that the view returns a \"permission denied\" redirect if a user without correct\n permissions attempts to access it\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 19
} | def test_get_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
)
self.user.... | |
3,772 | 21,342 | 40 | pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py | 14 | 7 | def get_archive_formats():
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items | Vendor in pip 22.1.2 | get_archive_formats | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | shutil.py | 10 | 5 | https://github.com/pypa/pipenv.git | 2 | 34 | 0 | 13 | 56 | Python | {
"docstring": "Returns a list of supported formats for archiving and unarchiving.\n\n Each element of the returned sequence is a tuple (name, description)\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 21,
"vocab_size": 19
} | def get_archive_formats():
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
| |
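The same helper exists as public stdlib API, so the vendored backport above can be exercised directly through `shutil`; the list varies with the compression modules available on the platform:

```python
import shutil

for name, description in shutil.get_archive_formats():
    print(f"{name}: {description}")
# e.g. gztar: gzip'ed tar-file, zip: ZIP file, ...
```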
46,070 | 189,462 | 1,007 | manim/mobject/svg/svg_mobject.py | 245 | 48 | def _handle_transforms(self, element, mobject):
if element.hasAttribute("x") and element.hasAttribute("y"):
x = self._attribute_to_float(element.getAttribute("x"))
# Flip y
y = -self._attribute_to_float(element.getAttribute("y"))
mobject.shift(x * RIGHT ... | Hide more private methods from the docs. (#2468)
* hide privs from text_mobject.py
* hide privs from tex_mobject.py
* hide privs from code_mobject.py
* hide privs from svg_mobject.py
* remove SVGPath and utils from __init__.py
* don't import string_to_numbers
* hide privs from geometry.py
* hide p... | handle_transforms | 902e7eb4f0147b5882a613b67467e38a1d47f01e | manim | svg_mobject.py | 18 | 48 | https://github.com/ManimCommunity/manim.git | 14 | 429 | 0 | 143 | 706 | Python | {
"docstring": "Applies the SVG transform to the specified mobject. Transforms include:\n ``matrix``, ``translate``, and ``scale``.\n\n Parameters\n ----------\n element : :class:`minidom.Element`\n The transform command to perform\n\n mobject : :class:`Mobject`\n ... | def _handle_transforms(self, element, mobject):
if element.hasAttribute("x") and element.hasAttribute("y"):
x = self._attribute_to_float(element.getAttribute("x"))
# Flip y
y = -self._attribute_to_float(element.getAttribute("y"))
mobject.shift(x * RIGHT ... | |
@pytest.fixture(name="pro") | 97,233 | 298,288 | 11 | tests/components/airvisual/conftest.py | 6 | 7 | def pro_data_fixture():
return json.loads(load_fixture("data.json", "airvisual_pro"))
@pytest.fixture( | Ensure AirVisual Pro migration includes device and entity customizations (#84798)
* Ensure AirVisual Pro migration includes device and entity customizations
* Update homeassistant/components/airvisual/__init__.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Code review
* Fix tests
* Fix tests ... | pro_data_fixture | 34dc47ad1037c6bf569f8cb2199f5933c2a0a079 | core | conftest.py | 10 | 2 | https://github.com/home-assistant/core.git | 1 | 17 | 1 | 6 | 51 | Python | {
"docstring": "Define an update coordinator data example for the Pro.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def pro_data_fixture():
return json.loads(load_fixture("data.json", "airvisual_pro"))
@pytest.fixture(name="pro") |
76,408 | 260,671 | 549 | sklearn/datasets/_species_distributions.py | 179 | 50 | def fetch_species_distributions(*, data_home=None, download_if_missing=True):
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the ... | DOC Ensures that fetch_species_distributions passes numpydoc validation (#24162)
Co-authored-by: Franck Charras <franck.charras@inria.fr> | fetch_species_distributions | fc656c2189d64a43089f514dcdedb0fae70dfe56 | scikit-learn | _species_distributions.py | 16 | 43 | https://github.com/scikit-learn/scikit-learn.git | 8 | 302 | 0 | 115 | 485 | Python | {
"docstring": "Loader for species distribution dataset from Phillips et. al. (2006).\n\n Read more in the :ref:`User Guide <datasets>`.\n\n Parameters\n ----------\n data_home : str, default=None\n Specify another download and cache folder for the datasets. By default\n all scikit-learn dat... | def fetch_species_distributions(*, data_home=None, download_if_missing=True):
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the ... | |
53,677 | 213,613 | 33 | ivy/core/device.py | 19 | 7 | def set_split_factor(factor, dev=None):
assert 0 <= factor
global split_factors
dev = ivy.default(dev, default_device())
split_f | renamed dev_str arg to dev for all methods. | set_split_factor | d743336b1f3654cd0315f380f43eed4116997c1d | ivy | device.py | 10 | 5 | https://github.com/unifyai/ivy.git | 1 | 34 | 0 | 17 | 56 | Python | {
"docstring": "\n Set the global split factor for a given device, which can be used to scale batch splitting chunk sizes for the\n device across the codebase.\n\n :param factor: The factor to set the device-specific split factor to.\n :type factor: float\n :param dev: The device to set the split facto... | def set_split_factor(factor, dev=None):
assert 0 <= factor
global split_factors
dev = ivy.default(dev, default_device())
split_factors[dev] = factor
# noinspection PyShadowingNames | |
48,271 | 196,977 | 539 | sympy/testing/runtests.py | 197 | 40 | def run(self, test, compileflags=None, out=None, clear_globs=True):
self.test = test
# Remove ``` from the end of example, which may appear in Markdown
# files
for example in test.examples:
example.want = example.want.replace('```\n', '')
example.exc_msg... | Enable doctests in Markdown files | run | 3ebd6862a0c33fcf357d9f4ac5c2a8fd80a98675 | sympy | runtests.py | 15 | 26 | https://github.com/sympy/sympy.git | 7 | 195 | 0 | 129 | 392 | Python | {
"docstring": "\n Run the examples in ``test``, and display the results using the\n writer function ``out``.\n\n The examples are run in the namespace ``test.globs``. If\n ``clear_globs`` is true (the default), then this namespace will\n be cleared after the test runs, to help wit... | def run(self, test, compileflags=None, out=None, clear_globs=True):
self.test = test
# Remove ``` from the end of example, which may appear in Markdown
# files
for example in test.examples:
example.want = example.want.replace('```\n', '')
example.exc_msg... | |
5,217 | 29,303 | 41 | saleor/graphql/product/tests/queries/test_product_variants_query.py | 19 | 10 | def _fetch_all_variants(client, variables={}, permissions=None):
query =
response = client.post_graphql(
query, variables, permissions=permissions, check_no_permissions=False
)
content = get_graphql_content(response)
return content["data"]["productVariants"]
| Split test_product.py and test_variant.py into multiple files (#11173)
* Split test_product.py into multiple files
* Split test_variant.py into multiple files | _fetch_all_variants | d90be220d6b687d08153934a51354011a3cb5ca1 | saleor | test_product_variants_query.py | 9 | 18 | https://github.com/saleor/saleor.git | 1 | 49 | 0 | 17 | 78 | Python | {
"docstring": "\n query fetchAllVariants($channel: String) {\n productVariants(first: 10, channel: $channel) {\n totalCount\n edges {\n node {\n id\n }\n }\n }\n }\n ",
"... | def _fetch_all_variants(client, variables={}, permissions=None):
query =
response = client.post_graphql(
query, variables, permissions=permissions, check_no_permissions=False
)
content = get_graphql_content(response)
return content["data"]["productVariants"]
| |
2,940 | 19,350 | 552 | ArmNavigation/arm_obstacle_navigation/arm_obstacle_navigation.py | 192 | 49 | def astar_torus(grid, start_node, goal_node):
colors = ['white', 'black', 'red', 'pink', 'yellow', 'green', 'orange']
levels = [0, 1, 2, 3, 4, 5, 6, 7]
cmap, norm = from_levels_and_colors(levels, colors)
grid[start_node] = 4
grid[goal_node] = 5
parent_map = [[() for _ in range(M)] for _ i... | docs: Fix a few typos (#695)
There are small typos in:
- ArmNavigation/arm_obstacle_navigation/arm_obstacle_navigation.py
- ArmNavigation/arm_obstacle_navigation/arm_obstacle_navigation_2.py
- docs/modules/slam/FastSLAM1/FastSLAM1_main.rst
- docs/modules/slam/ekf_slam/ekf_slam_main.rst
Fixes:
- Should read `co... | astar_torus | c6bdd48715adcbe17c4146b7cae3b0fc569f7bde | PythonRobotics | arm_obstacle_navigation.py | 17 | 47 | https://github.com/AtsushiSakai/PythonRobotics.git | 13 | 475 | 0 | 134 | 721 | Python | {
"docstring": "\n Finds a path between an initial and goal joint configuration using\n the A* Algorithm on a tororiadal grid.\n\n Args:\n grid: An occupancy grid (ndarray)\n start_node: Initial joint configuration (tuple)\n goal_node: Goal joint configuration (tuple)\n\n Returns:\n ... | def astar_torus(grid, start_node, goal_node):
colors = ['white', 'black', 'red', 'pink', 'yellow', 'green', 'orange']
levels = [0, 1, 2, 3, 4, 5, 6, 7]
cmap, norm = from_levels_and_colors(levels, colors)
grid[start_node] = 4
grid[goal_node] = 5
parent_map = [[() for _ in range(M)] for _ i... | |
75,049 | 257,234 | 53 | haystack/pipelines/base.py | 20 | 8 | def root_node(self) -> Optional[str]:
if len(self.graph.nodes) < 1:
retur | Validate YAML files without loading the nodes (#2438)
* Remove BasePipeline and make a module for RayPipeline
* Can load pipelines from yaml, plenty of issues left
* Extract graph validation logic into _add_node_to_pipeline_graph & refactor load_from_config and add_node to use it
* Fix pipeline tests
* Mov... | root_node | f8e02310bf0dfbd1ab79a1c3c73434e0aeba4f4b | haystack | base.py | 10 | 7 | https://github.com/deepset-ai/haystack.git | 2 | 37 | 0 | 19 | 61 | Python | {
"docstring": "\n Returns the root node of the pipeline's graph.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 7
} | def root_node(self) -> Optional[str]:
if len(self.graph.nodes) < 1:
return None
return list(self.graph.nodes)[0] # List conversion is required, see networkx docs
| |
48,760 | 197,989 | 352 | sympy/core/add.py | 71 | 29 | def as_coefficients_dict(self, *syms):
i | 22531: as_coefficients_dict accepts symbols | as_coefficients_dict | ea7fed2718f07bac46d4e154bd4e7ec31a4289e7 | sympy | add.py | 16 | 23 | https://github.com/sympy/sympy.git | 7 | 187 | 0 | 47 | 297 | Python | {
"docstring": "Return a dictionary mapping terms to their Rational coefficient.\n Since the dictionary is a defaultdict, inquiries about terms which\n were not present will return a coefficient of 0. If an expression is\n not an Add it is considered to have a single term.\n\n If symbols `... | def as_coefficients_dict(self, *syms):
if not syms:
d = defaultdict(list)
for ai in self.args:
c, m = ai.as_coeff_Mul()
d[m].append(c)
for k, v in d.items():
if len(v) == 1:
d[k] = v[0]
... | |
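The plain (no-symbol) call matches SymPy's long-standing docstring example; the symbol-aware form is what this commit adds, and the sketch below assumes it collects coefficients relative to the given symbols:

```python
from sympy.abc import a, x

expr = 3*a*x + 4
print(expr.as_coefficients_dict())   # {a*x: 3, 1: 4}
# With explicit symbols, everything else folds into the coefficient:
print(expr.as_coefficients_dict(x))  # {x: 3*a, 1: 4}
```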
17,632 | 83,227 | 272 | zerver/lib/test_classes.py | 73 | 23 | def verify_emoji_code_foreign_keys(self) -> None:
dct = {}
for row in RealmEmoji.objects.all():
dct[row.id] = row
if not dct:
raise AssertionError("test needs RealmEmoji rows")
count = 0
for row in Reaction.objects.filter(reaction_type=Reaction... | docs: Fix many spelling mistakes.
Signed-off-by: Anders Kaseorg <anders@zulip.com> | verify_emoji_code_foreign_keys | b0ce4f1bce8031881addecb1e86073483517f392 | zulip | test_classes.py | 11 | 29 | https://github.com/zulip/zulip.git | 6 | 179 | 0 | 40 | 282 | Python | {
"docstring": "\n DB tables that refer to RealmEmoji use int(emoji_code) as the\n foreign key. Those tables tend to de-normalize emoji_name due\n to our inheritance-based setup. This helper makes sure those\n invariants are intact, which is particularly tricky during\n the import/e... | def verify_emoji_code_foreign_keys(self) -> None:
dct = {}
for row in RealmEmoji.objects.all():
dct[row.id] = row
if not dct:
raise AssertionError("test needs RealmEmoji rows")
count = 0
for row in Reaction.objects.filter(reaction_type=Reaction... | |
14,080 | 65,988 | 16 | erpnext/erpnext_integrations/doctype/mpesa_settings/mpesa_settings.py | 28 | 15 | def format_string_to_json(balance_info):
Working Account|KES|481000.00|481000.00|0.00|0.00
balance_dict = frappe._dict()
for account_info in balance_info.split("&"):
account_info = account_info.split("|")
balance_dict[account_info[0]] = dict(
current_balance=fmt_money(account_info[2], currency="KES"),
avai... | style: format code with black | format_string_to_json | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | mpesa_settings.py | 15 | 11 | https://github.com/frappe/erpnext.git | 2 | 103 | 0 | 22 | 166 | Python | {
"docstring": "\n\tFormat string to json.\n\n\te.g: \n\t=> {'Working Account': {'current_balance': '481000.00',\n\t 'available_balance': '481000.00',\n\t 'reserved_balance': '0.00',\n\t 'uncleared_balance': '0.00'}}\n\t",
"language": "en",
"n_whitespaces": 35,
"n_words": 16,
"vocab_size"... | def format_string_to_json(balance_info):
Working Account|KES|481000.00|481000.00|0.00|0.00
balance_dict = frappe._dict()
for account_info in balance_info.split("&"):
account_info = account_info.split("|")
balance_dict[account_info[0]] = dict(
current_balance=fmt_money(account_info[2], currency="KES"),
avai... | |
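A usage sketch for the parser above, feeding it the sample balance string from its own docstring. This assumes a Frappe/ERPNext runtime, since the function relies on `frappe._dict` and `fmt_money`, and the exact money formatting depends on that environment:

```python
raw = "Working Account|KES|481000.00|481000.00|0.00|0.00"
balance = format_string_to_json(raw)
# balance ~ {'Working Account': {'current_balance': '481,000.00',
#            'available_balance': '481,000.00',
#            'reserved_balance': '0.00', 'uncleared_balance': '0.00'}}
```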
40,626 | 170,943 | 495 | pandas/io/xml.py | 148 | 23 | def _validate_path(self) -> list[Any]:
msg = (
"xpath does not return any nodes or attributes. "
"Be sure to specify in `xpath` the parent nodes of | STYLE: fix pylint: no-else-raise (#49520)
* fix pylint: no-else-raise
* fix possible imbalanced tuple unpacking warning
Co-authored-by: carlotta <c.fabian@turbit.de> | _validate_path | d13c9e034ce8a1d738766c4b1cec80c76f5523be | pandas | xml.py | 13 | 35 | https://github.com/pandas-dev/pandas.git | 14 | 160 | 0 | 86 | 268 | Python | {
"docstring": "\n Notes\n -----\n `etree` supports limited XPath. If user attempts a more complex\n expression syntax error will raise.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 17,
"vocab_size": 17
} | def _validate_path(self) -> list[Any]:
msg = (
"xpath does not return any nodes or attributes. "
"Be sure to specify in `xpath` the parent nodes of "
"children and attributes to parse. "
"If document uses namespaces denoted with "
"xmlns, be ... | |
35,242 | 153,058 | 183 | modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition.py | 38 | 13 | def mask(self, row_labels, col_labels):
new_obj = | REFACTOR-#2656: Update modin to fit algebra (code only) (#3717)
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Co-authored-by: Vasily Litvinov <vasilij.n.litvinov@intel.com>
Co-authored-by: Alexey Prutskov <alexey.prutskov@intel.com>
Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com... | mask | 58bbcc37477866d19c8b092a0e1974a4f0baa586 | modin | partition.py | 11 | 15 | https://github.com/modin-project/modin.git | 5 | 86 | 0 | 26 | 131 | Python | {
"docstring": "\n Lazily create a mask that extracts the indices provided.\n\n Parameters\n ----------\n row_labels : list-like, slice or label\n The row labels for the rows to extract.\n col_labels : list-like, slice or label\n The column labels for the colum... | def mask(self, row_labels, col_labels):
new_obj = super().mask(row_labels, col_labels)
if isinstance(row_labels, slice) and isinstance(
self._length_cache, ObjectIDType
):
new_obj._length_cache = compute_sliced_len.remote(
row_labels, self._length... | |
10,347 | 51,540 | 102 | modules/image/classification/efficientnetb0_imagenet/processor.py | 33 | 17 | def postprocess(data_out, label_list, top_k):
output = []
for result in data_out:
result_i = softmax(result)
output_i = {}
indexs = np.argsort(result_i)[::-1][0:top_k]
for index in indexs:
label = label_list[index].split(',')[0]
output_i[label] = floa... | update efficientnetb0_imagenet (#2041)
* update efficientnetb0_imagenet
* remove unused print | postprocess | 7cd67aba38c19a835c3229d9b4be21798c5c8673 | PaddleHub | processor.py | 14 | 11 | https://github.com/PaddlePaddle/PaddleHub.git | 3 | 86 | 0 | 25 | 138 | Python | {
"docstring": "\n Postprocess output of network, one image at a time.\n\n Args:\n data_out (numpy.ndarray): output data of network.\n label_list (list): list of label.\n top_k (int): Return top k results.\n ",
"language": "en",
"n_whitespaces": 58,
"n_words": 27,
"vocab_size": 2... | def postprocess(data_out, label_list, top_k):
output = []
for result in data_out:
result_i = softmax(result)
output_i = {}
indexs = np.argsort(result_i)[::-1][0:top_k]
for index in indexs:
label = label_list[index].split(',')[0]
output_i[label] = floa... | |
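The argsort-then-reverse idiom used above for top-k selection works standalone; here is a self-contained sketch with a local softmax standing in for the module's helper:

```python
import numpy as np

def softmax(x):
    e = np.exp(x - np.max(x))  # shift by the max for numerical stability
    return e / e.sum()

probs = softmax(np.array([2.0, 1.0, 0.1, 3.5]))
top_k = 2
top_idx = np.argsort(probs)[::-1][:top_k]  # indices of the k largest scores
print(top_idx, probs[top_idx])             # [3 0] first
```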
2,635 | 13,415 | 164 | jina/serve/executors/__init__.py | 47 | 8 | def requests(self):
if hasattr(self, '_requests'):
return self._requests
else:
if not hasattr(self, 'requests_by_class'):
| fix: fix bug inheritance, requests nested dict (#5380) | requests | b44d767f22bd862cdb75926ba388c14f5db0323c | jina | __init__.py | 14 | 10 | https://github.com/jina-ai/jina.git | 4 | 83 | 0 | 34 | 137 | Python | {
"docstring": "\n Get the request dictionary corresponding to this specific class\n\n :return: Returns the requests corresponding to the specific Executor instance class\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 14
} | def requests(self):
if hasattr(self, '_requests'):
return self._requests
else:
if not hasattr(self, 'requests_by_class'):
self.requests_by_class = {}
if self.__class__.__name__ not in self.requests_by_class:
self.requests_by_cl... | |
6,170 | 33,860 | 65 | src/transformers/pipelines/text2text_generation.py | 27 | 12 | def __call__(self, *args, **kwargs):
r
result = sup | Fixing t2t pipelines lists outputs. (#15008)
Backward compatibility broken in
https://github.com/huggingface/transformers/pull/14988 | __call__ | 8c2618e6aac3473da7757fb230690ffd4aea4c32 | transformers | text2text_generation.py | 10 | 32 | https://github.com/huggingface/transformers.git | 5 | 68 | 0 | 23 | 102 | Python | {
"docstring": "\n Generate the output text(s) using text(s) given as inputs.\n\n Args:\n args (`str` or `List[str]`):\n Input text for the encoder.\n return_tensors (`bool`, *optional*, defaults to `False`):\n Whether or not to include the tensors of ... | def __call__(self, *args, **kwargs):
r
result = super().__call__(*args, **kwargs)
if isinstance(args[0], list) and all(isinstance(el, str) for el in args[0]):
return [res[0] for res in result]
return result
| |
4,217 | 22,145 | 133 | pipenv/patched/pip/_vendor/requests/utils.py | 42 | 10 | def rewind_body(prepared_request):
body_seek = getattr(prepared_request.body, "seek", None)
if body_seek is not None and isinstance(
prepared_request._body_p | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | rewind_body | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | utils.py | 13 | 13 | https://github.com/pypa/pipenv.git | 4 | 56 | 0 | 37 | 97 | Python | {
"docstring": "Move file pointer back to its recorded starting position\n so it can be read again on redirect.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 17,
"vocab_size": 17
} | def rewind_body(prepared_request):
body_seek = getattr(prepared_request.body, "seek", None)
if body_seek is not None and isinstance(
prepared_request._body_position, integer_types
):
try:
body_seek(prepared_request._body_position)
except OSError:
raise Un... | |
118,400 | 323,181 | 192 | paddlenlp/trainer/utils/helper.py | 64 | 18 | def nested_concat(tensors, new_tensors, padding_index=-100):
assert type(tensors) == type(
new_tensors
), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_c... | [Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)
* add some datasets for finetune.
* support fine tune for all tastks.
* add trainer prototype.
* init verison for paddlenlp trainer.
* refine trainer.
* update for some details.
* support multi-card... | nested_concat | 44a290e94d1becd1f09fddc3d873f9e19c9d6919 | PaddleNLP | helper.py | 14 | 17 | https://github.com/PaddlePaddle/PaddleNLP.git | 5 | 116 | 0 | 50 | 200 | Python | {
"docstring": "\n Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or\n nested list/tuples of tensors.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 25,
"vocab_size": 22
} | def nested_concat(tensors, new_tensors, padding_index=-100):
assert type(tensors) == type(
new_tensors
), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_c... | |
10,764 | 53,269 | 72 | src/prefect/cli/orion.py | 22 | 10 | def kubernetes_manifest():
tem | Add kubernetes manifest commands | kubernetes_manifest | 23365cf7727c45f38ad983d610ffec5c15ceca21 | prefect | orion.py | 15 | 10 | https://github.com/PrefectHQ/prefect.git | 1 | 44 | 0 | 18 | 83 | Python | {
"docstring": "\n Generates a kubernetes manifest for to deploy Orion to a cluster.\n\n Example:\n $ prefect orion kubernetes-manifest | kubectl apply -f -\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 21,
"vocab_size": 19
} | def kubernetes_manifest():
template = Template(
(prefect.__module_path__ / "cli" / "templates" / "kubernetes.yaml").read_text()
)
manifest = template.substitute(
{
"image_name": get_prefect_image_name(),
}
)
print(manifest)
| |
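The command above is just `string.Template` substitution over a packaged YAML file; the stdlib mechanism in isolation (the placeholder keys here are made up for illustration, not Prefect's real template):

```python
from string import Template

template = Template("image: $image_name\nreplicas: $replicas")
print(template.substitute({"image_name": "prefecthq/prefect:2", "replicas": "1"}))
```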
18,149 | 86,690 | 1,253 | tests/sentry/api/endpoints/test_project_dynamic_sampling.py | 183 | 43 | def test_queries_when_requested_project_is_head_of_trace(self, mock_query, mock_querybuilder):
# Case A: Head of trace project
self.login_as(self.user)
heart = self.create_project(
name="Heart", slug="heart", teams=[self.team], fire_project_created=True
)
moc... | feat(dynamic-sampling): Improve empty transaction breakdown message [TET-338] (#39539)
This PR add new attribute parentProjectBreakdown to
/api/0/projects/<organization_slug>/<project_slug>/dynamic-sampling/distribution/
api:
```
{
"projectBreakdown": null,
"sampleSize": 0,
"startTimestamp": null,
"end... | test_queries_when_requested_project_is_head_of_trace | ceee9dfd8d6fed70d34546e7b46ebb7bf1d49745 | sentry | test_project_dynamic_sampling.py | 14 | 77 | https://github.com/getsentry/sentry.git | 1 | 384 | 0 | 103 | 644 | Python | {
"docstring": "\n Case A: Requesting for a project (bar) that is root but is a head of distributed traces\n Example of smart query response (DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_PROJECT_STATS):\n |---------+-------+------|\n | project | count | root |\n |---------+-------+------|\n ... | def test_queries_when_requested_project_is_head_of_trace(self, mock_query, mock_querybuilder):
# Case A: Head of trace project
self.login_as(self.user)
heart = self.create_project(
name="Heart", slug="heart", teams=[self.team], fire_project_created=True
)
moc... | |
14,129 | 66,180 | 18 | erpnext/hr/doctype/leave_ledger_entry/leave_ledger_entry.py | 29 | 15 | def validate_leave_allocation_against_leave_application(ledger):
leave_app | style: format code with black | validate_leave_allocation_against_leave_application | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | leave_ledger_entry.py | 14 | 20 | https://github.com/frappe/erpnext.git | 2 | 61 | 0 | 27 | 100 | Python | {
"docstring": "Checks that leave allocation has no leave application against it\n\t\tSELECT transaction_name\n\t\tFROM `tabLeave Ledger Entry`\n\t\tWHERE\n\t\t\temployee=%s\n\t\t\tAND leave_type=%s\n\t\t\tAND transaction_type='Leave Application'\n\t\t\tAND from_date>=%s\n\t\t\tAND to_date<=%s\n\t",
"language": "en... | def validate_leave_allocation_against_leave_application(ledger):
leave_application_records = frappe.db.sql_list(
,
(ledger.employee, ledger.leave_type, ledger.from_date, ledger.to_date),
)
if leave_application_records:
frappe.throw(
_("Leave allocation {0} is linked with the Leave Application {1}").forma... | |
52,657 | 209,346 | 129 | scapy/contrib/pnio_rpc.py | 53 | 6 | def dce_rpc_endianess(pkt):
try:
endianness = pkt.underlayer.endian
except AttributeError:
# handle the case where a PN | MS-RPCE support (#3674)
* Add DCE/RPC
* Add tests to DCERPC5 / PNIO_RPC fixes
* Support for NDR fields in DCERPC
* Fully implement KRB5_GSS
* Support also RFC4121 | dce_rpc_endianess | a738a0b375a5599187626c9a9b081f7c25392f69 | scapy | pnio_rpc.py | 10 | 11 | https://github.com/secdev/scapy.git | 4 | 38 | 0 | 39 | 78 | Python | {
"docstring": "determine the symbol for the endianness of a the DCE/RPC",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 8
} | def dce_rpc_endianess(pkt):
try:
endianness = pkt.underlayer.endian
except AttributeError:
# handle the case where a PNIO class is
# built without its DCE-RPC under-layer
# i.e there is no endianness indication
return "!"
if endianness == 0: # big endian
... | |
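The symbol this helper returns is a struct-style byte-order prefix, as used throughout Scapy's field machinery; for reference, the stdlib behaviour of those prefixes:

```python
import struct

struct.pack("!H", 0x1234)  # b'\x12\x34': '!' = network order (big-endian)
struct.pack("<H", 0x1234)  # b'\x34\x12': '<' = little-endian
```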
10,853 | 53,590 | 424 | src/prefect/flow_runners.py | 108 | 17 | def _get_extra_hosts(self, docker_client) -> Dict[str, str]:
if sys.platform == "linux" and (
# Do not | Add pattern for loading CLI defaults from settings
Also, renames ORION_HOST to API_URL and adds utils to `Settings` to retrieve things by the envar key | _get_extra_hosts | b25d9d283b714c719f363176d49892188c50dffd | prefect | flow_runners.py | 14 | 25 | https://github.com/PrefectHQ/prefect.git | 5 | 99 | 0 | 87 | 188 | Python | {
"docstring": "\n A host.docker.internal -> host-gateway mapping is necessary for communicating\n with the API on Linux machines. Docker Desktop on macOS will automatically\n already have this mapping.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 25,
"vocab_size": 24
} | def _get_extra_hosts(self, docker_client) -> Dict[str, str]:
if sys.platform == "linux" and (
# Do not warn if the user has specified a host manually that does not use
# a local address
"PREFECT_API_URL" not in self.env
or re.search(
".*(l... | |
54,254 | 215,927 | 247 | tests/pytests/unit/modules/test_win_certutil.py | 47 | 12 | def test_del_store():
with patch("salt.modules.win_certutil.get_cert_serial") as cert_serial_mock:
cmd_mock = MagicMock(
return_value=(
"CertInfo\r\n"
"================ Certificate 0 ================\r\n"
"Serial Number: 180720d39cd2db3244ba03... | Add tests, fix state module | test_del_store | a8d2d1e1397cdc79b2c5f1ad7f6e3b729dcf8857 | salt | test_win_certutil.py | 14 | 20 | https://github.com/saltstack/salt.git | 1 | 93 | 0 | 42 | 188 | Python | {
"docstring": "\n Test removing a certificate to a specific store\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 7
} | def test_del_store():
with patch("salt.modules.win_certutil.get_cert_serial") as cert_serial_mock:
cmd_mock = MagicMock(
return_value=(
"CertInfo\r\n"
"================ Certificate 0 ================\r\n"
"Serial Number: 180720d39cd2db3244ba03... | |
117,233 | 320,622 | 95 | tests/conftest.py | 44 | 15 | def _select_backend(config):
backend_arg = config.getoption('--qute-backend')
backend_env = os. | tests: Remove some unused imports | _select_backend | 9c4169c7b7d96a10012a72c70fc38c6154f7481f | qutebrowser | conftest.py | 10 | 11 | https://github.com/qutebrowser/qutebrowser.git | 5 | 62 | 0 | 31 | 113 | Python | {
"docstring": "Select the backend for running tests.\n\n The backend is auto-selected in the following manner:\n 1. Use QtWebKit if available\n 2. Otherwise use QtWebEngine as a fallback\n\n Auto-selection is overridden by either passing a backend via\n `--qute-backend=<backend>` or setting the enviro... | def _select_backend(config):
backend_arg = config.getoption('--qute-backend')
backend_env = os.environ.get('QUTE_TESTS_BACKEND')
backend = backend_arg or backend_env or _auto_select_backend()
# Fail early if selected backend is not available
# pylint: disable=unused-import
if backend == '... | |
47,842 | 196,342 | 85 | sympy/logic/boolalg.py | 28 | 15 | def equals(self, other):
from sympy.logic.inference import satisfiable
from sympy.core.relational import Relational
if self.has(Relational) or other.has(Relational):
raise NotImplementedError('handling of relationals')
return self.atoms() == other.atoms() and \
... | Updated import locations | equals | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | boolalg.py | 13 | 7 | https://github.com/sympy/sympy.git | 4 | 71 | 0 | 26 | 113 | Python | {
"docstring": "\n Returns True if the given formulas have the same truth table.\n For two formulas to be equal they must have the same literals.\n\n Examples\n ========\n\n >>> from sympy.abc import A, B, C\n >>> from sympy import And, Or, Not\n >>> (A >> B).equals(~B... | def equals(self, other):
from sympy.logic.inference import satisfiable
from sympy.core.relational import Relational
if self.has(Relational) or other.has(Relational):
raise NotImplementedError('handling of relationals')
return self.atoms() == other.atoms() and \
... | |
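The truth-table semantics described in the docstring can be checked directly; this mirrors the (truncated) docstring example:

```python
from sympy.abc import A, B

print((A >> B).equals(~B >> ~A))  # True: contrapositive, same truth table
print((A >> B).equals(B >> A))    # False: same literals, different tables
```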
14,703 | 67,999 | 45 | erpnext/stock/utils.py | 64 | 16 | def get_latest_stock_qty(item_code, warehouse=None):
values, condition = [item_code], ""
if warehouse:
lft, rgt, is_group = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt", "is_group"])
if is_group:
values.extend([lft, rgt])
condition += "and exists (\
select | style: format code with black | get_latest_stock_qty | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | utils.py | 13 | 20 | https://github.com/frappe/erpnext.git | 3 | 98 | 0 | 52 | 165 | Python | {
"docstring": "select sum(actual_qty) from tabBin\n\t\twhere item_code=%s {0}",
"language": "en",
"n_whitespaces": 5,
"n_words": 7,
"vocab_size": 7
} | def get_latest_stock_qty(item_code, warehouse=None):
values, condition = [item_code], ""
if warehouse:
lft, rgt, is_group = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt", "is_group"])
if is_group:
values.extend([lft, rgt])
condition += "and exists (\
select name from `tabWarehouse` wh where... | |
75,584 | 259,125 | 138 | sklearn/kernel_approximation.py | 45 | 21 | def get_feature_names_out(self, input_features=None):
input_features = _check_feature_names_in(
self, input_features, generate_names=True
)
est_name = self.__class__.__nam | ENH Adds get_feature_names_out for AdditiveChi2Sampler (#22137)
Co-authored-by: Olivier Grisel <olivier.grisel@gmail.com>
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> | get_feature_names_out | 67a3feed2fe4e82c1cc129c34b9e223b94a8d531 | scikit-learn | kernel_approximation.py | 11 | 11 | https://github.com/scikit-learn/scikit-learn.git | 5 | 94 | 0 | 31 | 176 | Python | {
"docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Only used to validate feature names with the names seen in :meth:`fit`.\n\n Returns\n -------\n feature_names_out :... | def get_feature_names_out(self, input_features=None):
input_features = _check_feature_names_in(
self, input_features, generate_names=True
)
est_name = self.__class__.__name__.lower()
names_list = [f"{est_name}_{name}_sqrt" for name in input_features]
for j ... | |
16,391 | 75,312 | 63 | wagtail/images/tests/test_templatetags.py | 21 | 12 | def test_render_valid_image_as_context_variable(self):
context = {"image": self.image, "image_node": "fake value"}
node = ImageNode(Variable("image"), "original", "image_node")
rendered = node.render(context) | Reformat with black | test_render_valid_image_as_context_variable | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_templatetags.py | 11 | 6 | https://github.com/wagtail/wagtail.git | 1 | 59 | 0 | 19 | 108 | Python | {
"docstring": "\n Tests that an ImageNode with a valid image and a context variable name\n renders an empty string and puts a rendition in the context variable\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 25,
"vocab_size": 19
} | def test_render_valid_image_as_context_variable(self):
context = {"image": self.image, "image_node": "fake value"}
node = ImageNode(Variable("image"), "original", "image_node")
rendered = node.render(context)
self.assertEqual(rendered, "")
self.assertIsInstance(context... | |
47,479 | 195,934 | 61 | sympy/polys/rootisolation.py | 29 | 12 | def dup_cauchy_lower_bound(f, K):
g = dup_reverse(f)
if len(g) < 2:
raise PolynomialError('Polynomial has no non-zero roots.')
if K.is_ZZ:
K = K.get_field()
b = dup_cauchy_upper_bound(g, K)
return | Add `dup_...` funcs for Cauchy bounds. | dup_cauchy_lower_bound | 4f34fcc3406452ace4a70d541064f2dfdcee9205 | sympy | rootisolation.py | 10 | 8 | https://github.com/sympy/sympy.git | 3 | 53 | 0 | 25 | 89 | Python | {
"docstring": "Compute the Cauchy lower bound on the absolute value of all non-zero\n roots of f, real or complex.",
"language": "en",
"n_whitespaces": 23,
"n_words": 18,
"vocab_size": 16
} | def dup_cauchy_lower_bound(f, K):
g = dup_reverse(f)
if len(g) < 2:
raise PolynomialError('Polynomial has no non-zero roots.')
if K.is_ZZ:
K = K.get_field()
b = dup_cauchy_upper_bound(g, K)
return K.one / b
| |
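The reversal trick rests on a standard identity: for $f(x) = a_n x^n + \dots + a_0$ with $a_0 \neq 0$, the nonzero roots of the reversed polynomial $x^n f(1/x)$ are the reciprocals of the nonzero roots of $f$. Applying the classical Cauchy upper bound

$$B(g) = 1 + \frac{\max_{0 \le i < m} |b_i|}{|b_m|}, \qquad g(x) = b_m x^m + \dots + b_0,$$

to the reversal $g$ therefore gives $|r| \ge 1/B(g)$ for every nonzero root $r$ of $f$, which is exactly the `K.one / b` the function returns.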
35,807 | 154,142 | 1,287 | modin/core/io/column_stores/parquet_dispatcher.py | 327 | 50 | def call_deploy(cls, fname, col_partitions, storage_options, **kwargs):
from pyarrow.parquet import ParquetFile
from modin.core.storage_formats.pandas.parsers import ParquetFileToRead
# If we don't have any columns to read, we should just return an empty
# set of references.
... | FIX-#4756: Correctly propagate `storage_options` in `read_parquet` (#4764)
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Co-authored-by: Alexey Prutskov <lehaprutskov@gmail.com>
Signed-off-by: Karthik Velayutham <vkarthik@ponder.io> | call_deploy | 4548012a6372b8ce79d7e07c9ae13fd7444a91c8 | modin | parquet_dispatcher.py | 13 | 69 | https://github.com/modin-project/modin.git | 9 | 287 | 0 | 182 | 460 | Python | {
"docstring": "\n Deploy remote tasks to the workers with passed parameters.\n\n Parameters\n ----------\n fname : str, path object or file-like object\n Name of the file to read.\n col_partitions : list\n List of arrays with columns names that should be read\... | def call_deploy(cls, fname, col_partitions, storage_options, **kwargs):
from pyarrow.parquet import ParquetFile
from modin.core.storage_formats.pandas.parsers import ParquetFileToRead
# If we don't have any columns to read, we should just return an empty
# set of references.
... | |
35,407 | 153,459 | 98 | modin/db_conn.py | 26 | 12 | def get_connection(self):
if self.lib == _PSYCOPG_LIB_NAME:
import psycopg2
return psycopg2.connect(*self.args, **self.kwargs)
if self.lib == _SQLALCHEMY_LIB_NAME:
from sqlalchemy import create_engine
| FEAT-#979: Enable reading from SQL server. (#4279)
Co-authored-by: eavidan <eran.avidan@intel.com>
Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com>
Signed-off-by: mvashishtha <mahesh@ponder.io> | get_connection | 2d40797b2b700d81d4db4a4cd023d563edf6431f | modin | db_conn.py | 13 | 8 | https://github.com/modin-project/modin.git | 3 | 63 | 0 | 21 | 106 | Python | {
"docstring": "\n Make the database connection and get it.\n\n For psycopg2, pass all arguments to psycopg2.connect() and return the\n result of psycopg2.connect(). For sqlalchemy, pass all arguments to\n sqlalchemy.create_engine() and return the result of calling connect()\n on th... | def get_connection(self):
if self.lib == _PSYCOPG_LIB_NAME:
import psycopg2
return psycopg2.connect(*self.args, **self.kwargs)
if self.lib == _SQLALCHEMY_LIB_NAME:
from sqlalchemy import create_engine
return create_engine(*self.args, **self.kwar... | |
@keras_export("keras.backend.argmin")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs | 80,095 | 269,459 | 12 | keras/backend.py | 9 | 10 | def argmax(x, axis=-1):
return tf.argmax(x, axis)
@keras_export("keras.backend.argmin")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | argmax | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | backend.py | 7 | 2 | https://github.com/keras-team/keras.git | 1 | 20 | 1 | 9 | 62 | Python | {
"docstring": "Returns the index of the maximum value along an axis.\n\n Args:\n x: Tensor or variable.\n axis: axis along which to perform the reduction.\n\n Returns:\n A tensor.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 26,
"vocab_size": 23
} | def argmax(x, axis=-1):
return tf.argmax(x, axis)
@keras_export("keras.backend.argmin")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs |
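A minimal check of the thin `tf.argmax` wrapper above, via TensorFlow's Keras backend (default `axis=-1`):

```python
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([[0.2, 0.7, 0.1],
                 [0.9, 0.05, 0.05]])
print(K.argmax(x, axis=-1))  # tf.Tensor([1 0], shape=(2,), dtype=int64)
```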
49,448 | 199,955 | 184 | sympy/core/facts.py | 51 | 8 | def print_rules(self) -> Iterator[str]:
yield from self._defined_facts_lines()
yield ''
yield ''
yield from self._full_implications_lines()
yield ''
yield ''
yield from self._prereq_lines()
yield ''
yield ''
yield from self._beta_r... | refactor | print_rules | f68e8de4252200cfc74b9433d00f77c4510ac68d | sympy | facts.py | 8 | 18 | https://github.com/sympy/sympy.git | 1 | 63 | 0 | 24 | 140 | Python | {
"docstring": " Returns a generator with lines to represent the facts and rules ",
"language": "en",
"n_whitespaces": 12,
"n_words": 11,
"vocab_size": 11
} | def print_rules(self) -> Iterator[str]:
yield from self._defined_facts_lines()
yield ''
yield ''
yield from self._full_implications_lines()
yield ''
yield ''
yield from self._prereq_lines()
yield ''
yield ''
yield from self._beta_r... | |
56,673 | 222,610 | 21 | python3.10.4/Lib/distutils/cmd.py | 7 | 5 | def ensure_string(self, option, default=None):
self._ensure_stringlike(option, "strin | add python 3.10.4 for windows | ensure_string | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | cmd.py | 8 | 2 | https://github.com/XX-net/XX-Net.git | 1 | 22 | 0 | 7 | 36 | Python | {
"docstring": "Ensure that 'option' is a string; if not defined, set it to\n 'default'.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 13,
"vocab_size": 13
} | def ensure_string(self, option, default=None):
self._ensure_stringlike(option, "string", default)
| |
9,926 | 49,815 | 27 | modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/nn.py | 14 | 11 | def update_ema(target_params, source_params, rate=0.99):
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 | add disco_diffusion_cnclip_vitb16 module | update_ema | f4d6e64cdc132ae868699a0ba442f4ab1d304a14 | PaddleHub | nn.py | 13 | 3 | https://github.com/PaddlePaddle/PaddleHub.git | 2 | 47 | 0 | 14 | 70 | Python | {
"docstring": "\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n ",
... | def update_ema(target_params, source_params, rate=0.99):
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
| |
@lru_cache(maxsize=1) | 3,904 | 21,526 | 188 | pipenv/patched/notpip/_vendor/platformdirs/android.py | 68 | 19 | def _android_folder() -> str | None:
try:
# First try to get path to android app via pyjnius
from jnius import autoclass
| Vendor in pip 22.1.2 | _android_folder | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | android.py | 17 | 15 | https://github.com/pypa/pipenv.git | 4 | 86 | 1 | 52 | 164 | Python | {
"docstring": ":return: base folder for the Android OS or None if cannot be found",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def _android_folder() -> str | None:
try:
# First try to get path to android app via pyjnius
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
except Exceptio... |
52,587 | 209,060 | 63 | scapy/volatile.py | 27 | 8 | def de_bruijn(charset, n, maxlen):
# type: (str, int, int) -> str
k = len(char | Add CyclicPattern class for generation of payload data (#3508)
* Add CyclicPattern class for generation of payload data
* minor enhancment
* fix python2
* fix python2
* use six
* fix flake | de_bruijn | e2fc7dddb40a7b80f2e65ad6593c0b10080019d0 | scapy | volatile.py | 9 | 7 | https://github.com/secdev/scapy.git | 1 | 44 | 0 | 21 | 50 | Python | {
"docstring": "\n Generate the De Bruijn Sequence up to `maxlen` characters\n for the charset `charset` and subsequences of length `n`.\n Algorithm modified from wikipedia\n https://en.wikipedia.org/wiki/De_Bruijn_sequence\n ",
"language": "en",
"n_whitespaces": 59,
"n_words"... | def de_bruijn(charset, n, maxlen):
# type: (str, int, int) -> str
k = len(charset)
a = [0] * k * n
sequence = [] # type: List[str]
| |
21,283 | 101,901 | 168 | lib/gui/display_command.py | 33 | 24 | def _add_option_refresh(self) -> None:
logger.debug("Adding refresh option")
btnrefresh = ttk.Button(self.optsframe,
image=get_images().icons["reload"],
command=lambda x="update": preview_trigger().set(x)) # type:ignore
bt... | Typing - lib.gui.display_command | _add_option_refresh | dab823a3eb7a5257cb1e0818ee10ed234d3de97f | faceswap | display_command.py | 14 | 11 | https://github.com/deepfakes/faceswap.git | 1 | 86 | 0 | 30 | 147 | Python | {
"docstring": " Add refresh button to refresh preview immediately ",
"language": "en",
"n_whitespaces": 8,
"n_words": 7,
"vocab_size": 6
} | def _add_option_refresh(self) -> None:
logger.debug("Adding refresh option")
btnrefresh = ttk.Button(self.optsframe,
image=get_images().icons["reload"],
command=lambda x="update": preview_trigger().set(x)) # type:ignore
bt... | |
69,929 | 242,808 | 201 | src/PIL/Image.py | 60 | 14 | def close(self):
try:
if hasattr(self, "_close__fp"):
self._close__fp()
if self.fp:
self.fp.close()
self.fp = None
except Exception as msg:
logger.debug("Error closing: %s", msg)
if getat | [Private] class names should be CamelCase | close | 7fa92c67b1471a66739c4768cdef616c27675981 | Pillow | Image.py | 12 | 12 | https://github.com/python-pillow/Pillow.git | 5 | 77 | 0 | 51 | 138 | Python | {
"docstring": "\n Closes the file pointer, if possible.\n\n This operation will destroy the image core and release its memory.\n The image data will be unusable afterward.\n\n This function is required to close images that have multiple frames or\n have not had their file read and ... | def close(self):
try:
if hasattr(self, "_close__fp"):
self._close__fp()
if self.fp:
self.fp.close()
self.fp = None
except Exception as msg:
logger.debug("Error closing: %s", msg)
if getattr(self, "map", Non... | |
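As the docstring warns, explicit `close()` is rarely needed; the usual pattern is the context manager, which closes the file pointer on exit (`photo.jpg` is a placeholder path):

```python
from PIL import Image

with Image.open("photo.jpg") as im:  # close() runs automatically on exit
    im.load()  # force-read the pixel data while the file is still open
```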
47,458 | 195,871 | 31 | sympy/solvers/diophantine/diophantine.py | 16 | 12 | def diop_general_sum_of_squares(eq, limit=1):
r
var, coeff, diop_type = classify_diop(eq, _dict=False)
if diop_type == GeneralSumOfSquares.name:
return set(GeneralSumOfSquares(eq).solve(limit=limit))
| Improved documentation formatting | diop_general_sum_of_squares | cda8dfe6f45dc5ed394c2f5cda706cd6c729f713 | sympy | diophantine.py | 13 | 37 | https://github.com/sympy/sympy.git | 2 | 47 | 0 | 15 | 73 | Python | {
"docstring": "\n Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.\n\n Returns at most ``limit`` number of solutions.\n\n Usage\n =====\n\n ``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which\n is assumed to be zero. Also, ``eq`` should be in the form,\n ... | def diop_general_sum_of_squares(eq, limit=1):
r
var, coeff, diop_type = classify_diop(eq, _dict=False)
if diop_type == GeneralSumOfSquares.name:
return set(GeneralSumOfSquares(eq).solve(limit=limit))
| |
@Directory.register | 45,631 | 186,806 | 97 | acme/acme/messages.py | 34 | 14 | def resolved_combinations(self) -> Tuple[Tuple[ChallengeBody, ...], ...]:
| deprecate more attributes in acme (#9369)
* deprecate more attributes in acme
* Deprecate .Authorization.combinations by renaming the field and
deprecating in getters/setters
* Silence deprecation warnings from our own imports of acme.mixins
Co-authored-by: Brad Warren <bmw@users.noreply.github.com> | resolved_combinations | f7e61edcb2ea3195c9889c407a08e6dffb7f60dc | certbot | messages.py | 11 | 11 | https://github.com/certbot/certbot.git | 3 | 50 | 1 | 31 | 87 | Python | {
"docstring": "Combinations with challenges instead of indices.\n\n .. deprecated: 1.30.0\n\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 9,
"vocab_size": 9
} | def resolved_combinations(self) -> Tuple[Tuple[ChallengeBody, ...], ...]:
warnings.warn(
"acme.messages.Authorization.resolved_combinations is deprecated and will be "
"removed in a future release.", DeprecationWarning)
return tuple(tuple(self.challenges[idx] for idx in ... |
75,768 | 259,434 | 383 | sklearn/_loss/tests/test_loss.py | 174 | 25 | def test_tweedie_log_identity_consistency(p):
half_tweedie_log = HalfTweedieLoss(power=p)
half_tweedie_identity = HalfTweedieLossIdentity(power=p)
n_samples = 10
y_true, raw_prediction = random_y_true_raw_prediction(
loss=half_tweedie_log, n_samples=n_samples, seed=42
)
y_pred = hal... | ENH migrate GLMs / TweedieRegressor to linear loss (#22548)
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> | test_tweedie_log_identity_consistency | 75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc | scikit-learn | test_loss.py | 10 | 25 | https://github.com/scikit-learn/scikit-learn.git | 1 | 155 | 0 | 109 | 255 | Python | {
"docstring": "Test for identical losses when only the link function is different.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_tweedie_log_identity_consistency(p):
half_tweedie_log = HalfTweedieLoss(power=p)
half_tweedie_identity = HalfTweedieLossIdentity(power=p)
n_samples = 10
y_true, raw_prediction = random_y_true_raw_prediction(
loss=half_tweedie_log, n_samples=n_samples, seed=42
)
y_pred = hal... | |
@_noconds_(True) | 48,218 | 196,851 | 306 | sympy/integrals/transforms.py | 89 | 33 | def laplace_transform(f, t, s, legacy_matrix=True, **hints):
r
debug('\n***** laplace_transform(%s, %s, %s)'%(f, t, s))
if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'):
conds = not hints.get('noconds', False)
if conds and legacy_matrix:
SymPyDeprecationWarning(
... | Fix a few docstring formatting issues | laplace_transform | 1eeb01e15f06c6692a5bfd6fd2d2a3002d864a07 | sympy | transforms.py | 17 | 85 | https://github.com/sympy/sympy.git | 7 | 196 | 1 | 71 | 315 | Python | {
"docstring": "\n Compute the Laplace Transform `F(s)` of `f(t)`,\n\n .. math :: F(s) = \\int_{0^{-}}^\\infty e^{-st} f(t) \\mathrm{d}t.\n\n Explanation\n ===========\n\n For all sensible functions, this converges absolutely in a\n half-plane\n\n .. math :: a < \\operatorname{Re}(s)\n\n This ... | def laplace_transform(f, t, s, legacy_matrix=True, **hints):
r
debug('\n***** laplace_transform(%s, %s, %s)'%(f, t, s))
if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'):
conds = not hints.get('noconds', False)
if conds and legacy_matrix:
SymPyDeprecationWarning(
... |
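A short usage sketch for the `laplace_transform` record above; the three-tuple return (transform, convergence abscissa, auxiliary condition) and the `noconds` hint follow the docstring, and the printed results are what a recent SymPy is expected to produce:

```python
# Hedged sketch of sympy.integrals.transforms.laplace_transform.
from sympy import symbols, laplace_transform, exp

t, s = symbols("t s", positive=True)

# L{t} = 1/s**2; the extra entries give the half-plane of convergence.
print(laplace_transform(t, t, s))  # (s**(-2), 0, True)

# noconds=True drops the convergence information and returns F(s) alone.
print(laplace_transform(exp(-t), t, s, noconds=True))  # 1/(s + 1)
```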
74,867 | 256,315 | 171 | test/benchmarks/nq_to_squad.py | 71 | 14 | def reduce_annotations(anno_types, answers):
for at in set(anno_types):
assert at in ("no_answer", "short_answer")
if anno_types.count("short_answer") >= anno_types.count("no_answer"):
majority = "short_answer"
is_impossible = False
else:
majority = "no_answer"
i... | Apply black formatting (#2115)
* Testing black on ui/
* Applying black on docstores
* Add latest docstring and tutorial changes
* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too
* Remove comments
* Relax constraints on pydoc-ma... | reduce_annotations | a59bca366174d9c692fa19750c24d65f47660ef7 | haystack | nq_to_squad.py | 10 | 20 | https://github.com/deepset-ai/haystack.git | 6 | 112 | 0 | 45 | 191 | Python | {
"docstring": "\n In cases where there is annotator disagreement, this fn picks either only the short_answers or only the no_answers,\n depending on which is more numerous, with a bias towards picking short_answers.\n\n Note: By this stage, all long_answer annotations and all samples with yes/no answer have... | def reduce_annotations(anno_types, answers):
for at in set(anno_types):
assert at in ("no_answer", "short_answer")
if anno_types.count("short_answer") >= anno_types.count("no_answer"):
majority = "short_answer"
is_impossible = False
else:
majority = "no_answer"
i... | |
107,162 | 308,405 | 222 | homeassistant/components/mqtt/cover.py | 35 | 22 | async def async_open_cover(self, **kwargs):
await mqtt.async_publish(
self.ha | Add mqtt encoding support for publishing (#62739)
* encoding support for mqtt publishing - todo tests
* signature allows None values for qos and retain
* common test for mqtt publishing encoding
* better test with command templates
* more tests
* fix tests alarm control panel+tests light basic
* te... | async_open_cover | d0c4f0fec4216e4193da716001b5e13e1e3f2106 | core | cover.py | 14 | 16 | https://github.com/home-assistant/core.git | 3 | 98 | 0 | 32 | 150 | Python | {
"docstring": "Move the cover up.\n\n This method is a coroutine.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 9,
"vocab_size": 9
} | async def async_open_cover(self, **kwargs):
await mqtt.async_publish(
self.hass,
self._config.get(CONF_COMMAND_TOPIC),
self._config[CONF_PAYLOAD_OPEN],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],... | |
3,191 | 20,042 | 176 | pipenv/patched/notpip/_vendor/distro.py | 43 | 10 | def _parse_distro_release_content(line):
# type: (str) -> Dict[str, str]
matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
distro_info = {}
if matches:
# regexp ensures non-None
distro_info["name"] = matches.group(3 | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | _parse_distro_release_content | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | distro.py | 13 | 12 | https://github.com/pypa/pipenv.git | 5 | 109 | 0 | 32 | 201 | Python | {
"docstring": "\n Parse a line from a distro release file.\n\n Parameters:\n * line: Line from the distro release file. Must be a unicode string\n or a UTF-8 encoded byte string.\n\n Returns:\n A dictionary containing all information items.\n ",
"languag... | def _parse_distro_release_content(line):
# type: (str) -> Dict[str, str]
matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
distro_info = {}
if matches:
# regexp ensures non-None
distro_info["name"] = matches.group(3)[::-1]
... | |
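A hedged illustration of what `_parse_distro_release_content` extracts from a typical distro release line; the method is private and the import path is an assumption (the standalone `distro` package carries the same code that pip vendors):

```python
# Hypothetical call into the private parser shown in the record above.
from distro import LinuxDistribution  # same module pip vendors

info = LinuxDistribution._parse_distro_release_content(
    "Fedora release 32 (Thirty Two)"
)
print(info)  # expected: {'name': 'Fedora', 'version_id': '32', 'codename': 'Thirty Two'}
```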
6,866 | 37,755 | 333 | src/transformers/modeling_utils.py | 167 | 45 | def load_sharded_checkpoint(model, folder, strict=True):
# Load the index
index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)
if not os.path.isfile(index_file):
raise ValueError(f"Can't find a checkpoint index ({WEIGHTS_INDEX_NAME}) in {folder}.")
with open(index_file, "r", encoding="utf... | Make Trainer compatible with sharded checkpoints (#17053)
* Make Trainer compatible with sharded checkpoints
* Add doc | load_sharded_checkpoint | a8fa2f91f409a0657937016b983b74f58a07ae72 | transformers | modeling_utils.py | 15 | 26 | https://github.com/huggingface/transformers.git | 14 | 264 | 0 | 104 | 468 | Python | {
"docstring": "\n This is the same as\n [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict)\n but for a sharded checkpoint.\n\n This load is performed efficiently: each checkpoint shard is loaded one... | def load_sharded_checkpoint(model, folder, strict=True):
# Load the index
index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)
if not os.path.isfile(index_file):
raise ValueError(f"Can't find a checkpoint index ({WEIGHTS_INDEX_NAME}) in {folder}.")
with open(index_file, "r", encoding="utf... | |
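A hedged usage sketch for `load_sharded_checkpoint`; the folder path is a placeholder and must contain the `pytorch_model.bin.index.json` written by `save_pretrained` with sharding, and the loaded model must match the architecture the shards were saved from:

```python
# Hypothetical folder path; any model matching the shards would do.
from transformers import AutoModel
from transformers.modeling_utils import load_sharded_checkpoint

model = AutoModel.from_pretrained("bert-base-uncased")
# strict=False tolerates missing/unexpected keys, mirroring load_state_dict.
result = load_sharded_checkpoint(model, "path/to/sharded_checkpoint", strict=False)
print(result)  # expected: a load_state_dict-style missing/unexpected-keys report
```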
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs | 13,980 | 65,661 | 62 | erpnext/controllers/queries.py | 86 | 27 | def customer_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
cust_master_name = frappe.defaults.get_user_default("cust_master_name")
if cust_master_name == "Customer Name":
fields = ["name", "customer_group", "territory"]
else:
fields = ["name", "customer_name", "customer_group", "t... | style: format code with black | customer_query | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | queries.py | 16 | 30 | https://github.com/frappe/erpnext.git | 3 | 172 | 1 | 69 | 322 | Python | {
"docstring": "select {fields} from `tabCustomer`\n\t\twhere docstatus < 2\n\t\t\tand ({scond}) and disabled=0\n\t\t\t{fcond} {mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),\n\t\t\tidx desc,\n\... | def customer_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
cust_master_name = frappe.defaults.get_user_default("cust_master_name")
if cust_master_name == "Customer Name":
fields = ["name", "customer_group", "territory"]
else:
fields = ["name", "customer_name", "customer_group", "t... |
18,042 | 85,773 | 21 | src/sentry/tagstore/base.py | 7 | 6 | def get_group_tag_value_count(self, group, environment_id, key):
raise No | feat(perf_issues): Fix `GroupTagKeyDetailsEndpoint` to work for performance issues (#38860)
This allows this endpoint to return results for performance issues. | get_group_tag_value_count | 72e351082168f68cbaa5700a51e8ed577222e887 | sentry | base.py | 6 | 2 | https://github.com/getsentry/sentry.git | 1 | 14 | 0 | 7 | 22 | Python | {
"docstring": "\n >>> get_group_tag_value_count(group, 3, 'key1')\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 4,
"vocab_size": 4
} | def get_group_tag_value_count(self, group, environment_id, key):
raise NotImplementedError
| |
9,144 | 47,522 | 174 | tests/jobs/test_scheduler_job.py | 47 | 35 | def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker):
dag_i | Replace usage of `DummyOperator` with `EmptyOperator` (#22974)
* Replace usage of `DummyOperator` with `EmptyOperator` | test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state | 49e336ae0302b386a2f47269a6d13988382d975f | airflow | test_scheduler_job.py | 11 | 17 | https://github.com/apache/airflow.git | 1 | 139 | 0 | 38 | 233 | Python | {
"docstring": "This tests that task instances whose dagrun is in finished state are not queued",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 14
} | def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
session = settings.Session()
with dag_maker(dag_id=dag_id, start_date=DEFAULT_DA... | |
12,746 | 61,907 | 633 | .venv/lib/python3.8/site-packages/pip/_vendor/distlib/compat.py | 155 | 21 | def match_hostname(cert, hostname):
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.... | upd; format | match_hostname | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | compat.py | 15 | 30 | https://github.com/jindongwang/transferlearning.git | 12 | 166 | 0 | 106 | 314 | Python | {
"docstring": "Verify that *cert* (in decoded format as returned by\n SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125\n rules are followed, but IP addresses are not accepted for *hostname*.\n\n CertificateError is raised on failure. On success, the function\n retur... | def match_hostname(cert, hostname):
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.... | |
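A hedged sketch exercising the `match_hostname` backport above with a decoded-certificate dict in the shape `SSLSocket.getpeercert()` returns; per the docstring, success returns nothing and a mismatch raises `CertificateError`:

```python
# Import path assumes the standalone distlib package, which carries the
# same compat module that pip vendors in this record.
from distlib.compat import match_hostname, CertificateError

cert = {"subjectAltName": (("DNS", "example.com"), ("DNS", "*.example.org"))}

match_hostname(cert, "example.com")      # exact SAN match: returns None
match_hostname(cert, "www.example.org")  # left-most wildcard label matches

try:
    match_hostname(cert, "attacker.test")
except CertificateError as exc:
    print(exc)  # hostname 'attacker.test' doesn't match either of ...
```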
22,008 | 104,860 | 112 | src/datasets/iterable_dataset.py | 18 | 19 | def take(self, n) -> "IterableDataset":
ex_iterable = TakeExamplesIterable(self._ex_iterable, n)
return iterable_dataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
format_type=self._format_type,
shuffling=copy... | Stream private zipped images (#4173)
* keep track of repo_id and token to decode remote images
* add test
* fix
* docstrings + comments
* fix string_to_dict
* fix tests | take | f51b6994db27ea69261ef919fb7775928f9ec10b | datasets | iterable_dataset.py | 11 | 29 | https://github.com/huggingface/datasets.git | 1 | 67 | 0 | 17 | 106 | Python | {
"docstring": "\n Create a new IterableDataset with only the first ``n`` elements.\n\n Args:\n n (:obj:`int`): number of elements to take.\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\", split=\"train\", ... | def take(self, n) -> "IterableDataset":
ex_iterable = TakeExamplesIterable(self._ex_iterable, n)
return iterable_dataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
format_type=self._format_type,
shuffling=copy... | |
19,068 | 94,333 | 615 | tests/sentry/event_manager/test_event_manager.py | 66 | 29 | def test_category_match_group(self):
from sentry.grouping.enhancer import Enhancements
enhancement = Enhancements.from_config_string(
,
)
event = make_event(
platform="native",
exception={
"values": [
{
... | test(event_manager): Fix incorrect invocations of manager.save (#36615) | test_category_match_group | 39cfdcb446e74732c67ce07d7dd8d8d5ace471b1 | sentry | test_event_manager.py | 20 | 38 | https://github.com/getsentry/sentry.git | 1 | 154 | 0 | 47 | 265 | Python | {
"docstring": "\n Regression test to ensure categories are applied consistently and don't\n produce hash mismatches.\n \n function:foo category=foo_like\n category:foo_like -group\n ",
"language": "en",
"n_whitespaces": 73,
"n_words": 17,
"vocab_size": 17... | def test_category_match_group(self):
from sentry.grouping.enhancer import Enhancements
enhancement = Enhancements.from_config_string(
,
)
event = make_event(
platform="native",
exception={
"values": [
{
... | |
19,326 | 96,559 | 366 | src/sentry/plugins/bases/notify.py | 48 | 26 | def notify(self, notification, raise_exception=False):
event = notification.event
try:
return self.notify_users(
event.group, event, triggering_rules=[r.label for r in notification.rules]
)
except (
ApiError,
HTTPError,
... | fix(plugins): Silence error (#32042) | notify | 542484c0cd71625e62e086f3f7c5aaf85360f724 | sentry | notify.py | 16 | 26 | https://github.com/getsentry/sentry.git | 4 | 114 | 0 | 45 | 175 | Python | {
"docstring": "\n This calls the notify_users method of the plugin.\n Normally this method eats the error and logs it but if we\n set raise_exception=True like we do for the test plugin button,\n the exception is raised\n ",
"language": "en",
"n_whitespaces": 70,
"n_words": 3... | def notify(self, notification, raise_exception=False):
event = notification.event
try:
return self.notify_users(
event.group, event, triggering_rules=[r.label for r in notification.rules]
)
except (
ApiError,
HTTPError,
... | |
17,345 | 82,298 | 51 | cms/tests/test_rendering.py | 16 | 14 | def test_processors(self):
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from cms.plugin_pool import plugin_pool
instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0]
load_from_string = self.load_template_from_string
| Enabled isort workflow (#7200)
* Ran isort
* Enabled isort workflow
Co-authored-by: Vinit Kumar <mail@vinitkumar.me> | test_processors | a3110e1ff24085373898c7d2a85f628abeb8518d | django-cms | test_rendering.py | 13 | 27 | https://github.com/django-cms/django-cms.git | 1 | 169 | 0 | 13 | 69 | Python | {
"docstring": "\n Tests that plugin processors and plugin context processors can be defined\n in settings and are working and that extra plugin context processors can be\n passed to PluginContext.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 27,
"vocab_size": 17
} | def test_processors(self):
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from cms.plugin_pool import plugin_pool
instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0]
load_from_string = self.load_template_from_string
| |
115,025 | 316,447 | 25 | tests/test_config_entries.py | 13 | 9 | async def test_unique_id_ignore(hass, manager):
async_setup_entry = AsyncMock(return_value=False)
mock_integration(hass, MockModule("comp", async_setup_entry=async_setup_entry))
mock_entity_platform(hass, "config_flow.comp", None)
| Search/replace RESULT_TYPE_* by FlowResultType enum (#74642) | test_unique_id_ignore | 7cd68381f1d4f58930ffd631dfbfc7159d459832 | core | test_config_entries.py | 10 | 24 | https://github.com/home-assistant/core.git | 1 | 185 | 0 | 13 | 63 | Python | {
"docstring": "Test that we can ignore flows that are in progress and have a unique ID.",
"language": "en",
"n_whitespaces": 14,
"n_words": 15,
"vocab_size": 14
} | async def test_unique_id_ignore(hass, manager):
async_setup_entry = AsyncMock(return_value=False)
mock_integration(hass, MockModule("comp", async_setup_entry=async_setup_entry))
mock_entity_platform(hass, "config_flow.comp", None)
| |
53,448 | 212,840 | 58 | PySimpleGUI.py | 19 | 10 | def bind(self, bind_string, key, propagate=True):
if not self._is_window_created('tried Window.bind'):
return
self.TKroot.bind(bind_string, lambda evt: self._user_bind_callback(bind_string, evt, propagate))
self.user_bind_d | Added propagate parameter to the Element.bind and Window.bind methods. Indicates whether tkinter should propagate the event to the corresponding element/window or stop with the user callback | bind | b3680477c755277192715b343e9cd4254de7c45e | PySimpleGUI | PySimpleGUI.py | 10 | 5 | https://github.com/PySimpleGUI/PySimpleGUI.git | 2 | 54 | 0 | 19 | 85 | Python | {
"docstring": "\n Used to add tkinter events to a Window.\n The tkinter specific data is in the Window's member variable user_bind_event\n :param bind_string: The string tkinter expected in its bind function\n :type bind_string: (str)\n :param key: The event that will be g... | def bind(self, bind_string, key, propagate=True):
if not self._is_window_created('tried Window.bind'):
return
self.TKroot.bind(bind_string, lambda evt: self._user_bind_callback(bind_string, evt, propagate))
self.user_bind_dict[bind_string] = key
| |
52,997 | 211,000 | 759 | ppdet/modeling/heads/cascade_head.py | 167 | 52 | def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None):
targets = []
if self.training:
rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs)
targets_list = [targets]
self.assigned_rois = (rois, rois_num)
self.assi... | upgrade cascade model (#6346)
* add reg_class_agnostic
* add loss_rpn_bbox | forward | d409ec06779e9de0cdbd76af4dc2c00b4b58ccb0 | PaddleDetection | cascade_head.py | 17 | 41 | https://github.com/PaddlePaddle/PaddleDetection.git | 10 | 390 | 0 | 107 | 585 | Python | {
"docstring": "\n body_feats (list[Tensor]): Feature maps from backbone\n rois (Tensor): RoIs generated from RPN module\n rois_num (Tensor): The number of RoIs in each image\n inputs (dict{Tensor}): The ground-truth of image\n ",
"language": "en",
"n_whitespaces": 64,
"n_word... | def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None):
targets = []
if self.training:
rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs)
targets_list = [targets]
self.assigned_rois = (rois, rois_num)
self.assi... | |
56,997 | 223,601 | 24 | python3.10.4/Lib/email/_header_value_parser.py | 12 | 7 | def get_fws(value):
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
| add python 3.10.4 for windows | get_fws | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _header_value_parser.py | 13 | 4 | https://github.com/XX-net/XX-Net.git | 1 | 37 | 0 | 10 | 64 | Python | {
"docstring": "FWS = 1*WSP\n\n This isn't the RFC definition. We're using fws to represent tokens where\n folding can be done, but when we are parsing the *un*folding has already\n been done so we don't need to watch out for CRLF.\n\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 39,
"voc... | def get_fws(value):
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
| |
37,386 | 158,218 | 147 | d2l/mxnet.py | 42 | 21 | def load_data_snli(batch_size, num_steps=50):
num_workers = d2l.get_dataloader_workers()
data_dir = d2l.download_extract('SNLI')
train_data = read_snli(data_dir, True)
test_data = read_snli(data_dir, False)
train_set = SNLIDataset(train_data, | [PaddlePaddle] Merge master into Paddle branch (#1186)
* change 15.2 title in chinese version (#1109)
change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘
* 修改部分语义表述 (#1105)
* Update r0.17.5 (#1120)
* Bump versions in installation
* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)
* line 313: "b... | load_data_snli | b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2 | d2l-zh | mxnet.py | 9 | 12 | https://github.com/d2l-ai/d2l-zh.git | 1 | 109 | 0 | 32 | 165 | Python | {
"docstring": "Download the SNLI dataset and return data iterators and vocabulary.\n\n Defined in :numref:`sec_natural-language-inference-and-dataset`",
"language": "en",
"n_whitespaces": 15,
"n_words": 13,
"vocab_size": 12
} | def load_data_snli(batch_size, num_steps=50):
num_workers = d2l.get_dataloader_workers()
data_dir = d2l.download_extract('SNLI')
train_data = read_snli(data_dir, True)
test_data = read_snli(data_dir, False)
train_set = SNLIDataset(train_data, num_steps)
test_set = SNLIDataset(test_data, num... | |
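A usage sketch for `load_data_snli` following the d2l book's convention; the batch size and sequence length are arbitrary, and the first call downloads and extracts the SNLI archive:

```python
# Hedged sketch: iterate one SNLI minibatch. Assumes the MXNet flavor of d2l.
from d2l import mxnet as d2l

train_iter, test_iter, vocab = d2l.load_data_snli(batch_size=128, num_steps=50)
print(len(vocab))  # vocabulary size

for X, Y in train_iter:
    # X packs (premises, hypotheses); Y holds the entailment labels.
    print(X[0].shape, X[1].shape, Y.shape)  # (128, 50) (128, 50) (128,)
    break
```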
34,972 | 151,197 | 189 | freqtrade/freqai/utils.py | 84 | 34 | def plot_feature_importance(model, feature_names, pair, train_dir, count_max=50) -> None:
try:
import plotly.graph_objects as go
from plotly.subplots import make_subplots
except ImportError:
logger.exception("Module plotly not found \n Please install using `pip3 install plotly`")
... | plot features as html instead of png | plot_feature_importance | 86aa875bc9d5edeba04f908fe45b011e52045c83 | freqtrade | utils.py | 13 | 37 | https://github.com/freqtrade/freqtrade.git | 4 | 229 | 0 | 67 | 261 | Python | {
"docstring": "\n Plot Best and Worst Features by importance for CatBoost model.\n Called once per sub-train.\n Usage: plot_feature_importance(\n model=model,\n feature_names=dk.training_features_list,\n pair=pair,\n train_dir=dk.data_path)\n ",
"... | def plot_feature_importance(model, feature_names, pair, train_dir, count_max=50) -> None:
try:
import plotly.graph_objects as go
from plotly.subplots import make_subplots
except ImportError:
logger.exception("Module plotly not found \n Please install using `pip3 install plotly`")
... | |
51,034 | 205,205 | 95 | django/db/backends/sqlite3/introspection.py | 23 | 12 | def get_primary_key_column(self, cursor, table_name):
cursor.execute(
"PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name)
)
for _, name, *_, pk in cursor.fetchall():
if pk:
return name
return | Refs #33476 -- Reformatted code with Black. | get_primary_key_column | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | introspection.py | 12 | 8 | https://github.com/django/django.git | 3 | 50 | 0 | 22 | 80 | Python | {
"docstring": "Return the column name of the primary key for the given table.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 10
} | def get_primary_key_column(self, cursor, table_name):
cursor.execute(
"PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name)
)
for _, name, *_, pk in cursor.fetchall():
if pk:
return name
return None
| |
17,455 | 82,603 | 301 | cms/utils/setup.py | 95 | 14 | def validate_settings():
try:
django_backend = [x for x in settings.TEMPLATES
if x['BACKEND'] == 'django.template.backends.django.DjangoTemplates'][0]
except IndexError:
raise ImproperlyConfigured(
"django CMS requires django.template.context_processors... | fix: Adds a deprecation warning for SEND_BROKEN_LINK_EMAILS (#7420)
* Fix: toolbar bug 3.10.rc1
* Feat: Dark mode support, including input from @marksweb, bugfix for tooltips
* Upstream change to be able to merge
* Feat: Dark mode support, including input from @marksweb, bugfix for tooltips
* Revert "Fix:... | validate_settings | d38f4a1cc7fc6b9e06a01622dd584329b73b410d | django-cms | setup.py | 14 | 21 | https://github.com/django-cms/django-cms.git | 8 | 108 | 0 | 68 | 201 | Python | {
"docstring": "\n Check project settings file for required options\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | def validate_settings():
try:
django_backend = [x for x in settings.TEMPLATES
if x['BACKEND'] == 'django.template.backends.django.DjangoTemplates'][0]
except IndexError:
raise ImproperlyConfigured(
"django CMS requires django.template.context_processors... | |
35,209 | 152,965 | 44 | modin/config/envvars.py | 16 | 4 | def get(cls):
min_partition_size = super().get()
assert min_partition_size > 0, "`min_partition_size` should be > 0"
return min_partition_size
| REFACTOR-#3768: change 'compute_chunksize' signature (#3769)
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: Anatoly Myachev <anatoly.myachev@intel.com> | get | 0bdc482d6f1682e103b4c4d7ee7c4d505d2d3b1c | modin | envvars.py | 10 | 4 | https://github.com/modin-project/modin.git | 1 | 23 | 0 | 13 | 42 | Python | {
"docstring": "\n Get ``MinPartitionSize`` with extra checks.\n\n Returns\n -------\n int\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 8,
"vocab_size": 8
} | def get(cls):
min_partition_size = super().get()
assert min_partition_size > 0, "`min_partition_size` should be > 0"
return min_partition_size
| |
27,510 | 124,089 | 531 | python/ray/tune/examples/pbt_function.py | 207 | 25 | def pbt_function(config):
lr = config["lr"]
accuracy = 0.0 # end = 1000
start = 0
if session.get_checkpoint():
state = session.get_checkpoint().to_dict()
accuracy = state["acc"]
start = state["step"]
midpoint = 100 # lr starts decreasing after acc > midpoint
q_tol... | [air] update documentation to use `session.report` (#26051)
Update documentation to use `session.report`.
Next steps:
1. Update our internal caller to use `session.report`. Most importantly, CheckpointManager and DataParallelTrainer.
2. Update `get_trial_resources` to use PGF notions to incorporate the requiremen... | pbt_function | ac831fded416381ad3c7fe2ba135eaa4aaab5879 | ray | pbt_function.py | 16 | 37 | https://github.com/ray-project/ray.git | 7 | 253 | 0 | 114 | 407 | Python | {
"docstring": "Toy PBT problem for benchmarking adaptive learning rate.\n\n The goal is to optimize this trainable's accuracy. The accuracy increases\n fastest at the optimal lr, which is a function of the current accuracy.\n\n The optimal lr schedule for this problem is the triangle wave as follows.\n N... | def pbt_function(config):
lr = config["lr"]
accuracy = 0.0 # end = 1000
start = 0
if session.get_checkpoint():
state = session.get_checkpoint().to_dict()
accuracy = state["acc"]
start = state["step"]
midpoint = 100 # lr starts decreasing after acc > midpoint
q_tol... | |
84,764 | 284,498 | 77 | openbb_terminal/stocks/options/hedge/hedge_model.py | 53 | 12 | def add_hedge_option(price, implied_volatility, strike, days, side):
# Determine delta position given the option
delta = calc_delta(price, implied_volatility, strike, days, 0, side)
# Determine gamma position given the option
gamma = calc_gamma(price, implied_volatility, strike, days, 0)
# De... | Feature/hedge (#1768)
* [Bug] Incorrect log for reddit keys. #1733 fix
* Create new feature-hedge
* Significantly improve code of hedge menu
* More robust
* Robustness
* Fix tests
* Fix can't multiply sequence by non-int of type 'numpy.float64' error
* Temporary fix of singular matrix error. Retur... | add_hedge_option | 54a1b6f545a0016c576e9e00eef5c003d229dacf | OpenBBTerminal | hedge_model.py | 8 | 5 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 1 | 64 | 0 | 25 | 88 | Python | {
"docstring": "Determine the delta, gamma and vega value of the portfolio and/or options.\n\n Parameters\n ----------\n price: int\n The price.\n implied_volatility: float\n The implied volatility.\n strike: float\n The strike price.\n days: float\n The amount of days un... | def add_hedge_option(price, implied_volatility, strike, days, side):
# Determine delta position given the option
delta = calc_delta(price, implied_volatility, strike, days, 0, side)
# Determine gamma position given the option
gamma = calc_gamma(price, implied_volatility, strike, days, 0)
# De... | |
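The helpers `calc_delta` and `calc_gamma` are not shown in this record, so the following is a self-contained textbook Black-Scholes sketch of the same Greeks with zero rates (mirroring the literal `0` passed above); it is not necessarily OpenBB's exact implementation:

```python
# Hedged Black-Scholes Greeks with risk-free rate assumed to be 0.
from math import erf, exp, log, pi, sqrt

def _norm_cdf(x: float) -> float:
    return 0.5 * (1.0 + erf(x / sqrt(2.0)))

def _norm_pdf(x: float) -> float:
    return exp(-0.5 * x * x) / sqrt(2.0 * pi)

def bs_delta_gamma(price, implied_volatility, strike, days, side):
    """side=1 for a call, side=-1 for a put; `days` is converted to years."""
    t = days / 365.0
    d1 = (log(price / strike) + 0.5 * implied_volatility**2 * t) / (
        implied_volatility * sqrt(t)
    )
    delta = _norm_cdf(d1) if side == 1 else _norm_cdf(d1) - 1.0
    gamma = _norm_pdf(d1) / (price * implied_volatility * sqrt(t))
    return delta, gamma

print(bs_delta_gamma(100.0, 0.20, 105.0, 30, side=1))  # small call delta, positive gamma
```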
57,028 | 223,645 | 93 | python3.10.4/Lib/email/charset.py | 33 | 9 | def header_encode(self, string):
codec = self.output_codec or 'us-ascii'
header_bytes = _encode(string, codec)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
encoder_module = self._get_encoder(header_bytes)
if encoder_module is None:
r... | add python 3.10.4 for windows | header_encode | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | charset.py | 8 | 7 | https://github.com/XX-net/XX-Net.git | 3 | 47 | 0 | 26 | 78 | Python | {
"docstring": "Header-encode a string by converting it first to bytes.\n\n The type of encoding (base64 or quoted-printable) will be based on\n this charset's `header_encoding`.\n\n :param string: A unicode string for the header. It must be possible\n to encode this string to bytes u... | def header_encode(self, string):
codec = self.output_codec or 'us-ascii'
header_bytes = _encode(string, codec)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
encoder_module = self._get_encoder(header_bytes)
if encoder_module is None:
r... | |
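A usage sketch for `Charset.header_encode`; utf-8 defaults to the SHORTEST header encoding, so the result is an RFC 2047 encoded word using whichever of base64 or quoted-printable is smaller:

```python
from email.charset import Charset

print(Charset("utf-8").header_encode("héllo"))     # e.g. =?utf-8?b?aMOpbGxv?=
# us-ascii defines no header encoding, so the string comes back unchanged.
print(Charset("us-ascii").header_encode("hello"))  # hello
```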
7,639 | 42,583 | 720 | nltk/corpus/reader/bcp47.py | 137 | 20 | def data_dict(self, records):
self.version = records[0].replace("File-Date:", "").strip()
dic = {}
dic["deprecated"] = {}
for label in [
"language",
"extlang",
"script",
"region",
"variant",
"redundant",
... | Support both iso639-3 codes and BCP-47 language tags (#3060)
* Add support for iso639-3 language codes
* Add support for retired language codes
* Move langnames.py to the top-level
* Add langcode() function
* Add iso639retired dictionary
* Improve wrapper functions
* Add module docstring with doctest... | data_dict | f019fbedb3d2b6a2e6b58ec1b38db612b106568b | nltk | bcp47.py | 17 | 44 | https://github.com/nltk/nltk.git | 14 | 294 | 0 | 75 | 484 | Python | {
"docstring": "Convert the BCP-47 language subtag registry to a dictionary",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def data_dict(self, records):
self.version = records[0].replace("File-Date:", "").strip()
dic = {}
dic["deprecated"] = {}
for label in [
"language",
"extlang",
"script",
"region",
"variant",
"redundant",
... | |
90,844 | 291,740 | 11 | tests/test_core.py | 5 | 6 | def test_async_add_hass_job_schedule_partial_coroutinefunction(event_loop):
| Upgrade pytest-aiohttp (#82475)
* Upgrade pytest-aiohttp
* Make sure executors, tasks and timers are closed
Some test will trigger warnings on garbage collect, these warnings
spills over into next test.
Some test trigger tasks that raise errors on shutdown, these spill
over into next test.
This is to mim... | test_async_add_hass_job_schedule_partial_coroutinefunction | c576a68d336bc91fd82c299d9b3e5dfdc1c14960 | core | test_core.py | 12 | 8 | https://github.com/home-assistant/core.git | 1 | 82 | 0 | 5 | 34 | Python | {
"docstring": "Test that we schedule partial coros and add jobs to the job pool.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def test_async_add_hass_job_schedule_partial_coroutinefunction(event_loop):
hass = MagicMock(loop=MagicMock(wraps=event_loop))
| |
75,208 | 258,256 | 216 | haystack/utils/squad_data.py | 36 | 22 | def to_label_objs(self, answer_type="generative"):
df_labels = self.df[["id", "question", "answer_text", "answer_start", "context", "document_id"]]
record_dicts = df_labels.to_dict("records")
labels = [
Label(
query=record["question"],
answer=... | refactor: update Squad data (#3513)
* refractor the to_squad data class
* fix the validation label
* refractor the to_squad data class
* fix the validation label
* add the test for the to_label object function
* fix the tests for to_label_objects
* move all the test related to squad data to one file... | to_label_objs | d114a994f1af71d3721cecd14da6f6b4592043b8 | haystack | squad_data.py | 17 | 16 | https://github.com/deepset-ai/haystack.git | 2 | 124 | 0 | 32 | 206 | Python | {
"docstring": "Export all labels stored in this object to haystack.Label objects",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def to_label_objs(self, answer_type="generative"):
df_labels = self.df[["id", "question", "answer_text", "answer_start", "context", "document_id"]]
record_dicts = df_labels.to_dict("records")
labels = [
Label(
query=record["question"],
answer=... | |
116,994 | 319,781 | 75 | src/documents/tests/test_api.py | 19 | 13 | def test_api_get_storage_path(self):
response = self.client.get("/api/storage_paths/", format="json")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["count"], 1)
resp_storage_path = response.data["... | Increases test coverage of storage paths | test_api_get_storage_path | 53baed03895f28f24113d376b089e3ef281b34ed | paperless-ngx | test_api.py | 10 | 8 | https://github.com/paperless-ngx/paperless-ngx.git | 1 | 94 | 0 | 16 | 155 | Python | {
"docstring": "\n GIVEN:\n - API request to get all storage paths\n WHEN:\n - API is called\n THEN:\n - Existing storage paths are returned\n ",
"language": "en",
"n_whitespaces": 83,
"n_words": 21,
"vocab_size": 16
} | def test_api_get_storage_path(self):
response = self.client.get("/api/storage_paths/", format="json")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["count"], 1)
resp_storage_path = response.data["... | |
76,249 | 260,439 | 52 | sklearn/manifold/tests/test_mds.py | 33 | 13 | def test_normalize_metric_warning():
msg = "Normalized stress is not supported"
sim = np.array([[0, 5, 3, 4], | ENH Calculate normed stress (Stress-1) in `manifold.MDS` (#22562)
Co-authored-by: Chiara Marmo <cmarmo@users.noreply.github.com>
Co-authored-by: Roth E Conrad <rotheconrad@gatech.edu>
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> | test_normalize_metric_warning | ae51c13af76af206e6815d0ca0d0052f73167caa | scikit-learn | test_mds.py | 10 | 5 | https://github.com/scikit-learn/scikit-learn.git | 1 | 82 | 0 | 29 | 117 | Python | {
"docstring": "\n Test that a UserWarning is emitted when using normalized stress with\n metric-MDS.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 12,
"vocab_size": 12
} | def test_normalize_metric_warning():
msg = "Normalized stress is not supported"
sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
with pytest.raises(ValueError, match=msg):
mds.smacof(sim, metric=True, normalized_stress=True)
| |
13,576 | 64,188 | 32 | erpnext/patches/v13_0/add_bin_unique_constraint.py | 54 | 25 | def delete_and_patch_duplicate_bins():
duplicate_bins = frappe.db.sql(, as_dict=1)
for duplicate_bin in duplicate_bins:
existing_bins = frappe.get_list("Bin",
filters={
"item_code": duplicate_bin.item_code,
"warehouse": duplicate_bin.warehouse
},
fields=["name"],
order_by="creation",)
... | refactor: patch for fixing broken bins
fix(patch): delete fully broken bins
if bin doesn't have item_code or warehouse then it's not recoverable. | delete_and_patch_duplicate_bins | c2ecc7a2d1da839423fd768821b1f77ddcf7f53d | erpnext | add_bin_unique_constraint.py | 14 | 30 | https://github.com/frappe/erpnext.git | 3 | 158 | 0 | 47 | 254 | Python | {
"docstring": "\n\t\tSELECT\n\t\t\titem_code, warehouse, count(*) as bin_count\n\t\tFROM\n\t\t\ttabBin\n\t\tGROUP BY\n\t\t\titem_code, warehouse\n\t\tHAVING\n\t\t\tbin_count > 1\n\t",
"language": "en",
"n_whitespaces": 8,
"n_words": 16,
"vocab_size": 14
} | def delete_and_patch_duplicate_bins():
duplicate_bins = frappe.db.sql(, as_dict=1)
for duplicate_bin in duplicate_bins:
existing_bins = frappe.get_list("Bin",
filters={
"item_code": duplicate_bin.item_code,
"warehouse": duplicate_bin.warehouse
},
fields=["name"],
order_by="creation",)
... | |
@pytest.mark.asyncio | 28,504 | 127,689 | 269 | dashboard/modules/job/tests/test_job_agent.py | 74 | 40 | async def test_stop_long_running_job(job_sdk_client):
agent_client, head_client = job_sdk_client
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
driver_script =
test_script_file = path / "test_script.py"
with open(test_script_file, "w+") as file:
... | [Job Submission][refactor 4/N] Complete the remaining interfaces on JobAgent (#28533)
Signed-off-by: Catch-Bull <burglarralgrub@gmail.com>
just need to implement stop_job, and I remove get_job_info because we can access JobInfoStorage without call `ray.init`. | test_stop_long_running_job | 8840be1942a69b2595a05c5c5556b0daec7abbcd | ray | test_job_agent.py | 13 | 30 | https://github.com/ray-project/ray.git | 1 | 152 | 1 | 57 | 269 | Python | {
"docstring": "\n Submit a job that runs for a while and stop it in the middle.\n \nprint('Hello !')\nimport time\ntime.sleep(300) # This should never finish\nraise RuntimeError('Intentionally failed.')\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 27,
"vocab_size": 26
} | async def test_stop_long_running_job(job_sdk_client):
agent_client, head_client = job_sdk_client
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
driver_script =
test_script_file = path / "test_script.py"
with open(test_script_file, "w+") as file:
... |
83,776 | 281,459 | 48 | gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py | 20 | 12 | def print_help(self):
source_txt = CRYPTO_SOURCES.get(self.source, "?") if self.source != "" else ""
help_text = f
console.print(text=help_text, menu="Stocks - Due Dil | Terminal Wide Rich (#1161)
* My idea for how we handle Rich moving forward
* remove independent consoles
* FIxed pylint issues
* add a few vars
* Switched print to console
* More transitions
* Changed more prints
* Replaced all prints
* Fixing tabulate
* Finished replace tabulate
* Finish... | print_help | 82747072c511beb1b2672846ae2ee4aec53eb562 | OpenBBTerminal | dd_controller.py | 10 | 41 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 2 | 42 | 0 | 18 | 86 | Python | {
"docstring": "Print help[cmds]\n load load a specific cryptocurrency for analysis\n\n[param]Coin: [/param]{self.current_coin}\n[param]Source: [/param]{source_txt}\n\n[src]Glassnode[/src]\n active active addresses\n nonzero addresses with non-zero balances\n change 30d chang... | def print_help(self):
source_txt = CRYPTO_SOURCES.get(self.source, "?") if self.source != "" else ""
help_text = f
console.print(text=help_text, menu="Stocks - Due Diligence")
| |
18,522 | 89,255 | 225 | tests/sentry/integrations/github/test_client.py | 38 | 20 | def test_get_cached_repo_files_with_all_files(self):
responses.add(
method=responses.GET, | feat(derive-code-mappings): Add caching support for fetching files (#41777)
This improves the readability of the code and separates caching logic to their respective functions.
This allows getting files for a repo with caching support without having to call `get_trees_for_org`.
There will be a follow up PR to improv... | test_get_cached_repo_files_with_all_files | 07558e31bd672fab58cff55cf4e9cf0e02b36654 | sentry | test_client.py | 14 | 17 | https://github.com/getsentry/sentry.git | 1 | 103 | 0 | 32 | 201 | Python | {
"docstring": "Fetch files for repo. All files rather than just source code files",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 10
} | def test_get_cached_repo_files_with_all_files(self):
responses.add(
method=responses.GET,
url=f"https://api.github.com/repos/{self.repo.name}/git/trees/master?recursive=1",
status=200,
json={
"tree": [
{"type": "blob", ... | |
50,810 | 204,604 | 421 | django/core/management/base.py | 79 | 26 | def check_migrations(self):
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
pla... | Refs #33476 -- Reformatted code with Black. | check_migrations | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | base.py | 17 | 25 | https://github.com/django/django.git | 4 | 117 | 0 | 69 | 201 | Python | {
"docstring": "\n Print a warning if the set of migrations on disk don't match the\n migrations in the database.\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 17,
"vocab_size": 14
} | def check_migrations(self):
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
pla... | |
41,910 | 176,449 | 232 | networkx/algorithms/chordal.py | 82 | 24 | def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize):
if not is_chordal(G):
raise nx.NetworkXError("Input graph is not chordal.")
H = nx.Graph(G)
H.add_edge(s, t)
induced_nodes = set()
triplet = _find_chordality_breaker(H, s, treewidth_bound)
while triplet:
(u, v, w... | Minor improvements from general code readthrough (#5414)
* Add deprecated directive to reversed docstring.
* Add missing dep directives to shpfiles.
* Remove defn of INF sentinel.
* typo.
* str -> comment in forloop.
* STY: appropriate casing for var name. | find_induced_nodes | cc1db275efc709cb964ce88abbfa877798d58c10 | networkx | chordal.py | 16 | 21 | https://github.com/networkx/networkx.git | 8 | 149 | 0 | 60 | 233 | Python | {
"docstring": "Returns the set of induced nodes in the path from s to t.\n\n Parameters\n ----------\n G : graph\n A chordal NetworkX graph\n s : node\n Source node to look for induced nodes\n t : node\n Destination node to look for induced nodes\n treewidth_bound: float\n ... | def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize):
if not is_chordal(G):
raise nx.NetworkXError("Input graph is not chordal.")
H = nx.Graph(G)
H.add_edge(s, t)
induced_nodes = set()
triplet = _find_chordality_breaker(H, s, treewidth_bound)
while triplet:
(u, v, w... | |
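A usage sketch for `find_induced_nodes`; a path graph is trivially chordal, and the only induced path between its endpoints passes through every interior node:

```python
import networkx as nx

G = nx.path_graph(10)  # chordal: no cycle of length four or more
induced = nx.find_induced_nodes(G, 1, 9, treewidth_bound=2)
print(sorted(induced))  # expected: [1, 2, 3, 4, 5, 6, 7, 8, 9]
```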
@pytest.fixture | 40,573 | 170,548 | 45 | pandas/conftest.py | 33 | 11 | def any_skipna_inferred_dtype(request):
inferred_dtype, values = request.param
values = np.array(values, dtype=object) # | STYLE fix: pylint "consider-using-from" (#49335)
* use from import
* delete empty file
Co-authored-by: carlotta <c.fabian@turbit.de>
Co-authored-by: cfabian <cfabian@student.42wolfsburg.de> | any_skipna_inferred_dtype | f9ff3796329e4bedb4a5477739f5eb8d2e40761d | pandas | conftest.py | 9 | 4 | https://github.com/pandas-dev/pandas.git | 1 | 29 | 1 | 24 | 60 | Python | {
"docstring": "\n Fixture for all inferred dtypes from _libs.lib.infer_dtype\n\n The covered (inferred) types are:\n * 'string'\n * 'empty'\n * 'bytes'\n * 'mixed'\n * 'mixed-integer'\n * 'mixed-integer-float'\n * 'floating'\n * 'integer'\n * 'decimal'\n * 'boolean'\n * 'dateti... | def any_skipna_inferred_dtype(request):
inferred_dtype, values = request.param
values = np.array(values, dtype=object) # object dtype to avoid casting
# correctness of inference tested in tests/dtypes/test_inference.py
return inferred_dtype, values
# --------------------------------------------... |
1,609 | 9,409 | 66 | reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py | 43 | 16 | def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
r
assert isinstance(factor, int) | initialize ostec | downsample_2d | 7375ee364e0df2a417f92593e09557f1b2a3575a | insightface | upfirdn_2d.py | 11 | 28 | https://github.com/deepinsight/insightface.git | 3 | 87 | 0 | 36 | 153 | Python | {
"docstring": "Downsample a batch of 2D images with the given filter.\n\n Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`\n and downsamples each image with the given filter. The filter is normalized so that\n if the input pixels are constant, they will be scaled by the specified `... | def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
r
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor
return _simple_upfirdn_2d(x, k, down=factor, pad0=(p+1)//2, pad1=p//2, dat... | |
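With the default `k=None` the filter above becomes a normalized box of length `factor`, which reduces to plain mean pooling; below is a dependency-free NumPy sketch of that default case (the record itself relies on StyleGAN2's custom `upfirdn_2d` ops):

```python
import numpy as np

def downsample_2d_box(x: np.ndarray, factor: int = 2) -> np.ndarray:
    """Box-filter downsample of an [N, C, H, W] batch: mean pooling."""
    n, c, h, w = x.shape
    assert h % factor == 0 and w % factor == 0
    x = x.reshape(n, c, h // factor, factor, w // factor, factor)
    return x.mean(axis=(3, 5))  # normalized box filter == block average

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
print(downsample_2d_box(x, 2))  # [[[[ 2.5  4.5] [10.5 12.5]]]]
```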
40,239 | 168,224 | 373 | pandas/core/groupby/grouper.py | 136 | 11 | def _check_deprecated_resample_kwargs(kwargs, origin):
# Deprecation warning of `base` and `loffset` since v1.1.0:
# we are raising the warning here to be able to set the `stacklevel`
# properly since we need to raise the `base` and `loffset` deprecation
# warning from three different cases:
# ... | PERF cache find_stack_level (#48023)
cache stacklevel | _check_deprecated_resample_kwargs | 2f8d0a36703e81e4dca52ca9fe4f58c910c1b304 | pandas | grouper.py | 14 | 22 | https://github.com/pandas-dev/pandas.git | 3 | 83 | 0 | 85 | 176 | Python | {
"docstring": "\n Check for use of deprecated parameters in ``resample`` and related functions.\n\n Raises the appropriate warnings if these parameters are detected.\n Only sets an approximate ``stacklevel`` for the warnings (see #37603, #36629).\n\n Parameters\n ----------\n kwargs : dict\n ... | def _check_deprecated_resample_kwargs(kwargs, origin):
# Deprecation warning of `base` and `loffset` since v1.1.0:
# we are raising the warning here to be able to set the `stacklevel`
# properly since we need to raise the `base` and `loffset` deprecation
# warning from three different cases:
# ... | |
8,400 | 44,887 | 45 | airflow/providers/google/cloud/hooks/datacatalog.py | 13 | 8 | def get_conn(self) -> DataCatalogClient:
| Extract ClientInfo to module level (#21554) | get_conn | 1b568d73e1dfb838a3a0446e3a6063b9f27f04b8 | airflow | datacatalog.py | 13 | 5 | https://github.com/apache/airflow.git | 2 | 36 | 0 | 12 | 60 | Python | {
"docstring": "Retrieves client library object that allow access to Cloud Data Catalog service.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def get_conn(self) -> DataCatalogClient:
if not self._client:
self._client = DataCatalogClient(credentials=self._get_credentials(), client_info=CLIENT_INFO)
return self._client
| |
11,591 | 56,932 | 82 | src/prefect/blocks/kubernetes.py | 13 | 8 | def get_api_client(self) -> ApiClient:
try:
return new_client_from_config_dict(
config_dict=self.config, context=self.context
)
| organizational changes for the KubernetesClusterConfig and add from_environment classmethod | get_api_client | 574d10ff7612661b37801c811862f18998521d58 | prefect | kubernetes.py | 11 | 10 | https://github.com/PrefectHQ/prefect.git | 2 | 29 | 0 | 13 | 49 | Python | {
"docstring": "\n Returns an instance of the kubernetes api client with a specific context\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 12
} | def get_api_client(self) -> ApiClient:
try:
return new_client_from_config_dict(
config_dict=self.config, context=self.context
)
except ConfigException:
raise
| |
43,822 | 182,433 | 258 | src/textual/_arrangement.py | 75 | 25 | def cuts(self) -> list[list[int]]:
if self._cuts is not None:
return self._cuts
width = self.width
height = self.height
screen_region = Region(0, 0, width, height)
cuts_sets = [{0, width} for | ws | cuts | 57a05c7bbd14728f0dbde8b8e55d6f086362c35e | textual | _arrangement.py | 16 | 23 | https://github.com/Textualize/textual.git | 9 | 143 | 0 | 51 | 218 | Python | {
"docstring": "Get vertical cuts.\n\n A cut is every point on a line where a widget starts or ends.\n\n Returns:\n list[list[int]]: A list of cuts for every line.\n ",
"language": "en",
"n_whitespaces": 58,
"n_words": 26,
"vocab_size": 23
} | def cuts(self) -> list[list[int]]:
if self._cuts is not None:
return self._cuts
width = self.width
height = self.height
screen_region = Region(0, 0, width, height)
cuts_sets = [{0, width} for _ in range(height)]
if self.map is not None:
f... | |
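A dependency-free sketch of the "cuts" idea the docstring describes: for each screen line, collect every x offset where some widget region begins or ends, plus the screen edges (the real method walks `self.map`; the region tuples below are hypothetical):

```python
def compute_cuts(regions, width, height):
    """regions: iterable of (x, y, w, h) rectangles in screen coordinates."""
    cuts = [{0, width} for _ in range(height)]  # screen edges are always cuts
    for x, y, w, h in regions:
        for line in range(max(y, 0), min(y + h, height)):
            cuts[line].add(max(x, 0))          # widget starts here
            cuts[line].add(min(x + w, width))  # widget ends here
    return [sorted(line_cuts) for line_cuts in cuts]

print(compute_cuts([(2, 0, 3, 1), (5, 0, 4, 2)], width=10, height=2))
# [[0, 2, 5, 9, 10], [0, 5, 9, 10]]
```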
80,576 | 270,861 | 35 | keras/engine/base_layer_utils.py | 12 | 4 | def is_subclassed(layer):
return (
la | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | is_subclassed | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | base_layer_utils.py | 11 | 5 | https://github.com/keras-team/keras.git | 2 | 32 | 0 | 10 | 58 | Python | {
"docstring": "Returns True if the object is a subclassed layer or subclassed model.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def is_subclassed(layer):
return (
layer.__module__.find("keras.engine") == -1
and layer.__module__.find("keras.layers") == -1
)
| |
48,249 | 196,925 | 62 | sympy/matrices/dense.py | 9 | 6 | def _mat(self):
sympy_deprecation_warning(
,
deprecated_since_version="1.9",
active_deprecations_target="deprecated-private-matrix-attributes"
)
return | Update the deprecation of the _mat and _smat Matrix properties | _mat | 0b4d5fa57d64b1102e51e03ed80013e16053bf96 | sympy | dense.py | 9 | 10 | https://github.com/sympy/sympy.git | 1 | 23 | 0 | 9 | 42 | Python | {
"docstring": "\n The private _mat attribute of Matrix is deprecated. Use the\n .flat() method instead.\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 13,
"vocab_size": 13
} | def _mat(self):
sympy_deprecation_warning(
,
deprecated_since_version="1.9",
active_deprecations_target="deprecated-private-matrix-attributes"
)
return self.flat()
| |
78,274 | 266,037 | 115 | netbox/extras/tests/test_customfields.py | 34 | 22 | def test_missing_required_field(self):
cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True)
cf3.save()
cf3.conte | Closes #10052: The cf attribute now returns deserialized custom field data | test_missing_required_field | ea6d86e6c4bb6037465410db6205a7471bc81a6c | netbox | test_customfields.py | 11 | 10 | https://github.com/netbox-community/netbox.git | 1 | 92 | 0 | 28 | 165 | Python | {
"docstring": "\n Check that a ValidationError is raised if any required custom fields are not present.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 14
} | def test_missing_required_field(self):
cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True)
cf3.save()
cf3.content_types.set([ContentType.objects.get_for_model(Site)])
site = Site(name='Test Site', slug='test-site')
# Set custom field dat... | |
16,736 | 78,230 | 40 | wagtail/admin/tests/test_templatetags.py | 11 | 9 | def test_basic(self):
context = Context({})
template =
expected =
self.assertHTMLEqual(expected, Template( | Introduce new template fragment composition tags | test_basic | 524cab82e33b43463b746c3df1a80657b3ae874a | wagtail | test_templatetags.py | 11 | 15 | https://github.com/wagtail/wagtail.git | 1 | 34 | 0 | 9 | 60 | Python | {
"docstring": "\n {% load wagtailadmin_tags %}\n {% fragment as my_fragment %}\n <p>Hello, World</p>\n {% endfragment %}\n Text coming after:\n {{ my_fragment }}\n \n Text coming after:\n <p>Hello, World</p>\n ",
... | def test_basic(self):
context = Context({})
template =
expected =
self.assertHTMLEqual(expected, Template(template).render(context))
| |
40,094 | 167,732 | 75 | pandas/core/arrays/sparse/accessor.py | 14 | 8 | def to_dense(self) -> Series:
from pandas import Series
return Series(
self._parent.array.to_dense | TYP: more return annotations in core/ (#47618)
* TYP: more return annotations in core/
* from __future__ import annotations
* more __future__ | to_dense | f65417656ba8c59438d832b6e2a431f78d40c21c | pandas | accessor.py | 11 | 32 | https://github.com/pandas-dev/pandas.git | 1 | 42 | 0 | 14 | 67 | Python | {
"docstring": "\n Convert a Series from sparse values to dense.\n\n .. versionadded:: 0.25.0\n\n Returns\n -------\n Series:\n A Series with the same values, stored as a dense array.\n\n Examples\n --------\n >>> series = pd.Series(pd.arrays.SparseAr... | def to_dense(self) -> Series:
from pandas import Series
return Series(
self._parent.array.to_dense(),
index=self._parent.index,
name=self._parent.name,
)
|