| ast_errors stringlengths 0 3.2k | d_id int64 44 121k | id int64 70 338k | n_whitespaces int64 3 14k | path stringlengths 8 134 | n_words int64 4 4.82k | n_identifiers int64 1 131 | random_cut stringlengths 16 15.8k | commit_message stringlengths 2 15.3k | fun_name stringlengths 1 84 | commit_id stringlengths 40 40 | repo stringlengths 3 28 | file_name stringlengths 5 79 | ast_levels int64 6 31 | nloc int64 1 548 | url stringlengths 31 59 | complexity int64 1 66 | token_counts int64 6 2.13k | n_ast_errors int64 0 28 | vocab_size int64 4 1.11k | n_ast_nodes int64 15 19.2k | language stringclasses 1 value | documentation dict | code stringlengths 101 62.2k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
@router.get("/version") | 11,931 | 59,709 | 13 | src/prefect/orion/api/admin.py | 8 | 8 | async def read_settings() -> prefect.settings.Settings:
r | Add secret flag to settings and obfuscate by default when displayed (#7465) | read_settings | 902dfa4bd3b6e330e4374eb1e04de064148a2f32 | prefect | admin.py | 10 | 7 | https://github.com/PrefectHQ/prefect.git | 1 | 23 | 1 | 8 | 56 | Python | {
"docstring": "\n Get the current Orion settings.\n\n Secret setting values will be obfuscated.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 11,
"vocab_size": 11
} | async def read_settings() -> prefect.settings.Settings:
return prefect.settings.get_current_settings().with_obfuscated_secrets()
@router.get("/version") |
36,505 | 156,012 | 59 | dask/array/core.py | 20 | 10 | def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
from dask.array.overlap import map_overlap
return map_overlap(
| absolufy-imports - No relative - PEP8 (#8796)
Conversation in https://github.com/dask/distributed/issues/5889 | map_overlap | cccb9d8d8e33a891396b1275c2448c352ef40c27 | dask | core.py | 8 | 5 | https://github.com/dask/dask.git | 1 | 51 | 0 | 19 | 71 | Python | {
"docstring": "Map a function over blocks of the array with some overlap\n\n We share neighboring zones between blocks of the array, then map a\n function, then trim away the neighboring strips.\n\n Note that this function will attempt to automatically determine the output\n array type be... | def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
from dask.array.overlap import map_overlap
return map_overlap(
func, self, depth=depth, boundary=boundary, trim=trim, **kwargs
)
| |
50,614 | 204,011 | 98 | django/contrib/gis/gdal/raster/source.py | 33 | 7 | def _flush(self):
# Raise an Exception if the value is being changed in read mode.
if not self._write:
raise GDALException(
"Raster needs to be opened in write mode to change values."
)
capi | Refs #33476 -- Reformatted code with Black. | _flush | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | source.py | 10 | 6 | https://github.com/django/django.git | 2 | 25 | 0 | 30 | 47 | Python | {
"docstring": "\n Flush all data from memory into the source file if it exists.\n The data that needs flushing are geotransforms, coordinate systems,\n nodata_values and pixel values. This function will be called\n automatically wherever it is needed.\n ",
"language": "en",
"n_... | def _flush(self):
# Raise an Exception if the value is being changed in read mode.
if not self._write:
raise GDALException(
"Raster needs to be opened in write mode to change values."
)
capi.flush_ds(self._ptr)
| |
55,798 | 219,783 | 185 | python3.10.4/Lib/_pydecimal.py | 82 | 10 | def _round(self, places, rounding):
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it ca | add python 3.10.4 for windows | _round | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _pydecimal.py | 14 | 9 | https://github.com/XX-net/XX-Net.git | 5 | 84 | 0 | 66 | 141 | Python | {
"docstring": "Round a nonzero, nonspecial Decimal to a fixed number of\n significant figures, using the given rounding mode.\n\n Infinities, NaNs and zeros are returned unaltered.\n\n This operation is quiet: it raises no flags, and uses no\n information from the context.\n\n ",
... | def _round(self, places, rounding):
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale ... | |
4,181 | 22,104 | 24 | pipenv/patched/pip/_vendor/requests/models.py | 10 | 5 | def is_redirect(self):
r | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | is_redirect | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | models.py | 8 | 2 | https://github.com/pypa/pipenv.git | 2 | 18 | 0 | 9 | 33 | Python | {
"docstring": "True if this Response is a well-formed HTTP redirect that could have\n been processed automatically (by :meth:`Session.resolve_redirects`).\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 17,
"vocab_size": 17
} | def is_redirect(self):
return "location" in self.headers and self.status_code in REDIRECT_STATI
| |
39,293 | 162,760 | 65 | research/neo_peq/legacy_frequency_response.py | 19 | 13 | def write_eqapo_graphic_eq(self, file_path, normalize=True):
file_path = os.path.abspath(file_path)
s = self.eqapo_graphic_eq(normalize=normalize)
with open(file_path, 'w', encoding='utf-8') as f:
f.write(s)
return s
| Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. Bug fixes. | write_eqapo_graphic_eq | 9120cdffe618c6c2ff16fe6a311b6a1367efdbc8 | AutoEq | legacy_frequency_response.py | 12 | 6 | https://github.com/jaakkopasanen/AutoEq.git | 1 | 54 | 0 | 17 | 92 | Python | {
"docstring": "Writes equalization graph to a file as Equalizer APO config.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def write_eqapo_graphic_eq(self, file_path, normalize=True):
file_path = os.path.abspath(file_path)
s = self.eqapo_graphic_eq(normalize=normalize)
with open(file_path, 'w', encoding='utf-8') as f:
f.write(s)
return s
| |
43,393 | 181,605 | 142 | tests/export_tests.py | 29 | 14 | def test_export_pipeline_6():
pipeline_string = (
'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),'
'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,'
'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier... | Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | test_export_pipeline_6 | 388616b6247ca4ea8de4e2f340d6206aee523541 | tpot | export_tests.py | 10 | 35 | https://github.com/EpistasisLab/tpot.git | 1 | 55 | 0 | 24 | 96 | Python | {
"docstring": "Assert that exported_pipeline() generated a compile source file with random_state and data_file_path.import numpy as np\nimport pandas as pd\nfrom sklearn.feature_selection import SelectPercentile, f_classif\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipel... | def test_export_pipeline_6():
pipeline_string = (
'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),'
'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,'
'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier... | |
42,027 | 176,659 | 669 | networkx/algorithms/connectivity/tests/test_edge_kcomponents.py | 275 | 32 | def _check_edge_connectivity(G):
# Construct the auxiliary graph that can be used to make each k-cc or k-sub
aux_graph = EdgeComponentAuxGraph.construct(G)
# memoize the local connectivity in this graph
memo = {}
for k in it.count(1):
# Test "local" k-edge-components and k-edge-subgra... | doc: fix typos in docstring and comment (#5647) | _check_edge_connectivity | 26b7de005ac562786f72b24a73af5a59bbab6953 | networkx | test_edge_kcomponents.py | 18 | 32 | https://github.com/networkx/networkx.git | 13 | 235 | 0 | 128 | 393 | Python | {
"docstring": "\n Helper - generates all k-edge-components using the aux graph. Checks the\n both local and subgraph edge connectivity of each cc. Also checks that\n alternate methods of computing the k-edge-ccs generate the same result.\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 33,
... | def _check_edge_connectivity(G):
# Construct the auxiliary graph that can be used to make each k-cc or k-sub
aux_graph = EdgeComponentAuxGraph.construct(G)
# memoize the local connectivity in this graph
memo = {}
for k in it.count(1):
# Test "local" k-edge-components and k-edge-subgra... | |
50,000 | 201,816 | 163 | tests/backends/tests.py | 53 | 17 | def test_sequence_name_length_limits_flush(self):
# A full flush is expensive to the full test, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
... | Refs #33476 -- Reformatted code with Black. | test_sequence_name_length_limits_flush | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 10 | 11 | https://github.com/django/django.git | 1 | 60 | 0 | 44 | 98 | Python | {
"docstring": "\n Sequence resetting as part of a flush with model with long name and\n long pk name doesn't error (#8901).\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 16
} | def test_sequence_name_length_limits_flush(self):
# A full flush is expensive to the full test, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
... | |
72,616 | 249,109 | 762 | tests/rest/admin/test_media.py | 159 | 38 | def test_delete_media(self) -> None:
download_resource = self.media_repo.children[b"download"]
upload_resource = self.media_repo.children[b"upload"]
# Upload some media into the room
response = self.helper.upload_media(
upload_resource,
SMALL_PNG,
... | Use literals in place of `HTTPStatus` constants in tests (#13469) | test_delete_media | c97042f7eef3748e17c90e48a4122389a89c4735 | synapse | test_media.py | 11 | 61 | https://github.com/matrix-org/synapse.git | 1 | 297 | 0 | 91 | 464 | Python | {
"docstring": "\n Tests that delete a media is successfully\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def test_delete_media(self) -> None:
download_resource = self.media_repo.children[b"download"]
upload_resource = self.media_repo.children[b"upload"]
# Upload some media into the room
response = self.helper.upload_media(
upload_resource,
SMALL_PNG,
... | |
1,233 | 7,623 | 114 | ludwig/visualize.py | 40 | 20 | def load_data_for_viz(load_type, model_file_statistics, **kwargs):
supported_load_types = dict(
load_json=load_json,
load_from_file=partial(
load_from_file, dtype=kwargs.get("dtype", int), ground_truth_split=kwar | Encoder refactor V2 (#2370)
* Added base files and some initial code
* More files created, fleshing out binary feature and corresponding encoders
* Added more schema infra
* Registered all feature encoders
* Separated feature utils infra
* Added all preprocessing classes
* Filled out rest of schema c... | load_data_for_viz | 03b4ab273abd7e22a56bb550b56f3d667200abf9 | ludwig | visualize.py | 15 | 14 | https://github.com/ludwig-ai/ludwig.git | 3 | 86 | 0 | 37 | 139 | Python | {
"docstring": "Load model file data in to list of .\n\n :param load_type: type of the data loader to be used.\n :param model_file_statistics: JSON file or list of json files containing any\n model experiment stats.\n :return List of training statistics loaded as json objects.\n ",
"language":... | def load_data_for_viz(load_type, model_file_statistics, **kwargs):
supported_load_types = dict(
load_json=load_json,
load_from_file=partial(
load_from_file, dtype=kwargs.get("dtype", int), ground_truth_split=kwargs.get("ground_truth_split", 2)
),
)
loader = supported... | |
1,663 | 9,733 | 134 | gensim/models/doc2vec.py | 52 | 15 | def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=100000, trim_rule=None):
logger.info("collecting all words and their counts")
if corpus_file | re #2809: update the doc2vec notebook | scan_vocab | 490676cc34d909b8a361fa1ae1e835263a13673b | gensim | doc2vec.py | 10 | 10 | https://github.com/RaRe-Technologies/gensim.git | 2 | 83 | 0 | 43 | 127 | Python | {
"docstring": "Create the model's vocabulary: a mapping from unique words in the corpus to their frequency count.\n\n Parameters\n ----------\n documents : iterable of :class:`~gensim.models.doc2vec.TaggedDocument`, optional\n The tagged documents used to create the vocabulary. Their ... | def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=100000, trim_rule=None):
logger.info("collecting all words and their counts")
if corpus_file is not None:
corpus_iterable = TaggedLineDocument(corpus_file)
total_words, corpus_count = self._scan_vocab... | |
56,800 | 222,893 | 1,131 | python3.10.4/Lib/distutils/dist.py | 337 | 50 | def _parse_command_opts(self, parser, args):
# late import because of mutual dependence between these modules
from distutils.cmd import Command
# Pull the current command from the head of the command line
command = args[0]
if not command_re.match(command):
r... | add python 3.10.4 for windows | _parse_command_opts | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | dist.py | 19 | 54 | https://github.com/XX-net/XX-Net.git | 18 | 357 | 0 | 203 | 597 | Python | {
"docstring": "Parse the command-line options for a single command.\n 'parser' must be a FancyGetopt instance; 'args' must be the list\n of arguments, starting with the current command (whose options\n we are about to parse). Returns a new version of 'args' with\n the next command at the... | def _parse_command_opts(self, parser, args):
# late import because of mutual dependence between these modules
from distutils.cmd import Command
# Pull the current command from the head of the command line
command = args[0]
if not command_re.match(command):
r... | |
69,784 | 242,085 | 658 | scipy/stats/_distn_infrastructure.py | 213 | 28 | def interval(self, confidence=None, *args, **kwds):
# This function was originally written with parameter `alpha`, but
# `alpha` is also the name of a shape parameter of two distributions.
# This block allows the functi | MAINT: stats: update deprecation warning version information | interval | 547d1bb522562a1ba38961d13932fffc2bb92edf | scipy | _distn_infrastructure.py | 14 | 30 | https://github.com/scipy/scipy.git | 10 | 219 | 0 | 134 | 366 | Python | {
"docstring": "Confidence interval with equal areas around the median.\n\n .. deprecated:: 1.9.0\n Parameter `alpha` is replaced by parameter `confidence` to avoid\n name collisions with the shape parameter `alpha` of some\n distributions. Parameter `alpha` will be removed in the... | def interval(self, confidence=None, *args, **kwds):
# This function was originally written with parameter `alpha`, but
# `alpha` is also the name of a shape parameter of two distributions.
# This block allows the function to accept both `alpha` and its
# replacement `confidence`... | |
21,158 | 101,754 | 336 | tools/alignments/jobs_faces.py | 80 | 25 | def __call__(self) -> bool:
for meta in tqdm(self._face_alignments,
desc="Updating Alignments File from PNG Header",
leave=False):
src = meta["source"]
alignment = meta["alignments"]
if not any(alignment.get(key, {}) ... | Alignments Tool updates
- Copy info back to alignments file from faces | __call__ | c79175cbde5600bebd65785f3821fc74b3a80cbe | faceswap | jobs_faces.py | 13 | 27 | https://github.com/deepfakes/faceswap.git | 6 | 138 | 0 | 62 | 231 | Python | {
"docstring": " Parse through the face data updating any entries in the alignments file.\n\n Returns\n -------\n bool\n ``True`` if any alignment information was updated otherwise ``False``\n ",
"language": "en",
"n_whitespaces": 64,
"n_words": 24,
"vocab_size": 22
} | def __call__(self) -> bool:
for meta in tqdm(self._face_alignments,
desc="Updating Alignments File from PNG Header",
leave=False):
src = meta["source"]
alignment = meta["alignments"]
if not any(alignment.get(key, {}) ... | |
116,979 | 319,622 | 116 | src/documents/tests/test_file_handling.py | 22 | 19 | def test_dynamic_path(self):
doc = Document.objects.create(
title="does not matter",
created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)),
| Feature: Dynamic document storage pathes (#916)
* Added devcontainer
* Add feature storage pathes
* Exclude tests and add versioning
* Check escaping
* Check escaping
* Check quoting
* Echo
* Escape
* Escape :
* Double escape \
* Escaping
* Remove if
* Escape colon
* Missing \
... | test_dynamic_path | 69ef26dab04d51e7e102dcb33cd98ddc6ad975fd | paperless-ngx | test_file_handling.py | 13 | 10 | https://github.com/paperless-ngx/paperless-ngx.git | 1 | 81 | 0 | 22 | 127 | Python | {
"docstring": "\n GIVEN:\n - A document with a defined storage path\n WHEN:\n - the filename is generated for the document\n THEN:\n - the generated filename uses the defined storage path for the document\n ",
"language": "en",
"n_whitespaces": 93,
"... | def test_dynamic_path(self):
doc = Document.objects.create(
title="does not matter",
created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)),
mime_type="application/pdf",
pk=2,
checksum="2",
storage_path=Storage... | |
34,736 | 150,412 | 71 | freqtrade/rpc/replicate/__init__.py | 17 | 8 | async def follower_loop(self):
try:
await self._connect_to_leaders()
except Exception as e:
logger.error("Exception occurred in follower loop: ")
logger.exception(e)
| initial concept for replicate, basic leader and follower logic | follower_loop | 9f6bba40af1a407f190a89f5c0c8b4e3f528ba46 | freqtrade | __init__.py | 11 | 6 | https://github.com/freqtrade/freqtrade.git | 2 | 31 | 0 | 17 | 60 | Python | {
"docstring": "\n Main follower coroutine\n\n This starts all of the leader connection coros\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 11,
"vocab_size": 11
} | async def follower_loop(self):
try:
await self._connect_to_leaders()
except Exception as e:
logger.error("Exception occurred in follower loop: ")
logger.exception(e)
| |
21,711 | 103,727 | 52 | kitty_tests/check_build.py | 18 | 11 | def test_launcher_ensures_stdio(self):
from kitty.constants import kitty_exe
import subprocess
exe = kitty_exe()
cp = subprocess.run([exe, '+runpy', ])
self.assertEqual(cp.returncode, 0)
| Fix regression in 0.26.0 that caused launching kitty without working STDIO handles to result in high CPU usage and prewarming failing
Fixes #5444 | test_launcher_ensures_stdio | 6604e0d015fbd7a3e5602a6f3831d786b4ed659d | kitty | check_build.py | 10 | 15 | https://github.com/kovidgoyal/kitty.git | 1 | 42 | 0 | 16 | 71 | Python | {
"docstring": "\\\nimport os, sys\nif sys.stdin:\n os.close(sys.stdin.fileno())\nif sys.stdout:\n os.close(sys.stdout.fileno())\nif sys.stderr:\n os.close(sys.stderr.fileno())\nos.execlp('kitty', 'kitty', '+runpy', 'import sys; raise SystemExit(1 if sys.stdout is None or sys.stdin is None or sys.stderr is N... | def test_launcher_ensures_stdio(self):
from kitty.constants import kitty_exe
import subprocess
exe = kitty_exe()
cp = subprocess.run([exe, '+runpy', ])
self.assertEqual(cp.returncode, 0)
| |
78,160 | 265,647 | 106 | netbox/dcim/tests/test_forms.py | 20 | 13 | def test_interface_label_count_mismatch(self):
| Fixes #10247: Allow changing selected device/VM when creating a new component (#10312)
* Initial work on #10247
* Continued work on #10247
* Clean up component creation tests
* Move valdiation of replicated field to form
* Clean up ordering of fields in component creation forms
* Omit fieldset header if... | test_interface_label_count_mismatch | c4b7ab067a914349abd88398dd9bfef9f6c2f806 | netbox | test_forms.py | 10 | 10 | https://github.com/netbox-community/netbox.git | 1 | 58 | 0 | 19 | 105 | Python | {
"docstring": "\n Check that attempting to generate a differing number of names and labels results in a validation error.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 17,
"vocab_size": 16
} | def test_interface_label_count_mismatch(self):
bad_interface_data = {
'device': self.device.pk,
'name': 'eth[0-9]',
'label': 'Interface[0-1]',
'type': InterfaceTypeChoices.TYPE_1GE_GBIC,
}
form = InterfaceCreateForm(bad_interface_data)
... | |
75,716 | 259,334 | 22 | sklearn/preprocessing/_data.py | 13 | 8 | def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True):
pt = PowerTransformer(method=method, stand | DOC Ensures that preprocessing._data.power_transform passes numpydoc validation (#22802)
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> | power_transform | 5cccdef4378fcdb863467414ee638c6f5e51a19a | scikit-learn | _data.py | 9 | 3 | https://github.com/scikit-learn/scikit-learn.git | 1 | 43 | 0 | 13 | 67 | Python | {
"docstring": "Parametric, monotonic transformation to make data more Gaussian-like.\n\n Power transforms are a family of parametric, monotonic transformations\n that are applied to make data more Gaussian-like. This is useful for\n modeling issues related to heteroscedasticity (non-constant variance),\n ... | def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True):
pt = PowerTransformer(method=method, standardize=standardize, copy=copy)
return pt.fit_transform(X)
| |
41,742 | 176,172 | 29 | networkx/generators/small.py | 17 | 5 | def desargues_graph(create_using=None):
G = LCF_graph(20, [5, -5, 9, -9], 5, create_using)
G.name = "Desargues Graph"
return G
| Docstrings for the small.py module (#5240)
* added description for the first 5 small graphs
* modified descriptions based on comment and added description for two more functions
* added doctrings to all the functions
* Minor touchups.
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> | desargues_graph | dec723f072eb997a497a159dbe8674cd39999ee9 | networkx | small.py | 10 | 4 | https://github.com/networkx/networkx.git | 1 | 37 | 0 | 15 | 58 | Python | {
"docstring": "\n Returns the Desargues Graph\n\n The Desargues Graph is a non-planar, distance-transitive cubic graph\n with 20 nodes and 30 edges [1]_.\n It is a symmetric graph. It can be represented in LCF notation\n as [5,-5,9,-9]^5 [2]_.\n\n Parameters\n ----------\n create_using : Netw... | def desargues_graph(create_using=None):
G = LCF_graph(20, [5, -5, 9, -9], 5, create_using)
G.name = "Desargues Graph"
return G
| |
1,175 | 7,282 | 180 | ludwig/schema/features/utils.py | 36 | 7 | def get_output_feature_jsonschema():
output_feature_types = sorted(list(output_type_registry.keys()))
return {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {"type": "string"},
"type": {"type": "string", "enum":... | Input/Output Feature Schema Refactor (#2147)
* Added base files and some initial code
* More files created, fleshing out binary feature and corresponding encoders
* Added more schema infra
* Registered all feature encoders
* Separated feature utils infra
* Added all preprocessing classes
* Filled out... | get_output_feature_jsonschema | 6909ae16047d422b94ed4cbd1a753e6b34540ff9 | ludwig | utils.py | 14 | 16 | https://github.com/ludwig-ai/ludwig.git | 1 | 85 | 0 | 28 | 167 | Python | {
"docstring": "This function returns a JSON schema structured to only requires a `type` key and then conditionally applies\n a corresponding output feature's field constraints.\n\n Returns: JSON Schema\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 26,
"vocab_size": 23
} | def get_output_feature_jsonschema():
output_feature_types = sorted(list(output_type_registry.keys()))
return {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {"type": "string"},
"type": {"type": "string", "enum":... | |
43,114 | 180,242 | 82 | demo/blocks_component_shortcut/run.py | 29 | 14 | def greet(str):
return str
with gr.Blocks() as demo:
with gr.Row():
text1 = gr.component("textarea")
text2 = gr.TextArea()
text3 = gr.templates.TextArea()
text1.change(greet, text1, text2)
text2.change(greet, text2, text3)
text3.change(greet, text3, text1)
... | update-shortcut-syntax (#1234)
* update-shortcut-syntax
- fix&update gr.component
- create a demo introducing shortcuts within Blocks
* update-shortcut-syntax
- tweaks
* update-shortcut-syntax
- tweaks
* update-shortcut-syntax
- fix formatting
* update-shortcut-syntax
- tweaks
- fix tests
... | greet | 2de9ee8bfb43dc1f6d71e16ed1fe18ea164edd4c | gradio | run.py | 11 | 2 | https://github.com/gradio-app/gradio.git | 1 | 7 | 0 | 26 | 141 | Python | {
"docstring": "\n You can make use of str shortcuts you use in Interface within Blocks as well.\n \n Interface shortcut example:\n Interface(greet, \"textarea\", \"textarea\")\n \n You can use \n 1. gr.component()\n 2. gr.templates.Template()\n 3. gr.Template()\n All the templates are l... | def greet(str):
return str
with gr.Blocks() as demo:
with gr.Row():
text1 = gr.component("textarea")
text2 = gr.TextArea()
text3 = gr.templates.TextArea()
text1.change(greet, text1, text2)
text2.change(greet, text2, text3)
text3.change(greet, text3, text1)
... | |
43,601 | 181,817 | 84 | tpot/base.py | 23 | 10 | def _impute_values(self, features):
if self.verbosity > 1:
| Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | _impute_values | 388616b6247ca4ea8de4e2f340d6206aee523541 | tpot | base.py | 12 | 7 | https://github.com/EpistasisLab/tpot.git | 3 | 53 | 0 | 21 | 91 | Python | {
"docstring": "Impute missing values in a feature set.\n\n Parameters\n ----------\n features: array-like {n_samples, n_features}\n A feature matrix\n\n Returns\n -------\n array-like {n_samples, n_features}\n ",
"language": "en",
"n_whitespaces": 81,
... | def _impute_values(self, features):
if self.verbosity > 1:
print("Imputing missing values in feature set")
if self._fitted_imputer is None:
self._fitted_imputer = SimpleImputer(strategy="median")
self._fitted_imputer.fit(features)
return self._fitte... | |
50,700 | 204,336 | 70 | django/contrib/sites/shortcuts.py | 34 | 9 | def get_current_site(request):
# Import is inside the function because its point is to avoid importing the
# Site models when django.contrib.sites isn't installed.
if apps.is_installed("django.contrib.sites"):
from | Refs #33476 -- Reformatted code with Black. | get_current_site | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | shortcuts.py | 10 | 6 | https://github.com/django/django.git | 2 | 35 | 0 | 29 | 66 | Python | {
"docstring": "\n Check if contrib.sites is installed and return either the current\n ``Site`` object or a ``RequestSite`` object based on the request.\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 20,
"vocab_size": 18
} | def get_current_site(request):
# Import is inside the function because its point is to avoid importing the
# Site models when django.contrib.sites isn't installed.
if apps.is_installed("django.contrib.sites"):
from .models import Site
return Site.objects.get_current(request)
else:
... | |
77,782 | 264,670 | 235 | netbox/extras/scripts.py | 103 | 28 | def get_scripts(use_names=False):
scripts = OrderedDict()
# Iterate through all modules within the scripts path. These are the user-created files in which reports are
# defined.
for importer, module_name, _ in pkgutil.iter_modules([settings.SCRIPTS_ROOT]):
# Remove cached module to ensu | Save old JobResults | get_scripts | f13a00b2dd33bffc3048c861b494096df457f212 | netbox | scripts.py | 12 | 17 | https://github.com/netbox-community/netbox.git | 11 | 156 | 0 | 66 | 247 | Python | {
"docstring": "\n Return a dict of dicts mapping all scripts to their modules. Set use_names to True to use each module's human-\n defined name in place of the actual module name.\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 29,
"vocab_size": 26
} | def get_scripts(use_names=False):
scripts = OrderedDict()
# Iterate through all modules within the scripts path. These are the user-created files in which reports are
# defined.
for importer, module_name, _ in pkgutil.iter_modules([settings.SCRIPTS_ROOT]):
# Remove cached module to ensure c... | |
78,268 | 266,015 | 74 | netbox/extras/plugins/templates.py | 24 | 9 | def render(self, template_name, extra_context=None):
if extra_context is None:
extra_context = {}
elif not isinstance(extra_context, dict):
raise TypeError("extra_context must be a dictionary")
return get_template(template_name).render({**self.context, * | Reorganize plugin resources | render | e7f54c5867cf49126bbf95e28633e4283c2bbcb2 | netbox | templates.py | 11 | 6 | https://github.com/netbox-community/netbox.git | 3 | 53 | 0 | 23 | 87 | Python | {
"docstring": "\n Convenience method for rendering the specified Django template using the default context data. An additional\n context dictionary may be passed as `extra_context`.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 22,
"vocab_size": 20
} | def render(self, template_name, extra_context=None):
if extra_context is None:
extra_context = {}
elif not isinstance(extra_context, dict):
raise TypeError("extra_context must be a dictionary")
return get_template(template_name).render({**self.context, **extra_c... | |
@keras_export("keras.applications.mobilenet_v3.decode_predictions") | 82,630 | 278,617 | 11 | keras/applications/mobilenet_v3.py | 6 | 4 | def preprocess_input(x, data_format=None):
return x
@keras_export("keras.applications. | Remove pylint comments.
PiperOrigin-RevId: 452353044 | preprocess_input | 3613c3defc39c236fb1592c4f7ba1a9cc887343a | keras | mobilenet_v3.py | 7 | 2 | https://github.com/keras-team/keras.git | 1 | 12 | 1 | 6 | 32 | Python | {
"docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the mobilenet_v3 model\n implementation. Users are no longer required to call this method to\n normalize the input data. This method does nothing and only kept as a\n placeholder to align the ... | def preprocess_input(x, data_format=None):
return x
@keras_export("keras.applications.mobilenet_v3.decode_predictions") |
@register.simple_tag | 16,502 | 76,338 | 10 | wagtail/admin/templatetags/wagtailadmin_tags.py | 5 | 7 | def message_level_tag(message):
return MESSAGE_TAGS.get(message.level)
| Prevent custom MESSAGE_TAGS settings from leaking into admin styles
Fixes a test failure against Django main.
In #2552, a fix was applied to ensure that the project-level MESSAGE_TAGS setting was ignored, allowing end-users to customise that setting for their own projects without it leaking into Wagtail admin styles.... | message_level_tag | 1838fbfb1a720e0a286c989dbdea03dfde6af4a5 | wagtail | wagtailadmin_tags.py | 8 | 2 | https://github.com/wagtail/wagtail.git | 1 | 15 | 1 | 5 | 34 | Python | {
"docstring": "\n Return the tag for this message's level as defined in\n django.contrib.messages.constants.DEFAULT_TAGS, ignoring the project-level\n MESSAGE_TAGS setting (which end-users might customise).\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 20,
"vocab_size": 19
} | def message_level_tag(message):
return MESSAGE_TAGS.get(message.level)
@register.simple_tag |
75,848 | 259,648 | 422 | sklearn/metrics/_regression.py | 141 | 19 | def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"):
check_cons | ENH add D2 pinbal score and D2 absolute error score (#22118) | _check_reg_targets | aeeac1c1d634dc80abc93fb30b3fe48e1d709b64 | scikit-learn | _regression.py | 17 | 35 | https://github.com/scikit-learn/scikit-learn.git | 10 | 234 | 0 | 93 | 371 | Python | {
"docstring": "Check that y_true and y_pred belong to the same regression task.\n\n Parameters\n ----------\n y_true : array-like\n\n y_pred : array-like\n\n multioutput : array-like or string in ['raw_values', uniform_average',\n 'variance_weighted'] or None\n None is accepted due to ba... | def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"):
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if ... | |
31,945 | 140,391 | 417 | python/ray/serve/deployment.py | 73 | 18 | def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]:
| [Deployment Graph] Simplify our use of DeploymentSchema (#25202) | bind | 820cf4fdcae6b274588e23b312d5255d1b418e10 | ray | deployment.py | 14 | 33 | https://github.com/ray-project/ray.git | 4 | 128 | 0 | 46 | 183 | Python | {
"docstring": "Bind the provided arguments and return a class or function node.\n\n The returned bound deployment can be deployed or bound to other\n deployments to create a deployment graph.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 28,
"vocab_size": 23
} | def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]:
copied_self = copy(self)
copied_self._func_or_class = "dummpy.module"
schema_shell = deployment_to_schema(copied_self)
if inspect.isfunction(self._func_or_class):
return FunctionNode(
... | |
20,605 | 101,184 | 150 | tools/manual/faceviewer/viewport.py | 24 | 15 | def _obtain_mask(cls, detected_face, mask_type):
mas | lib.detected_face.Mask
- Add source + target offset and coverage to set_sub_crop method | _obtain_mask | 32950897376b48e0f08b46385602e4df902cf49e | faceswap | viewport.py | 12 | 10 | https://github.com/deepfakes/faceswap.git | 3 | 77 | 0 | 21 | 126 | Python | {
"docstring": " Obtain the mask for the correct \"face\" centering that is used in the thumbnail display.\n\n Parameters\n -----------\n detected_face: :class:`lib.align.DetectedFace`\n The Detected Face object to obtain the mask for\n mask_type: str\n The type of ma... | def _obtain_mask(cls, detected_face, mask_type):
mask = detected_face.mask.get(mask_type)
if not mask:
return None
if mask.stored_centering != "face":
face = AlignedFace(detected_face.landmarks_xy)
mask.set_sub_crop(face.pose.offset[mask.stored_center... | |
118,284 | 322,908 | 337 | examples/model_interpretation/task/senti/rnn/model.py | 104 | 33 | def forward(self, input, mask=None):
forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2)
# elementwise-sum forward_x and backward_x
# Shape: (batch_size, max_seq_len, hidden_size)
h = paddle.add_n([forward_input, backward_input])
# Shape: (batch_size, h... | Add NLP model interpretation (#1752)
* upload NLP interpretation
* fix problems and relocate project
* remove abandoned picture
* remove abandoned picture
* fix dead link in README
* fix dead link in README
* fix code style problems
* fix CR round 1
* remove .gitkeep files
* fix code style
... | forward | 93cae49c0c572b5c1ac972759140fbe924b0374d | PaddleNLP | model.py | 14 | 18 | https://github.com/PaddlePaddle/PaddleNLP.git | 2 | 211 | 0 | 70 | 329 | Python | {
"docstring": "\n Args:\n input (paddle.Tensor) of shape (batch, seq_len, input_size): Tensor containing the features of the input sequence.\n mask (paddle.Tensor) of shape (batch, seq_len) :\n Tensor is a bool tensor, whose each element identifies whether the input word i... | def forward(self, input, mask=None):
forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2)
# elementwise-sum forward_x and backward_x
# Shape: (batch_size, max_seq_len, hidden_size)
h = paddle.add_n([forward_input, backward_input])
# Shape: (batch_size, h... | |
70,242 | 244,108 | 133 | mmdet/models/dense_heads/maskformer_head.py | 33 | 15 | def simple_test(self, feats, img_metas, **kwargs):
all_cls_scores, all_mask_preds = self(feats, img_metas)
mask_cls_results = all_cls_scores[-1]
mask_pred_results = all_mask_preds[-1]
# upsample masks
img_shape = img_metas[0]['batch_input_shape']
mask_pred_resul... | [Enhance] MaskFormer refactor (#7471)
* maskformer refactor
update docstring
update docstring
update unit test
update unit test
update unit test
* remove redundant code
* update unit test | simple_test | 4bb184bae070f37febb10f82bee3a217dc1ad7c5 | mmdetection | maskformer_head.py | 11 | 11 | https://github.com/open-mmlab/mmdetection.git | 1 | 80 | 0 | 27 | 125 | Python | {
"docstring": "Test without augmentaton.\n\n Args:\n feats (list[Tensor]): Multi-level features from the\n upstream network, each is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple: A tuple contains two tensors.\n\n ... | def simple_test(self, feats, img_metas, **kwargs):
all_cls_scores, all_mask_preds = self(feats, img_metas)
mask_cls_results = all_cls_scores[-1]
mask_pred_results = all_mask_preds[-1]
# upsample masks
img_shape = img_metas[0]['batch_input_shape']
mask_pred_resul... | |
52,490 | 208,731 | 308 | IPython/core/ultratb.py | 76 | 25 | def _format_list(self, extracted_list):
Colors = self.Colors
list = []
for ind, (filename, lineno, name, line) in enumerate(extracted_list):
normalCol, nameCol, fileCol, lineCol = (
# Emphasize the last entry
(Colors.normalEm, Colors.nameEm, ... | Restore lineno's for Input mapped files (#13560)
* Implement lineno's for Input mapped files
* Adopt In [123], line 123 format
* Revert "Set co_name for cells run line by line. Fixes https://github.com/ipython/ipykernel/issues/841"
(This reverts commit d11e987f174a15f1640f8006c86f58d884c3faa4.)
* Omit mention of... | _format_list | a72418e2dcdfc3c91f70d724d16d2691a41c9c24 | ipython | ultratb.py | 14 | 19 | https://github.com/ipython/ipython.git | 5 | 134 | 0 | 61 | 248 | Python | {
"docstring": "Format a list of traceback entry tuples for printing.\n\n Given a list of tuples as returned by extract_tb() or\n extract_stack(), return a list of strings ready for printing.\n Each string in the resulting list corresponds to the item with the\n same index in the argument ... | def _format_list(self, extracted_list):
Colors = self.Colors
list = []
for ind, (filename, lineno, name, line) in enumerate(extracted_list):
normalCol, nameCol, fileCol, lineCol = (
# Emphasize the last entry
(Colors.normalEm, Colors.nameEm, ... | |
47,530 | 196,030 | 351 | sympy/calculus/euler.py | 146 | 37 | def euler_equations(L, funcs=(), vars=()):
r
funcs = tuple(funcs) if iterable(funcs) else (funcs,)
if not funcs:
funcs = tuple(L.atoms(Function))
else:
for f in funcs:
if not isinstance(f, Function):
raise TypeError('Function expected, got: %s' % f)
var... | Updated import locations | euler_equations | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | euler.py | 19 | 86 | https://github.com/sympy/sympy.git | 18 | 281 | 0 | 84 | 436 | Python | {
"docstring": "\n Find the Euler-Lagrange equations [1]_ for a given Lagrangian.\n\n Parameters\n ==========\n\n L : Expr\n The Lagrangian that should be a function of the functions listed\n in the second argument and their derivatives.\n\n For example, in the case of two functions `... | def euler_equations(L, funcs=(), vars=()):
r
funcs = tuple(funcs) if iterable(funcs) else (funcs,)
if not funcs:
funcs = tuple(L.atoms(Function))
else:
for f in funcs:
if not isinstance(f, Function):
raise TypeError('Function expected, got: %s' % f)
var... | |
17,162 | 81,161 | 207 | awx/main/tasks/callback.py | 65 | 11 | def delay_update(self, skip_if_already_set=False, **kwargs):
for key, value in kwargs.items():
if key in self.extra_update_fields and skip_if_already_set:
continue
elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'):
... | Delay update of artifacts and error fields until final job save (#11832)
* Delay update of artifacts until final job save
Save tracebacks from receptor module to callback object
Move receptor traceback check up to be more logical
Use new mock_me fixture to avoid DB call with me method
Update the special ru... | delay_update | 452744b67e02823879e722fe574984a2d760ed60 | awx | callback.py | 18 | 10 | https://github.com/ansible/awx.git | 7 | 105 | 0 | 50 | 174 | Python | {
"docstring": "Stash fields that should be saved along with the job status change",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def delay_update(self, skip_if_already_set=False, **kwargs):
for key, value in kwargs.items():
if key in self.extra_update_fields and skip_if_already_set:
continue
elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'):
... | |
12,428 | 61,179 | 164 | .venv/lib/python3.8/site-packages/pip/_internal/utils/filesystem.py | 68 | 26 | def adjacent_tmp_file(path, **kwargs):
# type: (str, **Any) -> Iterator[BinaryIO]
with NamedTemporaryFile(
delete=False,
dir=os.path.dirname(path),
prefix=os.path.basename(path),
suffix=".tmp",
**kwargs,
) as f:
result = cast(BinaryIO, f)
try:
... | upd; format | adjacent_tmp_file | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | filesystem.py | 14 | 14 | https://github.com/jindongwang/transferlearning.git | 2 | 78 | 0 | 60 | 180 | Python | {
"docstring": "Return a file-like object pointing to a tmp file next to path.\n\n The file is created securely and is ensured to be written to disk\n after the context reaches its end.\n\n kwargs will be passed to tempfile.NamedTemporaryFile to control\n the way the temporary file will be opened.\n ",... | def adjacent_tmp_file(path, **kwargs):
# type: (str, **Any) -> Iterator[BinaryIO]
with NamedTemporaryFile(
delete=False,
dir=os.path.dirname(path),
prefix=os.path.basename(path),
suffix=".tmp",
**kwargs,
) as f:
result = cast(BinaryIO, f)
try:
... | |
3,195 | 20,046 | 176 | pipenv/patched/notpip/_vendor/distro.py | 45 | 17 | def _lsb_release_info(self):
# type: () -> Dict[str, str]
if not self.include_lsb:
return {}
with open(os.devnull, "wb") as devnull:
try:
cmd = ("lsb_release", "-a")
stdout = subprocess.check_output(cmd, stderr=devnull)
... | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | _lsb_release_info | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | distro.py | 13 | 11 | https://github.com/pypa/pipenv.git | 3 | 79 | 0 | 38 | 138 | Python | {
"docstring": "\n Get the information items from the lsb_release command output.\n\n Returns:\n A dictionary containing all information items.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 16,
"vocab_size": 14
} | def _lsb_release_info(self):
# type: () -> Dict[str, str]
if not self.include_lsb:
return {}
with open(os.devnull, "wb") as devnull:
try:
cmd = ("lsb_release", "-a")
stdout = subprocess.check_output(cmd, stderr=devnull)
... | |
38,299 | 159,507 | 107 | rasa/engine/graph.py | 35 | 16 | def as_dict(self) -> Dict[Text, Any]:
serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {"nodes": {}}
for node | fix type annotation in rasa.engine | as_dict | 9fc462da870f69f9976be3bc081675844b9f64c2 | rasa | graph.py | 12 | 12 | https://github.com/RasaHQ/rasa.git | 2 | 72 | 0 | 28 | 137 | Python | {
"docstring": "Returns graph schema in a serializable format.\n\n Returns:\n The graph schema in a format which can be dumped as JSON or other formats.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 23,
"vocab_size": 19
} | def as_dict(self) -> Dict[Text, Any]:
serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {"nodes": {}}
for node_name, node in self.nodes.items():
serializable = dataclasses.asdict(node)
# Classes are not JSON serializable (surprise)
serializable["uses"... | |
51,065 | 205,284 | 240 | django/db/migrations/executor.py | 52 | 20 | def _create_project_state(self, with_applied_migrations=False):
state = ProjectState(real_apps=self.loader.unmigrated_apps)
if with_applied_migrations:
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(
se... | Refs #33476 -- Reformatted code with Black. | _create_project_state | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | executor.py | 14 | 15 | https://github.com/django/django.git | 6 | 101 | 0 | 42 | 157 | Python | {
"docstring": "\n Create a project state including all the applications without\n migrations and applied migrations if with_applied_migrations=True.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 14
} | def _create_project_state(self, with_applied_migrations=False):
state = ProjectState(real_apps=self.loader.unmigrated_apps)
if with_applied_migrations:
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(
se... | |
@control_command(
variadic='headers',
signature='[key1=value1 [key2=value2 [... [keyN=valueN]]]]',
) | 52,249 | 208,224 | 90 | celery/worker/control.py | 58 | 14 | def revoke(state, task_id, terminate=False, signal=None, **kwargs):
# pylint: disable=redefined-outer-name
# XXX Note that this redefines `terminate`:
# Outside of this scope that is a function.
# supports list argument since 3.1
task_ids, task_ | New control command `revoke_by_stamped_headers` (#7838)
* Added pytest-order==1.0.1
* Added a new control command `revoke_by_stamped_headers` to revoke tasks by their
stamped header instead of task id (terminate only works on running tasks in memory) | revoke | 5092598fb88c1f18e3fe709861cdb31df90a7264 | celery | control.py | 12 | 4 | https://github.com/celery/celery.git | 2 | 56 | 1 | 51 | 115 | Python | {
"docstring": "Revoke task by task id (or list of ids).\n\n Keyword Arguments:\n terminate (bool): Also terminate the process if the task is active.\n signal (str): Name of signal to use for terminate (e.g., ``KILL``).\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 33,
"vocab_size... | def revoke(state, task_id, terminate=False, signal=None, **kwargs):
# pylint: disable=redefined-outer-name
# XXX Note that this redefines `terminate`:
# Outside of this scope that is a function.
# supports list argument since 3.1
task_ids, task_id = set(maybe_list(task_id) or []), None
... |
76,660 | 261,119 | 100 | sklearn/utils/sparsefuncs.py | 47 | 12 | def inplace_swap_row_csc(X, m, n):
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError("m and n should be valid integers")
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
m_mask = X.indices == m
X.indices[X.i | DOC Ensures that inplace_swap_row_csc passes numpydoc validation (#24513) | inplace_swap_row_csc | affb0cb49412eb5992d2fad0d765b50a2db1344c | scikit-learn | sparsefuncs.py | 12 | 11 | https://github.com/scikit-learn/scikit-learn.git | 5 | 87 | 0 | 32 | 135 | Python | {
"docstring": "Swap two rows of a CSC matrix in-place.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n Matrix whose two rows are to be swapped. It should be of\n CSC format.\n\n m : int\n Index of the row of X to be swapped.\n\n n : int\n ... | def inplace_swap_row_csc(X, m, n):
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError("m and n should be valid integers")
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
m_mask = X.indices == m
X.indices[X.indices == n] = m
X.indices[m_m... | |
16,058 | 73,591 | 139 | wagtail/contrib/table_block/tests.py | 29 | 8 | def test_render_empty_table(self):
| Reformat with black | test_render_empty_table | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | tests.py | 12 | 19 | https://github.com/wagtail/wagtail.git | 1 | 67 | 0 | 22 | 105 | Python | {
"docstring": "\n An empty table should render okay.\n \n <table>\n <tbody>\n <tr><td></td><td></td><td></td></tr>\n <tr><td></td><td></td><td></td></tr>\n <tr><td></td><td></td><td></td></tr>\n </tbody>\n... | def test_render_empty_table(self):
block = TableBlock()
result = block.render(
{
"first_row_is_table_header": False,
"first_col_is_header": False,
"data": [[None, None, None], [None, None, None], [None, None, None]],
}
... | |
47,876 | 196,376 | 56 | sympy/matrices/dense.py | 21 | 8 | def rot_axis3(theta):
ct = cos(theta) | Moved imports to higher level | rot_axis3 | 59d22b6bb7287613d598611027f640d068ca5748 | sympy | dense.py | 9 | 7 | https://github.com/sympy/sympy.git | 1 | 51 | 0 | 18 | 76 | Python | {
"docstring": "Returns a rotation matrix for a rotation of theta (in radians) about\n the 3-axis.\n\n Examples\n ========\n\n >>> from sympy import pi, rot_axis3\n\n A rotation of pi/3 (60 degrees):\n\n >>> theta = pi/3\n >>> rot_axis3(theta)\n Matrix([\n [ 1/2, sqrt(3)/2, 0],\n [... | def rot_axis3(theta):
ct = cos(theta)
st = sin(theta)
lil = ((ct, st, 0),
(-st, ct, 0),
(0, 0, 1))
return Matrix(lil)
| |
47,592 | 196,092 | 56 | sympy/combinatorics/graycode.py | 17 | 9 | def current(self):
rv = self._current or '0'
if not isinstance(rv, str):
rv = bin(rv)[2:]
return rv.rjust(self.n, '0') | Updated import locations | current | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | graycode.py | 11 | 5 | https://github.com/sympy/sympy.git | 3 | 43 | 0 | 15 | 74 | Python | {
"docstring": "\n Returns the currently referenced Gray code as a bit string.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import GrayCode\n >>> GrayCode(3, start='100').current\n '100'\n ",
"language": "en",
"n_whitespaces": 71,
"n_words": 21,
"vo... | def current(self):
rv = self._current or '0'
if not isinstance(rv, str):
rv = bin(rv)[2:]
return rv.rjust(self.n, '0')
| |
120,738 | 335,307 | 360 | src/diffusers/models/unet_sde_score_estimation.py | 210 | 30 | def upsample_conv_2d(x, w, k=None, factor=2, gain=1):
assert isinstance(factor, int) and factor >= 1
# Check weight shape.
assert len(w.shape) == 4
convH = w.shape[2]
convW = w.shape[3]
inC = w.shape[1]
assert convW == convH
# Setup filter kernel.
if k is None:
k = [... | add score estimation model | upsample_conv_2d | ac796924dff7241d9b516ea27faaa7b2f12434fd | diffusers | unet_sde_score_estimation.py | 14 | 25 | https://github.com/huggingface/diffusers.git | 4 | 356 | 0 | 114 | 547 | Python | {
"docstring": "Fused `upsample_2d()` followed by `tf.nn.conv2d()`.\n\n Padding is performed only once at the beginning, not between the\n operations.\n The fused op is considerably more efficient than performing the same\n calculation\n using standard TensorFlow ops. It supports gradients of arbitrary... | def upsample_conv_2d(x, w, k=None, factor=2, gain=1):
assert isinstance(factor, int) and factor >= 1
# Check weight shape.
assert len(w.shape) == 4
convH = w.shape[2]
convW = w.shape[3]
inC = w.shape[1]
assert convW == convH
# Setup filter kernel.
if k is None:
k = [... | |
3,837 | 21,441 | 175 | pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py | 32 | 11 | def read(self, size=None):
if size is None:
t = []
while True:
buf = self._read(self.bufsize | Vendor in pip 22.1.2 | read | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | tarfile.py | 13 | 13 | https://github.com/pypa/pipenv.git | 4 | 71 | 0 | 25 | 121 | Python | {
"docstring": "Return the next size number of bytes from the stream.\n If size is not defined, return all bytes of the stream\n up to EOF.\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 24,
"vocab_size": 19
} | def read(self, size=None):
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
... | |
553 | 3,767 | 89 | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/base_streams.py | 16 | 8 | def state(self) -> Mapping[str, Any]:
if self._cursor_value:
return {
self.cursor_field: self._cursor_value,
| 🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)
* Facebook Marketing performance improvement
* add comments and little refactoring
* fix integration tests with the new config
* improve job status handling, limit concurrency to 10
* fix campaign jobs, refactor manager
* big refactori... | state | a3aae8017a0a40ff2006e2567f71dccb04c997a5 | airbyte | base_streams.py | 10 | 8 | https://github.com/airbytehq/airbyte.git | 2 | 38 | 0 | 15 | 61 | Python | {
"docstring": "State getter, get current state and serialize it to emmit Airbyte STATE message",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def state(self) -> Mapping[str, Any]:
if self._cursor_value:
return {
self.cursor_field: self._cursor_value,
"include_deleted": self._include_deleted,
}
return {}
| |
55,965 | 220,322 | 55 | python3.10.4/Lib/asyncio/base_events.py | 23 | 6 | def set_task_factory(self, factory):
if factory is not None and not callable(factory):
raise TypeError('task factory must b | add python 3.10.4 for windows | set_task_factory | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | base_events.py | 10 | 4 | https://github.com/XX-net/XX-Net.git | 3 | 30 | 0 | 20 | 52 | Python | {
"docstring": "Set a task factory that will be used by loop.create_task().\n\n If factory is None the default task factory will be set.\n\n If factory is a callable, it should have a signature matching\n '(loop, coro)', where 'loop' will be a reference to the active\n event loop, 'coro' w... | def set_task_factory(self, factory):
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
| |
39,394 | 163,186 | 79 | pandas/core/arrays/datetimes.py | 37 | 10 | def date(self) -> npt.NDArray[np.object_]:
# If the Timestamps have a timezone that is not UTC,
# | DOC: Improve doc summaries in series.rst (#45237) | date | 521259299f7829da667ba39302ec77acedde9e5e | pandas | datetimes.py | 9 | 9 | https://github.com/pandas-dev/pandas.git | 1 | 31 | 0 | 32 | 56 | Python | {
"docstring": "\n Returns numpy array of python :class:`datetime.date` objects.\n\n Namely, the date part of Timestamps without time and\n timezone information.\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 18,
"vocab_size": 17
} | def date(self) -> npt.NDArray[np.object_]:
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="... | |
69,952 | 243,004 | 172 | src/PIL/Image.py | 54 | 14 | def apply_transparency(self):
if self.mode != "P" or "transparency" not in self.info:
return
from . import ImagePalette
palette = self.getpalette("RGBA")
transparency = self.info["transparency"]
if isinstance(tra | Added apply_transparency() | apply_transparency | 11be1631433f252b816802aef1a3cd109bd308c7 | Pillow | Image.py | 13 | 14 | https://github.com/python-pillow/Pillow.git | 5 | 110 | 0 | 41 | 186 | Python | {
"docstring": "\n If a P mode image has a \"transparency\" key in the info dictionary,\n remove the key and apply the transparency to the palette instead.\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 24,
"vocab_size": 19
} | def apply_transparency(self):
if self.mode != "P" or "transparency" not in self.info:
return
from . import ImagePalette
palette = self.getpalette("RGBA")
transparency = self.info["transparency"]
if isinstance(transparency, bytes):
for i, alpha i... | |
19,002 | 93,655 | 19 | src/sentry/utils/assets.py | 10 | 9 | def get_frontend_app_asset_url(module, key):
args = (settings.STATIC_FRONTEND_APP_URL.rstrip("/"), module, key.lstrip("/"))
return "{}/{}/{}".format(* | ref(js): Remove broken frontend asset cache busting (#36953) | get_frontend_app_asset_url | 2992f33c2d084f2542af647c6b76b54c351cc5a5 | sentry | assets.py | 10 | 3 | https://github.com/getsentry/sentry.git | 1 | 37 | 0 | 10 | 65 | Python | {
"docstring": "\n Returns an asset URL that is unversioned. These assets should have a\n `Cache-Control: max-age=0, must-revalidate` so that clients must validate with the origin\n server before using their locally cached asset.\n\n Example:\n {% frontend_app_asset_url 'sentry' 'sentry.css' %}\n ... | def get_frontend_app_asset_url(module, key):
args = (settings.STATIC_FRONTEND_APP_URL.rstrip("/"), module, key.lstrip("/"))
return "{}/{}/{}".format(*args)
| |
26,690 | 119,812 | 197 | jax/_src/lax/linalg.py | 139 | 19 | def tridiagonal_solve(dl, d, du, b):
r
if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1:
raise ValueError('dl, d and du must be vectors')
if dl.shape != d.shape or d.shape != du.shape:
raise ValueError(
f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`')
if b.ndim != 2:
raise ... | DOC: add missing linalg functionality to docs | tridiagonal_solve | c66f5dda60aa5df7b6aa2f09d3ce88c4249b6f34 | jax | linalg.py | 13 | 39 | https://github.com/google/jax.git | 13 | 200 | 0 | 90 | 379 | Python | {
"docstring": "Computes the solution of a tridiagonal linear system.\n\n This function computes the solution of a tridiagonal linear system:\n\n .. math::\n A . X = B\n\n Args:\n dl: The lower diagonal of A: ``dl[i] := A[i, i-1]`` for i in ``[0,m)``.\n Note that ``dl[0] = 0``.\n d: The middle diagno... | def tridiagonal_solve(dl, d, du, b):
r
if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1:
raise ValueError('dl, d and du must be vectors')
if dl.shape != d.shape or d.shape != du.shape:
raise ValueError(
f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`')
if b.ndim != 2:
raise ... | |
39,828 | 166,568 | 289 | pandas/util/_print_versions.py | 72 | 14 | def _get_dependency_info() -> dict[str, JSONSerializable]:
deps = [
"pandas",
# required
"numpy",
"pytz",
"dateutil",
# install / build,
"setuptools",
"pip",
"Cython",
# test
"pytest",
"hypothesis",
# docs
... | fix pandas.show_versions() and remove pin for setuptools (#47096) | _get_dependency_info | 44b660dc4a07f4fb507c31795ae63dca2e6e9440 | pandas | _print_versions.py | 12 | 32 | https://github.com/pandas-dev/pandas.git | 3 | 106 | 0 | 61 | 191 | Python | {
"docstring": "\n Returns dependency information as a JSON serializable dictionary.\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 8
} | def _get_dependency_info() -> dict[str, JSONSerializable]:
deps = [
"pandas",
# required
"numpy",
"pytz",
"dateutil",
# install / build,
"setuptools",
"pip",
"Cython",
# test
"pytest",
"hypothesis",
# docs
... | |
14,318 | 66,758 | 41 | erpnext/patches/v13_0/germany_fill_debtor_creditor_number.py | 60 | 16 | def execute():
company_list = frappe.get_all("Company", filters={"country": "Germany"})
for company in company_list:
party_account_list = frappe.get_all(
"Party Account",
filters={"company": company.name},
fields=["name", "account", "debtor_creditor_number"],
)
for party_account in party_account_lis... | style: format code with black | execute | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | germany_fill_debtor_creditor_number.py | 14 | 18 | https://github.com/frappe/erpnext.git | 6 | 126 | 0 | 46 | 218 | Python | {
"docstring": "Move account number into the new custom field debtor_creditor_number.\n\n\tGerman companies used to use a dedicated payable/receivable account for\n\tevery party to mimick party accounts in the external accounting software\n\t\"DATEV\". This is no longer necessary. The reference ID for DATEV will be\n... | def execute():
company_list = frappe.get_all("Company", filters={"country": "Germany"})
for company in company_list:
party_account_list = frappe.get_all(
"Party Account",
filters={"company": company.name},
fields=["name", "account", "debtor_creditor_number"],
)
for party_account in party_account_lis... | |
77,108 | 262,049 | 78 | TTS/tts/datasets/dataset.py | 21 | 16 | def compute_or_load(self, wav_file):
pitch_file = self.create_pitch_file_path(wav_file, self.cache_path)
if not os.path.exists(pitch_file):
pitch | Refactor TTSDataset ⚡️ | compute_or_load | 176b712c1a40cf630da9a77f1826836723c40fde | TTS | dataset.py | 11 | 7 | https://github.com/coqui-ai/TTS.git | 2 | 64 | 0 | 18 | 102 | Python | {
"docstring": "\n compute pitch and return a numpy array of pitch values\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 9
} | def compute_or_load(self, wav_file):
pitch_file = self.create_pitch_file_path(wav_file, self.cache_path)
if not os.path.exists(pitch_file):
pitch = self._compute_and_save_pitch(self.ap, wav_file, pitch_file)
else:
pitch = np.load(pitch_file)
return pitch.... | |
49,581 | 200,282 | 816 | sympy/testing/runtests.py | 272 | 33 | def check_output(self, want, got, optionflags):
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the par... | runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy | check_output | 6d2bbf80752549276a968fd4af78231c569d55c5 | sympy | runtests.py | 15 | 36 | https://github.com/sympy/sympy.git | 15 | 276 | 0 | 149 | 459 | Python | {
"docstring": "\n Return True iff the actual output from an example (`got`)\n matches the expected output (`want`). These strings are\n always considered to match if they are identical; but\n depending on what option flags the test runner is using,\n several non-exact match types ... | def check_output(self, want, got, optionflags):
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the par... | |
16,102 | 73,775 | 120 | wagtail/core/models/__init__.py | 24 | 20 | def start(self, workflow_state, user=None):
task_state = self.get_task_state_class()(workflow_state=workflow_state)
task_state.status = TaskState.STATUS_IN_PROGRESS
task_state.page_revision = workflow_state.page.get_latest_revision()
task_state.task = self
task_state.sav... | Reformat with black | start | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | __init__.py | 10 | 12 | https://github.com/wagtail/wagtail.git | 1 | 77 | 0 | 20 | 122 | Python | {
"docstring": "Start this task on the provided workflow state by creating an instance of TaskState",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 14
} | def start(self, workflow_state, user=None):
task_state = self.get_task_state_class()(workflow_state=workflow_state)
task_state.status = TaskState.STATUS_IN_PROGRESS
task_state.page_revision = workflow_state.page.get_latest_revision()
task_state.task = self
task_state.sav... | |
14,424 | 67,084 | 117 | erpnext/regional/germany/utils/datev/datev_csv.py | 155 | 40 | def get_datev_csv(data, filters, csv_class):
empty_df = pd.DataFrame(columns=csv_class.COLUMNS)
data_df = pd.DataFrame.from_records(data)
result = empty_df.append(data_df, sort=True)
if csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS:
result["Belegdatum"] = pd.to_datetime(result[" | style: format code with black | get_datev_csv | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | datev_csv.py | 13 | 27 | https://github.com/frappe/erpnext.git | 3 | 247 | 0 | 107 | 435 | Python | {
"docstring": "\n\tFill in missing columns and return a CSV in DATEV Format.\n\n\tFor automatic processing, DATEV requires the first line of the CSV file to\n\thold meta data such as the length of account numbers oder the category of\n\tthe data.\n\n\tArguments:\n\tdata -- array of dictionaries\n\tfilters -- dict\n\... | def get_datev_csv(data, filters, csv_class):
empty_df = pd.DataFrame(columns=csv_class.COLUMNS)
data_df = pd.DataFrame.from_records(data)
result = empty_df.append(data_df, sort=True)
if csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS:
result["Belegdatum"] = pd.to_datetime(result["Belegdatum"])
result["... | |
49,659 | 200,453 | 36 | sympy/stats/random_matrix_models.py | 21 | 11 | def CircularSymplecticEnsemble(sym, dim):
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularSymplecticEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, di | Fix various typos
Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet` | CircularSymplecticEnsemble | 24f1e7730119fe958cc8e28411f790c9a5ec04eb | sympy | random_matrix_models.py | 9 | 5 | https://github.com/sympy/sympy.git | 1 | 52 | 0 | 18 | 80 | Python | {
"docstring": "\n Represents Circular Symplectic Ensembles.\n\n Examples\n ========\n\n >>> from sympy.stats import CircularSymplecticEnsemble as CSE\n >>> from sympy.stats import joint_eigen_distribution\n >>> C = CSE('S', 1)\n >>> joint_eigen_distribution(C)\n Lambda(t[1], Product(Abs(exp(I... | def CircularSymplecticEnsemble(sym, dim):
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularSymplecticEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
| |
7,845 | 43,154 | 35 | airflow/models/taskinstance.py | 12 | 4 | def _executor_config_comparator(x, y):
try:
return x == y
except AttributeError:
return False
| Don't crash scheduler if exec config has old k8s objects (#24117)
From time to time k8s library objects change their attrs. If executor config is stored with old version, and unpickled with new version, we can get attribute errors that can crash the scheduler (see https://github.com/apache/airflow/issues/23727).
H... | _executor_config_comparator | 0c41f437674f135fe7232a368bf9c198b0ecd2f0 | airflow | taskinstance.py | 8 | 5 | https://github.com/apache/airflow.git | 2 | 19 | 0 | 11 | 33 | Python | {
"docstring": "\n The TaskInstance.executor_config attribute is a pickled object that may contain\n kubernetes objects. If the installed library version has changed since the\n object was originally pickled, due to the underlying ``__eq__`` method on these\n objects (which converts them to JSON), we may... | def _executor_config_comparator(x, y):
try:
return x == y
except AttributeError:
return False
| |
87,761 | 288,605 | 370 | tests/util/test_color.py | 112 | 3 | def test_color_temperature_to_rgbww():
# Coldest color temperature -> only cold channel enabled
assert color_util.color_temperature_to_rgbww(6535, 255, 2000, 6535) == (
0,
0,
0,
255,
0,
)
assert color_util.color_temperatu | Use Kelvin as the preferred color temperature unit (#79591)
* Use Kelvin as the preferred white temperature unit
* Update homekit
* Adjust tests | test_color_temperature_to_rgbww | 47d0598e75487f63901931875f69f802a477df13 | core | test_color.py | 8 | 43 | https://github.com/home-assistant/core.git | 1 | 161 | 0 | 34 | 207 | Python | {
"docstring": "Test color temp to warm, cold conversion.\n\n Temperature values must be in mireds\n Home Assistant uses rgbcw for rgbww\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 19,
"vocab_size": 19
} | def test_color_temperature_to_rgbww():
# Coldest color temperature -> only cold channel enabled
assert color_util.color_temperature_to_rgbww(6535, 255, 2000, 6535) == (
0,
0,
0,
255,
0,
)
assert color_util.color_temperature_to_rgbww(6535, 128, 2000, 6535) == ... | |
26,330 | 118,626 | 176 | lib/tests/streamlit/report_context_test.py | 33 | 20 | def test_set_page_config_first(self):
fake_enqueue = lambda msg: None
| Rename and refactor `Report` machinery (#4141)
This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app". | test_set_page_config_first | 704eab3478cf69847825b23dabf15813a8ac9fa2 | streamlit | report_context_test.py | 10 | 17 | https://github.com/streamlit/streamlit.git | 1 | 84 | 0 | 26 | 148 | Python | {
"docstring": "st.set_page_config must be called before other st commands\n when the script has been marked as started",
"language": "en",
"n_whitespaces": 22,
"n_words": 16,
"vocab_size": 16
} | def test_set_page_config_first(self):
fake_enqueue = lambda msg: None
ctx = ScriptRunContext(
"TestSessionID",
fake_enqueue,
"",
SessionState(),
UploadedFileManager(),
)
ctx.on_script_start()
markdown_msg = F... | |
21,290 | 101,909 | 150 | lib/gui/display_command.py | 38 | 15 | def set_vars(self) -> None:
tk_vars = super().set_vars()
smoothgraph = tk.DoubleVar()
smoothgraph.set(0.900)
tk_vars["smoothg | Typing - lib.gui.display_command | set_vars | dab823a3eb7a5257cb1e0818ee10ed234d3de97f | faceswap | display_command.py | 10 | 25 | https://github.com/deepfakes/faceswap.git | 1 | 103 | 0 | 24 | 177 | Python | {
"docstring": " Add graphing specific variables to the default variables.\n\n Overrides original method.\n\n Returns\n -------\n dict\n The variable names with their corresponding tkinter variable\n ",
"language": "en",
"n_whitespaces": 69,
"n_words": 22,
"vocab_... | def set_vars(self) -> None:
tk_vars = super().set_vars()
smoothgraph = tk.DoubleVar()
smoothgraph.set(0.900)
tk_vars["smoothgraph"] = smoothgraph
raw_var = tk.BooleanVar()
raw_var.set(True)
tk_vars["raw_data"] = raw_var
smooth_var = tk.BooleanV... | |
11,975 | 60,026 | 226 | src/prefect/infrastructure/kubernetes.py | 82 | 9 | def _configure_kubernetes_library_client(self) -> None:
# TODO: Investigate returning a configured client so calls on other threads
# will not invalidate the config needed here
# if a k8s cluster block is provided to the flow runner, use that
if self.cluster_config:
... | Use cluster uid and namespace instead of cluster "name" for Kubernetes job identifiers (#7747)
Co-authored-by: peytonrunyan <peytonrunyan@gmail.com>
Co-authored-by: Peyton <44583861+peytonrunyan@users.noreply.github.com> | _configure_kubernetes_library_client | 0c9ee0876133bde14ce070a89557fc31cd905bac | prefect | kubernetes.py | 14 | 14 | https://github.com/PrefectHQ/prefect.git | 3 | 45 | 0 | 62 | 85 | Python | {
"docstring": "\n Set the correct kubernetes client configuration.\n\n WARNING: This action is not threadsafe and may override the configuration\n specified by another `KubernetesJob` instance.\n ",
"language": "en",
"n_whitespaces": 61,
"n_words": 22,
"vocab_size": 21
} | def _configure_kubernetes_library_client(self) -> None:
# TODO: Investigate returning a configured client so calls on other threads
# will not invalidate the config needed here
# if a k8s cluster block is provided to the flow runner, use that
if self.cluster_config:
... | |
19,261 | 95,966 | 657 | tests/sentry/api/endpoints/test_project_rules.py | 116 | 34 | def test_runs_alert_rule_action_creator(self, mock_alert_rule_action_creator):
self.login_as(user=self.user)
project = self.create_project()
self.create_sentry_app(
name="Pied Piper",
organization=project.organization,
schema={"elements": [self.crea... | feat(alert-rule-action): New data structure for alert-rule-action settings (#31444)
Objective:
Originally the issue was with serializing the settings field for alert webhooks and fighting with the serializers. Instead we decided to convert the dictionary to an array of dictionaries with keys name and value. | test_runs_alert_rule_action_creator | 3c8b4477340a7fe276c57c9b598c161b309c4fbd | sentry | test_project_rules.py | 15 | 51 | https://github.com/getsentry/sentry.git | 1 | 291 | 0 | 91 | 513 | Python | {
"docstring": "\n Ensures that Sentry Apps with schema forms (UI components)\n receive a payload when an alert rule is created with them.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 19
} | def test_runs_alert_rule_action_creator(self, mock_alert_rule_action_creator):
self.login_as(user=self.user)
project = self.create_project()
self.create_sentry_app(
name="Pied Piper",
organization=project.organization,
schema={"elements": [self.crea... | |
115,587 | 317,011 | 987 | homeassistant/components/icloud/account.py | 199 | 43 | def _determine_interval(self) -> int:
intervals = {"default": self._max_interval}
for device in self._devices.values():
# Max interval if no location
if device.location is None:
continue
current_zone = run_callback_threadsafe(
... | Remove icloud from mypy ignore list (#75007) | _determine_interval | 6ac05784a63f7490f875959139ef903034bc45b0 | core | account.py | 16 | 52 | https://github.com/home-assistant/core.git | 13 | 290 | 0 | 120 | 454 | Python | {
"docstring": "Calculate new interval between two API fetch (in minutes).",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _determine_interval(self) -> int:
intervals = {"default": self._max_interval}
for device in self._devices.values():
# Max interval if no location
if device.location is None:
continue
current_zone = run_callback_threadsafe(
... | |
4,164 | 22,084 | 93 | pipenv/patched/pip/_vendor/requests/models.py | 25 | 11 | def prepare_cookies(self, cookies):
if isinstance(cookies, cookielib.CookieJar | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | prepare_cookies | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | models.py | 11 | 8 | https://github.com/pypa/pipenv.git | 3 | 57 | 0 | 18 | 93 | Python | {
"docstring": "Prepares the given HTTP cookie data.\n\n This function eventually generates a ``Cookie`` header from the\n given cookies using cookielib. Due to cookielib's design, the header\n will not be regenerated if it already exists, meaning this function\n can only be called once fo... | def prepare_cookies(self, cookies):
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
s... | |
7,355 | 40,216 | 88 | dash/testing/browser.py | 23 | 11 | def wait_for_contains_text(self, selector, text, timeout=None):
return self._wait_for(
method=contains_text,
args=(selector, text),
timeout=timeout,
msg=f"text -> {text} not found inside element within {timeout or self._wait_timeout}s",
)
| f-strings everywhere! fffff | wait_for_contains_text | c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c | dash | browser.py | 12 | 7 | https://github.com/plotly/dash.git | 1 | 41 | 0 | 23 | 73 | Python | {
"docstring": "Explicit wait until the element's text contains the expected `text`.\n\n timeout if not set, equals to the fixture's `wait_timeout`\n shortcut to `WebDriverWait` with customized `contains_text`\n condition.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 26,
... | def wait_for_contains_text(self, selector, text, timeout=None):
return self._wait_for(
method=contains_text,
args=(selector, text),
timeout=timeout,
msg=f"text -> {text} not found inside element within {timeout or self._wait_timeout}s",
)
| |
49,035 | 198,681 | 99 | sympy/physics/continuum_mechanics/truss.py | 23 | 10 | def remove_member(self, label):
if label not in list(self._members):
raise ValueError("No such member exists in the Truss")
else:
| default values for supports and loads removed along with other changes | remove_member | 73b2975a89b45ef437f11b697d39796f755a856b | sympy | truss.py | 16 | 8 | https://github.com/sympy/sympy.git | 2 | 104 | 0 | 22 | 162 | Python | {
"docstring": "\n This method removes a member from the given truss.\n\n Parameters\n ==========\n label: String or Symbol\n The label for the member to be removed.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n ... | def remove_member(self, label):
if label not in list(self._members):
raise ValueError("No such member exists in the Truss")
else:
self._nodes_occupied.pop(tuple([self._members[label][0], self._members[label][1]]))
self._nodes_occupied.pop(tuple([self._member... | |
26,306 | 118,583 | 84 | lib/tests/server_test_case.py | 24 | 15 | def _create_mock_app_session(*args, **kwargs):
mock_id = mock.PropertyMock(
return_value="mock_id:%s" % ServerTestCase._next_session_id
)
ServerTestCase._next_session_id += 1
mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs)
type(mock... | Rename and refactor `Report` machinery (#4141)
This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app". | _create_mock_app_session | 704eab3478cf69847825b23dabf15813a8ac9fa2 | streamlit | server_test_case.py | 11 | 8 | https://github.com/streamlit/streamlit.git | 1 | 57 | 0 | 19 | 93 | Python | {
"docstring": "Create a mock AppSession. Each mocked instance will have\n its own unique ID.",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 13
} | def _create_mock_app_session(*args, **kwargs):
mock_id = mock.PropertyMock(
return_value="mock_id:%s" % ServerTestCase._next_session_id
)
ServerTestCase._next_session_id += 1
mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs)
type(mock... | |
7,605 | 42,543 | 34 | nltk/parse/util.py | 17 | 4 | def taggedsents_to_conll(sentences):
for sentence in sentences:
yield from taggedsent_to_conll(sentence)
yield "\n\n"
############################################################# | Docstring tests (#3050)
* fixed pytests
* fixed more pytests
* fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py
* fixed pytests (mainly multiline or rounding issues)
* fixed treebank pytests, removed test for return_string=True (deprecated)
* fixed destructive.py... | taggedsents_to_conll | 8a4cf5d94eb94b6427c5d1d7907ba07b119932c5 | nltk | util.py | 10 | 4 | https://github.com/nltk/nltk.git | 2 | 19 | 0 | 15 | 41 | Python | {
"docstring": "\n A module to convert the a POS tagged document stream\n (i.e. list of list of tuples, a list of sentences) and yield lines\n in CONLL format. This module yields one line per word and two newlines\n for end of sentence.\n\n >>> from nltk import word_tokenize, sent_tokenize, pos_tag\n ... | def taggedsents_to_conll(sentences):
for sentence in sentences:
yield from taggedsent_to_conll(sentence)
yield "\n\n"
######################################################################
# { Test Suites
######################################################################
| |
19,953 | 100,480 | 244 | plugins/train/model/phaze_a.py | 82 | 18 | def _get_input_shape(self):
arch = self.config["enc_architecture"]
enforce_size = _MODEL_MAPPING[arch].get("enforce_for_weig | Phaze-A: Add MobileNetV3 encoder | _get_input_shape | 0189029dbaad486e623353ee4a8451af8c85f4e4 | faceswap | phaze_a.py | 17 | 16 | https://github.com/deepfakes/faceswap.git | 4 | 139 | 0 | 60 | 232 | Python | {
"docstring": " Obtain the input shape for the model.\n\n Input shape is calculated from the selected Encoder's input size, scaled to the user\n selected Input Scaling, rounded down to the nearest 16 pixels.\n\n Notes\n -----\n Some models (NasNet) require the input size to be of a... | def _get_input_shape(self):
arch = self.config["enc_architecture"]
enforce_size = _MODEL_MAPPING[arch].get("enforce_for_weights", False)
default_size = _MODEL_MAPPING[arch]["default_size"]
scaling = self.config["enc_scaling"] / 100
min_size = _MODEL_MAPPING[arch].get("m... | |
16,543 | 76,578 | 572 | wagtail/admin/panels.py | 148 | 12 | def get_form_options(self):
options = {}
if not getattr(self.widget_overrides, "is_original_method", False):
warn(
"The `widget_overrides` method (on %r) is deprecated; | Introduce a get_form_options method to combine widget_overrides / required_fields / required_formsets / field_permissions | get_form_options | ae79eb4cb29b84bb8379fcf0957e6837164c5933 | wagtail | panels.py | 12 | 35 | https://github.com/wagtail/wagtail.git | 5 | 168 | 0 | 60 | 300 | Python | {
"docstring": "\n Return a dictionary of attributes such as 'fields', 'formsets' and 'widgets'\n which should be incorporated into the form class definition to generate a form\n that this EditHandler can use.\n This will only be called after binding to a model (i.e. self.model is availabl... | def get_form_options(self):
options = {}
if not getattr(self.widget_overrides, "is_original_method", False):
warn(
"The `widget_overrides` method (on %r) is deprecated; "
"these should be returned from `get_form_options` as a "
"`widg... | |
12,267 | 60,730 | 101 | .venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py | 29 | 10 | def evaluate_links(self, link_evaluator, links):
# type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate]
candidates = []
for link in self._sort_links(links):
candidate = self.get_install_candidate(link_evaluator, | upd; format | evaluate_links | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | package_finder.py | 11 | 7 | https://github.com/jindongwang/transferlearning.git | 3 | 48 | 0 | 26 | 77 | Python | {
"docstring": "\n Convert links that are candidates to InstallationCandidate objects.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | def evaluate_links(self, link_evaluator, links):
# type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate]
candidates = []
for link in self._sort_links(links):
candidate = self.get_install_candidate(link_evaluator, link)
if candidate is not None:
... | |
35,855 | 154,199 | 70 | modin/core/storage_formats/base/query_compiler.py | 20 | 7 | def columnarize(self):
| REFACTOR-#4796: Introduce constant for __reduced__ column name (#4799)
Co-authored-by: Mahesh Vashishtha <mvashishtha@users.noreply.github.com>
Co-authored-by: Alexey Prutskov <lehaprutskov@gmail.com>
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: Jonathan Shi <jhshi@ponder.io> | columnarize | 3f985ed6864cc1b5b587094d75ca5b2695e4139f | modin | query_compiler.py | 12 | 6 | https://github.com/modin-project/modin.git | 4 | 44 | 0 | 17 | 72 | Python | {
"docstring": "\n Transpose this QueryCompiler if it has a single row but multiple columns.\n\n This method should be called for QueryCompilers representing a Series object,\n i.e. ``self.is_series_like()`` should be True.\n\n Returns\n -------\n BaseQueryCompiler\n ... | def columnarize(self):
if len(self.columns) != 1 or (
len(self.index) == 1 and self.index[0] == MODIN_UNNAMED_SERIES_LABEL
):
return self.transpose()
return self
| |
12,305 | 60,858 | 34 | .venv/lib/python3.8/site-packages/pip/_internal/models/wheel.py | 13 | 6 | def get_formatted_file_tags(self):
# type: () -> List[str]
return sorted(str(tag) for tag in self. | upd; format | get_formatted_file_tags | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | wheel.py | 9 | 2 | https://github.com/jindongwang/transferlearning.git | 2 | 20 | 0 | 13 | 35 | Python | {
"docstring": "Return the wheel's tags as a sorted list of strings.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def get_formatted_file_tags(self):
# type: () -> List[str]
return sorted(str(tag) for tag in self.file_tags)
| |
39,090 | 161,892 | 555 | tests/test_syntax.py | 85 | 18 | def test_python_render():
syntax = Panel.fit(
Syntax(
CODE,
lexer="python",
line_numbers=True,
line_range=(2, 10),
theme="monokai",
code_width=60,
word_wrap=True,
),
padding=0,
)
rendered_syntax = ren... | fix for syntax measure | test_python_render | ac69488768e9c54cdef26e45b26a1b42ebf2f5d3 | rich | test_syntax.py | 12 | 17 | https://github.com/Textualize/rich.git | 1 | 69 | 0 | 57 | 289 | Python | {
"docstring": "Iterate and generate a tuple with a flag for first \\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[48;2;39;40;34m \\x1b[0m\\x1b[38;2;230;219;116;48;2;39;40;34mand last value.",
"language": "en",
"n_whitespaces": 19,
"n_words": 15,
"vocab_size": 14
} | def test_python_render():
syntax = Panel.fit(
Syntax(
CODE,
lexer="python",
line_numbers=True,
line_range=(2, 10),
theme="monokai",
code_width=60,
word_wrap=True,
),
padding=0,
)
rendered_syntax = ren... | |
102,133 | 303,311 | 151 | tests/components/recorder/test_history.py | 73 | 22 | def test_state_changes_during_period_multiple_entities_single_test(hass_recorder):
hass = hass_recorder()
start = dt_util.utcnow()
test_entites = {f"sensor.{i}": str(i) for i in range(30)}
for entity_id, value in test_entites.items():
hass.states.set(entity_id, value)
wait_recording_do... | Fix state_changes_during_period history query when no entities are passed (#73139) | test_state_changes_during_period_multiple_entities_single_test | de2e9b6d77adb7f86c6ec4aa0a50428ec8606dc3 | core | test_history.py | 11 | 18 | https://github.com/home-assistant/core.git | 6 | 183 | 0 | 32 | 284 | Python | {
"docstring": "Test state change during period with multiple entities in the same test.\n\n This test ensures the sqlalchemy query cache does not\n generate incorrect results.\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 24,
"vocab_size": 23
} | def test_state_changes_during_period_multiple_entities_single_test(hass_recorder):
hass = hass_recorder()
start = dt_util.utcnow()
test_entites = {f"sensor.{i}": str(i) for i in range(30)}
for entity_id, value in test_entites.items():
hass.states.set(entity_id, value)
wait_recording_do... | |
51,586 | 206,612 | 91 | django/utils/dateformat.py | 37 | 7 | def O(self): # NOQA: E743, E741
if self._no_timezone_or_datetime_is_ambiguous_or_imaginary:
return ""
seconds = sel | Refs #33476 -- Reformatted code with Black. | O | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | dateformat.py | 10 | 7 | https://github.com/django/django.git | 3 | 43 | 0 | 27 | 93 | Python | {
"docstring": "\n Difference to Greenwich time in hours; e.g. '+0200', '-0430'.\n\n If timezone information is not available, return an empty string.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 19
} | def O(self): # NOQA: E743, E741
if self._no_timezone_or_datetime_is_ambiguous_or_imaginary:
return ""
seconds = self.Z()
sign = "-" if seconds < 0 else "+"
seconds = abs(seconds)
return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
| |
17,087 | 80,656 | 337 | awx/main/utils/common.py | 155 | 16 | def convert_mem_str_to_bytes(mem_str):
# If there is no suffix, the memory sourced from the request is in bytes
if mem_str.isdigit():
return int(mem_str)
conversions = {
'Ei': lambda x: x * 2**60,
'E': lambda x: x * 1 | Fixup conversion of memory and cpu settings to support k8s resource request format (#11725)
fix memory and cpu settings to suport k8s resource request format
* fix conversion of memory setting to bytes
This setting has not been getting set by default, and needed some fixing
up to be compatible with setting the ... | convert_mem_str_to_bytes | 799968460d4794bcd9959f57a2b97846b9a00bb7 | awx | common.py | 14 | 29 | https://github.com/ansible/awx.git | 6 | 234 | 0 | 86 | 400 | Python | {
"docstring": "Convert string with suffix indicating units to memory in bytes (base 2)\n\n Useful for dealing with memory setting that may be expressed in units compatible with\n kubernetes.\n\n See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory\n ",
"l... | def convert_mem_str_to_bytes(mem_str):
# If there is no suffix, the memory sourced from the request is in bytes
if mem_str.isdigit():
return int(mem_str)
conversions = {
'Ei': lambda x: x * 2**60,
'E': lambda x: x * 10**18,
'Pi': lambda x: x * 2**50,
'P': lambda... | |
19,015 | 93,732 | 525 | src/sentry/integrations/jira_server/integration.py | 103 | 37 | def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs):
client = self.get_client()
jira_issue = client.get_issue(external_issue.key)
jira_project = jira_issue["fields"]["project"]
try:
external_project = IntegrationExternalProject.objects.... | ref(Jira): Split Jira Cloud and Jira Server (#37034)
* Split Jira Cloud and Jira Server | sync_status_outbound | 2fbf550ec05c8501cbc9eca62e73526e717dcbdf | sentry | integration.py | 17 | 36 | https://github.com/getsentry/sentry.git | 8 | 213 | 0 | 81 | 352 | Python | {
"docstring": "\n Propagate a sentry issue's status to a linked issue's status.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 8
} | def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs):
client = self.get_client()
jira_issue = client.get_issue(external_issue.key)
jira_project = jira_issue["fields"]["project"]
try:
external_project = IntegrationExternalProject.objects.... | |
23,225 | 108,514 | 166 | lib/matplotlib/axes/_base.py | 44 | 17 | def _sci(self, im):
_api.check_isinstance(
(mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage),
im=im)
if isinstance(im, mpl.contour.ContourSet):
if im.collections[0] not in self._children:
raise ValueError("ContourSet must be in curr... | Cleanup documentation generation for pyplot
- remove the awkward `pyplot.plotting()` function, which only served
as a namespace to take up the docs for pyplot and output them via
`.. autofunction`
- Instead generate the same information using `.. autosummary::`. We
have to list the desired methods here explicitl... | _sci | 032316bc6c7798fca6c82de24167c975f237687f | matplotlib | _base.py | 12 | 11 | https://github.com/matplotlib/matplotlib.git | 4 | 81 | 0 | 33 | 130 | Python | {
"docstring": "\n Set the current image.\n\n This image will be the target of colormap functions like\n ``pyplot.viridis``, and other functions such as `~.pyplot.clim`. The\n current image is an attribute of the current Axes.\n ",
"language": "en",
"n_whitespaces": 68,
"n_wo... | def _sci(self, im):
_api.check_isinstance(
(mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage),
im=im)
if isinstance(im, mpl.contour.ContourSet):
if im.collections[0] not in self._children:
raise ValueError("ContourSet must be in curr... | |
36,665 | 156,512 | 26 | dask/typing.py | 12 | 5 | def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]:
raise NotImplementedError("Inheriting c | Collection Protocol (#8674)
[PEP 544](https://www.python.org/dev/peps/pep-0544/) introduces the `Protocol` class to the `typing` module in Python 3.8 (the soon be the minimum supported version, https://github.com/dask/community/issues/213). Writing new Dask collections for [dask-awkward](https://github.com/ContinuumIO... | __dask_postpersist__ | 1e783d9a714160e968936cb22d54d085959ab09e | dask | typing.py | 8 | 21 | https://github.com/dask/dask.git | 1 | 18 | 0 | 12 | 32 | Python | {
"docstring": "Rebuilder function and optional arguments to contruct a persisted collection.\n\n Returns\n -------\n PostPersistCallable\n Callable that rebuilds the collection. The signature\n should be\n ``rebuild(dsk: Mapping, *args: Any, rename: Mapping[str, ... | def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]:
raise NotImplementedError("Inheriting class must implement this method.")
| |
13,627 | 64,407 | 21 | erpnext/patches/v4_2/repost_reserved_qty.py | 32 | 11 | def execute():
for doctype in ("Sales Order Item", "Bin | fix: avoid creating bins without item-wh
Co-Authored-By: Shadrak Gurupnor <30501401+shadrak98@users.noreply.github.com>
Co-Authored-By: Saurabh <saurabh6790@gmail.com> | execute | c36bd7e1a6fe48c5fff4765e843571a0d6560dd1 | erpnext | repost_reserved_qty.py | 13 | 30 | https://github.com/frappe/erpnext.git | 5 | 70 | 0 | 29 | 118 | Python | {
"docstring": "\n\t\tselect\n\t\t\tdistinct item_code, warehouse\n\t\tfrom\n\t\t\t(\n\t\t\t\t(\n\t\t\t\t\tselect distinct item_code, warehouse\n\t\t\t\t\t\t\t\tfrom `tabSales Order Item` where docstatus=1\n\t\t\t\t) UNION (\n\t\t\t\t\tselect distinct item_code, warehouse\n\t\t\t\t\tfrom `tabPacked Item` where docsta... | def execute():
for doctype in ("Sales Order Item", "Bin"):
frappe.reload_doctype(doctype)
repost_for = frappe.db.sql()
for item_code, warehouse in repost_for:
if not (item_code and warehouse):
continue
update_bin_qty(item_code, warehouse, {
"reserved_qty": get_reserved_qty(item_code, warehouse)
})
... | |
5,291 | 30,043 | 85 | saleor/account/migrations/0071_group.py | 26 | 12 | def rename_group_tables_reverse(apps, schema_editor):
Group = apps.get_model("auth", "Group")
schema_editor.alter_db_table(
Group,
"account_group",
"auth_group",
)
PermissionGroup = Group.permissions.through
schema_editor.alter_db_table(
PermissionGroup,
"acco... | Drop Djanog Auth | rename_group_tables_reverse | 72c120ae8eeb34e5a3f9840fb1ab1de1fca52fb5 | saleor | 0071_group.py | 9 | 13 | https://github.com/saleor/saleor.git | 1 | 46 | 0 | 20 | 100 | Python | {
"docstring": "\nALTER TABLE account_group RENAME CONSTRAINT account_group_pkey\n TO auth_group_pkey;\n\nALTER TABLE account_group RENAME CONSTRAINT account_group_name_key\n TO auth_group_name_key;\n\nALTER INDEX IF EXISTS account_group_name_034e9f3f_like\n RENAME TO auth_group_name_a6ea08ec_like;\n\nALTER ... | def rename_group_tables_reverse(apps, schema_editor):
Group = apps.get_model("auth", "Group")
schema_editor.alter_db_table(
Group,
"account_group",
"auth_group",
)
PermissionGroup = Group.permissions.through
schema_editor.alter_db_table(
PermissionGroup,
"acco... | |
49,897 | 201,197 | 29 | tests/auth_tests/test_context_processors.py | 8 | 6 | def test_session_is_accessed(self):
| Refs #33476 -- Reformatted code with Black. | test_session_is_accessed | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | test_context_processors.py | 9 | 3 | https://github.com/django/django.git | 1 | 24 | 0 | 8 | 45 | Python | {
"docstring": "\n The session is accessed if the auth context processor\n is used and relevant attributes accessed.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 14
} | def test_session_is_accessed(self):
response = self.client.get("/auth_processor_attr_access/")
self.assertContains(response, "Session accessed")
| |
56,628 | 222,539 | 206 | python3.10.4/Lib/distutils/_msvccompiler.py | 71 | 17 | def _find_vc2017():
root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")
if not root:
return None, None
try:
path = subprocess.check_output([
os.path.join(root, "Micro | add python 3.10.4 for windows | _find_vc2017 | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _msvccompiler.py | 16 | 19 | https://github.com/XX-net/XX-Net.git | 5 | 135 | 0 | 55 | 275 | Python | {
"docstring": "Returns \"15, path\" based on the result of invoking vswhere.exe\n If no install is found, returns \"None, None\"\n\n The version is returned to avoid unnecessarily changing the function\n result. It may be ignored when the path is not None.\n\n If vswhere.exe is not available, by definiti... | def _find_vc2017():
root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")
if not root:
return None, None
try:
path = subprocess.check_output([
os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
"-latest",
... | |
81,493 | 275,868 | 244 | keras/saving/hdf5_format.py | 127 | 21 | def save_attributes_to_hdf5_group(group, name, data):
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
# Expectin... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | save_attributes_to_hdf5_group | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | hdf5_format.py | 13 | 18 | https://github.com/keras-team/keras.git | 8 | 123 | 0 | 88 | 208 | Python | {
"docstring": "Saves attributes (data) of the specified name into the HDF5 group.\n\n This method deals with an inherent problem of HDF5 file which is not\n able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\n Args:\n group: A pointer to a HDF5 group.\n name: A name of the attrib... | def save_attributes_to_hdf5_group(group, name, data):
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
# Expectin... | |
52,639 | 209,197 | 742 | scapy/layers/tls/record.py | 165 | 34 | def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt is not None:
plen = len(_pkt)
if plen >= 2:
byte0, byte1 = struct.unpack("BB", _pkt[:2])
s = kargs.get("tls_ | Update the TLS13 notebook to spec | dispatch_hook | c96fbb8487051e209dfee788eff857e9ca1fed72 | scapy | record.py | 19 | 25 | https://github.com/secdev/scapy.git | 18 | 192 | 0 | 107 | 302 | Python | {
"docstring": "\n If the TLS class was called on raw SSLv2 data, we want to return an\n SSLv2 record instance. We acknowledge the risk of SSLv2 packets with a\n msglen of 0x1403, 0x1503, 0x1603 or 0x1703 which will never be casted\n as SSLv2 records but TLS ones instead, but hey, we can't... | def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt is not None:
plen = len(_pkt)
if plen >= 2:
byte0, byte1 = struct.unpack("BB", _pkt[:2])
s = kargs.get("tls_session", None)
if byte0 not in _tls_type or byte1 != 3: # Unkn... | |
14,164 | 66,292 | 12 | erpnext/hr/utils.py | 20 | 9 | def get_leave_period(from_date, to_date, company):
leave_period = frappe.db.sql(
,
{"from_date": from_date, "to_date": to_date, "company": company},
as_dict=1,
) | style: format code with black | get_leave_period | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | utils.py | 11 | 15 | https://github.com/frappe/erpnext.git | 2 | 43 | 0 | 18 | 69 | Python | {
"docstring": "\n\t\tselect name, from_date, to_date\n\t\tfrom `tabLeave Period`\n\t\twhere company=%(company)s and is_active=1\n\t\t\tand (from_date between %(from_date)s and %(to_date)s\n\t\t\t\tor to_date between %(from_date)s and %(to_date)s\n\t\t\t\tor (from_date < %(from_date)s and to_date > %(to_date)s))\n\t"... | def get_leave_period(from_date, to_date, company):
leave_period = frappe.db.sql(
,
{"from_date": from_date, "to_date": to_date, "company": company},
as_dict=1,
)
if leave_period:
return leave_period
| |
33,065 | 143,826 | 136 | rllib/policy/sample_batch.py | 49 | 22 | def rows(self) -> Iterator[Dict[str, TensorType]]:
# Do we add seq_lens=[1] to each row?
seq_lens = None if self.get(SampleBatch.SEQ_LENS) is None else np.array([1])
self_as_dict = {k: v for k, v in self.items()}
for i in range(self.count):
yield tree.map_structur... | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | rows | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | sample_batch.py | 14 | 27 | https://github.com/ray-project/ray.git | 5 | 95 | 0 | 42 | 144 | Python | {
"docstring": "Returns an iterator over data rows, i.e. dicts with column values.\n\n Note that if `seq_lens` is set in self, we set it to [1] in the rows.\n\n Yields:\n The column values of the row in this iteration.\n\n Examples:\n >>> batch = SampleBatch({\n .... | def rows(self) -> Iterator[Dict[str, TensorType]]:
# Do we add seq_lens=[1] to each row?
seq_lens = None if self.get(SampleBatch.SEQ_LENS) is None else np.array([1])
self_as_dict = {k: v for k, v in self.items()}
for i in range(self.count):
yield tree.map_structur... | |
80,539 | 270,715 | 125 | keras/engine/base_layer.py | 38 | 10 | def _dedup_weights(self, weights):
output, seen_ids = [], set()
for w in weights:
if id(w) not in seen_ids:
output.append(w)
# Track the Variable's identity to avoid __eq__ issues.
seen_ids.add(id(w))
return output
# Save... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _dedup_weights | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | base_layer.py | 13 | 7 | https://github.com/keras-team/keras.git | 3 | 49 | 0 | 35 | 83 | Python | {
"docstring": "Dedupe weights while maintaining order as much as possible.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | def _dedup_weights(self, weights):
output, seen_ids = [], set()
for w in weights:
if id(w) not in seen_ids:
output.append(w)
# Track the Variable's identity to avoid __eq__ issues.
seen_ids.add(id(w))
return output
# Save... | |
45,126 | 185,717 | 95 | src/textual/dom.py | 31 | 11 | def ancestors_with_self(self) -> list[DOMNode]:
nodes: list[MessagePump | None] = []
add_node = nodes.append
node: MessagePump | None = self
while node is not None:
| Don't include self in DOMNode.ancestors any more
As well as dropping `self` from the list that DOMNode.ancestors provides,
this commit also adds DOMNode.ancestors_with_self, which maintains the
previous behaviour of DOMNode.ancestors. | ancestors_with_self | e3130f95c69648916f121e779a325b6f6f87e6ba | textual | dom.py | 9 | 12 | https://github.com/Textualize/textual.git | 2 | 56 | 0 | 26 | 92 | Python | {
"docstring": "list[DOMNode]: A list of Nodes by tracing a path all the way back to App.\n\n Note: This is inclusive of ``self``.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 21,
"vocab_size": 20
} | def ancestors_with_self(self) -> list[DOMNode]:
nodes: list[MessagePump | None] = []
add_node = nodes.append
node: MessagePump | None = self
while node is not None:
add_node(node)
node = node._parent
return cast("list[DOMNode]", nodes)
| |
18,067 | 85,981 | 14,028 | src/sentry/search/events/datasets/metrics.py | 747 | 53 | def function_converter(self) -> Mapping[str, fields.MetricsFunction]:
resolve_metric_id = {
"name": "metric_id",
"fn": lambda args: self.resolve_metric(args["column"]),
}
function_converter = {
function.name: function
for function in [
... | fix(mep): Include the column so its countmerge (#39005)
- This was causing these results to overcount since we werent merging
rows correctly. For the purposes of the endpoint we just needed >0 so it
wasn't as noticeable | function_converter | 0099fe517a2044e70567e969f19bcf3fa3b26122 | sentry | metrics.py | 28 | 548 | https://github.com/getsentry/sentry.git | 6 | 2,133 | 0 | 202 | 3,312 | Python | {
"docstring": "While the final functions in clickhouse must have their -Merge combinators in order to function, we don't\n need to add them here since snuba has a FunctionMapper that will add it for us. Basically it turns expressions\n like quantiles(0.9)(value) into quantilesMerge(0.9)(percentiles)\n ... | def function_converter(self) -> Mapping[str, fields.MetricsFunction]:
resolve_metric_id = {
"name": "metric_id",
"fn": lambda args: self.resolve_metric(args["column"]),
}
function_converter = {
function.name: function
for function in [
... | |
42,682 | 178,391 | 2,042 | nuitka/freezer/Standalone.py | 477 | 66 | def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points):
# This is terribly complex, because we check the list of used DLLs
# trying to avoid duplicates, and detecting errors with them not
# being binary identical, so we can report them. And then of course
# we also need to handle OS specifics.
... | UI: In case of PermissionError, allow uses to retry
* Esp. on Windows it happens a lot that running programs cannot be
updated by Nuitka, this avoids the cryptic error somewhere ranomly. | copyUsedDLLs | 2c20b90946a8aa5ad4ee39ad365ff1b83f182770 | Nuitka | Standalone.py | 19 | 125 | https://github.com/Nuitka/Nuitka.git | 34 | 660 | 0 | 262 | 1,077 | Python | {
"docstring": "Colliding DLL names for %s, checking identity of \\\n'%s' <-> '%s'.\\\nIgnoring non-identical DLLs for '%s'.\n%s used by:\n %s\ndifferent from\n%s used by\n %s",
"language": "en",
"n_whitespaces": 25,
"n_words": 27,
"vocab_size": 22
} | def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points):
# This is terribly complex, because we check the list of used DLLs
# trying to avoid duplicates, and detecting errors with them not
# being binary identical, so we can report them. And then of course
# we also need to handle OS specifics.
... | |
48,671 | 197,718 | 275 | sympy/integrals/transforms.py | 81 | 30 | def _laplace_rule_diff(f, t, s, doit=True, **hints):
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
n = Wild('n', exclude=[t])
g = WildFunction('g', nargs=1)
ma1 = f.match(a*Derivative(g, (t, n)))
if ma1 and ma1[g].args[0] == t and ma1[n].is_integer:
debug(... | include the coefficient in L(A*x') | _laplace_rule_diff | 392c40aceadd4c7cdeed0fceb93a763927dc0ca1 | sympy | transforms.py | 20 | 22 | https://github.com/sympy/sympy.git | 6 | 258 | 0 | 61 | 404 | Python | {
"docstring": "\n This internal helper function tries to transform an expression containing\n a derivative of an undefined function and returns `None` if it cannot\n do it.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 24,
"vocab_size": 22
} | def _laplace_rule_diff(f, t, s, doit=True, **hints):
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
n = Wild('n', exclude=[t])
g = WildFunction('g', nargs=1)
ma1 = f.match(a*Derivative(g, (t, n)))
if ma1 and ma1[g].args[0] == t and ma1[n].is_integer:
debug(... | |
17,193 | 81,376 | 752 | awx/sso/pipeline.py | 374 | 25 | def _check_flag(user, flag, attributes, user_flags_settings):
new_flag = False
is_role_key = "is_%s_role" % (flag)
is_attr_key = "is_%s_attr" % (flag)
is_value_key = "is_%s_value" % (flag)
remove_setting = "remove_%ss" % (flag)
# Check to see if we are respecting a role and, if so, does ou... | Allow multiple values in SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR.is_*_[value|role] settings (#12558) | _check_flag | 782667a34ee45bfe825b29db39c67d4465391bdb | awx | pipeline.py | 19 | 40 | https://github.com/ansible/awx.git | 12 | 339 | 0 | 171 | 561 | Python | {
"docstring": "\n Helper function to set the is_superuser is_system_auditor flags for the SAML adapter\n Returns the new flag and whether or not it changed the flag\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 24,
"vocab_size": 20
} | def _check_flag(user, flag, attributes, user_flags_settings):
new_flag = False
is_role_key = "is_%s_role" % (flag)
is_attr_key = "is_%s_attr" % (flag)
is_value_key = "is_%s_value" % (flag)
remove_setting = "remove_%ss" % (flag)
# Check to see if we are respecting a role and, if so, does ou... | |
72,983 | 249,543 | 108 | tests/storage/test_event_federation.py | 30 | 12 | def test_get_backfill_points_in_room(self):
setup_info = self._setup_room_for_backfill_tests()
room_id = setup_info.room_id
backfill_points = self.get_success(
self.store.get_backfill_points_in_room(room_id)
)
backfill_event_ids = [backfill_point[0] for back... | Only try to backfill event if we haven't tried before recently (#13635)
Only try to backfill event if we haven't tried before recently (exponential backoff). No need to keep trying the same backfill point that fails over and over.
Fix https://github.com/matrix-org/synapse/issues/13622
Fix https://github.com/matrix... | test_get_backfill_points_in_room | ac1a31740b6d0dfda4d57a25762aaddfde981caf | synapse | test_event_federation.py | 11 | 10 | https://github.com/matrix-org/synapse.git | 2 | 67 | 0 | 26 | 115 | Python | {
"docstring": "\n Test to make sure we get some backfill points\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def test_get_backfill_points_in_room(self):
setup_info = self._setup_room_for_backfill_tests()
room_id = setup_info.room_id
backfill_points = self.get_success(
self.store.get_backfill_points_in_room(room_id)
)
backfill_event_ids = [backfill_point[0] for back... |