ast_errors stringlengths 0 3.2k | d_id int64 44 121k | id int64 70 338k | n_whitespaces int64 3 14k | path stringlengths 8 134 | n_words int64 4 4.82k | n_identifiers int64 1 131 | random_cut stringlengths 16 15.8k | commit_message stringlengths 2 15.3k | fun_name stringlengths 1 84 | commit_id stringlengths 40 40 | repo stringlengths 3 28 | file_name stringlengths 5 79 | ast_levels int64 6 31 | nloc int64 1 548 | url stringlengths 31 59 | complexity int64 1 66 | token_counts int64 6 2.13k | n_ast_errors int64 0 28 | vocab_size int64 4 1.11k | n_ast_nodes int64 15 19.2k | language stringclasses 1 value | documentation dict | code stringlengths 101 62.2k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
34,338 | 148,805 | 205 | freqtrade/exchange/exchange.py | 56 | 18 | def reload_markets(self) -> None:
# Check whether markets have to be reloaded
if (self._last_markets_refresh > 0) and (
self._last_markets_refresh + self.markets_refresh_interval
> arrow.utcnow().int_timestamp):
return None
logger.debug("Perfo... | replaced "leverage" with "tiers" | reload_markets | 42e36f44f8a91a79a8ffa14698542f38df39cb50 | freqtrade | exchange.py | 11 | 14 | https://github.com/freqtrade/freqtrade.git | 4 | 94 | 0 | 49 | 161 | Python | {
"docstring": "Reload markets both sync and async if refresh interval has passed ",
"language": "en",
"n_whitespaces": 11,
"n_words": 11,
"vocab_size": 11
} | def reload_markets(self) -> None:
# Check whether markets have to be reloaded
if (self._last_markets_refresh > 0) and (
self._last_markets_refresh + self.markets_refresh_interval
> arrow.utcnow().int_timestamp):
return None
logger.debug("Perfo... | |
47,852 | 196,352 | 43 | sympy/matrices/common.py | 12 | 6 | def is_strongly_diagonally_dominant(self):
r
| Moved imports to higher level | is_strongly_diagonally_dominant | 59d22b6bb7287613d598611027f640d068ca5748 | sympy | common.py | 7 | 40 | https://github.com/sympy/sympy.git | 3 | 39 | 0 | 12 | 37 | Python | {
"docstring": "Tests if the matrix is row strongly diagonally dominant.\n\n Explanation\n ===========\n\n A $n, n$ matrix $A$ is row strongly diagonally dominant if\n\n .. math::\n \\left|A_{i, i}\\right| > \\sum_{j = 0, j \\neq i}^{n-1}\n \\left|A_{i, j}\\right| \\q... | def is_strongly_diagonally_dominant(self):
r
if not self.is_square:
return False
rows, cols = self.shape
| |
1,111 | 7,060 | 58 | ludwig/features/binary_feature.py | 15 | 12 | def create_calibration_module(self, feature) -> torch.nn.Module:
if feature.get("calibration"):
calibration_cls = calibration.get_calibration_cls(BINARY, "temperature_scaling")
return calibration_cls(binary=True)
return None
| Adds mechanism for calibrating probabilities for category and binary features (#1949)
* Started adding files for calibration implementation.
* Adds option to return logits and labels in predictor.
* Pre-commit fixes
* First pass temperature scaling working.
* Fixes calibration for categorical feature.
*... | create_calibration_module | e65f74e87e8e29922f4e9f9d839978ffb2c5b029 | ludwig | binary_feature.py | 11 | 10 | https://github.com/ludwig-ai/ludwig.git | 2 | 41 | 0 | 14 | 70 | Python | {
"docstring": "Creates the appropriate calibration module based on the feature config.\n\n Today, only one type of calibration (\"temperature_scaling\") is available, but more options may be supported in\n the future.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 28,
"vocab_size... | def create_calibration_module(self, feature) -> torch.nn.Module:
if feature.get("calibration"):
calibration_cls = calibration.get_calibration_cls(BINARY, "temperature_scaling")
return calibration_cls(binary=True)
return None
| |
52,846 | 210,002 | 616 | ppdet/utils/download.py | 143 | 42 | def _download(url, path, md5sum=None):
if not osp.exists(path):
os.makedir | fix download.py (#5159) | _download | 1dcec15b6600df750d680e97e89117fcb8eb84a0 | PaddleDetection | download.py | 19 | 35 | https://github.com/PaddlePaddle/PaddleDetection.git | 11 | 256 | 0 | 103 | 435 | Python | {
"docstring": "\n Download from url, save to path.\n\n url (str): download url\n path (str): download to given path\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 16,
"vocab_size": 11
} | def _download(url, path, md5sum=None):
if not osp.exists(path):
os.makedirs(path)
fname = osp.split(url)[-1]
fullname = osp.join(path, fname)
retry_cnt = 0
while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum,
... | |
51,172 | 205,715 | 149 | django/db/models/options.py | 25 | 12 | def related_objects(self):
all_related_fields = self._get_fields(
forward=False, reverse=True, include_hidden=True
)
return make_immutable_fields_list(
"related_objects",
(
obj
for obj in | Refs #33476 -- Reformatted code with Black. | related_objects | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | options.py | 12 | 12 | https://github.com/django/django.git | 4 | 49 | 0 | 22 | 77 | Python | {
"docstring": "\n Return all related objects pointing to the current model. The related\n objects can come from a one-to-one, one-to-many, or many-to-many field\n relation type.\n\n Private API intended only to be used by Django itself; get_fields()\n combined with filtering of fie... | def related_objects(self):
all_related_fields = self._get_fields(
forward=False, reverse=True, include_hidden=True
)
return make_immutable_fields_list(
"related_objects",
(
obj
for obj in all_related_fields
... | |
50,121 | 202,419 | 88 | tests/csrf_tests/tests.py | 25 | 13 | def test_https_good_referer(self):
req = self._get_POST_request_with_token()
| Refs #33476 -- Reformatted code with Black. | test_https_good_referer | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 9 | 9 | https://github.com/django/django.git | 1 | 68 | 0 | 20 | 118 | Python | {
"docstring": "\n A POST HTTPS request with a good referer is accepted.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def test_https_good_referer(self):
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_REFERER"] = "https://www.example.com/somepage"
mw = CsrfViewMiddleware(post_form_view)
mw.process_... | |
71,467 | 247,057 | 133 | tests/rest/client/test_retention.py | 40 | 16 | def test_state_policy(self) -> None:
room_id = self.helper.create_room_as(self.user_id, tok=self.token)
# Set the maximum lifetime to 35 days so that the first event gets expired but not
# the second one.
self.helper.send_state(
room_id=room_id,
event_ty... | Add type hints to `tests/rest/client` (#12084) | test_state_policy | 1901cb1d4a8b7d9af64493fbd336e9aa2561c20c | synapse | test_retention.py | 11 | 12 | https://github.com/matrix-org/synapse.git | 1 | 69 | 0 | 37 | 109 | Python | {
"docstring": "Tests that an event gets correctly expired if there is no default retention\n policy but there's a policy specific to the room.\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 22,
"vocab_size": 21
} | def test_state_policy(self) -> None:
room_id = self.helper.create_room_as(self.user_id, tok=self.token)
# Set the maximum lifetime to 35 days so that the first event gets expired but not
# the second one.
self.helper.send_state(
room_id=room_id,
event_ty... | |
4,382 | 22,638 | 126 | insertion_sort.py | 53 | 7 | def insertion_sort(list, n):
for i in range(0, n):
key = list[i]
j = i - 1
        # Swap elements with key iff they are
# greater than | refactor: clean code
Signed-off-by: slowy07 <slowy.arfy@gmail.com> | insertion_sort | f0af0c43340763724f139fa68aa1e5a9ffe458b4 | Python | insertion_sort.py | 12 | 9 | https://github.com/geekcomputers/Python.git | 4 | 67 | 0 | 34 | 105 | Python | {
"docstring": "\n sort list in assending order\n\n INPUT:\n list=list of values to be sorted\n n=size of list that contains values to be sorted\n\n OUTPUT:\n list of sorted values in assending order\n ",
"language": "en",
"n_whitespaces": 63,
"n_words": 29,
"vocab_size": 16
} | def insertion_sort(list, n):
for i in range(0, n):
key = list[i]
j = i - 1
        # Swap elements with key iff they are
# greater than key
while j >= 0 and list[j] > key:
list[j + 1] = list[j]
j = j - 1
list[j + 1] = key
return list
| |
42,922 | 179,232 | 21 | gradio/component.py | 7 | 6 | def get_template_context(self):
return {"name": self.__class__.__name__.lower(), | Format The Codebase
- black formatting
- isort formatting | get_template_context | cc0cff893f9d7d472788adc2510c123967b384fe | gradio | component.py | 11 | 2 | https://github.com/gradio-app/gradio.git | 1 | 26 | 0 | 7 | 47 | Python | {
"docstring": "\n :return: a dictionary with context variables for the javascript file associated with the context\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 11
} | def get_template_context(self):
return {"name": self.__class__.__name__.lower(), "label": self.label}
| |
25,824 | 116,751 | 92 | mindsdb/integrations/handlers/teradata_handler/teradata_handler.py | 21 | 6 | def connect(self):
if self.is_connected is True:
r | feat: add teradata integration | connect | 47c5e0ac2d89807f8ff7239d423a3d346bd39a1e | mindsdb | teradata_handler.py | 10 | 9 | https://github.com/mindsdb/mindsdb.git | 2 | 42 | 0 | 14 | 70 | Python | {
"docstring": "\n Handles the connection to a Teradata database insance.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | def connect(self):
if self.is_connected is True:
return self.connection
connection = teradatasql.connect(
**self.connection_data
)
self.is_connected = True
self.connection = connection
return self.connection
| |
46,963 | 194,429 | 59 | kivy/core/window/__init__.py | 13 | 10 | def unregister_event_manager(self, manager):
self.event_managers.remove(manager)
for type_id in manager.type_ids:
self.event_managers_dict[type_id].remove(manager)
m | Feature: EventManagerBase (#7658)
* Added EventManagerBase class and event_managers attribute to WindowBase class.
* Added on_motion event to Widget class.
* Updated post_dispatch_input in EventLoopBase to skip non-touch events.
* Using type ids in MouseMotionEventProvider.
* Added on_motion method to Widget subcl... | unregister_event_manager | 1830123ba3edf7290b7c6cb1c6f406ccf1d0e5d4 | kivy | __init__.py | 11 | 6 | https://github.com/kivy/kivy.git | 2 | 44 | 0 | 13 | 72 | Python | {
"docstring": "Unregister and stop an event manager previously registered with\n :meth:`register_event_manager`.\n\n .. versionadded:: 2.1.0\n\n .. warning::\n This is an experimental method and it remains so until this warning\n is present as it can be changed or removed i... | def unregister_event_manager(self, manager):
self.event_managers.remove(manager)
for type_id in manager.type_ids:
self.event_managers_dict[type_id].remove(manager)
manager.stop()
manager.window = None
| |
54,816 | 217,481 | 306 | python3.10.4/Lib/functools.py | 132 | 24 | def _c3_mro(cls, abcs=None):
for i, base in enumerate(reversed(cls.__bases__)):
if hasattr(base, '__abstractmethods__'):
boundary = len(cls.__bases__) - i
break # Bases up to the last explicit ABC are considered first.
else:
boundary = 0
abcs = list(abcs) if ab... | add python 3.10.4 for windows | _c3_mro | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | functools.py | 15 | 26 | https://github.com/XX-net/XX-Net.git | 12 | 210 | 0 | 83 | 328 | Python | {
"docstring": "Computes the method resolution order using extended C3 linearization.\n\n If no *abcs* are given, the algorithm works exactly like the built-in C3\n linearization used for method resolution.\n\n If given, *abcs* is a list of abstract base classes that should be inserted\n into the resultin... | def _c3_mro(cls, abcs=None):
for i, base in enumerate(reversed(cls.__bases__)):
if hasattr(base, '__abstractmethods__'):
boundary = len(cls.__bases__) - i
break # Bases up to the last explicit ABC are considered first.
else:
boundary = 0
abcs = list(abcs) if ab... | |
107,548 | 308,815 | 29 | homeassistant/components/nissan_leaf/__init__.py | 8 | 7 | async def async_start_charging(self) -> None:
awai | Add button to start leaf charge (#62948)
Co-authored-by: Bruce Duncan <bwduncan@gmail.com> | async_start_charging | 10027b20904b678d8baecbc6e72c5bcc3f4f24b2 | core | __init__.py | 10 | 4 | https://github.com/home-assistant/core.git | 1 | 26 | 0 | 8 | 47 | Python | {
"docstring": "Request to start charging the car. Used by the button platform.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | async def async_start_charging(self) -> None:
await self.hass.async_add_executor_job(self.leaf.start_charging)
self.schedule_update()
| |
73,835 | 251,831 | 25 | test/mitmproxy/proxy/layers/http/test_http.py | 13 | 13 | def test_multiple_server_connections(tctx):
server1 = Placeholder(Server)
server2 = Placehold | make it black! | test_multiple_server_connections | b3587b52b25077f68116b9852b041d33e7fc6601 | mitmproxy | test_http.py | 11 | 35 | https://github.com/mitmproxy/mitmproxy.git | 1 | 219 | 0 | 10 | 61 | Python | {
"docstring": "Test multiple requests being rewritten to different targets.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def test_multiple_server_connections(tctx):
server1 = Placeholder(Server)
server2 = Placeholder(Server)
playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
| |
74,744 | 255,404 | 34 | onnx/test/compose_test.py | 9 | 5 | def test_overlapping_output_names(self) -> None:
self._test_overlapping_names(
outputs0=['o0', 'o1'], outputs1=['o1', 'o2' | Use Python type annotations rather than comments (#3962)
* These have been supported since Python 3.5.
ONNX doesn't support Python < 3.6, so we can use the annotations.
Diffs generated by https://pypi.org/project/com2ann/.
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* Remove MYPY conditional logi... | test_overlapping_output_names | 83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd | onnx | compose_test.py | 10 | 6 | https://github.com/onnx/onnx.git | 1 | 28 | 0 | 9 | 52 | Python | {
"docstring": "\n Tests error checking when the name of the output overlaps\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 9
} | def test_overlapping_output_names(self) -> None:
self._test_overlapping_names(
outputs0=['o0', 'o1'], outputs1=['o1', 'o2'])
| |
11,096 | 54,559 | 30 | tests/test_settings.py | 9 | 5 | def test_write_profiles_does_not_include_default(self, temporary_profiles_path):
write_profiles({})
assert "profiles.default" not in temporary_profiles_path.read_text()
| Tests passing | test_write_profiles_does_not_include_default | 1dd7561062328e96594bbf60a6d15f49163c9d87 | prefect | test_settings.py | 8 | 3 | https://github.com/PrefectHQ/prefect.git | 1 | 22 | 0 | 9 | 40 | Python | {
"docstring": "\n Including the default has a tendency to bake in settings the user may not want, and\n can prevent them from gaining new defaults.\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 23,
"vocab_size": 22
} | def test_write_profiles_does_not_include_default(self, temporary_profiles_path):
write_profiles({})
assert "profiles.default" not in temporary_profiles_path.read_text()
| |
49,459 | 199,970 | 69 | sympy/physics/optics/polarization.py | 15 | 9 | def phase_retarder(theta=0, delta=0):
R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2,
(1-exp(I*delta))*cos(theta)*sin(theta)],
[(1-exp(I*delta))*cos(theta)*sin(theta),
sin(theta)**2 + exp(I*delta)*cos(theta)**2]])
return R*exp(-I*delta/2)
| removed backticks around variable names in docs according to PR review | phase_retarder | ae2baaa0bbcd42792bb2e7887ca61b97abc40463 | sympy | polarization.py | 17 | 6 | https://github.com/sympy/sympy.git | 1 | 118 | 0 | 14 | 185 | Python | {
"docstring": "A phase retarder Jones matrix with retardance `delta` at angle `theta`.\n\n Parameters\n ==========\n\n theta : numeric type or SymPy Symbol\n The angle of the fast axis relative to the horizontal plane.\n delta : numeric type or SymPy Symbol\n The phase difference between th... | def phase_retarder(theta=0, delta=0):
R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2,
(1-exp(I*delta))*cos(theta)*sin(theta)],
[(1-exp(I*delta))*cos(theta)*sin(theta),
sin(theta)**2 + exp(I*delta)*cos(theta)**2]])
return R*exp(-I*delta/2)
| |
42,804 | 178,724 | 214 | nuitka/utils/Execution.py | 90 | 20 | def wrapCommandForDebuggerForExec(*args):
gdb_path = getExecutablePath("gdb")
# Windows extra ball, attempt the downloaded one.
if isWin32Windows() and gdb_path is None:
from nuitka.Options import assumeYesForDownloads
mingw64_gcc_path = getCachedDownloadedMinGW64(
target... | Windows: Make running in debugger work with cmd files as well | wrapCommandForDebuggerForExec | 98badaaafd4e56529378947358acae489035fa1e | Nuitka | Execution.py | 14 | 19 | https://github.com/Nuitka/Nuitka.git | 7 | 142 | 0 | 60 | 254 | Python | {
"docstring": "Wrap a command for system debugger to call exec\n\n Args:\n args: (list of str) args for call to be debugged\n Returns:\n args tuple with debugger command inserted\n\n Notes:\n Currently only gdb and lldb are supported, but adding more\n debuggers would be very wel... | def wrapCommandForDebuggerForExec(*args):
gdb_path = getExecutablePath("gdb")
# Windows extra ball, attempt the downloaded one.
if isWin32Windows() and gdb_path is None:
from nuitka.Options import assumeYesForDownloads
mingw64_gcc_path = getCachedDownloadedMinGW64(
target... | |
51,944 | 207,377 | 54 | tests/admin_scripts/tests.py | 19 | 10 | def test_commands_with_invalid_settings(self):
args = ["startproject"]
out, err = self.run_django_admin(args, settings_file="bad_settings")
self.assertNoOutput(out)
| Refs #33476 -- Reformatted code with Black. | test_commands_with_invalid_settings | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 10 | 5 | https://github.com/django/django.git | 1 | 43 | 0 | 18 | 75 | Python | {
"docstring": "\n Commands that don't require settings succeed if the settings file\n doesn't exist.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 12,
"vocab_size": 11
} | def test_commands_with_invalid_settings(self):
args = ["startproject"]
out, err = self.run_django_admin(args, settings_file="bad_settings")
self.assertNoOutput(out)
self.assertOutput(err, "You must provide a project name", regex=True)
| |
56,344 | 221,321 | 216 | python3.10.4/Lib/chunk.py | 67 | 12 | def read(self, size=-1):
if self.closed:
raise ValueError("I/O operation on closed file")
if self.size_read >= self.chunksize:
| add python 3.10.4 for windows | read | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | chunk.py | 11 | 17 | https://github.com/XX-net/XX-Net.git | 8 | 136 | 0 | 38 | 215 | Python | {
"docstring": "Read at most size bytes from the chunk.\n If size is omitted or negative, read until the end\n of the chunk.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 21,
"vocab_size": 17
} | def read(self, size=-1):
if self.closed:
raise ValueError("I/O operation on closed file")
if self.size_read >= self.chunksize:
return b''
if size < 0:
size = self.chunksize - self.size_read
if size > self.chunksize - self.size_read:
... | |
36,529 | 156,064 | 193 | dask/array/slicing.py | 99 | 29 | def slicing_plan(chunks, index):
from dask.array.utils import asarray_safe
if not is_arraylike(index):
index = np.asanyarray(index)
cum_chunks = cached_cumsum(chunks)
cum_chunks = asarray_safe(cum_chunks, like=index)
# this dispactches to the array library
chunk_locations = np.sea... | absolufy-imports - No relative - PEP8 (#8796)
Conversation in https://github.com/dask/distributed/issues/5889 | slicing_plan | cccb9d8d8e33a891396b1275c2448c352ef40c27 | dask | slicing.py | 13 | 20 | https://github.com/dask/dask.git | 4 | 196 | 0 | 72 | 305 | Python | {
"docstring": "Construct a plan to slice chunks with the given index\n\n Parameters\n ----------\n chunks : Tuple[int]\n One dimensions worth of chunking information\n index : np.ndarray[int]\n The index passed to slice on that dimension\n\n Returns\n -------\n out : List[Tuple[int... | def slicing_plan(chunks, index):
from dask.array.utils import asarray_safe
if not is_arraylike(index):
index = np.asanyarray(index)
cum_chunks = cached_cumsum(chunks)
cum_chunks = asarray_safe(cum_chunks, like=index)
# this dispactches to the array library
chunk_locations = np.sea... | |
70,662 | 245,112 | 151 | mmdet/models/roi_heads/bbox_heads/double_bbox_head.py | 21 | 14 | def _add_conv_branch(self) -> None:
branch_convs = ModuleList()
for i in range(self.num_convs):
branch_convs.append(
Bottleneck(
inplanes=self.conv_out_channels,
planes=self.conv_out_channels // 4,
conv_cfg=... | Refactor Double Head, MS, Dynamic, Trident. | _add_conv_branch | cd4e9ed8269b0c767e129169b7268b0ced7e60c9 | mmdetection | double_bbox_head.py | 14 | 11 | https://github.com/open-mmlab/mmdetection.git | 2 | 56 | 0 | 20 | 91 | Python | {
"docstring": "Add the fc branch which consists of a sequential of conv layers.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def _add_conv_branch(self) -> None:
branch_convs = ModuleList()
for i in range(self.num_convs):
branch_convs.append(
Bottleneck(
inplanes=self.conv_out_channels,
planes=self.conv_out_channels // 4,
conv_cfg=... | |
34,223 | 148,287 | 22 | python/ray/_private/thirdparty/pathspec/util.py | 33 | 15 | def match_files(patterns, files):
all_files = files if isinstance(files, Collection) else list(files)
return_files = set()
for pattern in patterns:
if pattern.include is not None:
result_files = pattern.match(all_files)
if pattern.include:
return | [Bugfix] fix invalid excluding of Black (#24042)
- We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options
- Recover the files in `python/ray/_private/thirdparty` which has been form... | match_files | 0e6c042e29cbbe429d81c9c1af3c75c261f00980 | ray | util.py | 14 | 11 | https://github.com/ray-project/ray.git | 5 | 70 | 0 | 28 | 113 | Python | {
"docstring": "\n\tMatches the files to the patterns.\n\n\t*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)\n\tcontains the patterns to use.\n\n\t*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains\n\tthe normalized file paths to be matched against *patterns*.\n... | def match_files(patterns, files):
all_files = files if isinstance(files, Collection) else list(files)
return_files = set()
for pattern in patterns:
if pattern.include is not None:
result_files = pattern.match(all_files)
if pattern.include:
return_files.update(result_files)
else:
return_files.dif... | |
39,770 | 166,205 | 490 | pandas/core/exchange/column.py | 130 | 33 | def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]:
if self.dtype[0] == DtypeKind.STRING:
# For each string, we need to manually determine the next offset
values = self._col.to_numpy()
ptr = 0
offsets = np.zeros(shape=(len(values) + 1,), dtype=np.i... | ENH: Implement DataFrame interchange protocol (#46141) | _get_offsets_buffer | 90140f055892a46f473bd26affab88a7f171e394 | pandas | column.py | 16 | 29 | https://github.com/pandas-dev/pandas.git | 4 | 139 | 0 | 97 | 228 | Python | {
"docstring": "\n Return the buffer containing the offset values for variable-size binary\n data (e.g., variable-length strings) and the buffer's associated dtype.\n Raises NoBufferPresent if the data buffer does not have an associated\n offsets buffer.\n ",
"language": "en",
"... | def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]:
if self.dtype[0] == DtypeKind.STRING:
# For each string, we need to manually determine the next offset
values = self._col.to_numpy()
ptr = 0
offsets = np.zeros(shape=(len(values) + 1,), dtype=np.i... | |
102,039 | 303,211 | 46 | homeassistant/helpers/update_coordinator.py | 10 | 3 | def _unschedule_refresh(self) -> None:
if self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
| Keep track of a context for each listener (#72702)
* Remove async_remove_listener
This avoids the ambiguity as to what happens if same callback is added multiple times.
* Keep track of a context for each listener
This allow a update coordinator to adapt what data to request on update from the backing service ... | _unschedule_refresh | 8910d265d6cf15fed4e6e98b4344031019c1016d | core | update_coordinator.py | 9 | 5 | https://github.com/home-assistant/core.git | 2 | 23 | 0 | 10 | 41 | Python | {
"docstring": "Unschedule any pending refresh since there is no longer any listeners.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | def _unschedule_refresh(self) -> None:
if self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
| |
49,987 | 201,749 | 448 | tests/backends/postgresql/tests.py | 119 | 18 | def test_connect_and_rollback(self):
new_connection = connection.copy()
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
with new_co... | Refs #33476 -- Reformatted code with Black. | test_connect_and_rollback | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 15 | 19 | https://github.com/django/django.git | 3 | 125 | 0 | 79 | 237 | Python | {
"docstring": "\n PostgreSQL shouldn't roll back SET TIME ZONE, even if the first\n transaction is rolled back (#17062).\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 15
} | def test_connect_and_rollback(self):
new_connection = connection.copy()
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
with new_co... | |
30,664 | 135,585 | 366 | python/ray/data/tests/test_dataset_tfrecords.py | 79 | 11 | def test_readback_tfrecords(ray_start_regular_shared, tmp_path):
# The dataset we will write to a .tfrecords file.
ds = ray.data.from_items(
[
# Row one.
{
"int_item": 1,
"int_list": [2, 2, 3],
"float_item": 1.0,
... | [Datasets] Add writer for TFRecords. (#29448)
This PR enables users to write TFRecords from datasets.
In particular, the master branch already includes an API for reading TFRecords from datasets. Users have requested the ability to write these datasets back to TFRecords. | test_readback_tfrecords | 9fab504fe776f96fecf85e12ea006264cbe92f4a | ray | test_dataset_tfrecords.py | 13 | 24 | https://github.com/ray-project/ray.git | 1 | 155 | 0 | 59 | 226 | Python | {
"docstring": "\n Test reading back TFRecords written using datasets.\n The dataset we read back should be the same that we wrote.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 19,
"vocab_size": 17
} | def test_readback_tfrecords(ray_start_regular_shared, tmp_path):
# The dataset we will write to a .tfrecords file.
ds = ray.data.from_items(
[
# Row one.
{
"int_item": 1,
"int_list": [2, 2, 3],
"float_item": 1.0,
... | |
83,014 | 279,493 | 484 | keras/layers/rnn/legacy_cells.py | 79 | 27 | def call(self, inputs, state):
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with tf.compat.v1.variable_scope("cell_%d" % i):
if self._state_is_tuple:
if not tf.nest.is_nested(state):
... | Add f-string format and lint with flynt on the whole codebase | call | be73ac1a1e25d9abd4d793cba9707098d7adf231 | keras | legacy_cells.py | 21 | 27 | https://github.com/keras-team/keras.git | 5 | 148 | 0 | 64 | 252 | Python | {
"docstring": "Run this multi-layer cell on inputs, starting from state.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def call(self, inputs, state):
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with tf.compat.v1.variable_scope("cell_%d" % i):
if self._state_is_tuple:
if not tf.nest.is_nested(state):
... | |
54,497 | 216,279 | 76 | tests/pytests/functional/transport/server/test_req_channel.py | 29 | 8 | def test_basic(push_channel):
msgs = [
{"foo": "bar"},
{"bar": "baz"},
{"baz": "qux", "list": [1, 2, 3]},
| Fix minion unit tests, specifically .../tests/pytests/test_minion.py | test_basic | 3c7e1ec1f08abd7cd1ba78ad7880acb6ba6fdce7 | salt | test_req_channel.py | 11 | 9 | https://github.com/saltstack/salt.git | 2 | 66 | 0 | 27 | 112 | Python | {
"docstring": "\n Test a variety of messages, make sure we get the expected responses\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 12,
"vocab_size": 12
} | def test_basic(push_channel):
msgs = [
{"foo": "bar"},
{"bar": "baz"},
{"baz": "qux", "list": [1, 2, 3]},
]
for msg in msgs:
ret = push_channel.send(msg, timeout=5, tries=1)
assert ret["load"] == msg
| |
72,034 | 247,975 | 197 | tests/module_api/test_account_data_manager.py | 36 | 10 | def test_put_global(self) -> None:
self.get_success(
self._module_api.account_data_manager.put_global(
self.user_id, "test.data", {"wombat": True}
)
)
# Request that account data from the normal store; check it's as we expect.
self.asser... | Add Module API for reading and writing global account data. (#12391) | test_put_global | 85ca963c1add5ca12f59238a50dfc63df4846bb7 | synapse | test_account_data_manager.py | 12 | 17 | https://github.com/matrix-org/synapse.git | 1 | 62 | 0 | 30 | 105 | Python | {
"docstring": "\n Tests that written account data using `put_global` can be read out again later.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 13
} | def test_put_global(self) -> None:
self.get_success(
self._module_api.account_data_manager.put_global(
self.user_id, "test.data", {"wombat": True}
)
)
# Request that account data from the normal store; check it's as we expect.
self.asser... | |
75,990 | 259,914 | 24 | build_tools/azure/update_environments_and_lock_files.py | 9 | 7 | def get_conda_environment_content(build_metadata):
template = environment.from_string(
| CI: move Linux and MacOS Azure builds to conda lock files (#22448)
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> | get_conda_environment_content | f862129f36786acbae3d9f2d161bbb72d77b87ec | scikit-learn | update_environments_and_lock_files.py | 11 | 21 | https://github.com/scikit-learn/scikit-learn.git | 1 | 26 | 0 | 9 | 45 | Python | {
"docstring": "\n# DO NOT EDIT: this file is generated from the specification found in the\n# following script to centralize the configuration for all Azure CI builds:\n# build_tools/azure/update_environments_and_lock_files.py\nchannels:\n - {{ build_metadata['channel'] }}\ndependencies:\n {% for conda_dep in buil... | def get_conda_environment_content(build_metadata):
template = environment.from_string(
.strip()
)
return template.render(build_metadata=build_metadata)
| |
5,147 | 27,990 | 360 | saleor/thumbnail/utils.py | 94 | 24 | def preprocess(self, image, image_format):
format = self.format or image_format
save_kwargs = {"format": format}
# Ensuring image is properly rotated
if hasattr(image, "_getexif"):
exif_datadict = image._getexif() # returns None if no EXIF data
if exif_... | Better media thumbnails including WebP support (#9988)
* Add thumbnail app
* Update get_thumbnail_size method and add tests
* Add logic for creating thumbnails
* Update logic for getting thumbnail
* Allow defining format for tumbnail generation
* Clear handle_thumbnail views
* Add prepare_image_proxy... | preprocess | 5d1a36b9aaf408016957db04f86397b2e53c2500 | saleor | utils.py | 16 | 21 | https://github.com/saleor/saleor.git | 8 | 162 | 0 | 62 | 271 | Python | {
"docstring": "Preprocess an image.\n\n An API hook for image pre-processing. Calls any image format specific\n pre-processors (if defined). I.E. If `image_format` is 'JPEG', this\n method will look for a method named `preprocess_JPEG`, if found\n `image` will be passed to it.\n\n ... | def preprocess(self, image, image_format):
format = self.format or image_format
save_kwargs = {"format": format}
# Ensuring image is properly rotated
if hasattr(image, "_getexif"):
exif_datadict = image._getexif() # returns None if no EXIF data
if exif_... | |
77,424 | 262,880 | 102 | PyInstaller/utils/hooks/__init__.py | 49 | 11 | def get_package_paths(package):
pkg_paths = get_all_package_paths(package)
if not pkg_paths:
raise ValueError(f"Package '{package}' does not exist or is not a package!")
if len(pkg_paths) > 1:
logger.warning(
"get_package_paths - packa | hookutils: support multiple package paths in collect_* helpers
Split the functionality of ``get_package_paths`` into two new helpers,
``get_all_package_paths`` and ``package_base_path``. The former obtains
all package paths, while the latter simplifies removal of
package-specific sub-path from the full package-path.
... | get_package_paths | e232aaf089d150b085502b97ce0fcf699b45e1b2 | pyinstaller | __init__.py | 11 | 11 | https://github.com/pyinstaller/pyinstaller.git | 3 | 58 | 0 | 42 | 100 | Python | {
"docstring": "\n Given a package, return the path to packages stored on this machine and also returns the path to this particular\n package. For example, if pkg.subpkg lives in /abs/path/to/python/libs, then this function returns\n ``(/abs/path/to/python/libs, /abs/path/to/python/libs/pkg/subpkg)``.\n\n ... | def get_package_paths(package):
pkg_paths = get_all_package_paths(package)
if not pkg_paths:
raise ValueError(f"Package '{package}' does not exist or is not a package!")
if len(pkg_paths) > 1:
logger.warning(
"get_package_paths - package %s has multiple paths (%r); returnin... | |
14,022 | 65,820 | 18 | erpnext/e_commerce/shopping_cart/cart.py | 27 | 8 | def get_address_territory(address_name):
territory = None
if address_name:
address_fields = frappe.db.get_value("Address", address_name, ["city", "state", "country"])
for value in address_fields:
territory = frappe.db.get_value("Territory", value)
if territory:
break
return territory
| style: format code with black | get_address_territory | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | cart.py | 13 | 9 | https://github.com/frappe/erpnext.git | 4 | 55 | 0 | 22 | 95 | Python | {
"docstring": "Tries to match city, state and country of address to existing territory",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def get_address_territory(address_name):
territory = None
if address_name:
address_fields = frappe.db.get_value("Address", address_name, ["city", "state", "country"])
for value in address_fields:
territory = frappe.db.get_value("Territory", value)
if territory:
break
return territory
| |
77,991 | 265,105 | 102 | netbox/dcim/svg.py | 38 | 13 | def _get_device_coords(self, position, height):
x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH
y = RACK_ELEVATION_BORDER_WIDTH
if self.rack.desc_units:
y += int((position - 1) * self.unit_height)
| Clean up rack elevation rendering | _get_device_coords | 0c915f7de9612c7485da3713cc6d63f368698a5d | netbox | svg.py | 18 | 8 | https://github.com/netbox-community/netbox.git | 2 | 76 | 0 | 24 | 121 | Python | {
"docstring": "\n Return the X, Y coordinates of the top left corner for a device in the specified rack unit.\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 18,
"vocab_size": 16
} | def _get_device_coords(self, position, height):
x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH
y = RACK_ELEVATION_BORDER_WIDTH
if self.rack.desc_units:
y += int((position - 1) * self.unit_height)
else:
y += int((self.rack.u_height - position + 1) * s... | |
47,439 | 195,852 | 151 | sympy/core/numbers.py | 71 | 16 | def igcd(*args):
if len(args) < 2:
raise TypeError(
'igcd() takes at least 2 arguments (%s given)' % len(args))
args_temp = [abs(as_int(i)) for i in args]
if 1 in args_temp:
return 1
| Improved documentation formatting | igcd | cda8dfe6f45dc5ed394c2f5cda706cd6c729f713 | sympy | numbers.py | 13 | 15 | https://github.com/sympy/sympy.git | 8 | 98 | 0 | 46 | 166 | Python | {
"docstring": "Computes nonnegative integer greatest common divisor.\n\n Explanation\n ===========\n\n The algorithm is based on the well known Euclid's algorithm [1]_. To\n improve speed, ``igcd()`` has its own caching mechanism.\n\n Examples\n ========\n\n >>> from sympy import igcd\n >>> i... | def igcd(*args):
if len(args) < 2:
raise TypeError(
'igcd() takes at least 2 arguments (%s given)' % len(args))
args_temp = [abs(as_int(i)) for i in args]
if 1 in args_temp:
return 1
a = args_temp.pop()
if HAS_GMPY: # Using gmpy if present to speed up.
for b ... | |
117,417 | 320,887 | 192 | qutebrowser/browser/webengine/webenginetab.py | 52 | 14 | def _prev_next_cb(self, found, *, going_up, callback):
if found:
result = browsertab.SearchNavigationResult.found
# Check if the match count change is opposite to the search direction
if self._old_match.current > 0:
if not going_up and self._old_match... | search: Split navigation/search callbacks
This way, we can move more logic (checking wrapping, etc.) into the API,
thus making the commands much more simple and stateless. | _prev_next_cb | e15bda307e42c288b926f578e7bf8c610e4767af | qutebrowser | webenginetab.py | 15 | 11 | https://github.com/qutebrowser/qutebrowser.git | 7 | 91 | 0 | 36 | 145 | Python | {
"docstring": "Call the prev/next callback based on the search result.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | def _prev_next_cb(self, found, *, going_up, callback):
if found:
result = browsertab.SearchNavigationResult.found
# Check if the match count change is opposite to the search direction
if self._old_match.current > 0:
if not going_up and self._old_match... | |
42,730 | 178,524 | 219 | nuitka/plugins/standard/TensorflowPlugin.py | 59 | 15 | def onModuleSourceCode(self, module_name, source_code):
if module_name != "tensorflow":
return source_code
source_lines = source_code.splitlines()
found_insert = False
for i, l in enumerate(source_lines):
if l.startswith("def ") and "_running_from_pip_pa... | Plugins: Slight more helpful error message in case tensorflow works | onModuleSourceCode | ab7014c6457b2b65010aea41512ca75d93847c9a | Nuitka | TensorflowPlugin.py | 12 | 16 | https://github.com/Nuitka/Nuitka.git | 6 | 95 | 0 | 47 | 170 | Python | {
"docstring": "Neutralize some path magic in tensorflow.\n\n Notes:\n Make sure tensorflow understands, we are not running as a PIP\n installed application.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 20,
"vocab_size": 20
} | def onModuleSourceCode(self, module_name, source_code):
if module_name != "tensorflow":
return source_code
source_lines = source_code.splitlines()
found_insert = False
for i, l in enumerate(source_lines):
if l.startswith("def ") and "_running_from_pip_pa... | |
40,604 | 170,734 | 91 | pandas/core/common.py | 32 | 6 | def cast_scalar_indexer(val):
# assumes lib.is_scalar(val)
if lib.is_float(val) and val.is_integer():
raise IndexError(
# GH#34193
"Indexing with a float is no lon | DEPR: indexing (#49412)
* DEPR: disallow Series.__getitem__ with a single-element list containing slice
* DEPR: disallow slicing with positional slicer and .loc
* DEPR: disallow positional indexing with float key
* move whatsnew
* DEPR: disallow multi-dimensional indexing
* fix matplotlib tests
* upd... | cast_scalar_indexer | 9820edc174730e11cb423d7869650c13100eb314 | pandas | common.py | 11 | 7 | https://github.com/pandas-dev/pandas.git | 3 | 28 | 0 | 31 | 55 | Python | {
"docstring": "\n Disallow indexing with a float key, even if that key is a round number.\n\n Parameters\n ----------\n val : scalar\n\n Returns\n -------\n outval : scalar\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 24,
"vocab_size": 21
} | def cast_scalar_indexer(val):
# assumes lib.is_scalar(val)
if lib.is_float(val) and val.is_integer():
raise IndexError(
# GH#34193
"Indexing with a float is no longer supported. Manually convert "
"to an integer key instead."
)
return val
| |
79,351 | 268,085 | 45 | test/lib/ansible_test/_internal/util.py | 28 | 15 | def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None
plugins: t. | ansible-test - Convert more type hints. (#78449)
* Simple regex replace of multi-line function arg annotations.
* Simple regex replace of multi-line function arg annotations with default values.
* Simple regex replace of multi-line function arg return annotations.
* Simple regex replace of assignment annotati... | load_plugins | b993b5cd49662f715774c333ce98e2845227ab66 | ansible | util.py | 13 | 4 | https://github.com/ansible/ansible.git | 3 | 65 | 0 | 23 | 99 | Python | {
"docstring": "\n Load plugins of the specified type and track them in the specified database.\n Only plugins which have already been imported will be loaded.\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 23,
"vocab_size": 20
} | def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None
plugins: t.Dict[str, t.Type[C]] = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type))
for plugin in plugins:
database[plugin] = plugins[plugin]
| |
49,648 | 200,437 | 44 | sympy/solvers/ode/nonhomogeneous.py | 29 | 14 | def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero):
r
a = Wild('a', exclude=[x])
b = W | Fix various typos
Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet` | _undetermined_coefficients_match | 24f1e7730119fe958cc8e28411f790c9a5ec04eb | sympy | nonhomogeneous.py | 10 | 54 | https://github.com/sympy/sympy.git | 7 | 151 | 0 | 23 | 93 | Python | {
"docstring": "\n Returns a trial function match if undetermined coefficients can be applied\n to ``expr``, and ``None`` otherwise.\n\n A trial expression can be found for an expression for use with the method\n of undetermined coefficients if the expression is an\n additive/multiplicative combination... | def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero):
r
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1)
retdict = {}
| |
20,154 | 100,698 | 99 | lib/gui/analysis/stats.py | 27 | 10 | def _remove_raw(self) -> None:
if "raw" in self._selections:
return
logger.debug("Removing Raw Data from output")
for key in list(self._stats.keys()):
if key.startswith("raw"):
| Bugfixes:
- Stats graph - Handle NaNs in data
- logger - de-elevate matplotlib font messages | _remove_raw | afec52309326304f4323029039e49bfcf928ef43 | faceswap | stats.py | 11 | 9 | https://github.com/deepfakes/faceswap.git | 4 | 57 | 0 | 21 | 102 | Python | {
"docstring": " Remove raw values from :attr:`stats` if they are not requested. ",
"language": "en",
"n_whitespaces": 11,
"n_words": 10,
"vocab_size": 10
} | def _remove_raw(self) -> None:
if "raw" in self._selections:
return
logger.debug("Removing Raw Data from output")
for key in list(self._stats.keys()):
if key.startswith("raw"):
del self._stats[key]
logger.debug("Removed Raw Data from outpu... | |
80,044 | 269,374 | 32 | keras/applications/efficientnet_weight_update_util.py | 20 | 6 | def get_keras_blocks(keras_weight_names):
# example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a'
keras_blocks = {x.split("_")[0] for x in keras_weight_names if "block" in x}
return sorted(keras_blocks)
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | get_keras_blocks | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | efficientnet_weight_update_util.py | 11 | 3 | https://github.com/keras-team/keras.git | 3 | 32 | 0 | 19 | 57 | Python | {
"docstring": "Extract the block names from list of full weight names.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def get_keras_blocks(keras_weight_names):
# example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a'
keras_blocks = {x.split("_")[0] for x in keras_weight_names if "block" in x}
return sorted(keras_blocks)
| |
13,800 | 65,128 | 121 | erpnext/accounts/party.py | 193 | 45 | def get_dashboard_info(party_type, party, loyalty_program=None):
current_fiscal_year = get_fiscal_year(nowdate(), as_dict=True)
doctype = "Sales Invoice" if party_type == "Customer" else "Purchase Invoice"
companies = frappe.get_all(
doctype, filters={"docstatus": 1, party_type.lower(): party}, distinct=1, field... | style: format code with black | get_dashboard_info | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | party.py | 18 | 77 | https://github.com/frappe/erpnext.git | 12 | 432 | 0 | 116 | 712 | Python | {
"docstring": "\n\t\tselect company, sum(debit_in_account_currency) - sum(credit_in_account_currency)\n\t\tfrom `tabGL Entry`\n\t\twhere party_type = %s and party=%s\n\t\tand is_cancelled = 0\n\t\tgroup by company",
"language": "en",
"n_whitespaces": 16,
"n_words": 21,
"vocab_size": 19
} | def get_dashboard_info(party_type, party, loyalty_program=None):
current_fiscal_year = get_fiscal_year(nowdate(), as_dict=True)
doctype = "Sales Invoice" if party_type == "Customer" else "Purchase Invoice"
companies = frappe.get_all(
doctype, filters={"docstatus": 1, party_type.lower(): party}, distinct=1, field... | |
83,815 | 281,509 | 32 | gamestonk_terminal/portfolio/brokers/robinhood/robinhood_controller.py | 10 | 7 | def print_help(self):
help_text =
console.print(text= | Terminal Wide Rich (#1161)
* My idea for how we handle Rich moving forward
* remove independent consoles
* FIxed pylint issues
* add a few vars
* Switched print to console
* More transitions
* Changed more prints
* Replaced all prints
* Fixing tabulate
* Finished replace tabulate
* Finish... | print_help | 82747072c511beb1b2672846ae2ee4aec53eb562 | OpenBBTerminal | robinhood_controller.py | 9 | 8 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 1 | 21 | 0 | 9 | 40 | Python | {
"docstring": "Print help[cmds]\n login login to robinhood\n\n holdings show account holdings in stocks\n history show equity history of your account\n[/cmds]",
"language": "en",
"n_whitespaces": 40,
"n_words": 20,
"vocab_size": 15
} | def print_help(self):
help_text =
console.print(text=help_text, menu="Portfolio - Brokers - Robinhood")
| |
15,811 | 71,984 | 153 | wagtail/admin/tests/test_edit_handlers.py | 31 | 16 | def test_page_with_inline_model_with_tabbed_panel_only(self):
EventPageSpeaker.settings_panels = [
FieldPanel("first_name"),
FieldPanel("last_name"),
]
warning = checks.Warning(
"EventPageSpeaker.settings_panels will have no effect on InlinePanel mo... | Reformat with black | test_page_with_inline_model_with_tabbed_panel_only | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_edit_handlers.py | 10 | 15 | https://github.com/wagtail/wagtail.git | 1 | 66 | 0 | 29 | 112 | Python | {
"docstring": "Test that checks will warn against setting single tabbed panel on InlinePanel modelEnsure that EventPageSpeaker uses `panels` instead of `settings_panels`.\nThere are no tabs on non-Page model editing within InlinePanels.",
"language": "en",
"n_whitespaces": 28,
"n_words": 30,
"vocab_size": 28... | def test_page_with_inline_model_with_tabbed_panel_only(self):
EventPageSpeaker.settings_panels = [
FieldPanel("first_name"),
FieldPanel("last_name"),
]
warning = checks.Warning(
"EventPageSpeaker.settings_panels will have no effect on InlinePanel mo... | |
@dataclass | 121,067 | 337,478 | 48 | src/accelerate/utils/dataclasses.py | 21 | 10 | def to_kwargs(self):
default_dict = self.__class__().to_dict()
this_dict = self.to_dict()
| Refactor utils into its own module (#340)
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> | to_kwargs | 02e2ed567be0e6d54b65884265a14873c3a30b2a | accelerate | dataclasses.py | 10 | 4 | https://github.com/huggingface/accelerate.git | 3 | 47 | 1 | 19 | 82 | Python | {
"docstring": "\n Returns a dictionary containing the attributes with values different from the default of this class.\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 15,
"vocab_size": 14
} | def to_kwargs(self):
default_dict = self.__class__().to_dict()
this_dict = self.to_dict()
return {k: v for k, v in this_dict.items() if default_dict[k] != v}
@dataclass |
117,024 | 319,942 | 662 | src/documents/tasks.py | 141 | 56 | def update_document_archive_file(document_id):
document = Document.objects.get(id=document_id)
mime_type = document.mime_type
parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type)
if not parser_class:
logger.error(
f"No parser found for mime type {mim... | Implements a better re-do of OCR by making the document archiver function common. Actually creates updated file now | update_document_archive_file | ab761e837c4be4974f699c8c97560a4291a8d298 | paperless-ngx | tasks.py | 20 | 43 | https://github.com/paperless-ngx/paperless-ngx.git | 5 | 266 | 0 | 108 | 463 | Python | {
"docstring": "\n Re-creates the archive file of a document, including new OCR content and thumbnail\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 13,
"vocab_size": 13
} | def update_document_archive_file(document_id):
document = Document.objects.get(id=document_id)
mime_type = document.mime_type
parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type)
if not parser_class:
logger.error(
f"No parser found for mime type {mim... | |
14,258 | 66,630 | 13 | erpnext/patches/v12_0/move_credit_limit_to_customer_credit_limit.py | 20 | 6 | def execute():
frappe.reload_doc("Selling", "doctype", "Customer Credit Limit")
frappe.reload_doc("Selling", "doctype", "Customer")
frappe.reload_doc("Setup", "doctype", "Customer Group | style: format code with black | execute | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | move_credit_limit_to_customer_credit_limit.py | 8 | 7 | https://github.com/frappe/erpnext.git | 2 | 49 | 0 | 15 | 98 | Python | {
"docstring": "Move credit limit and bypass credit limit to the child table of customer credit limit",
"language": "en",
"n_whitespaces": 14,
"n_words": 15,
"vocab_size": 11
} | def execute():
frappe.reload_doc("Selling", "doctype", "Customer Credit Limit")
frappe.reload_doc("Selling", "doctype", "Customer")
frappe.reload_doc("Setup", "doctype", "Customer Group")
if frappe.db.a_row_exists("Customer Credit Limit"):
return
move_credit_limit_to_child_table()
| |
34,708 | 150,327 | 73 | scripts/rest_client.py | 15 | 7 | def forceexit(self, tradeid, ordertype=None, amount=None):
return self._post("forceexit", data={
"tradeid": tradeid,
"ordertype": ordertype,
"amount": amount,
| Accept parameters to forceexit | forceexit | 82aecc81f393e98b86115e9bdfa46dac1a143fad | freqtrade | rest_client.py | 11 | 6 | https://github.com/freqtrade/freqtrade.git | 1 | 40 | 0 | 14 | 66 | Python | {
"docstring": "Force-exit a trade.\n\n :param tradeid: Id of the trade (can be received via status command)\n :param ordertype: Order type to use (must be market or limit)\n :param amount: Amount to sell. Full sell if not given\n :return: json object\n ",
"language": "en",
"n_w... | def forceexit(self, tradeid, ordertype=None, amount=None):
return self._post("forceexit", data={
"tradeid": tradeid,
"ordertype": ordertype,
"amount": amount,
})
| |
13,070 | 62,924 | 25 | .venv/lib/python3.8/site-packages/pip/_vendor/packaging/tags.py | 16 | 4 | def _abi3_applies(python_version):
# type: (PythonVersion) -> bool
return len(python_version) > 1 and tu | upd; format | _abi3_applies | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | tags.py | 9 | 2 | https://github.com/jindongwang/transferlearning.git | 2 | 24 | 0 | 16 | 41 | Python | {
"docstring": "\n Determine if the Python version supports abi3.\n\n PEP 384 was first implemented in Python 3.2.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 15,
"vocab_size": 14
} | def _abi3_applies(python_version):
# type: (PythonVersion) -> bool
return len(python_version) > 1 and tuple(python_version) >= (3, 2)
| |
80,150 | 269,521 | 264 | keras/backend.py | 64 | 29 | def variable(value, dtype=None, name=None, constraint=None):
if dtype is None:
dtype = floatx()
if hasattr(value, "tocoo"):
sparse_coo = value.tocoo()
indices = np.concatenate(
(
np.expand_dims(sparse_coo.row, 1),
np.expand_dims(sparse_coo... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | variable | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | backend.py | 14 | 28 | https://github.com/keras-team/keras.git | 5 | 173 | 0 | 44 | 265 | Python | {
"docstring": "Instantiates a variable and returns it.\n\n Args:\n value: Numpy array, initial value of the tensor.\n dtype: Tensor type.\n name: Optional name string for the tensor.\n constraint: Optional projection function to be\n applied to the variable after an optimize... | def variable(value, dtype=None, name=None, constraint=None):
if dtype is None:
dtype = floatx()
if hasattr(value, "tocoo"):
sparse_coo = value.tocoo()
indices = np.concatenate(
(
np.expand_dims(sparse_coo.row, 1),
np.expand_dims(sparse_coo... | |
84,633 | 284,080 | 93 | openbb_terminal/stocks/dark_pool_shorts/ibkr_model.py | 51 | 27 | def get_cost_to_borrow() -> pd.DataFrame:
ftp = ftplib.FTP("ftp3.interactivebrokers.com", "shortstock")
flo = BytesIO()
ftp.retrbinary("RETR usa.txt", flo.write)
flo.seek(0)
data = pd.read_csv(flo, sep="|", skiprows=1)
data = data[["#SYM", "FEERATE", "AVAILABLE"]]
data["AVAILABLE"] = ... | Add cost to borrow of stocks. Data from IBKR (#1663)
* add ctb to dps
* add test for ctb
* reformat using black
* fix tests for ctb
Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt>
Co-authored-by: jmaslek <jmaslek11@gmail.com> | get_cost_to_borrow | 73187d9e17a4838fc6ec583bcfcab593e06508cf | OpenBBTerminal | ibkr_model.py | 12 | 21 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 1 | 161 | 0 | 40 | 281 | Python | {
"docstring": "Get stocks with highest cost to borrow [Source: Interactive Broker]\n\n Returns\n -------\n pd.DataFrame\n Cost to borrow\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 16,
"vocab_size": 14
} | def get_cost_to_borrow() -> pd.DataFrame:
ftp = ftplib.FTP("ftp3.interactivebrokers.com", "shortstock")
flo = BytesIO()
ftp.retrbinary("RETR usa.txt", flo.write)
flo.seek(0)
data = pd.read_csv(flo, sep="|", skiprows=1)
data = data[["#SYM", "FEERATE", "AVAILABLE"]]
data["AVAILABLE"] = ... | |
8,086 | 43,885 | 30 | airflow/jobs/local_task_job.py | 5 | 4 | def _enable_task_listeners():
if get_listener_manager() | Add Listener Plugin API that tracks TaskInstance state changes (#20443)
This adds new Plugin API - "listeners". It enables plugin authors to write
[pluggy hook implementation][1] that will be called on certain formalized extension
points. To differentiate between current Airflow extension points, like
plugins, and ... | _enable_task_listeners | dba00ce6a32b7f50153887c6974f62985ca8023f | airflow | local_task_job.py | 9 | 3 | https://github.com/apache/airflow.git | 2 | 15 | 0 | 5 | 30 | Python | {
"docstring": "\n Check if we have any registered listeners, then register sqlalchemy hooks for\n TI state change if we do.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 16
} | def _enable_task_listeners():
if get_listener_manager().has_listeners:
register_task_instance_state_events()
| |
31,438 | 138,495 | 26 | python/ray/data/impl/plan.py | 12 | 5 | def has_computed_output(self) -> bool:
return self._snapshot_blocks is not N | [Datasets] [Out-of-Band Serialization: 2/3] Refactor `ExecutionPlan` to maintain complete lineage and eagerly unlink block references. (#23931)
This PR refactors ExecutionPlan to maintain complete stage lineage, even for eagerly computed datasets, while ensuring that block references are unlinked as early as possible ... | has_computed_output | 9ee24530abf1b5e3239869b5257dd7b678337b90 | ray | plan.py | 8 | 5 | https://github.com/ray-project/ray.git | 2 | 20 | 0 | 11 | 34 | Python | {
"docstring": "Whether this plan has a computed snapshot for the final stage, i.e. for the\n output of this plan.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 18,
"vocab_size": 15
} | def has_computed_output(self) -> bool:
return self._snapshot_blocks is not None and not self._stages_after_snapshot
| |
14,851 | 68,747 | 110 | erpnext/accounts/report/sales_register/sales_register.py | 150 | 21 | def get_conditions(filters):
conditions = ""
accounting_dimensions = get_accounting_dimensions(as_list=False) or []
accounting_dimensions_list = [d.fieldname for d in accounting_dimensions]
if filters.get("company"):
conditions += " and company=%(company)s"
if filters.get("customer") and "customer" not in acc... | fix(Sales Register): incorrect query with dimensions
If accounting dimension is also part of the default filters then same
query is repeated with incorrect syntax.
e.g. `item_group = (child1, child2)` instead of `in` query.
fix: don't add default filter if they are part of dimensions to be
added. | get_conditions | c3219ebad1cac35afc04cc051c9e215c70cd1e9b | erpnext | sales_register.py | 20 | 41 | https://github.com/frappe/erpnext.git | 13 | 213 | 0 | 73 | 446 | Python | {
"docstring": " and exists(select name from `tab{table}`\n\t\t\t where parent=`tabSales Invoice`.name\n\t\t\t \tand ifnull(`tab{table}`.{field}, '') = %({field})s)\n\t\t\tand exists(select name from `tabSales Invoice Item`\n\t\t\t\twhere parent=`tabSales Invoice`.name\n\t\t\t",
"language": "en",
"n_whitespaces":... | def get_conditions(filters):
conditions = ""
accounting_dimensions = get_accounting_dimensions(as_list=False) or []
accounting_dimensions_list = [d.fieldname for d in accounting_dimensions]
if filters.get("company"):
conditions += " and company=%(company)s"
if filters.get("customer") and "customer" not in acc... | |
2,909 | 19,198 | 29 | mlflow/sklearn/utils.py | 13 | 8 | def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):
import sklearn
if | Improve confusion matrix plot (#5273)
* update
Signed-off-by: Weichen Xu <weichen.xu@databricks.com>
* fix
Signed-off-by: Weichen Xu <weichen.xu@databricks.com>
* update
Signed-off-by: Weichen Xu <weichen.xu@databricks.com> | _get_classifier_artifacts | 847eb6b22d03f0cffef945996cf835272870435a | mlflow | utils.py | 8 | 48 | https://github.com/mlflow/mlflow.git | 3 | 187 | 0 | 13 | 41 | Python | {
"docstring": "\n Draw and record various common artifacts for classifier\n\n For all classifiers, we always log:\n (1) confusion matrix:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html\n\n For only binary classifiers, we will log:\n (2) precision reca... | def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):
import sklearn
if not _is_plotting_supported():
return []
| |
@py_random_state(3) | 42,180 | 176,941 | 559 | networkx/algorithms/swap.py | 228 | 33 | def double_edge_swap(G, nswap=1, max_tries=100, seed=None):
if G.is_directed():
raise nx.NetworkXError(
"double_edge_swap() not defined for directed graphs. Use direc | Implement directed edge swap (#5663)
* Add tests for directed edge swap
* Add directed edge swap algorithm
* Allow more swaps in directed tests
* Fix errors in swap.py to meet test criteria
* Remove TODOs
* Update documentation for directed_edge_swap and run black
* Fix incosistent spacing
* Add r... | double_edge_swap | 7d910e7184abd385c929f789b0c935ab143fc932 | networkx | swap.py | 14 | 38 | https://github.com/networkx/networkx.git | 10 | 251 | 1 | 154 | 428 | Python | {
"docstring": "Swap two edges in the graph while keeping the node degrees fixed.\n\n A double-edge swap removes two randomly chosen edges u-v and x-y\n and creates the new edges u-x and v-y::\n\n u--v u v\n becomes | |\n x--y x y\n\n If either the edge u-x or v-y... | def double_edge_swap(G, nswap=1, max_tries=100, seed=None):
if G.is_directed():
raise nx.NetworkXError(
"double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead."
)
if nswap > max_tries:
raise nx.NetworkXError("Number of swaps > number of tries... |
27,737 | 124,997 | 196 | rllib/offline/tests/test_dataset_reader.py | 44 | 16 | def test_dataset_shard_with_task_parallelization(self):
config = {
"input": "dataset",
"input_config": {
"format": "json",
"paths": self.dset_path,
"parallelism": 10,
}, | [RLlib] improved unittests for dataset_reader and fixed bugs (#26458) | test_dataset_shard_with_task_parallelization | 569fe0109629048d08e1d9e023f7769f10bd2244 | ray | test_dataset_reader.py | 11 | 16 | https://github.com/ray-project/ray.git | 2 | 86 | 0 | 38 | 143 | Python | {
"docstring": "Tests whether the dataset_shard function works correctly with parallelism\n for reading the dataset.",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 12
} | def test_dataset_shard_with_task_parallelization(self):
config = {
"input": "dataset",
"input_config": {
"format": "json",
"paths": self.dset_path,
"parallelism": 10,
},
}
NUM_WORKERS = 4
_, sha... | |
23,178 | 108,410 | 164 | lib/matplotlib/tests/test_compare_images.py | 97 | 24 | def test_image_comparison_expect_rms(im1, im2, tol, expect_rms):
baseline_dir, result_dir = map(Path, _image_directories(lambda: "dummy"))
# Copy both "baseline" a | Add uuid in im1 name | test_image_comparison_expect_rms | f3edc8771b7c292c5539e0e6444746b6ccefec04 | matplotlib | test_compare_images.py | 12 | 14 | https://github.com/matplotlib/matplotlib.git | 2 | 124 | 0 | 70 | 194 | Python | {
"docstring": "\n Compare two images, expecting a particular RMS error.\n\n im1 and im2 are filenames relative to the baseline_dir directory.\n\n tol is the tolerance to pass to compare_images.\n\n expect_rms is the expected RMS value, or None. If None, the test will\n succeed if compare_images succee... | def test_image_comparison_expect_rms(im1, im2, tol, expect_rms):
baseline_dir, result_dir = map(Path, _image_directories(lambda: "dummy"))
# Copy both "baseline" and "test" image to result_dir, so that 1)
# compare_images writes the diff to result_dir, rather than to the source
# tree and 2) the ba... | |
21,474 | 102,147 | 110 | test/jit/test_save_load.py | 27 | 20 | def test_versioned_symbols_reserialization(self):
module_v2 = torch.jit.load(py | Revert D33198155: Bump version number to 7 and compile old operators with old schema
Test Plan: revert-hammer
Differential Revision:
D33198155 (https://github.com/pytorch/pytorch/commit/d35fc409ad84c1a837e7e07ffe3f4e4942538e50)
Original commit changeset: 38a1185f9ecb
Original Phabricator Diff: D33198155 (https://gi... | test_versioned_symbols_reserialization | 0ece9a49d7d705b1a0cd4406d4f1c526d720e1f3 | pytorch | test_save_load.py | 12 | 9 | https://github.com/pytorch/pytorch.git | 2 | 81 | 0 | 23 | 136 | Python | {
"docstring": "\n Tests that loading and saving serialized Torchscript with a versioned\n symbol won't persist the original function and will inline the\n versioned builtin.\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 22,
"vocab_size": 19
} | def test_versioned_symbols_reserialization(self):
module_v2 = torch.jit.load(pytorch_test_dir + "/jit/fixtures/_test_serialization_subcmul_v2.pt")
buffer = io.BytesIO()
torch.jit.save(module_v2, buffer)
buffer.seek(0)
module_reserialized = torch.jit.load(buffer)
... | |
44,359 | 183,837 | 62 | tests/css/test_stylesheet.py | 39 | 13 | def test_stylesheet_apply_takes_final_rule_in_specificity_clash():
css = ".a {background: red; color: lime;} .b {background: blue;}"
stylesheet = _make_stylesheet(css)
node = DOMNode(classes="a b", id="c")
stylesheet.apply(node)
assert node.styles.color == Color(0, 255, 0) # color: lime
a... | Add various additional tests around CSS specificity | test_stylesheet_apply_takes_final_rule_in_specificity_clash | 4dd0d9fae43583638f34257f97d5749ca4f2c00c | textual | test_stylesheet.py | 10 | 7 | https://github.com/Textualize/textual.git | 1 | 62 | 0 | 31 | 105 | Python | {
"docstring": ".a and .b both contain background and have same specificity, so .b wins\n since it was declared last - the background should be blue.",
"language": "en",
"n_whitespaces": 26,
"n_words": 24,
"vocab_size": 21
} | def test_stylesheet_apply_takes_final_rule_in_specificity_clash():
css = ".a {background: red; color: lime;} .b {background: blue;}"
stylesheet = _make_stylesheet(css)
node = DOMNode(classes="a b", id="c")
stylesheet.apply(node)
assert node.styles.color == Color(0, 255, 0) # color: lime
a... | |
8,919 | 46,541 | 408 | airflow/migrations/versions/0106_909884dea523_update_migration_for_fab_tables_to_add_missing_constraints.py | 116 | 25 | def upgrade():
conn = op.get_bind()
if conn.dialect.name == 'sqlite':
op.execute('PRAGMA foreign_keys=OFF')
with op.batch_alter_table('ab_view_menu', schema=None) as batch_op:
batch_op.create_unique_constraint(batch_op.f('ab_view_menu_name_uq'), ['name'])
op.execute('PRA... | Use Airflow.Base.metadata in FAB models (#22353)
Since FAB models are now in airflow, it makes sense to monitor changes
in them. Therefore we use Airflow.models.base.Base.metadata for FAB models | upgrade | 2f5a567977e1219cab16c2548825a1b9eba07ab3 | airflow | 0106_909884dea523_update_migration_for_fab_tables_to_add_missing_constraints.py | 16 | 30 | https://github.com/apache/airflow.git | 6 | 378 | 0 | 53 | 652 | Python | {
"docstring": "Apply Update migration for FAB tables to add missing constraints",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def upgrade():
conn = op.get_bind()
if conn.dialect.name == 'sqlite':
op.execute('PRAGMA foreign_keys=OFF')
with op.batch_alter_table('ab_view_menu', schema=None) as batch_op:
batch_op.create_unique_constraint(batch_op.f('ab_view_menu_name_uq'), ['name'])
op.execute('PRA... | |
25,090 | 114,082 | 1,416 | mindsdb/migrations/versions/2022-02-09_27c5aca9e47e_test.py | 386 | 70 | def upgrade():
op.drop_table('ai_table')
conn = op.get_bind()
# views was created with unnamed fk. Therefore need recreate it
op.create_table(
'view_tmp',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('company_i... | migration | upgrade | 2a39e0ab3c81f09a227c50c98a3fb7ee57ec8fac | mindsdb | 2022-02-09_27c5aca9e47e_test.py | 17 | 130 | https://github.com/mindsdb/mindsdb.git | 10 | 1,172 | 0 | 197 | 1,989 | Python | {
"docstring": "\n insert into view_tmp (id, name, company_id, query, integration_id)\n select id, name, company_id, query, datasource_id from view;\n \n insert into analysis (analysis) select analysis from datasource where id = :id;\n \n select id fro... | def upgrade():
op.drop_table('ai_table')
conn = op.get_bind()
# views was created with unnamed fk. Therefore need recreate it
op.create_table(
'view_tmp',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('company_i... | |
76,511 | 260,810 | 310 | sklearn/cluster/_bisect_k_means.py | 95 | 24 | def _predict_recursive(self, X, sample_weight, cluster_node):
if cluster_node.left is None:
# This cluster has no subcluster. Labels are just the label of the cluster.
return np.full(X.shape[0], cluster_node.label, dtype=np.int32)
# Determine if data points belong to th... | MAINT Remove `x_squared_norms` arg from `k_means_lloyd` signature (#24264)
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> | _predict_recursive | 60f16feaadaca28f9a1cc68d2f406201860d27e8 | scikit-learn | _bisect_k_means.py | 11 | 22 | https://github.com/scikit-learn/scikit-learn.git | 3 | 171 | 0 | 67 | 254 | Python | {
"docstring": "Predict recursively by going down the hierarchical tree.\n\n Parameters\n ----------\n X : {ndarray, csr_matrix} of shape (n_samples, n_features)\n The data points, currently assigned to `cluster_node`, to predict between\n the subclusters of this node.\n\n ... | def _predict_recursive(self, X, sample_weight, cluster_node):
if cluster_node.left is None:
# This cluster has no subcluster. Labels are just the label of the cluster.
return np.full(X.shape[0], cluster_node.label, dtype=np.int32)
# Determine if data points belong to th... | |
77,188 | 262,331 | 117 | TTS/tts/models/vits.py | 54 | 27 | def inference_voice_conversion(self, reference_wav, speaker_id=None, d_vector=None, reference_speaker_id=None, reference_d_vector=None):
# com | Add Voice conversion inference support (#1337)
* Add support for voice conversion inference
* Cache d_vectors_by_speaker for fast inference using a bigger speakers.json
* Rebase bug fix
* Use the average d-vector for inference | inference_voice_conversion | dbe9da7f15544b83043f481a99e5bcb23e002dc9 | TTS | vits.py | 14 | 7 | https://github.com/coqui-ai/TTS.git | 3 | 128 | 0 | 42 | 187 | Python | {
"docstring": "Inference for voice conversion\n\n Args:\n reference_wav (Tensor): Reference wavform. Tensor of shape [B, T]\n speaker_id (Tensor): speaker_id of the target speaker. Tensor of shape [B]\n d_vector (Tensor): d_vector embedding of target speaker. Tensor of shape `... | def inference_voice_conversion(self, reference_wav, speaker_id=None, d_vector=None, reference_speaker_id=None, reference_d_vector=None):
# compute spectrograms
y = wav_to_spec(reference_wav, self.config.audio.fft_size, self.config.audio.hop_length, self.config.audio.win_length, center=False).tr... | |
9,587 | 48,736 | 39 | tests/test_routers.py | 11 | 7 | def test_nonconflicting_specified_basename(self):
self.router.register(r'notes', NoteViewSet, basename='notes')
self.router.register(r'notes_kwduplicate', KWa | raise ImproperlyConfigured exception if `basename` is not unique (#8438)
* raise ImproperlyConfigured if basename already exists
* rename already_registered function; return True/False
* additional basename tests
* additional basename tests
* Update rest_framework/routers.py
Co-authored-by: David Graves... | test_nonconflicting_specified_basename | 48a21aa0eb3a95d32456c2a927eff9552a04231e | django-rest-framework | test_routers.py | 9 | 4 | https://github.com/encode/django-rest-framework.git | 1 | 51 | 0 | 10 | 85 | Python | {
"docstring": "\n Ensure 2 routers with the same model, and a distinct basename specified\n on each does not throw an exception\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 19
} | def test_nonconflicting_specified_basename(self):
self.router.register(r'notes', NoteViewSet, basename='notes')
self.router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='notes_kwduplicate')
self.router.register(r'notes_duplicate', NoteViewSet, basename='notes_duplicate')
| |
@pytest.mark.parametrize("config_count", [(CONFIG_ATTRIBUTES, 1)]) | 112,947 | 314,340 | 1,649 | tests/components/group/test_cover.py | 389 | 35 | async def test_state(hass, setup_comp):
state = hass.states.get(COVER_GROUP)
# No entity has a valid state -> group state unknown
assert state.state == STATE_UNKNOWN
assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME
assert state.attributes[ATTR_ENTITY_ID] == [
DEMO_COVER,
... | Improve group tests (#73630) | test_state | 9b8c3e37bbee3dbaa949705c7ae7b29f521988e7 | core | test_cover.py | 15 | 120 | https://github.com/home-assistant/core.git | 19 | 807 | 1 | 94 | 1,196 | Python | {
"docstring": "Test handling of state.\n\n The group state is unknown if all group members are unknown or unavailable.\n Otherwise, the group state is opening if at least one group member is opening.\n Otherwise, the group state is closing if at least one group member is closing.\n Otherwise, the group s... | async def test_state(hass, setup_comp):
state = hass.states.get(COVER_GROUP)
# No entity has a valid state -> group state unknown
assert state.state == STATE_UNKNOWN
assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME
assert state.attributes[ATTR_ENTITY_ID] == [
DEMO_COVER,
... |
23,787 | 109,877 | 128 | lib/matplotlib/cbook/__init__.py | 36 | 16 | def connect(self, signal, func):
if self._signals is not None:
_api.check_in_list(self._signals, signal=signal)
self._func_cid_map.setdefault(signal, {})
proxy = _weak_or_strong_ref(func, self._remove_proxy)
| Remove miscellaneous deprecations from 3.5 | connect | e199c3b819f66a56f49657de0a9b3fb60c745b94 | matplotlib | __init__.py | 10 | 15 | https://github.com/matplotlib/matplotlib.git | 4 | 137 | 0 | 25 | 173 | Python | {
"docstring": "Register *func* to be called when signal *signal* is generated.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def connect(self, signal, func):
if self._signals is not None:
_api.check_in_list(self._signals, signal=signal)
self._func_cid_map.setdefault(signal, {})
proxy = _weak_or_strong_ref(func, self._remove_proxy)
if proxy in self._func_cid_map[signal]:
return ... | |
14,551 | 67,548 | 43 | erpnext/setup/setup_wizard/operations/taxes_setup.py | 69 | 22 | def get_or_create_account(company_name, account):
default_root_type = "Liability"
root_type = account.get("root_type", default_root_type)
existing_accounts = frappe.get_all(
"Account",
filters={"company": company_name, "root_type": root_type},
or_filters={
"account_name": account.get("account_name"),
| style: format code with black | get_or_create_account | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | taxes_setup.py | 14 | 26 | https://github.com/frappe/erpnext.git | 2 | 168 | 0 | 51 | 294 | Python | {
"docstring": "\n\tCheck if account already exists. If not, create it.\n\tReturn a tax account or None.\n\t",
"language": "en",
"n_whitespaces": 13,
"n_words": 15,
"vocab_size": 14
} | def get_or_create_account(company_name, account):
default_root_type = "Liability"
root_type = account.get("root_type", default_root_type)
existing_accounts = frappe.get_all(
"Account",
filters={"company": company_name, "root_type": root_type},
or_filters={
"account_name": account.get("account_name"),
... | |
20,926 | 101,515 | 47 | lib/gui/utils.py | 12 | 10 | def set_default_options(self) -> None:
default = self.cli_opts.get_option_values()
logger.debug(default)
self. | Bugfix: Preview for extract in batch mode | set_default_options | dc18c74eea0c7837a820d27628cb12b0824fa30e | faceswap | utils.py | 9 | 12 | https://github.com/deepfakes/faceswap.git | 1 | 37 | 0 | 10 | 64 | Python | {
"docstring": " Set the default options for :mod:`lib.gui.projects`\n\n The Default GUI options are stored on Faceswap startup.\n\n Exposed as the :attr:`_default_opts` for a project cannot be set until after the main\n Command Tabs have been loaded.\n ",
"language": "en",
"n_whitespa... | def set_default_options(self) -> None:
default = self.cli_opts.get_option_values()
logger.debug(default)
self._gui_objects.default_options = default
self.project.set_default_options()
| |
3,324 | 20,326 | 1,331 | pipenv/patched/notpip/_vendor/pygments/formatters/html.py | 244 | 42 | def _format_lines(self, tokensource): | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | _format_lines | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | html.py | 20 | 63 | https://github.com/pypa/pipenv.git | 26 | 453 | 0 | 124 | 751 | Python | {
"docstring": "\n Just format the tokens, without any wrapping tags.\n Yield individual lines.\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 11,
"vocab_size": 11
} | def _format_lines(self, tokensource):
nocls = self.noclasses
lsep = self.lineseparator
tagsfile = self.tagsfile
lspan = ''
line = []
for ttype, value in tokensource:
try:
cspan = self.span_element_openers[ttype]
except Key... | |
643 | 4,250 | 101 | octavia-cli/octavia_cli/apply/resources.py | 33 | 13 | def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]:
search_results = self._search().get(f"{self.resource_type}s", [])
if len(search_results) > 1:
raise DuplicateResourceError("Two or more ressources exist with the same name.")
if l... | 🐙 octavia-cli: `apply` connections (#10881) | _get_remote_resource | 56bf982cb96f831fe04f5e44a92ee4a669b9e16a | airbyte | resources.py | 11 | 16 | https://github.com/airbytehq/airbyte.git | 3 | 64 | 0 | 29 | 111 | Python | {
"docstring": "Find the remote resource on the Airbyte instance associated with the current resource.\n\n Raises:\n DuplicateResourceError: raised if the search results return multiple resources.\n\n Returns:\n Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: The remo... | def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]:
search_results = self._search().get(f"{self.resource_type}s", [])
if len(search_results) > 1:
raise DuplicateResourceError("Two or more ressources exist with the same name.")
if l... | |
80,817 | 271,592 | 228 | keras/engine/training.py | 51 | 16 | def _get_compile_args(self, user_metrics=True):
self._assert_compile_was_called()
# pylint: disable=protected-a | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _get_compile_args | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | training.py | 12 | 17 | https://github.com/keras-team/keras.git | 4 | 95 | 0 | 34 | 158 | Python | {
"docstring": "Used for saving or cloning a Model.\n\n Args:\n user_metrics: Whether to return user-supplied metrics or `Metric` objects.\n Defaults to returning the user-supplied metrics.\n\n Returns:\n Dictionary of arguments that were used when compiling the model.\n ... | def _get_compile_args(self, user_metrics=True):
self._assert_compile_was_called()
# pylint: disable=protected-access
saved_metrics = self.compiled_metrics._user_metrics
saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics
if not user_metrics:
... | |
38,542 | 160,170 | 93 | numpy/f2py/tests/test_f2py2e.py | 34 | 17 | def test_norestexdoc(capfd, hello_world_f90, monkeypatch):
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(sys, "argv",
| TST: Initialize f2py2e tests of the F2PY CLI (#20668)
Increases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff.
More importantly, sets the groundwork for #20056, in that passing the same testsuite should ind... | test_norestexdoc | 729ad4f92420231e2a7009b3223c6c7620b8b808 | numpy | test_f2py2e.py | 11 | 9 | https://github.com/numpy/numpy.git | 1 | 61 | 0 | 32 | 115 | Python | {
"docstring": "Ensures that TeX documentation is written out\n\n CLI :: --no-rest-doc\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 10,
"vocab_size": 10
} | def test_norestexdoc(capfd, hello_world_f90, monkeypatch):
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(sys, "argv",
f'f2py -m {mname} {ipath} --no-rest-doc'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr(... | |
81,383 | 275,342 | 341 | keras/optimizers/optimizer_v1.py | 82 | 18 | def set_weights(self, weights):
params = self.weights
if len(params) != len(weights):
raise ValueError(
"Length of the specified weight list ("
+ str(len(weights))
+ ") does not match the number of weights "
"of the opt... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | set_weights | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | optimizer_v1.py | 17 | 21 | https://github.com/keras-team/keras.git | 4 | 125 | 0 | 56 | 212 | Python | {
"docstring": "Sets the weights of the optimizer, from Numpy arrays.\n\n Should only be called after computing the gradients\n (otherwise the optimizer has no weights).\n\n Args:\n weights: a list of Numpy arrays. The number of arrays and their shape\n must match number o... | def set_weights(self, weights):
params = self.weights
if len(params) != len(weights):
raise ValueError(
"Length of the specified weight list ("
+ str(len(weights))
+ ") does not match the number of weights "
"of the opt... | |
80,860 | 271,840 | 26 | keras/engine/training_utils_v1.py | 14 | 8 | def extract_tensors_from_dataset(dataset):
iterator = get_iterator(dataset)
inputs, targets, sample_weight = unpack_iterator_input(iterator)
return inputs, targets, sample_weight
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | extract_tensors_from_dataset | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | training_utils_v1.py | 8 | 4 | https://github.com/keras-team/keras.git | 1 | 28 | 0 | 10 | 46 | Python | {
"docstring": "Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.\n\n Args:\n dataset: Dataset instance.\n\n Returns:\n Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 29,
"vocab_size": 2... | def extract_tensors_from_dataset(dataset):
iterator = get_iterator(dataset)
inputs, targets, sample_weight = unpack_iterator_input(iterator)
return inputs, targets, sample_weight
| |
50,895 | 204,808 | 56 | django/db/backends/base/base.py | 21 | 4 | def _set_autocommit(self, autocommit):
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper | Refs #33476 -- Reformatted code with Black. | _set_autocommit | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | base.py | 8 | 4 | https://github.com/django/django.git | 1 | 13 | 0 | 20 | 26 | Python | {
"docstring": "\n Backend-specific implementation to enable or disable autocommit.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def _set_autocommit(self, autocommit):
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a _set_autocommit() method"
)
# ##### Generic transaction management methods #####
| |
15,886 | 72,413 | 209 | wagtail/admin/views/generic/multiple_upload.py | 29 | 17 | def get_edit_upload_form_context_data(self):
edit_form_class = self.get_edit_form_class()
return {
self.context_upload_name: self.upload_object,
"edit_action": reverse(
self.edit_upload_url | Reformat with black | get_edit_upload_form_context_data | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | multiple_upload.py | 14 | 16 | https://github.com/wagtail/wagtail.git | 1 | 100 | 0 | 25 | 155 | Python | {
"docstring": "\n Return the context data necessary for rendering the HTML form for supplying the\n metadata to turn an upload object into a final object\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 23,
"vocab_size": 19
} | def get_edit_upload_form_context_data(self):
edit_form_class = self.get_edit_form_class()
return {
self.context_upload_name: self.upload_object,
"edit_action": reverse(
self.edit_upload_url_name, args=(self.upload_object.id,)
),
"d... | |
52,128 | 207,841 | 310 | tests/admin_views/tests.py | 79 | 24 | def test_overriding_has_module_permission(self):
articles = Article._meta.verbose_name_plural.title()
sections = Section._meta.verbose_name_plural.title()
index_url = reverse("admin7:index")
self.client.force_login(self.superuser)
response = self.client.get(index_url)
... | Refs #33476 -- Reformatted code with Black. | test_overriding_has_module_permission | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 13 | 31 | https://github.com/django/django.git | 1 | 280 | 0 | 39 | 459 | Python | {
"docstring": "\n If has_module_permission() always returns False, the module shouldn't\n be displayed on the admin index page for any users.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 17
} | def test_overriding_has_module_permission(self):
articles = Article._meta.verbose_name_plural.title()
sections = Section._meta.verbose_name_plural.title()
index_url = reverse("admin7:index")
self.client.force_login(self.superuser)
response = self.client.get(index_url)
... | |
51,299 | 205,960 | 51 | django/forms/forms.py | 8 | 7 | def non_field_errors(self):
return self.errors.get(
NON_FIELD_ERRORS,
self.error_class(error_class | Refs #33476 -- Reformatted code with Black. | non_field_errors | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | forms.py | 11 | 5 | https://github.com/django/django.git | 1 | 31 | 0 | 8 | 51 | Python | {
"docstring": "\n Return an ErrorList of errors that aren't associated with a particular\n field -- i.e., from Form.clean(). Return an empty ErrorList if there\n are none.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 24,
"vocab_size": 21
} | def non_field_errors(self):
return self.errors.get(
NON_FIELD_ERRORS,
self.error_class(error_class="nonfield", renderer=self.renderer),
)
| |
@add_start_docstrings(
"The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding layer.",
VAN_START_DOCSTRING,
) | 6,577 | 36,152 | 59 | src/transformers/models/van/modeling_van.py | 40 | 10 | def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, V | Visual Attention Network (VAN) (#16027)
* encoder works
* addded files
* norm in stage
* convertion script
* tests
* fix copies
* make fix-copies
* fixed __init__
* make fix-copies
* fix
* shapiro test needed
* make fix-copie
* minor changes
* make style + quality
* minor refa... | _set_gradient_checkpointing | 0a057201a96565df29984d716f660fd8d634329a | transformers | modeling_van.py | 9 | 3 | https://github.com/huggingface/transformers.git | 2 | 24 | 1 | 36 | 64 | Python | {
"docstring": "\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`VanConfig`]... | def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, VanModel):
module.gradient_checkpointing = value
VAN_START_DOCSTRING = r
VAN_INPUTS_DOCSTRING = r
@add_start_docstrings(
"The bare VAN model outputting raw features without any specific head on top. Note, VAN ... |
12,544 | 61,396 | 35 | .venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py | 14 | 6 | def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
raise NotImplementedError
| upd; format | update | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | versioncontrol.py | 6 | 2 | https://github.com/jindongwang/transferlearning.git | 1 | 14 | 0 | 14 | 23 | Python | {
"docstring": "\n Update an already-existing repo to the given ``rev_options``.\n\n Args:\n rev_options: a RevOptions object.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 13,
"vocab_size": 13
} | def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
raise NotImplementedError
| |
42,482 | 177,721 | 92 | label_studio/webhooks/utils.py | 34 | 10 | def get_nested_field(value, field):
if field == '__self__':
return value
fields = | fix: DEV-1725: Add ANNOTATIONS_CREATED webhook action to predictions to annotations action (#2052)
* fix: DEV-1725: Add ANNOTATIONS_CREATED webhook action to predictions to annotations action
* Update predictions_to_annotations.py
Co-authored-by: Max Tkachenko <makseq@gmail.com> | get_nested_field | b2aa62dc675036f7695c0b09dd509617ba9df90d | label-studio | utils.py | 13 | 10 | https://github.com/heartexlabs/label-studio.git | 5 | 62 | 0 | 24 | 101 | Python | {
"docstring": "\n Get nested field from list of objects or single instance\n :param value: Single instance or list to look up field\n :param field: Field to lookup\n :return: List or single instance of looked up field\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 34,
"vocab_size": 22
... | def get_nested_field(value, field):
if field == '__self__':
return value
fields = field.split('__')
for fld in fields:
if isinstance(value, list):
value = [getattr(v, fld) for v in value]
else:
value = getattr(value, fld)
return value | |
44,362 | 183,841 | 45 | tests/css/test_stylesheet.py | 27 | 12 | def test_stylesheet_many_classes_dont_overrule_id():
css = "#id {color: red;} .a.b.c.d {color: blue;}"
stylesheet = _make_stylesheet(css)
node = DOMNode(classes="a b c d", id="id")
style | Add various additional tests around CSS specificity | test_stylesheet_many_classes_dont_overrule_id | 4dd0d9fae43583638f34257f97d5749ca4f2c00c | textual | test_stylesheet.py | 10 | 6 | https://github.com/Textualize/textual.git | 1 | 47 | 0 | 24 | 82 | Python | {
"docstring": "#id is further to the left in the specificity tuple than class, and\n a selector containing multiple classes cannot take priority over even a\n single class.",
"language": "en",
"n_whitespaces": 31,
"n_words": 26,
"vocab_size": 24
} | def test_stylesheet_many_classes_dont_overrule_id():
css = "#id {color: red;} .a.b.c.d {color: blue;}"
stylesheet = _make_stylesheet(css)
node = DOMNode(classes="a b c d", id="id")
stylesheet.apply(node)
assert node.styles.color == Color(255, 0, 0)
| |
3,272 | 20,220 | 21 | pipenv/patched/notpip/_vendor/platformdirs/macos.py | 7 | 4 | def site_data_dir(self) -> str:
return self._append_ | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | site_data_dir | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | macos.py | 8 | 3 | https://github.com/pypa/pipenv.git | 1 | 15 | 0 | 7 | 29 | Python | {
"docstring": ":return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def site_data_dir(self) -> str:
return self._append_app_name_and_version("/Library/Application Support")
| |
43,550 | 181,764 | 113 | tests/tpot_tests.py | 37 | 16 | def test_warm_start():
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light',
warm_start=True)
tpot_obj.fit(pretest_X, pretest_y)
assert tpot_obj._pop is not None
... | Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | test_warm_start | 388616b6247ca4ea8de4e2f340d6206aee523541 | tpot | tpot_tests.py | 10 | 16 | https://github.com/EpistasisLab/tpot.git | 1 | 83 | 0 | 25 | 126 | Python | {
"docstring": "Assert that the TPOT warm_start flag stores the pop and pareto_front from the first run.",
"language": "en",
"n_whitespaces": 14,
"n_words": 15,
"vocab_size": 13
} | def test_warm_start():
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light',
warm_start=True)
tpot_obj.fit(pretest_X, pretest_y)
assert tpot_obj._pop is not None
... | |
37,381 | 158,212 | 85 | d2l/mxnet.py | 26 | 18 | def load_data_wiki(batch_size, max_len):
num_workers = d2l.get_dataloader_workers()
data_dir = d2l.download_extract('wikitext-2', 'wikitext-2')
paragraphs = _read_wiki(data_dir)
train_set = _WikiTextDataset(paragraphs, max_len)
train_iter = gluon.data.DataLoader(train_set, batch_size, s | [PaddlePaddle] Merge master into Paddle branch (#1186)
* change 15.2 title in chinese version (#1109)
change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ (i.e. "15.2. Sentiment Analysis: Using Recursive Neural Networks" to "15.2. Sentiment Analysis: Using Recurrent Neural Networks")
* Revise some wording/phrasing (#1105)
* Update r0.17.5 (#1120)
* Bump versions in installation
* line 94 typo: (“bert.mall”)->(“bert.small”) (#1129)
* line 313: "b... | load_data_wiki | b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2 | d2l-zh | mxnet.py | 9 | 8 | https://github.com/d2l-ai/d2l-zh.git | 1 | 65 | 0 | 22 | 105 | Python | {
"docstring": "Load the WikiText-2 dataset.\n\n Defined in :numref:`subsec_prepare_mlm_data`",
"language": "en",
"n_whitespaces": 9,
"n_words": 7,
"vocab_size": 7
} | def load_data_wiki(batch_size, max_len):
num_workers = d2l.get_dataloader_workers()
data_dir = d2l.download_extract('wikitext-2', 'wikitext-2')
paragraphs = _read_wiki(data_dir)
train_set = _WikiTextDataset(paragraphs, max_len)
train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=T... | |
7,483 | 42,087 | 60 | seaborn/_core/plot.py | 21 | 8 | def save(self, loc, **kwargs) -> Plot:
# TODO expose important keyword arguments in our signature?
with theme_context(self._theme_with_defaults()):
self._plot().save(loc, **kwargs)
return self
| Add rudimentary themeing support (#2929)
* WIP Plot.theme
* Add default values for theme to match set_theme()
* Depend on matplotib style defaults and update rcParams more selectively
* Fix lines test
* Improve test coverage | save | 762db897b52d16ab2f164d5103df4cc26c1d0503 | seaborn | plot.py | 11 | 16 | https://github.com/mwaskom/seaborn.git | 1 | 38 | 0 | 20 | 66 | Python | {
"docstring": "\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n ... | def save(self, loc, **kwargs) -> Plot:
# TODO expose important keyword arguments in our signature?
with theme_context(self._theme_with_defaults()):
self._plot().save(loc, **kwargs)
return self
| |
17,908 | 85,028 | 97 | zerver/tests/test_signup.py | 32 | 10 | def test_create_realm_no_creation_key(self) -> None:
email = "user1@test.com"
with self.settings(OPEN_REALM_CREATION=False):
# Create new realm with the email, but no creation key.
| realm_creation: Rework error pages.
The previous error page was inadequate for serving the two different
scenarios where we show errors in realm_creations, in particular
containing a misleading sentence about realm creation being disabled
(even in the case where it was actually enabled and the user simply had
an expir... | test_create_realm_no_creation_key | 582d5b0aa31ac79a5ee1af95b2e71c4bfc53d5aa | zulip | test_signup.py | 13 | 10 | https://github.com/zulip/zulip.git | 1 | 53 | 0 | 29 | 96 | Python | {
"docstring": "\n Trying to create a realm without a creation_key should fail when\n OPEN_REALM_CREATION is false.\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 14,
"vocab_size": 13
} | def test_create_realm_no_creation_key(self) -> None:
email = "user1@test.com"
with self.settings(OPEN_REALM_CREATION=False):
# Create new realm with the email, but no creation key.
result = self.client_post("/new/", {"email": email})
self.assertEqual(result.... | |
20,038 | 100,574 | 61 | lib/gpu_stats/nvidia.py | 17 | 11 | def _get_device_names(self) -> List[str]:
names = [pynvml.nvmlDeviceGetName(handle).d | Refactor lib.gpu_stats (#1218)
* inital gpu_stats refactor
* Add dummy CPU Backend
* Update Sphinx documentation | _get_device_names | bdbbad4d310fb606b6f412aa81e9f57ccd994e97 | faceswap | nvidia.py | 11 | 12 | https://github.com/deepfakes/faceswap.git | 2 | 43 | 0 | 16 | 77 | Python | {
"docstring": " Obtain the list of names of connected Nvidia GPUs as identified in :attr:`_handles`.\n\n Returns\n -------\n list\n The list of connected Nvidia GPU names\n ",
"language": "en",
"n_whitespaces": 63,
"n_words": 23,
"vocab_size": 16
} | def _get_device_names(self) -> List[str]:
names = [pynvml.nvmlDeviceGetName(handle).decode("utf-8")
for handle in self._handles]
self._log("debug", f"GPU Devices: {names}")
return names
| |
73,013 | 249,592 | 162 | tests/push/test_push_rule_evaluator.py | 71 | 17 | def test_delayed_message(self) -> None:
user1 = UserID.from_string(self.user_id1)
# Send a message before user2 joins
event_id1 = self.create_and_send_event(self.room_id, user1)
# Have user2 join the room
self.helper.join(self.room_id, self.user_id2, tok=self.tok2)
... | Speed up calculating push actions in large rooms (#13973)
We move the expensive check of visibility to after calculating push actions, avoiding the expensive check for users who won't get pushed anyway.
I think this should have a big impact on rooms with large numbers of local users that have pushed disabled. | test_delayed_message | 285b9e9b6c3558718e7d4f513062e277948ac35d | synapse | test_push_rule_evaluator.py | 10 | 10 | https://github.com/matrix-org/synapse.git | 1 | 96 | 0 | 52 | 154 | Python | {
"docstring": "Test that a delayed message that was from before a user joined\n doesn't cause a notification for the joined user.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 20,
"vocab_size": 16
} | def test_delayed_message(self) -> None:
user1 = UserID.from_string(self.user_id1)
# Send a message before user2 joins
event_id1 = self.create_and_send_event(self.room_id, user1)
# Have user2 join the room
self.helper.join(self.room_id, self.user_id2, tok=self.tok2)
... | |
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False]) | 76,233 | 260,409 | 777 | sklearn/linear_model/_glm/tests/test_glm.py | 314 | 49 | def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset):
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
# solver=solver, # only lbfgs avail... | TST tight tests for GLMs (#23619)
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> | test_glm_regression_unpenalized_hstacked_X | 9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f | scikit-learn | test_glm.py | 16 | 48 | https://github.com/scikit-learn/scikit-learn.git | 9 | 414 | 1 | 170 | 664 | Python | {
"docstring": "Test that unpenalized GLM converges for all solvers to correct solution.\n\n We work with a simple constructed data set with known solution.\n GLM fit on [X] is the same as fit on [X, X]/2.\n For long X, [X, X] is a singular matrix and we check against the minimum norm\n solution:\n ... | def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset):
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
# solver=solver, # only lbfgs avail... |
73,022 | 249,612 | 43 | tests/storage/test_event_push_actions.py | 15 | 11 | def test_count_aggregation_threads(self) -> None:
| Track notification counts per thread (implement MSC3773). (#13776)
When retrieving counts of notifications segment the results based on the
thread ID, but choose whether to return them as individual threads or as
a single summed field by letting the client opt-in via a sync flag.
The summarization code is also up... | test_count_aggregation_threads | b4ec4f5e71a87d5bdc840a4220dfd9a34c54c847 | synapse | test_event_push_actions.py | 8 | 69 | https://github.com/matrix-org/synapse.git | 1 | 434 | 0 | 14 | 49 | Python | {
"docstring": "\n This is essentially the same test as test_count_aggregation, but adds\n events to the main timeline and to a thread.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 17
} | def test_count_aggregation_threads(self) -> None:
user_id, token, _, other_token, room_id = self._create_users_and_room()
thread_id: str
last_event_id: str
| |
72,180 | 248,249 | 76 | tests/config/test_cache.py | 20 | 14 | def test_global_instantiated_before_config_load(self):
cache = LruCache(100)
add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor)
self.as | Reload cache factors from disk on SIGHUP (#12673) | test_global_instantiated_before_config_load | d38d242411b8910dfacde1e61fd3a0ec5cbcaa66 | synapse | test_cache.py | 11 | 8 | https://github.com/matrix-org/synapse.git | 1 | 76 | 0 | 18 | 130 | Python | {
"docstring": "\n If a cache is instantiated before the config is read, it will be given\n the default cache size in the interim, and then resized to the new\n default cache size once the config is loaded.\n ",
"language": "en",
"n_whitespaces": 64,
"n_words": 35,
"vocab_size": 24... | def test_global_instantiated_before_config_load(self):
cache = LruCache(100)
add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor)
self.assertEqual(cache.max_size, 50)
config = {"caches": {"global_factor": 4}}
self.config.read_config(config, config_di... | |
80,086 | 269,448 | 15 | keras/backend.py | 10 | 8 | def in_top_k(predictions, targets, k):
return tf.compat.v1.math.in_top_k(p | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | in_top_k | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | backend.py | 10 | 2 | https://github.com/keras-team/keras.git | 1 | 27 | 0 | 9 | 42 | Python | {
"docstring": "Returns whether the `targets` are in the top `k` `predictions`.\n\n Args:\n predictions: A tensor of shape `(batch_size, classes)` and type `float32`.\n targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.\n k: An `int`, number of top elements to consider.\n... | def in_top_k(predictions, targets, k):
return tf.compat.v1.math.in_top_k(predictions, targets, k)
# CONVOLUTIONS
| |
564 | 3,805 | 53 | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_async_job.py | 14 | 9 | def test_update_job(self, parent_job, grouped_jobs, api, batch):
parent_job.update_job()
# assert
for job in grouped_jobs:
j | 🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)
* Facebook Marketing performance improvement
* add comments and little refactoring
* fix integration tests with the new config
* improve job status handling, limit concurrency to 10
* fix campaign jobs, refactor manager
* big refactori... | test_update_job | a3aae8017a0a40ff2006e2567f71dccb04c997a5 | airbyte | test_async_job.py | 10 | 4 | https://github.com/airbytehq/airbyte.git | 2 | 34 | 0 | 14 | 54 | Python | {
"docstring": "Checks jobs status in advance and restart if some failed.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_update_job(self, parent_job, grouped_jobs, api, batch):
parent_job.update_job()
# assert
for job in grouped_jobs:
job.update_job.assert_called_once_with(batch=batch)
| |
54,446 | 216,169 | 30 | salt/modules/cp.py | 14 | 6 | def list_master_symlinks(saltenv=None, prefix=""):
if not saltenv:
salt | fixes saltstack/salt#61562 cp functions derive saltenv from config | list_master_symlinks | 2bd6323ef5f87d871891a59917ee96f44ef55e75 | salt | cp.py | 11 | 4 | https://github.com/saltstack/salt.git | 3 | 35 | 0 | 14 | 63 | Python | {
"docstring": "\n .. versionchanged:: 3005\n ``saltenv`` will use value from config if not explicitly set\n\n List all of the symlinks stored on the master\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cp.list_master_symlinks\n ",
"language": "en",
"n_whitespaces": 60,
"n_... | def list_master_symlinks(saltenv=None, prefix=""):
if not saltenv:
saltenv = __opts__["saltenv"] or "base"
return _client().symlink_list(saltenv, prefix)
| |
83,854 | 281,557 | 60 | gamestonk_terminal/stocks/options/screener_controller.py | 25 | 12 | def print_help(self):
has_screen_tickers_start = "" if self.screen_tickers else "[unvl]"
has_screen_tickers_end = "" if self.screen_tic | Terminal Wide Rich (#1161)
* My idea for how we handle Rich moving forward
* remove independent consoles
* FIxed pylint issues
* add a few vars
* Switched print to console
* More transitions
* Changed more prints
* Replaced all prints
* Fixing tabulate
* Finished replace tabulate
* Finish... | print_help | 82747072c511beb1b2672846ae2ee4aec53eb562 | OpenBBTerminal | screener_controller.py | 11 | 16 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 3 | 40 | 0 | 18 | 106 | Python | {
"docstring": "Print help[cmds]\n view view available presets (or one in particular)\n set set one of the available presets\n[/cmds]\n[param]PRESET: [/param]{self.preset}[cmds]\n\n scr screen data from this preset[/cmds]\n{has_screen_tickers_start}\n[param]Last screened tickers... | def print_help(self):
has_screen_tickers_start = "" if self.screen_tickers else "[unvl]"
has_screen_tickers_end = "" if self.screen_tickers else "[/unvl]"
help_text = f
console.print(text=help_text, menu="Stocks - Options - Screener")
| |
49,586 | 200,292 | 1,749 | sympy/testing/runtests.py | 358 | 43 | def _find(self, tests, obj, name, module, source_lines, globs, seen):
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don... | runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy | _find | 6d2bbf80752549276a968fd4af78231c569d55c5 | sympy | runtests.py | 18 | 65 | https://github.com/sympy/sympy.git | 32 | 512 | 0 | 161 | 803 | Python | {
"docstring": "\n Find tests for the given object and any contained objects, and\n add them to ``tests``.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 14
} | def _find(self, tests, obj, name, module, source_lines, globs, seen):
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don... |