content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def rgb(color):
    """Scale an RGB triple from the 0-255 range to 0.0-1.0.

    :param color: indexable sequence whose first three items are the
        red, green and blue components in 0-255.
    :return: tuple ``(r, g, b)`` of floats in 0.0-1.0.
    """
    return tuple(component / 255.0 for component in color[:3])
def timeout(process):
    """Handle a timed-out subprocess: capture up to 5 lines of its stdout,
    then kill it.

    Note: this reads the *next* 5 lines available on ``process.stdout``
    (the original docstring's "last 5 lines" claim did not match the code,
    and the original loop actually read 6 lines — 0..5 inclusive).

    :param process: a ``subprocess.Popen``-like object with a readable
        binary ``stdout`` and a ``kill()`` method.
    :return: ``(stdout, stderr, timeout_flag)`` where ``stdout`` holds up
        to 5 captured lines, ``stderr`` is always empty (never read here),
        and ``timeout_flag`` is always True.
    """
    stdout = b""
    stderr = b""  # stderr is intentionally not drained; kept for interface
    for _ in range(5):  # fix: read exactly 5 lines, not 6
        stdout += process.stdout.readline()
    process.kill()
    timeout_flag = True
    return stdout, stderr, timeout_flag
def _valid_task_name(name):
"""Check if ``name`` is a valid Fabric task name"""
if not name:
return False
if name.startswith('-'):
return False
if ' ' in name:
return False
if ':' in name:
return False
if '.' in name:
return False
return True | 42eb880e31a2d36207781156a2b578cb3912967e | 128,147 |
def merge(left, right):
    """Merge two sorted lists into a single sorted list.

    Args:
        left: A sorted list.
        right: A sorted list.
    Returns:
        A new sorted list containing all elements of both inputs.
    Requires:
        left and right are sorted (the merge is stable: ties take the
        element from ``left`` first).
    """
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these tails is non-empty once the loop exits.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
import typing
def get_lines_from_file(file: typing.IO, lines: list[int]) -> list[str]:
    """
    Get the given lines from the given file. Lines are 0 indexed; the
    returned lines are always in ascending line-number order.

    Fix: the original sorted and popped the caller's ``lines`` list in
    place; this version works on a sorted copy and leaves the argument
    untouched.

    :param file: file to get the lines from (rewound to the start first)
    :param lines: list with line numbers (not modified)
    :return: list with the requested lines, newlines removed
    """
    file.seek(0)
    remaining = sorted(lines)  # copy: do not mutate the caller's list
    current_line = 0
    line_contents = []
    while remaining and (line := file.readline()):
        if remaining[0] == current_line:
            line_contents.append(line.removesuffix("\n"))
            remaining.pop(0)
        current_line += 1
    return line_contents
def _is_empty(textrange):
"""Here `empty` means some characters exists except
`IGNORE_CHARS` (e.g. `\r`)
"""
IGNORE_CHARS = {"\r", "\013"}
return not bool(textrange.text.strip("".join(IGNORE_CHARS))) | 1e3dcbd28beefc1abcb0aea9ba40367e147784ec | 128,151 |
def getsearchresult(title='', type='', items=None):
    """Build the dict representing one group of search results.

    ``type`` shadows the builtin but is kept for interface compatibility.
    A falsy ``items`` value is replaced with a fresh empty list.
    """
    result = {'title': title, 'type': type}
    result['items'] = items or []
    return result
def load_bytes(buf, num, pos):
    """Read ``num`` bytes from ``buf`` starting at offset ``pos``.

    Returns ``(chunk, new_pos)`` on success, or the sentinel ``(0, 0)``
    when the requested range runs past the end of ``buf``.
    """
    end = pos + num
    if end <= len(buf):
        return buf[pos:end], end
    return (0, 0)
def get_vanilla_url(version: str) -> str:
    """Build the mcversions.net download URL for a vanilla server version."""
    base = "https://mcversions.net/download/"
    return base + version
import math
def _raw_hp(base: int, ev: int, iv: int, level: int) -> int:
"""Converts to raw hp
:param base: the base stat
:param ev: HP Effort Value (EV)
:param iv: HP Individual Value (IV)
:param level: pokemon level
:return: the raw hp
"""
s = math.floor((math.floor(ev / 4) + iv + 2 * base) * level / 100) + level + 10
return int(s) | 21adf05cc21d20ea4ae9ab8a16cbd3a7c2f7d4ec | 128,179 |
from typing import Any
import pickle
def load_data_object(path: str) -> Any:
    """Unpickle and return the Python object stored at ``path``.

    NOTE(review): ``pickle.load`` can execute arbitrary code — only use
    this on trusted files.

    Args:
        path: filesystem path of the pickle file.
    Returns:
        Any: the deserialized object (list, dataframe, dict, ...).
    """
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def list_2_str(data_list, join_del=""):
    """Function: list_2_str

    Description: Join the items of an iterable into one string, converting
        each item (string, integer, ...) to str first.

    Arguments:
        (input) data_list -> Iterable of items to join.
        (input) join_del -> Delimiter placed between items.
        (output) -> The joined string.
    """
    parts = [str(item) for item in data_list]
    return join_del.join(parts)
def log2(n):
    """
    Smallest integer greater than or equal to log_2(n).

    Returns 0 for n <= 1.
    """
    power = 1
    exponent = 0
    while power < n:
        power <<= 1
        exponent += 1
    return exponent
def hexdump(data, wrap=0):
    """
    Return ``data`` as space-separated uppercase hex byte pairs, starting
    a new line every ``wrap`` bytes (``wrap=0`` means a single line).
    """
    if not data:
        return ''
    width = wrap or len(data)
    rows = []
    for start in range(0, len(data), width):
        chunk = data[start:start + width]
        rows.append(' '.join('%02X' % b for b in chunk))
    return '\n'.join(rows)
import random
def random_float(min_rng: float, max_rng: float) -> float:
    """Draw a uniform random float from the range [min_rng, max_rng)."""
    span = max_rng - min_rng
    return min_rng + random.random() * span
from typing import Mapping
from typing import Iterable
def rmap(func, data):
    """
    Apply :code:`func` recursively to :code:`data`.

    Mappings recurse over their values, other iterables over their
    elements; everything else is treated as a leaf and passed to ``func``.

    Fix: ``str``/``bytes`` are now leaves.  They are ``Iterable``, and
    recursing into them never terminates (each character is itself an
    iterable one-character string).

    Examples
    --------
    >>> data = {'A': [0, 1, 2], 'B': ({'C': 3, 'D': [4, 5, 6]}, {7, 8, 9})}
    >>> rmap(lambda x: x + 1, data)
    {'A': [1, 2, 3], 'B': ({'C': 4, 'D': [5, 6, 7]}, {8, 9, 10})}
    """
    if isinstance(data, Mapping):
        return {key: rmap(func, value) for key, value in data.items()}
    if isinstance(data, (str, bytes)):
        # Strings would recurse forever; treat them as atomic leaves.
        return func(data)
    if isinstance(data, Iterable):
        return type(data)([rmap(func, elem) for elem in data])
    return func(data)
from bs4 import BeautifulSoup
def get_pages_from_grobid_xml(raw_xml: "BeautifulSoup") -> str:
    """
    Return the page range of a GROBID bib entry.

    Scans ``<biblscope unit="page" from=... [to=...]>`` tags and returns
    ``"from--to"`` when both bounds exist, just ``"from"`` otherwise, or
    ``""`` when no page scope is found.
    """
    for scope in raw_xml.find_all("biblscope"):
        if not scope.has_attr("unit") or scope["unit"] != "page":
            continue
        if not scope.has_attr("from"):
            continue
        start_page = scope["from"]
        if scope.has_attr("to"):
            return f'{start_page}--{scope["to"]}'
        return start_page
    return ""
from typing import Union
def str_to_bool_or_str(val: str) -> Union[str, bool]:
    """Possibly convert a string representation of truth to bool.

    Based on distutils.util.strtobool: 'y', 'yes', 't', 'true', 'on', '1'
    (case-insensitive) map to True; 'n', 'no', 'f', 'false', 'off', '0'
    map to False.  Any other input is returned unchanged.
    """
    truthy = {'y', 'yes', 't', 'true', 'on', '1'}
    falsy = {'n', 'no', 'f', 'false', 'off', '0'}
    lower = val.lower()
    if lower in truthy:
        return True
    if lower in falsy:
        return False
    return val
def get_xy(artist):
    """Extract the x/y coordinate data from a matplotlib artist.

    Supports Collection-like artists (via ``get_offsets``) and Line-like
    artists (via ``get_xdata``/``get_ydata``); anything else raises
    ValueError.  Dispatch is on the artist's string representation.
    """
    kind = str(artist)
    if "Collection" in kind:
        x, y = artist.get_offsets().T
        return x, y
    if "Line" in kind:
        return artist.get_xdata(), artist.get_ydata()
    raise ValueError("This type of object isn't implemented yet")
def identical_format_path(fmt1, fmt2):
    """Do the two (long representation) formats target the same file?

    Two formats match when their 'extension', 'prefix' and 'suffix'
    entries are all equal (a missing key compares as None).
    """
    keys = ('extension', 'prefix', 'suffix')
    return all(fmt1.get(key) == fmt2.get(key) for key in keys)
def format_crc64_pair(crc_pair):
    """
    Format the two 32-bit halves of a 64-bit CRC as a hex digest.

    Args:
        crc_pair (tuple): (high, low) halves of the 64-bit CRC result.
    Returns:
        str: 16-character uppercase hex checksum (digest).
    """
    high, low = crc_pair
    return f"{high:08X}{low:08X}"
import functools
def call_only_once(f):
    """Wrapper that makes sure a function is called only once.

    Useful when an expensive function is known to be invoked many times
    with the same parameters and return value.  Not memoization: the
    parameters of the *first* call dictate the stored result, and later
    calls return it regardless of their arguments (the original target's
    parameters weren't hashable, ruling out real memoization).

    Fix: the original re-invoked ``f`` whenever the cached result was
    falsy (``0``, ``''``, ``None``, ...); a separate flag now records
    whether the first call happened.
    """
    called = False
    result = None

    @functools.wraps(f)
    def wrapper(*args, **kwds):
        nonlocal called, result
        if not called:
            result = f(*args, **kwds)
            called = True
        return result

    return wrapper
def tostring(s):
    """
    Convert the object to string for py2/py3 compat.

    Bytes are decoded as UTF-8 when possible; objects without ``decode``
    or with undecodable bytes are returned unchanged.
    """
    if not hasattr(s, "decode"):
        return s
    try:
        return s.decode()
    except UnicodeDecodeError:
        return s
import hashlib
import base64
def signAsymmetric(message, client, key_path):
    """
    Sign ``message`` with a private key held in Cloud KMS and return the
    signature from the response (or None when absent).

    Note: some key algorithms require a different hash function — for
    example, EC_SIGN_P384_SHA384 requires SHA384; SHA-256 is used here.
    """
    digest_bytes = hashlib.sha256(message.encode('ascii')).digest()
    encoded_digest = base64.b64encode(digest_bytes).decode('utf-8')
    request_body = {'digest': {'sha256': encoded_digest}}
    # Walk the discovery-client resource chain down to key versions.
    versions = (client.projects()
                      .locations()
                      .keyRings()
                      .cryptoKeys()
                      .cryptoKeyVersions())
    response = versions.asymmetricSign(name=key_path,
                                       body=request_body).execute()
    return response.get('signature', None)
def group_by_simulation(d):
    """
    Group metrics by simulation (gset, awset, ...).

    The simulation name is the part of each key before the first "~".
    Returns ``{simulation: {original_key: value, ...}, ...}``.
    """
    grouped = {}
    for key, value in d.items():
        simulation = key.split("~")[0]
        grouped.setdefault(simulation, {})[key] = value
    return grouped
def huber_loss_spatial(dvf):
    """
    Approximated spatial Huber loss of a displacement vector field.

    Args:
        dvf: Tensor of shape (N, 2, H, W), the estimated displacement
            vector field.
    Returns:
        Scalar tensor: mean over batch and space of
        sqrt(|d/dx|^2 + |d/dy|^2 + eps).
    """
    eps = 1e-8  # numerical stability inside the sqrt
    inner = dvf[:, :, 1:, 1:]
    d_dx = inner - dvf[:, :, :-1, 1:]  # (N, 2, H-1, W-1)
    d_dy = inner - dvf[:, :, 1:, :-1]  # (N, 2, H-1, W-1)
    magnitude_sq = (d_dx.pow(2) + d_dy.pow(2)).sum(dim=1) + eps
    return magnitude_sq.sqrt().mean()
def load_raw_image(path):
    """
    Load raw image contents in binary mode from the specified path.

    Also see :py:func:`.save_raw_image`.

    :param str path: File path.
    :return: Raw file contents.
    :rtype: bytes
    """
    with open(path, 'rb') as handle:
        data = handle.read()
    return data
import torch
import math
def _get_survey_extents_one_side(pad, side, source_locations,
receiver_locations, shape, dx):
"""Get the survey extent for the left or right side of one dimension.
Args:
pad: Positive float specifying padding for the side
side: 'left' or 'right'
source/receiver_locations: Tensor with coordinates for the current
dimension
shape: Int specifying length of full model in current dimension
dx: Float specifying cell spacing in current dimension
Returns:
Min/max index as int or None
"""
if pad is None:
return None
if side == 'left':
pad = -pad
op = torch.min
nearest = math.floor
else:
pad = +pad
op = torch.max
nearest = math.ceil
extreme_source = op(source_locations + pad)
extreme_receiver = op(receiver_locations + pad)
extreme_cell = nearest(op(extreme_source, extreme_receiver).item() / dx)
if side == 'right':
extreme_cell += 1
if (extreme_cell <= 0) or (extreme_cell >= shape):
extreme_cell = None
return extreme_cell | 761fe0e282fd30c1bbd8cbbc9a9d5249b7a8c04f | 128,236 |
from datetime import datetime
def from_parse_datetimes(ts_fmt, ts_col, event_list):
    """
    Build a planning list by parsing ``event_list[:][ts_col]`` with
    ``ts_fmt``.

    Each output row is a new list whose first element is the parsed
    datetime, followed by the original row's elements.
    """
    planning = []
    for event in event_list:
        stamp = datetime.strptime(event[ts_col], ts_fmt)
        planning.append([stamp] + event)
    return planning
def make_batch_sizes(num_records, max_batch_size):
    """Generate a sequence of batch sizes from a total record count and a
    maximum batch size.

    Fix: the original computed the number of full batches and the
    remainder with float division/modulo, which can round incorrectly
    for large counts; integer ``divmod`` is exact.

    Parameters
    ----------
    num_records : int
        Overall number of records
    max_batch_size : int
        Maximum number of records in a batch

    Returns
    -------
    tuple of int
        Batch sizes: full batches of ``max_batch_size`` followed by an
        optional smaller remainder batch.
    """
    if num_records <= max_batch_size:
        # Single (possibly empty) batch; preserves the (0,) result for 0.
        return (num_records,)
    full_batches, remainder = divmod(num_records, max_batch_size)
    batches = [max_batch_size] * full_batches
    if remainder > 0:
        batches.append(remainder)
    return tuple(batches)
def tupleToDictRGB(rgb_list: list) -> dict:
    """
    Convert the first three entries of an RGB sequence to a dict.

    :param list rgb_list: sequence whose items 0..2 are R, G, B
    :return: dictionary with keys "r", "g", "b"
    """
    red, green, blue = rgb_list[0], rgb_list[1], rgb_list[2]
    return {"r": red, "g": green, "b": blue}
def generate_all_dets(norbs):
    """Generate all determinants over ``norbs`` orbitals.

    Every occupation pattern (all particle numbers and total spins) is
    enumerated via the binary representation of 0 .. 2**norbs - 1.

    Args
    ----
    norbs : int
        The number of orbitals.

    Returns
    -------
    dets : list of (tuple of int)
        For each determinant, the tuple of occupied orbital indices.
    """
    dets = []
    for det in range(2 ** norbs):
        bits = bin(det)[2:].zfill(norbs)
        occupied = tuple(pos for pos, bit in enumerate(bits) if bit == '1')
        dets.append(occupied)
    return dets
def apply(rom, patches):
    """Apply a list of patches to ROM data in place.

    Arguments:
        rom {list} -- List of bytes depicting the ROM data to be patched.
        patches {list} -- List of single-entry dicts mapping an offset
            (int-convertible) to the list of byte values to write there.
    Returns:
        list -- the same ROM list, with all patches written.
    """
    for patch in patches:
        entries = list(patch.items())
        raw_offset, values = entries[0]  # only the first entry is applied
        offset = int(raw_offset)
        for shift, value in enumerate(values):
            rom[offset + shift] = value
    return rom
def _find_user(oneandone_conn, user):
"""
Validates that the user exists by ID or a name.
Returns the user if one was found.
"""
for _user in oneandone_conn.list_users(per_page=1000):
if user in (_user['id'], _user['name']):
return _user | b16030b31868b5beac42801dff7d4ac9d617d317 | 128,258 |
def parse_size(image_size):
    """
    Parse a "WIDTHxHEIGHT" string like "640x480" into (width, height).
    """
    width_str, height_str = image_size.split('x')
    return int(width_str), int(height_str)
from typing import Callable
def check_callable_arguments(func: Callable, *args) -> Callable:
    """
    Check that ``func``'s positional parameter names start with ``args``.

    Args:
        func (): Callable whose parameter names are inspected.
        *args (): Expected parameter names, in order.
    Returns:
        Callable: ``func`` when the check passes; otherwise ValueError
        is raised.
    """
    var_names = func.__code__.co_varnames
    for position, expected in enumerate(args):
        if var_names[position] != expected:
            raise ValueError(
                f"check_function_arguments: Function {func} "
                f"does not have the argument {expected} in the right position.")
    return func
def sum(i: int, j: int) -> int:
    """Sum two integers.

    Note: shadows the builtin ``sum``; the name is kept for interface
    compatibility.

    Parameters
    ----------
    i: First integer
    j: Second integer

    Returns
    -------
    Sum of the two integers.
    """
    total = i + j
    return total
def run_2_names_from(worklow_report):
    """Build both run-number -> run-name and run-name -> run-number maps
    from the workflow report's ``workflow_index`` and ``acquired_name``
    columns."""
    indices = worklow_report.workflow_index.values
    names = worklow_report.acquired_name.values
    index_to_name = dict(zip(indices, names))
    name_to_index = dict(zip(names, indices))
    return index_to_name, name_to_index
import click
def _validate_month(ctx, param, val: int) -> int:
"""
Helper function to validate a month coming from the CLI.
Arguments:
ctx: click cmd context
param: required default parameter
val: value to validate (month between 1-12)
Returns:
val: validated month value
Raises:
click.BadParameter: When month value does not lie in between 1-12
"""
if val < 1 or val > 12:
raise click.BadParameter('Month must be between 1 and 12')
return val | 18b54277156437fa18a1d7ca4f4108230bd6338d | 128,277 |
def _inclusive_range(start, stop=None, step=1):
"""return a range i <= j <= k
if k is not provided, return a range containing only i.
step is also supported and defaults to 1
"""
if stop is None:
stop = start
stop += 1
return range(start, stop, step) | 9322f2e80926494b3f0814a11ffb03cb6f75deeb | 128,282 |
import re
def parse_cnn_logs(filename):
    """Parse a CNN log and return (true positives, total test cases).

    The counts are read from a trailing "TP / TOTAL" fraction on the
    file's last line.

    Fix: the original pattern ``[0-9]. \\/ [0-9].$`` only matched counts
    that were exactly two characters wide (``.`` matched the second
    character); digit runs of any length are now accepted.
    """
    with open(filename, 'r') as handle:
        lines = [l.rstrip() for l in handle]
    summary_line = lines[-1]
    match = re.search(r"(\d+)\s*/\s*(\d+)\s*$", summary_line)
    tp, total = int(match.group(1)), int(match.group(2))
    return tp, total
def read_add_mos_args(add_mos_result, get_opt_args=False):
    """
    Collect the arguments recorded by add_mos.

    :param add_mos_result: iterable of dicts with 'key_args' (and
        optionally 'opt_args') entries
    :param get_opt_args: when True, also collect 'opt_args'
    :return: list of key_args, or (key_args, opt_args) when
        ``get_opt_args`` is True
    """
    key_args = []
    opt_args = []
    for entry in add_mos_result:
        key_args.append(entry['key_args'])
        if get_opt_args:
            opt_args.append(entry['opt_args'])
    return (key_args, opt_args) if get_opt_args else key_args
def fix_nextprog(forest_halos):
    """
    Walks the descendants of a single forest to generate the
    ``NextProgenitor`` field.

    For every halo with a descendant, the descendant's progenitors form a
    linked list: ``FirstProgenitor`` points at the head and each node's
    ``NextProgenitor`` points at the next one, with -1 terminating the
    list.  This function appends each halo to the tail of its
    descendant's progenitor list.

    Parameters
    ----------
    forest_halos : Numpy structured array with data structure defined by
        :py:mod:`astro3D.genesis.utils.treefrog_to_lhalo.get_LHalo_datastruct`
        The halos within a single forest.  Modified in place.

    Returns
    ----------
    forest_halos : Numpy structured array with data structure defined by
        :py:mod:`astro3D.genesis.utils.treefrog_to_lhalo.get_LHalo_datastruct`
        The same array, with the ``NextProgenitor`` field updated.
    """
    all_descendants = forest_halos["Descendant"][:]
    for ii, d in enumerate(all_descendants):
        # A halo that is its own descendant has no list to join.
        if d == ii:
            continue
        curr = forest_halos["FirstProgenitor"][d]
        # Halo ii is already the head of the descendant's progenitor list.
        if curr == ii:
            continue
        # Walk to the current tail of the linked list (-1 terminator).
        while forest_halos["NextProgenitor"][curr] != -1:
            curr = forest_halos["NextProgenitor"][curr]
        assert(forest_halos["NextProgenitor"][curr] == -1)
        # Append halo ii as the new tail of the list.
        forest_halos["NextProgenitor"][curr] = ii
    return forest_halos
import random
def _generate_reason_end() -> str:
"""
Picks a reason for an episode to end - this can be any from the codeset except X1
:returns: Reason for ending string
"""
return random.choice([
'E11', 'E12', 'E2', 'E3', 'E4A', 'E4B', 'E13', 'E41', 'E45', 'E46', 'E47', 'E48', 'E5',
'E6', 'E7', 'E9', 'E14', 'E15', 'E16', 'E17', 'E8'
]) | 2248dd63d78ad6d7cfea74885cb522db18b9ee74 | 128,297 |
def multi_char_literal(chars):
    """Emulate C multi-character integer literals.

    Given "abc", returns the value of the C single-quoted literal 'abc'
    (each character packed into successive bytes, first char highest).
    """
    value = 0
    for char in chars:
        # Shift the accumulated value one byte left, OR in the next char.
        value = (value << 8) | ord(char)
    return value
def file_extensions_all_equal(ext_list):
    """Check that all file extensions in the list are equal.

    Args:
        ext_list (list): file extensions, eg ['.csv', '.csv']
    Returns:
        bool: True when the list holds exactly one distinct extension
        (an empty list therefore returns False).
    """
    unique_exts = set(ext_list)
    return len(unique_exts) == 1
from typing import List
from typing import Dict
from typing import Any
def get_actr_functions() -> List[Dict[str, Any]]:
    """Creates a list of all the ACT-R functions as MDF specifications.

    Returns:
        A list of MDF function specifications, each a dict with keys
        ``name``, ``description``, ``arguments`` and
        ``expression_string``.
    """
    # (name, description, arguments, expression_string) per function.
    specs = [
        ("change_goal",
         "ACT-R change goal buffer function",
         ["pattern", "curr_goal"],
         "actr.change_goal(pattern, curr_goal)"),
        ("retrieve_chunk",
         "ACT-R retrieve chunk function",
         ["pattern", "dm_chunks", "types"],
         "actr.retrieve_chunk(pattern, dm_chunks, types)"),
        ("pattern_matching_function",
         "ACT-R pattern matching function",
         ["productions", "goal", "retrieval"],
         "actr.pattern_matching_function(productions, goal, retrieval)"),
        ("conflict_resolution_function",
         "ACT-R conflict resolution function",
         ["productions"],
         "actr.conflict_resolution_function(productions)"),
        ("update_goal",
         "ACT-R update goal buffer function",
         ["production"],
         "actr.update_goal(production)"),
        ("update_retrieval",
         "ACT-R update retrieval buffer function",
         ["production"],
         "actr.update_retrieval(production)"),
        ("check_termination",
         "check_termination",
         ["production"],
         "actr.check_termination(production)"),
    ]
    return [
        dict(name=name, description=description,
             arguments=arguments, expression_string=expression_string)
        for name, description, arguments, expression_string in specs
    ]
from bs4 import BeautifulSoup
import requests
def tryUpdateImgurURL(url):
    """Try to resolve an imgur page URL to the actual image URL.

    Fetches the page and looks for the ``twitter:image`` metadata tag,
    e.g. ``<meta content="https://i.imgur.com/x.jpg" name="twitter:image"/>``.
    Non-imgur URLs, and pages without the tag, are returned unchanged.
    """
    if 'imgur' not in url:  # only attempt on urls that mention imgur
        return url
    page = BeautifulSoup(requests.get(url).content, "lxml")
    for tag in page.find_all('meta'):
        if tag.attrs.get('name') == "twitter:image":
            return tag['content']
    return url
def get_wandb_project_dict(project_name):
    """Return the wandb project dictionary for a known project.

    Args:
        project_name (str): wandb project name
    Returns:
        dict: dataset name, split name and the best models' run ids, or
        None when the project name is unknown.
    """
    projects = {
        "lastfm_dat": {
            "dataset": "LastFM",
            "split_name": "kcore10_stratified",
            "bprmf": "o6a7sngq",
            "ngcf": "zwxafb6r",
            "lrgccf": "qys5xomj",
            "lightgcn": "kn8qq88a",
            "fism": "dxs2avyz",
            "igccf": "guvmz8e2",
        },
        "ml1m_dat": {
            "dataset": "Movielens1M",
            "split_name": "kcore10_stratified",
            "bprmf": "yzdzx1jm",
            "ngcf": "wxjd3sk9",
            "lightgcn": "2p95cak5",
            "fism": "hitn1f45",
            "igccf": "jfq47wsf",
        },
        "gowalla_dat": {
            "dataset": "Gowalla",
            "split_name": "kcore10_stratified",
            "bprmf": "oq6rgsmj",
            "ngcf": "svsr8gx5",
            "lightgcn": "0w013uy1",
            "igccf": "j0t03v1w",
            "fism": "j0djegaq",
        },
        "Amaz_dat": {
            "dataset": "AmazonElectronics",
            "split_name": "kcore10_stratified",
            "bprmf": "hxrtbycw",
            "ngcf": "kj51vh0g",
            "lightgcn": "zt6ccusa",
            "fism": "c0gbd3hz",
            "igccf": "4janl10s",
        },
    }
    return projects.get(project_name)
def update(i, v, xs):
    """Return a new list copying ``xs`` with the element at index ``i``
    replaced by ``v``.

    An out-of-range or negative ``i`` simply yields an unchanged copy
    (only exact matches against enumerate indices are replaced).
    """
    result = []
    for index, item in enumerate(xs):
        result.append(v if index == i else item)
    return result
def remove_leading(needle, haystack):
    """Remove a leading ``needle`` prefix from ``haystack`` if present.

    >>> remove_leading('Test', 'TestThisAndThat')
    'ThisAndThat'
    >>> remove_leading('Test', 'ArbitraryName')
    'ArbitraryName'
    """
    if haystack.startswith(needle):
        return haystack[len(needle):]
    return haystack
def process_benchmark_df(df_input):
    """Clean up the featurized benchmark dataframe.

    Selects the relevant columns, renames them to match
    torrance_tabulated.xlsx, drops rows containing NA values, sorts by
    formula and reindexes.
    """
    selected_cols = ["formula", "avg_mx_dists", "avg_mm_dists", "iv", "iv_p1",
                     "v_m", "v_x", "est_hubbard_u", "est_charge_trans"]
    rename_map = {"avg_mx_dists": "d_mo", "avg_mm_dists": "d_mm", "v_x": "v_o",
                  "est_hubbard_u": "hubbard", "est_charge_trans": "charge_transfer"}
    cleaned = (df_input[selected_cols]
               .rename(columns=rename_map)
               .dropna()
               .sort_values("formula")
               .reset_index(drop=True))
    return cleaned
def coordinatewise_product(L):
    """
    Return the coordinatewise product of a list of vectors.

    INPUT:

    - ``L`` is a list of `n`-vectors or lists all of length `n` with a common
      parent. This returns the vector whose `i`-th coordinate is the product of
      the `i`-th coordinates of the vectors.

    EXAMPLES::

        sage: from sage.combinat.designs.incidence_structures import coordinatewise_product
        sage: L = [[1,2,3],[-1,-1,-1],[5,7,11]]
        sage: coordinatewise_product(L)
        [-5, -14, -33]
    """
    length = len(L[0])
    product = [1] * length
    for vector in L:
        product = [product[k] * vector[k] for k in range(length)]
    return product
def get_unique_filename(names: list, name: str, ext: str) -> str:
    """Return a filename not present in ``names``.

    If ``name.ext`` is taken, numeric suffixes are appended to the stem
    ("name1.ext", "name2.ext", ...) until a free one is found.

    :param names: list of already-taken filenames, WITH extensions
    :param name: desired stem, without extension
    :param ext: extension WITHOUT the leading dot
    :return: a unique filename not present in ``names``
    """
    candidate = f"{name}.{ext}"
    if candidate not in names:
        return candidate
    suffix = 1
    candidate = f"{name}{suffix}.{ext}"
    while candidate in names:
        suffix += 1
        candidate = f"{name}{suffix}.{ext}"
    return candidate
def nightly_calendar(twilight_evening, twilight_morning, time_windows, verbose = False):
    """
    Sort observation time windows by nightly observing window.

    (NOTE(review): "tot_time" in the original documentation appears to be
    a search-and-replace artifact for "time"; the parameters are plain
    time values.)

    Parameters
    ----------
    twilight_evening : '~astropy.time.core.Time'
        Evening twilight for the scheduling period (UTC); used as the
        start of the night.
    twilight_morning : '~astropy.time.core.Time'
        Morning twilight for the scheduling period (UTC); used as the
        end of the night.
    time_windows : list
        Per-observation lists of (start, end) time pairs; an entry may be
        None when that observation has no windows.
    verbose : bool, optional
        Print tracing output while scanning.

    Returns
    -------
    i_obs : list of int
        Indices of observations with a time window overlapping the night.
    obs_windows : list of lists of time pairs
        The overlapping windows corresponding to each index in ``i_obs``.
    """
    # Night boundaries are simply the provided twilight times.
    night_start = twilight_evening
    night_end = twilight_morning
    if verbose:
        print('\nDate window (start,end): ', night_start.iso, night_end.iso)
    i_obs = []  # indices of observations visible this night
    obs_windows = []  # time windows corresponding to i_obs
    for i in range(len(time_windows)):  # cycle through observations
        if verbose:
            print('\tobs i:', i)
        if time_windows[i] is not None:
            obs_wins = []
            for j in range(len(time_windows[i])):  # cycle through this obs' windows
                if verbose:
                    print('\t\ttime_window[' + str(i) + '][' + str(j) + ']:',
                          time_windows[i][j][0].iso, time_windows[i][j][1].iso)
                # Keep the window if it overlaps the night:
                # window end >= night start AND window start <= night end.
                if time_windows[i][j][1] >= night_start and night_end >= time_windows[i][j][0]:
                    obs_wins.append(time_windows[i][j])
                    if verbose:
                        print('\t\t\tadded window')
            # If any window overlapped the night, record the observation
            # index together with its overlapping windows.
            if len(obs_wins) != 0:
                i_obs.append(i)
                obs_windows.append(obs_wins)
                if verbose:
                    print('\t\tadded obs index'
                          ' to list')
        else:
            # No windows defined for this observation; skip it.
            if verbose:
                print('\t\t\ttime_window[' + str(i) + ']:', time_windows[i])
            pass
    return i_obs, obs_windows
def is_png(data):
    """True if ``data`` starts with the 8-byte PNG file signature.

    Fix: the signature must be a *bytes* literal — comparing a bytes
    slice to a str (as the original did) is always False on Python 3.
    """
    return data[:8] == b'\x89PNG\x0d\x0a\x1a\x0a'
def _interval_from_str(s):
"""Turn e.g. "1981-1988" into [1981, 1988]"""
return map(int, s.split("-")) | 2048959a7fe14ebf3ad1fa8fcd94ad1d491fab67 | 128,337 |
from datetime import datetime
import math
def calc_travel_3d(current_plane, lead_s: float):
    """Extrapolate the 3D position of an aircraft.

    Dead-reckons from the aircraft's last reported position using its
    ground speed, track and vertical rate, projected forward by the age
    of each report plus ``lead_s`` seconds of look-ahead.

    Arguments:
        current_plane {dict} -- Aircraft state with keys: "lat", "lon"
            (degrees), "altitude" (meters), "latLonTime" and
            "altitudeTime" (UTC timestamp strings), "groundSpeed"
            (presumably meters per second — the distance computation
            divides by 1000 to get km; TODO confirm units against the
            data source), "track" (degrees), "verticalRate"
            (presumably meters per second — TODO confirm).
        lead_s {float} -- Seconds of additional look-ahead.

    Returns:
        Tuple[float, float, float] -- New (latitude deg, longitude deg,
        altitude meters).
    """
    lat = current_plane["lat"]
    lon = current_plane["lon"]
    alt = current_plane["altitude"]
    # The time values sometimes lack a microsecond component when they
    # fall exactly on a second boundary, so both formats are tried.
    try:
        lat_lon_time = datetime.strptime(current_plane["latLonTime"], '%Y-%m-%d %H:%M:%S.%f')
    except ValueError:
        lat_lon_time = datetime.strptime(current_plane["latLonTime"], '%Y-%m-%d %H:%M:%S')
    try:
        altitude_time = datetime.strptime(current_plane["altitudeTime"], '%Y-%m-%d %H:%M:%S.%f')
    except ValueError:
        altitude_time = datetime.strptime(current_plane["altitudeTime"], '%Y-%m-%d %H:%M:%S')
    speed_mps = current_plane["groundSpeed"]
    heading = current_plane["track"]
    climb_rate = current_plane["verticalRate"]
    # Age of each report relative to now (naive UTC), plus the lead time.
    lat_lon_age = datetime.utcnow() - lat_lon_time
    lat_lon_age_s = lat_lon_age.total_seconds() + lead_s
    alt_age = datetime.utcnow() - altitude_time
    alt_age_s = alt_age.total_seconds() + lead_s
    R = float(6371)  # Earth radius in km
    brng = math.radians(heading)  # bearing (track) in radians
    d = float((lat_lon_age_s * speed_mps) / 1000.0)  # distance travelled, km
    lat1 = math.radians(lat)  # current latitude in radians
    lon1 = math.radians(lon)  # current longitude in radians
    # Great-circle destination point given start, bearing and distance.
    lat2 = math.asin(math.sin(lat1)*math.cos(d/R) + math.cos(lat1)*math.sin(d/R)*math.cos(brng))
    lon2 = lon1 + math.atan2(math.sin(brng)*math.sin(d/R)*math.cos(lat1), math.cos(d/R)-math.sin(lat1)*math.sin(lat2))
    lat2 = math.degrees(lat2)
    lon2 = math.degrees(lon2)
    # Altitude extrapolated linearly from the climb rate.
    alt2 = alt+climb_rate*alt_age_s
    return (lat2, lon2, alt2)
def sum_multiples(start, end, divisor):
    """Sum the multiples of ``divisor`` in the inclusive range
    [start, end].

    >>> sum_multiples(1, 12, 4)
    24
    >>> sum_multiples(1, 12, 3)
    30
    """
    total = 0
    for value in range(start, end + 1):
        if value % divisor == 0:
            total += value
    return total
def get_daycare_ratio(school_type):
    """Return the ratio of children in daycare for a given school type.

    Raises KeyError for unknown school types.
    """
    ratios = {
        'primary': 0,
        'primary_dc': 0.5,
        'lower_secondary': 0,
        'lower_secondary_dc': 0.38,
        'upper_secondary': 0,
        'secondary': 0,
        'secondary_dc': 0.36,
    }
    return ratios[school_type]
def format_y(n, pos):
    """Tick formatter rendering values with K/M metric suffixes.

    ``pos`` is unused; it is part of the matplotlib formatter signature.
    """
    for threshold, divisor, suffix in ((1e6, 1e-6, 'M'), (1e3, 1e-3, 'K')):
        if n >= threshold:
            return '%1.0f%s' % (n * divisor, suffix)
    return '%1.0f' % n
import re
def remove_comments_from_code(code: str) -> str:
    """Removes comments and string literals from code.

    :param code: input string to process
    :returns: The code without comments and strings.
    """
    # (pattern, replacement) pairs applied in order.
    substitutions = (
        (r"(#+)(.*)\n", "\n"),            # single line comments
        (r"\"\"\"[\s\S]*?\"\"\"", ""),    # triple-double-quoted blocks
        (r'\'\'\'[\s\S]*?\'\'\'', ""),    # triple-single-quoted blocks
        (r"\"[\s\S]*?\"", ""),            # double-quoted strings
        (r'\'[\s\S]*?\'', ""),            # single-quoted strings
    )
    for pattern, replacement in substitutions:
        code = re.sub(pattern, replacement, code)
    return code
def last(array):
    """Return the final element of a sequence.

    Raises IndexError when the sequence is empty.
    """
    final_item = array[-1]
    return final_item
def flatten_image(img):
    """Collapse an image's spatial dimensions into one axis of pixels.

    :param img: numpy, shape (H, W, C)
    :return: (H * W, C)
    """
    n_rows, n_cols, n_channels = img.shape
    return img.reshape(n_rows * n_cols, n_channels)
def get_rr_p_parameter_default(nn: int) -> float:
    """
    Returns p for the default expression outlined in arXiv:2004:14766.

    The value decays with subset size but is floored at 0.1.

    Args:
        nn (int): number of features currently in chosen subset.

    Returns:
        float: the value for p.
    """
    decayed = 4.5 - 0.4 * nn ** 0.4
    return decayed if decayed > 0.1 else 0.1
def crop_lsun_image_coords(height: int,
                           width: int,
                           crop_size: int):
    """
    Compute center-crop coordinates (y1, y2, x1, x2) for an image.

    The original images are either (x, 256) or (256, x) where x >= 256.

    Args:
        height: Image height in pixels.
        width: Image width in pixels.
        crop_size: Side length of the square crop.
    """
    top = (height - crop_size) // 2
    left = (width - crop_size) // 2
    return top, top + crop_size, left, left + crop_size
from datetime import datetime
def ndays(date1, date2):
    """
    Number of Days between two dates

    NOTE(review): the result is ``abs((date2 - date1).days) - 1`` — i.e. the
    count of whole days strictly BETWEEN the two dates, not the calendar
    difference. Adjacent dates return 0 and equal dates return -1. Confirm
    callers rely on this off-by-one before changing it.

    :param date1: Start date
    :param date2: End date
    :type date1: datetime.datetime or date tuple (year, date, month)
    :type date2: datetime.datetime or date tuple (year, date, month)
    :return: Number of Days between two dates
    :rtype: int
    """
    # Tuples are accepted as (year, month, day) positional datetime args.
    if isinstance(date1, tuple):
        date1 = datetime(*date1)
    if isinstance(date2, tuple):
        date2 = datetime(*date2)
    Ndays = (date2 - date1).days
    Ndays = abs(Ndays) - 1
    return Ndays
def select_regions(df, regions, col_name="denominazione_regione"):
    """
    Select rows by values in regions from column col_name

    :param df: pandas dataFrame
    :param regions: a list of values
    :param col_name: a string indicating the column
    :return: a new DataFrame with only the selected rows of df
    """
    membership_mask = df[col_name].isin(regions)
    return df[membership_mask].reset_index()
def groupList(grpName):
    """
    returns a list of resNames corresponding to grpName
    """
    # Most groups map to themselves; 'COO' is the only composite group.
    simple_groups = ("ASP", "GLU", "C- ", "CYS", "TYR", "HIS", "LYS", "ARG")
    mapping = {name: [name] for name in simple_groups}
    mapping['COO'] = ["ASP", "GLU", "C- "]
    return mapping[grpName]
def diameter(d_2):
    """Calculate internal diameter at the start angle.

    :param d_2 (float): diameter of the impeller at section 2 [m]
    :return d (float): diameter, rounded to 3 decimals [m]
    """
    return round(d_2 * 1.1, 3)
def clean_legal_action_cause(cause):
    """Normalize the statement of the cause for a legal action.

    Currently normalization means trimming surrounding whitespace only.
    """
    normalized = cause.strip()
    return normalized
def raise_skip_event(events, event_name, *event_args):
    """Execute all functions defined for an event of a parser.

    Every registered handler is invoked (even after one signals a skip).
    If any handler returns exactly ``False``, the event is asking to skip
    the associated function and this call returns ``True``.

    Args:
        events (dict): Dictionary with all events defined.
        event_name (str): Name of the event whose functions will be executed.
        *event_args: Arguments propagated to the events functions.

    Returns:
        bool: ``True`` if an event function returns ``False``, ``False``
        otherwise.
    """
    if event_name not in events:
        return False
    # Materialize all handler results first so every handler runs.
    results = [handler(*event_args) for handler in events[event_name]]
    # Identity check: only a literal False (not 0 or other falsy) skips.
    return any(result is False for result in results)
from typing import Optional
def get_iree_benchmark_module_arguments(
    results_filename: str,
    driver: str,
    benchmark_min_time: Optional[float] = None):
  """Returns the common arguments to run iree-benchmark-module."""
  # VMVX is very unoptimized for now and can take a long time to run.
  # Decrease the repetition for it until it's reasonably fast.
  repetitions = 3 if driver == "iree-vmvx" else 10
  args = [
      "--benchmark_format=json",
      "--benchmark_out_format=json",
      f"--benchmark_out={results_filename}",
  ]
  if benchmark_min_time:
    args.append(f"--benchmark_min_time={benchmark_min_time}")
  else:
    args.append(f"--benchmark_repetitions={repetitions}")
  return args
def format_shape_listing(data):
    """Return a formatted, 1-indexed listing of shapes, one per line.

    Args:
        data (list): A list of shape objects.

    Returns:
        The formatted list as string
    """
    # cSpell:ignore ocpus
    lines = []
    for index, item in enumerate(data, start=1):
        lines.append(
            f"{index:>4} {item.shape:20} {item.ocpus:5.1f}x "
            f"{item.processor_description[:22]:22} "
            f"{item.memory_in_gbs:5.0f}GB Ram\n"
        )
    return "".join(lines)
def module_is_package(module):
    """ Get whether the given module represents a package.

    A package's ``__file__`` points at an ``__init__`` file; built-in
    modules without ``__file__`` are never packages.
    """
    if not hasattr(module, '__file__'):
        return False
    stem = module.__file__.rsplit('.', 1)[0]
    return stem.endswith('__init__')
import re
def title_to_dirname(text: str) -> str:
    """Converts a title into a sanitized dirname.
    title_to_dirname('Godot for Designers') outputs 'godot-for-designers'"""
    dashed = text.lower().replace(" ", "-").replace(".", "-")
    # Drop punctuation and any bracketed/parenthesized spans.
    cleaned = re.sub(r"[_:,/?]|(\[.*\])|(\(.*\))", "", dashed)
    # Collapse runs of dashes left behind by the removals.
    return re.sub(r"-+", "-", cleaned, flags=re.DOTALL)
def filter_(func, seq):
    """ imitates built-in `filter`, returning tuples

    Like the builtin, a ``None`` predicate keeps truthy items.
    """
    if func is None:
        return tuple(item for item in seq if item)
    return tuple(item for item in seq if func(item))
import zipfile
import pickle
def pickle_load(path, compression=False):
    """
    Unpickle a possible compressed pickle.

    SECURITY NOTE(review): ``pickle.load`` executes arbitrary code during
    deserialization — only call this on files from a trusted source.

    :param path: (str) path to the output file
    :param compression: (bool) if true assumes that pickle was compressed when created and attempts decompression.
    :return: (Object) the unpickled object
    """
    if compression:
        # Compressed pickles are stored as a single member named "data"
        # inside a deflate-compressed zip archive.
        with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
            with myzip.open("data") as file_handler:
                return pickle.load(file_handler)
    else:
        with open(path, "rb") as file_handler:
            return pickle.load(file_handler)
def get_wallpapers_filenames(get_wallpapers_urls_from_file) -> tuple:
    """
    Fixture. Get names of wallpapers files.

    Args:
        get_wallpapers_urls_from_file (Fixture): fixture that return
            names and URLs of the wallpapers as a dict.

    Returns:
        tuple: tuple of two sorted lists with filenames of wallpapers.
        First list contains filenames of wallpapers with calendar
        (name contains "-cal-"). Second list contains filenames of
        wallpapers without calendar.
    """
    with_calendar = []
    without_calendar = []
    for filename in get_wallpapers_urls_from_file.keys():
        if "-cal-" in filename:
            with_calendar.append(filename)
        else:
            without_calendar.append(filename)
    # BUGFIX: list.sort() returns None, so the previous version returned
    # (None, None). Sort in place first, then return the lists themselves.
    with_calendar.sort()
    without_calendar.sort()
    return (with_calendar, without_calendar)
from typing import Callable
def linear_schedule(initial_value: float) -> Callable[[float], float]:
    """Linear schedule, e.g. for learning rate.

    Args:
        initial_value: the initial value.

    Returns:
        A function computing the current output.
    """
    def _schedule(progress: float) -> float:
        """Scale ``initial_value`` by remaining progress (1 = start, 0 = end)."""
        return initial_value * progress
    return _schedule
from pathlib import Path
def find(path, pattern, sort=True):
    """
    Recursively search for files in a directory.

    Args:
        path (str): Path to the directory.
        pattern (str): The pattern of file names.
        sort (bool, optional): Whether to sort the results. Default: ``True``.

    Returns:
        list: The list of found files.
    """
    matches = [str(match) for match in Path(path).rglob(pattern)]
    return sorted(matches) if sort else matches
import math
def vectorLength(vector):
    """Calculate the Euclidean length of the given vector.

    Args:
        vector: A 2D tuple.

    Returns:
        The Euclidean length of the vector.
    """
    dx, dy = vector
    return math.sqrt(dx ** 2 + dy ** 2)
def any_tasks_failed(tasks):
    """Return True if any rq job failed.

    Tasks whose job is no longer available in rq are skipped.
    """
    for task in tasks:
        # Fetch the job once per task: the previous version called
        # get_rq_job() twice (once for the None check, once for the status),
        # doubling the lookup cost.
        job = task.get_rq_job()
        if job is not None and job.get_status() == "failed":
            return True
    return False
import string
import random
def get_random_str(length):
    """
    function to generate random string of given length

    Characters are drawn (with replacement) from ASCII letters and digits.

    BUGFIX: the previous random.sample-based version sampled WITHOUT
    replacement, so it raised ValueError for length > 62 and could never
    repeat a character. random.choices matches the documented contract.

    NOTE: not cryptographically secure — use the ``secrets`` module for
    tokens or passwords.
    """
    alphabet = string.ascii_lowercase + string.ascii_uppercase + string.digits
    return ''.join(random.choices(alphabet, k=length))
def is_valid_match(match, text):
    """True unless the match starts or ends in the middle of a word.

    NOTE(review): ``match`` appears to be a ``(end_index, phrase_mapping)``
    pair where ``phrase_mapping.phrase_length`` gives the matched span's
    length — confirm against the caller that builds these matches.
    """
    end, phrase_mapping = match
    # Inclusive start index of the matched span within `text`.
    start = end - phrase_mapping.phrase_length + 1
    return (
        # Right boundary is valid if the match ends the text, or the next
        # character is non-alphanumeric, or the match itself ends on a
        # non-alphanumeric character.
        end + 1 == len(text)
        or (not text[end + 1].isalnum())
        or (not text[end].isalnum())
    ) and (
        # Mirror-image check for the left boundary.
        start == 0
        or (not text[start].isalnum())
        or (not text[start - 1].isalnum())
    )
def invert_colors(clip):
    """ Returns the color-inversed clip.

    The values of all pixels are replaced with (255-v) or (1-v) for masks
    Black becomes white, green becomes purple, etc.
    """
    peak = 1.0 if clip.ismask else 255

    def invert(frame):
        return peak - frame

    return clip.fl_image(invert)
def get_filelen(fname: str) -> int:
    """Get length of csv file.

    Counts non-blank lines and subtracts one (i.e. excludes the header row).

    Args:
        fname: file name.

    Returns:
        int.
    """
    count = -1  # start at -1 so the header line is not counted
    with open(fname, 'rb') as handle:
        for raw_line in handle:
            if raw_line.strip():
                count += 1
    return count
import json
def get_issue_data(response):
    """Get the issue data contained in the response.

    Parameters
    ----------
    response : Response
        Response data.

    Returns
    -------
    issues : list
        Issue data.
    """
    decoded_body = response.content.decode("utf-8")
    return json.loads(decoded_body)
import json
def dict_to_binary(json_dict: dict) -> bytes:
    """
    Serialize a dict to UTF-8-encoded JSON bytes.

    :param json_dict: dict
    :return: json
    """
    serialized = json.dumps(json_dict)
    return serialized.encode('utf-8')
import torch
def _collate_fn_tensor(x):
""" Collate function for tensors.
Parameters
----------
x : `List` of `torch.Tensor`
Tensors to be stacked.
Returns
-------
x : `torch.Tensor`
Output tensor.
"""
return torch.stack(x) | 418aefa9e5057624489a0e8e874c40ca403cb7d1 | 128,449 |
def obs_likelihood(obs, freq):
    """Bernoulli probability of observing binary presence / absence given
    circulating frequency.

    ``obs`` is a 0/1 binary indicator for absence / presence.
    """
    return freq ** obs * (1 - freq) ** (1 - obs)
def divide_by_two(n):
    """Divide a number by two, always returning a float."""
    half = n / 2.0
    return half
def claimantize(value):
    """ Summarize 'lives with' as Claimant 1, 2, or both

    The first matching needle wins; unmatched values pass through unchanged.
    """
    summaries = (
        ('you', 'Claimant 1'),
        ('spouse', 'Claimant 2'),
        ('both', 'Claimant 1 & Claimant 2'),
    )
    for needle, label in summaries:
        if needle in value:
            return label
    return value
import struct
def get_fileheader(f):
    """
    Get fileheader from file and return a list

    Reads the 180 byte file header of a sparky file.

    File header as described in ucsffile.cc of the sparky source; packed as:
    ident(10s), naxis(c), ncomponents(c), encoding(c), version(c),
    owner(9s), date(26s), comment(80s), pad(3x), seek_pos(l),
    scratch(40s), pad(4x).
    The 3-byte pad between comment and seek_pos aligns the long on a
    multiple of 4; sparky always packs big-endian, hence '>'.
    """
    header_format = '>10s 4c 9s 26s 80s 3x l 40s 4x'
    return struct.unpack(header_format, f.read(180))
def make_founder_product_name(founder1, founder2, product):
    """Get the name of two people forming a company and combine it.

    Only the uppercased INITIAL of each founder name is used.

    NOTE(review): the original doctest claimed 'chris & ella widgets', but
    the code emits initials ('C & E widgets'). The example below matches the
    actual behavior; if full names were intended, the ``[0]`` indexing below
    would have to go — confirm which contract callers expect.

    Args:
        founder1 (str): Your founder name 1.
        founder2 (str): Your founder name 2.
        product (str): Your product/feature/service name.

    Returns:
        str: The updated name.

    >>> make_founder_product_name('chris', 'ella', 'widgets')
    'C & E widgets'
    """
    return '{} & {} {}'.format(
        founder1[0].upper(),
        founder2[0].upper(),
        product)
import inspect
def is_submodule(obj, module_name: str) -> bool:
    """
    Return true if an object is a submodule

    True only when ``obj`` is a module whose qualified name lives under
    ``module_name`` (i.e. starts with "<module_name>.").
    """
    if not inspect.ismodule(obj):
        return False
    qualified_name = getattr(obj, "__name__", "")
    return qualified_name.startswith(f"{module_name}.")
def monthdelta(date, delta):
    """because we wish datetime.timedelta had a month kwarg.

    Courtesy of: http://stackoverflow.com/a/3425124/3916180

    The day of month is clamped to the last valid day of the target month
    (e.g. Jan 31 + 1 month -> Feb 28/29).

    Parameters
    ----------
    date : datetime.date
        Date object
    delta : int
        Month delta (may be negative)

    Returns
    -------
    datetime.date
        New Date object with delta offset.
    """
    month, year = (date.month + delta) % 12, date.year + ((date.month) + delta - 1) // 12
    if not month:
        month = 12
    # BUGFIX: the previous leap test (`year % 4 == 0 and not year % 400 == 0`)
    # treated 1900 as leap and 2000 as non-leap. Correct Gregorian rule:
    # divisible by 4, except centuries unless divisible by 400.
    leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    days_in_month = [31, 29 if leap else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month - 1]
    day = min(date.day, days_in_month)
    return date.replace(day=day, month=month, year=year)
from typing import Union
from typing import Any
def inputValue() -> Union[int, float, str, list, tuple, Any]:
    """
    Function for reading values from console

    It using eval() to read values, so you must follow Python3 syntax

    SECURITY NOTE(review): eval() executes arbitrary code typed at the
    prompt — acceptable only for a trusted interactive user, never for
    untrusted input (consider ast.literal_eval for data-only values).

    Returns:
        int / float / str / list / tuple / Any: read value
    Notes:
        The function reads in the loop, so it will not end until you enter a valid value.
        Pressing <Enter> on an empty line returns False (via the AssertionError branch).
    """
    while True:
        try:
            inputStr = input("Type value to calculate or <Enter> to exit: ")
            # Empty input aborts: the assert fails and we return False below.
            assert inputStr
            return eval(inputStr)
        except (SyntaxError, TypeError, ValueError):
            print("Wrong type, try again.")
        except AssertionError:
            return False
def decode(tuple_):
    """Parse tuple_'s first two elements as hex integers, XOR them, and
    return the result as a lowercase hex string (no prefix)."""
    operands = [int(tuple_[i], base=16) for i in (0, 1)]
    return format(operands[0] ^ operands[1], 'x')
def winCheck(choice1: int, choice2: int) -> bool:
    """Returns the result of the round

    - choice1: int {choice of the first player}
    - choice2: int {choice of the second player}

    True exactly when choice1 beats choice2 (rock-paper-scissors rules
    encoded as the pairs below).
    """
    winning_pairs = {(0, 2), (1, 0), (2, 1)}
    return (choice1, choice2) in winning_pairs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.