content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def page_not_found(e):
    """Return a custom 404 error response (body, status) pair."""
    body = '앗! 이 주소엔 아무것도 없어요.'
    return body, 404
|
94bf86f104603dca0b7c0ffebfb3a25087165b01
| 71,713
|
def get_pagination_readable_message(page: int, limit: int) -> str:
    """Build a human-readable description of a paginated command result.

    Args:
        page (int): The page used in the command.
        limit (int): The limit used in the command.

    Returns:
        str: The message that describes the pagination.
    """
    return (
        f'Current page size: {limit}\n'
        f' Showing page {page} out of others that may exist.'
    )
|
589e385e1a4e152625b13a62c2f5c0fbc98b8618
| 71,715
|
from typing import Union
from datetime import datetime
def ensure_datetime(value: Union[str, datetime]) -> datetime:
    """Return *value* as a datetime object.

    Args:
        value: Either a datetime (returned unchanged) or a string in
            ``%Y-%m-%d`` format.

    Returns:
        datetime: The parsed (or passed-through) datetime.

    Raises:
        TypeError: If *value* is not a datetime and cannot be parsed
            with the ``%Y-%m-%d`` format.
    """
    if isinstance(value, datetime):
        return value
    # Catch only parse/type failures: the original bare ``except:`` also
    # swallowed KeyboardInterrupt and SystemExit.
    try:
        return datetime.strptime(value, "%Y-%m-%d")
    except (ValueError, TypeError):
        raise TypeError(f"{value} must be in format %Y-%m-%d") from None
|
4aef505fe13c8605df3cad7aa1b630f414c07fdf
| 71,722
|
def random_background_template_with_FWHM(background_dataset, FWHM, cosmic=0):
    """Draw one random background spectrum template with a given FWHM.

    Parameters:
    -----------
    background_dataset : dataframe
        contains the background template data
    FWHM : float
        Desired FWHM parameter
    cosmic : bool (optional)
        Choice to include cosmic radiation

    Returns:
    --------
    random_background_spectrum : vector
        The full background spectrum template
    """
    matches_fwhm = background_dataset['fwhm'] == FWHM
    matches_cosmic = background_dataset['cosmic'] == cosmic
    candidates = background_dataset[matches_fwhm & matches_cosmic]
    chosen_row = candidates.sample()
    # Skip the first three columns of the row (presumably metadata such as
    # fwhm/cosmic — confirm against the dataset layout); the rest is the spectrum.
    return chosen_row.values[0][3:]
|
dc53f185068f794318c799f0018d16c99a34caea
| 71,723
|
def pro_rata_monthly_charge(monthly_charge: float, days: int) -> float:
    """Pro-rate a monthly charge over a number of supply days.

    The daily rate is the annualised charge (monthly * 12) divided by
    365.25 days, which accounts for leap years.

    monthly_charge: The monthly charge
    days: The number of days in the billing period
    """
    annual_charge = monthly_charge * 12
    per_day = annual_charge / 365.25
    return per_day * days
|
06acff7d4e7892f97bd0855e26cf941eb2e2c113
| 71,733
|
def read_lines_from_file(filename):
    """Read a text file and return its lines, stripped of surrounding whitespace.

    :param filename: The text file to be read.
    :return: A list of strings, one per line.
    """
    with open(filename) as handle:
        return [line.strip() for line in handle.readlines()]
|
788a59d42cdae0830021274baa57964972f57ce6
| 71,739
|
def namespaces(labels):
    """
    Converts fully-qualified names to a list of namespaces.
    namespaces(['clojure.core/map']) => ['clojure.core']
    """
    return [label.split('/')[0] for label in labels]
|
ebfdaad6dee6e82188d7c3fff9d0f58032d619b1
| 71,740
|
def WrappedJoin(items, separator=' | ', width=80):
    """Join *items* with *separator*, wrapping onto new lines at *width*.

    Returns a list of lines; the separator is appended after every item
    except the last, and a line is right-stripped when a wrap occurs.
    """
    lines = []
    line = ''
    last_index = len(items) - 1
    for index, item in enumerate(items):
        # The final item is not followed by a separator.
        piece = item if index == last_index else item + separator
        if len(line) + len(piece) <= width:
            line += piece
        else:
            lines.append(line.rstrip())
            line = piece
    lines.append(line)
    return lines
|
576e9991c3e66226b32e2f14b2c08e933cf36aa0
| 71,743
|
def _new_entry(ctime, mtime, profiler=None):
    """Build a fresh entry dict with the given create/update times and zeroed file counters."""
    entry = dict(
        create_time=ctime,
        update_time=mtime,
        summary_files=0,
        lineage_files=0,
        explain_files=0,
        graph_files=0,
        profiler=profiler,
    )
    return entry
|
ebeb72a53407c8740ef6d4659cb68740511d7b4b
| 71,747
|
def select_option(lb, ub):
    """Prompt repeatedly until the user types an integer option in [lb, ub].

    Parameters:
        lb (lower bound): Lower limit of the integer type for options.
        ub (upper bound): Upper limit of the integer type for options.
    """
    while True:
        choice = input("Insert option: ")
        if choice.isnumeric() and lb <= int(choice) <= ub:
            return int(choice)
        print("You put invalid option, insert number between {} and {}".format(lb, ub))
|
c9f141b8c9e55814097df4744b5b7b088dfdfc65
| 71,748
|
def get_obj_type(obj):
    """Return the type object of *obj*."""
    obj_type = type(obj)
    return obj_type
|
f5be4b7c8fde997a05f48dd4c35097decefc1967
| 71,749
|
def determine_benchmark_machine(coreclr_args):
    """ Determine the name of the benchmark machine to use

    Args:
        coreclr_args (CoreclrArguments): parsed args

    Return:
        (str) : name of the benchmark machine

    Raises:
        RuntimeError: for an unsupported arch/OS combination.
    """
    arch = coreclr_args.arch
    host_os = coreclr_args.host_os
    if arch == "x64":
        if host_os == "windows":
            # return "aspnet-perf-win"
            return "aspnet-citrine-win"
        if host_os == "Linux":
            return "aspnet-perf-lin"
        raise RuntimeError("Invalid OS for x64.")
    if arch == "arm64":
        if host_os == "Linux":
            return "aspnet-citrine-arm"
        raise RuntimeError("Invalid OS for arm64.")
    raise RuntimeError("Invalid arch.")
|
672f902fce8aaf0370fb56da38515d7861f66c09
| 71,751
|
def iso7816_4_pad(message, total_len):
    """Pad *message* to *total_len* bytes per ISO 7816-4.

    A 0x80 marker byte is appended, then NULL bytes fill the remainder.

    Raises:
        ValueError: if the message (plus the marker) would not fit.
    """
    if len(message) >= total_len:
        raise ValueError(
            f'Padded message is at least {len(message) + 1} bytes long'
        )
    marked = message + b'\x80'
    return marked + b'\x00' * (total_len - len(marked))
|
a7d59f0a7246c8edd1a32fbd7c71838d6726e368
| 71,759
|
def get_and_remove_or_default(dict, key, default):
    """
    Get an item from a dictionary, then deletes the item from the dictionary.
    If the item is not found, return a default value.
    :return: The value corresponding to the key in the dictionary, or the default value if it wasn't found.
    """
    # dict.pop does the lookup, deletion and default fallback in one
    # atomic step (the original if/del pair did two lookups).
    return dict.pop(key, default)
|
402be98aa112010abfffa1e3dcb2ae9686a12a62
| 71,762
|
def cigar_has_insertion(cigar):
    """Return True when the cigar string is non-None and contains an 'I'."""
    return cigar is not None and 'I' in cigar
|
995f0ce7262cb5bb7f5eb2b0c5b0a24631103e00
| 71,763
|
def get_feedstock_name_from_meta(meta):
    """Resolve the feedstock name from the parsed meta.yaml.

    Prefers an explicit ``extra.feedstock-name``, then the parent recipe's
    name, then the package name itself.
    """
    extra = meta.meta["extra"]
    if "feedstock-name" in extra:
        return extra["feedstock-name"]
    if "parent_recipe" in extra:
        return extra["parent_recipe"]["name"]
    return meta.name()
|
1706bf74005730139f6783e4bb6c68c725be4f0b
| 71,765
|
def get_models_deployed(api, project_id):
    """Return the names of the models deployed in a project.

    Arguments :
        api : object, API object to access CloudML Engine
        project_id : string, project id of the project

    Returns :
        list, names of the deployed models
    """
    response = api.projects().models().list(parent=project_id).execute()
    return [model['name'] for model in response['models']]
|
28256c1f33f7f09e3cbb45d8c9f5911e4f456a19
| 71,767
|
def read(name, opener=open):
    """Load file contents as a bytestring."""
    with opener(name, "rb") as stream:
        contents = stream.read()
    return contents
|
e44870cf7232a5e21300825efc44e2537497e0d1
| 71,770
|
def get_object_columns(df):
    """
    Return the subset of *df* made up of the object-dtype columns.

    Parameters
    ----------
    df : Pandas dataframe

    Returns
    -------
    Dataframe with columns of dtype object
    """
    object_columns = df.select_dtypes(include=['object'])
    return object_columns
|
f7ffd1ee1b22f48ec82e185ba13c6e3096312800
| 71,774
|
def dictRemove(dict, keys):
    """Remove from *dict* every entry whose key appears in *keys*.

    Keys that are not present are ignored (the original raised KeyError,
    which made the helper unusable for best-effort cleanup).

    :param dict: the dictionary to mutate in place
    :param keys: iterable of keys to remove
    :return: the same (mutated) dictionary
    """
    for key in keys:
        # Default of None makes the removal a no-op for missing keys.
        dict.pop(key, None)
    return dict
|
e18531fece9051fcaf78c498aa7e9d45431a628b
| 71,776
|
def epoch_difference(M, b_prd, e_prd):
    """
    Compute difference between two epochs accounting for end of horizon circling
    :param M: Model
    :param b_prd: global start period
    :param e_prd: global end period
    :return: end - start + 1 (+ n_prds_per_cycle if spans end of horizon)
    """
    span = e_prd - b_prd + 1
    if e_prd < b_prd:
        # The interval wraps past the end of the cyclic horizon.
        span += M.n_prds_per_cycle
    return span
|
74ab249e1741007a1d26ba97464be8f94c54e342
| 71,779
|
import torch
def compute_target(
    value_final: torch.Tensor,
    rewards: torch.Tensor,
    masks: torch.Tensor,
    gamma: float,
) -> torch.Tensor:
    """
    Compute target (sum of total discounted rewards) for rollout.

    Parameters
    -----------
    value_final: torch.Tensor
        state values from final time step of rollout, size (num_env,)
    rewards: torch.Tensor
        rewards across rollout, size (rollout_steps, num_env)
    masks: torch.Tensor
        masks for episode end states, 0 if end state, 1 otherwise,
        size (rollout_steps, num_env)
    gamma: float
        discount factor for rollout

    Returns
    -------
    torch.Tensor: targets, size (rollout_steps, num_env)
    """
    G = value_final
    n_steps = rewards.shape[0]
    # zeros_like keeps dtype and device consistent with rewards; the
    # original torch.zeros(shape) always allocated a default-dtype CPU
    # tensor, which breaks for GPU / non-default-dtype rollouts.
    targets = torch.zeros_like(rewards)
    # Walk the rollout backwards, zeroing the bootstrap term at episode ends.
    for t in range(n_steps - 1, -1, -1):
        G = rewards[t] + gamma * G * masks[t]
        targets[t] = G
    return targets
|
51a60839df16fb3db0b93e4db37d4ec7982df511
| 71,780
|
def is_price_good_for_ps(price:float, ps:list)->bool:
    """
    Decides if the given price can be used as the trading price for the given PS.
    >>> is_price_good_for_ps(5, [9, -3])
    True
    >>> is_price_good_for_ps(5, [9, -7])
    False
    >>> is_price_good_for_ps(5, [4, -3])
    False
    """
    for valuation in ps:
        # Non-negative valuations are buyers; the price must not exceed them.
        if valuation >= 0 and price > valuation:
            return False
        # Negative valuations are sellers; -price must not exceed them.
        if valuation < 0 and -price > valuation:
            return False
    return True
|
cdbd60c7b5b9b81e7529f577a326bfe823c569ed
| 71,781
|
def setup_parser(parser):
    """
    Register the chunking/sampling options on an argument parser.

    :param parser: argument parser
    :return: configured parser
    """
    optional_int_args = [
        ('--chunksize', "Use chunking for large files. N as size of chunk."),
        ('--samplesize', "Use sampling. N as first rows read"),
    ]
    for flag, help_text in optional_int_args:
        parser.add_argument(flag, type=int, required=False, help=help_text)
    return parser
|
db1287a99b030d9e9d4da7e1aa34dc10aaf3b612
| 71,786
|
from typing import List
from typing import Any
def group_items_in_batches(items: List[Any], items_per_batch: int = 0, fill: Any = None) -> List[List[Any]]:
    """
    Responsible for grouping items in batch taking into account the quantity of items per batch
    e.g.
    >>> group_items_in_batches(items=[1,2,3,4], items_per_batch=3)
    [[1, 2, 3], [4]]
    >>> group_items_in_batches(items=[1,2,3,4], items_per_batch=3, fill=0)
    [[1, 2, 3], [4, 0, 0]]
    :param items: list of any values
    :param items_per_batch: number of items per batch
    :param fill: fill examples when items is not divisible by items_per_batch, default is None
    :return: list of batches (or the original list when items_per_batch == 0)
    :raises TypeError: if items_per_batch is not an int
    :raises ValueError: if items_per_batch is negative or larger than len(items)
    """
    items_length = len(items)
    if not isinstance(items_per_batch, int):
        raise TypeError(f"Value for items_per_batch is not valid. Please send integer.")
    if items_per_batch < 0 or items_per_batch > items_length:
        raise ValueError(f"Value for items_per_batch is not valid. I need a number integer between 0 and {items_length}")
    if items_per_batch == 0:
        return items
    if fill is not None:
        # (-n) % k is 0 when n is divisible by k, else k - n % k.  The
        # original computed k - n % k, which appended a full batch of
        # fill for divisible lengths, and padded the CALLER's list in
        # place via ``items +=``; pad a copy instead.
        missing = -items_length % items_per_batch
        items = items + missing * [fill]
    return [items[i:i + items_per_batch] for i in range(0, items_length, items_per_batch)]
|
75642f1159e1470e568746521ccf62ecb8642c7a
| 71,787
|
import copy
def get_value(obj, attr, default=None):
    """Fetch a deep-copied value along a dotted attribute/key chain.

    Parameters
    ----------
    obj :
        An object or a dictionary
    attr :
        Attributes as a string separated by dot(.)
    default :
        Default value to be returned if attribute is not found.

    Returns
    -------
    Any:
        A copy of the attribute value. For dict or list, a deepcopy will be returned.
    """
    current = obj
    result = default
    for part in attr.split("."):
        # Prefer attribute access; fall back to mapping .get(); bail out
        # with the default when neither applies.
        if hasattr(current, part):
            result = getattr(current, part)
        elif hasattr(current, "get"):
            result = current.get(part, default)
        else:
            return default
        current = result
    return copy.deepcopy(result)
|
30187ff4b2977717688ccee82817b1ded108cefe
| 71,791
|
def fibonacci_loop(n):
"""Calculate the nth Fibonacci number using a loop"""
nn = n - 2 #Already done 1st and second vals
fib = [1, 1]
while nn > 0:
tmp = fib[0]
fib[0] = fib[1]
fib[1] +=tmp
nn -= 1
#If even, return the first val, else, the second
return fib[n%2]
|
5356a1a904cc45621a8165065d4733a7961fb518
| 71,795
|
def lin(pos, goal, x, n_steps):
    """Linear interpolation from *pos* toward *goal*.

    Returns the value at step *x* of a line with y = pos at x = 0 and a
    step size of (goal - pos) / (n_steps - 1), i.e. y reaches goal at
    x = n_steps - 1.
    """
    slope = 1 / (n_steps - 1) * (goal - pos)
    return pos + slope * x
|
852f1b21bfd17baaa82b1c23a2316d04d97586d3
| 71,796
|
def annuity_factor(n, i):
    """
    Calculates the annuity factor (1+i)**n * i / ((1+i)**n - 1)

    :param n: depreciation period (40 = 40 years)
    :type n: int
    :param i: interest rate (0.06 = 6%)
    :type i: float
    :returns:
        - **a**: annuity factor
    :rtype: float
    """
    growth = (1 + i) ** n
    return growth * i / (growth - 1)
|
3b01acde0cd044d98e63835c64094ff471d79b19
| 71,797
|
def is_even(number: int):
    """
    Return True when *number* is even, False otherwise.
    """
    remainder = number % 2
    return remainder == 0
|
dfc0e77f90075d1b77be2218f7f5d1c7e1265b29
| 71,799
|
def runner(app):
"""Return a cli test fixture with app context."""
return app.test_cli_runner()
|
266fba4e87237783c79fbf65106778bab3816a4e
| 71,803
|
def load_process_rules(process_rules_file_name):
    """
    Load a tab-separated process-rules file into a rule list.

    Lines starting with ``##`` are treated as comments and skipped.

    :param process_rules_file_name: path to the rules file
    :return: a list containing rules like:
        [
            (grapheme:str, phones:list, modified_phone:str,
             adm_len_diff:str, adm_silent_error:str),
            ...,
        ]
        (the original docstring claimed 3-tuples; the code has always
        produced 5-tuples)
    """
    rule_list = []
    with open(process_rules_file_name) as process_rules_file:
        # Iterating the file object streams line-by-line; the manual
        # readlines(10000) chunking and the trailing ``pass`` statements
        # were unnecessary.
        for line in process_rules_file:
            if line.startswith("##"):
                continue
            grapheme, phones, modified_phone, adm_len_diff, adm_silent_error = line.strip().split("\t")
            rule_list.append((grapheme, phones.split(" "), modified_phone, adm_len_diff, adm_silent_error))
    return rule_list
|
1520fe21d662aba9db12ba0491e4cb85fb5ec520
| 71,807
|
import math
def calculate_num_Grover_iters(n, k):
    """
    Calculate the optimal number of Grover search iterations,
    floor((pi/4) * sqrt(2^n / k)).

    n: number of qubits
    k: number of solutions
    """
    search_space_size = 2 ** n
    optimal = (math.pi / 4) * math.sqrt(search_space_size / k)
    return math.floor(optimal)
|
d4e99e2bddf7182187197f41fdde85659af58cc8
| 71,808
|
import json
def load_from_json(file):
    """
    Parse a json file and return the resulting object.

    :param file: the .json file (with path)
    """
    with open(file, "r") as handle:
        return json.load(handle)
|
7b6f6c2d0afe6b43f06ceacb53ac4f0551857481
| 71,812
|
def _is_valid_module_args(parsed_args, required_args):
    """Validate that every required arg is present and no value is falsy (None/empty)."""
    has_all_required = all(arg in parsed_args for arg in required_args)
    no_empty_values = all(parsed_args.values())
    return has_all_required and no_empty_values
|
f89567489744454390db275585decd7a33910448
| 71,813
|
def isDummy(a):
    """True if atom is a dummy atom (all physical properties zero). False otherwise."""
    properties = (a.mass, a.atnum, a.charge, a.sigma, a.epsilon)
    return all(value == 0 for value in properties)
|
658dae81ab22804c98b80053d905f3545fc87034
| 71,825
|
def makeVector(dim, initValue):
    """
    Return a list of dim copies of initValue.
    """
    return [initValue for _ in range(dim)]
|
2d5d77c21ae24eb59ae70fbd6d4024bf446507c6
| 71,831
|
def filter_tests_by_attribute(tests_dict, attribute, value):
    """
    Return the subset of tests whose `attribute` equals `value`.
    """
    return {
        test_name: test_data
        for test_name, test_data in tests_dict.items()
        if test_data[attribute] == value
    }
|
ef50e7267a913d842ad099e171e1522842c640ab
| 71,834
|
import secrets
def generate_random_token(nbytes: int = 32) -> str:
    """Generate a cryptographically secure URL-safe random token.

    The 32-byte default is believed to be sufficiently secure for
    practically all purposes:
    https://docs.python.org/3/library/secrets.html#how-many-bytes-should-tokens-use
    """
    token = secrets.token_urlsafe(nbytes)
    return token
|
4c87c5ae88cb1b38e1b812dba65fa271f28c3648
| 71,835
|
from typing import List
def min_candies(n: int, ratings: List[int]) -> int:
    """
    Return the minimum amount of candies to give to students.
    Candies are assigned using the following rules:
    1. Every student gets at least 1 candy
    2. If 2 students are next to each other, the one with the higher rating
       must receive at least one more candy.
    3. The candies given should be minimized.
    :time: O(n) where n is the amount of students
    :space: O(n)
    """
    candies = [1] * n
    # Left-to-right pass: satisfy the constraint against the left neighbour.
    for i in range(1, n):
        if ratings[i] > ratings[i - 1]:
            candies[i] = candies[i - 1] + 1
    # Right-to-left pass: satisfy the constraint against the right
    # neighbour without breaking the first pass.
    for i in range(n - 2, -1, -1):
        if ratings[i] > ratings[i + 1] and candies[i] <= candies[i + 1]:
            candies[i] = candies[i + 1] + 1
    return sum(candies)
|
656b4a8fd1754d51795b4e80cca0a42a7dab46b7
| 71,836
|
def int_or_none(x, limit):
    """Returns `int(x)` if `x` is a valid `int` or `None` otherwise.

    `x` is valid if `1 <= int(x) <= limit`.  Values that cannot be
    converted to int at all (e.g. ``None`` or a list) also yield None.
    """
    try:
        value = int(x)
    except (ValueError, TypeError):
        # int() raises TypeError for non-numeric types; the original
        # only caught ValueError and let that escape.
        return None
    return value if 1 <= value <= limit else None
|
ef2039911d77b69304538d48218263f95524b933
| 71,840
|
def find_previous(element, l):
    """
    find previous element in a sorted list
    >>> find_previous(0, [0])
    0
    >>> find_previous(2, [1, 1, 3])
    1
    >>> find_previous(0, [1, 2])
    >>> find_previous(1.5, [1, 2])
    1
    >>> find_previous(3, [1, 2])
    2
    """
    # Nothing precedes element when the list is empty or every item is
    # larger.  (Fixes the old behaviour that returned the sole item of a
    # one-element list even when element was smaller than it.)
    if not l or element < l[0]:
        return None
    for current, nxt in zip(l, l[1:]):
        if current <= element < nxt:
            return current
    # element is >= every item, so the last item is its predecessor.
    return l[-1]
|
38e557061224215648b341e0cfe533c040eaabd3
| 71,841
|
import functools
def _deepgetattr(obj, name, default=None):
    """Follow a dotted attribute chain; return *default* if any link is missing."""
    current = obj
    try:
        for part in name.split('.'):
            current = getattr(current, part)
    except AttributeError:
        return default
    return current
|
b03887865b9594cb806bc6bc72c54eba010dcae8
| 71,843
|
def __pagination_handler(query_set, model, params):
    """
    Handle user-provided pagination requests.

    Args:
        query_set: SQLAlchemy query set to be paginated.
        model: Data model from which given query set is generated.
        params: User-provided filter params, with format {"offset": <int>, "limit": <int>, ...}.

    Returns:
        A query set with user-provided pagination applied.
    """
    # PEP 8 (E711): compare to None with ``is not``, never ``!=``.
    offset = params.get("offset")
    if offset is not None:
        query_set = query_set.offset(offset)
    limit = params.get("limit")
    if limit is not None:
        query_set = query_set.limit(limit)
    return query_set
|
b12ea353caf62e39d5c09d2ad0581520ed6de43e
| 71,845
|
def xstr(s):
    """Sane string conversion: return an empty string if s is None."""
    if s is None:
        return ''
    return str(s)
|
5a9856c6d61ac3a83729f685a5b89e00026ee64d
| 71,852
|
def sg_or_pl(number):
    """Return 's' if number is greater than 1, '' if number equals 1.

    Note: for number < 1 nothing is returned (implicitly None), matching
    the original behaviour.
    """
    if number == 1:
        return ''
    if number > 1:
        return 's'
|
bb24c349327a31681ea72cfae792e8946b3e21a4
| 71,853
|
import json
def load_json(fname: str):
    """
    Loads an object from a JSON file.

    Args:
        fname: the name of the file to load from.

    Returns:
        the JSON object.
    """
    # ``with`` guarantees the handle is closed; the original passed an
    # open() result straight to json.load and leaked the file handle.
    with open(fname, 'r') as handle:
        return json.load(handle)
|
15cea574ff50e4cfb1af008321f7e8e7005558fe
| 71,858
|
import requests
import json
def _post_req(url, email, password):
    """
    Posts non-graphql requests to ThreatPlaybook API Server. Currently used for:
    * Create User
    * Login
    :param url: ThreatPlaybook API URL
    :param email: E-mail of the User
    :param password: Password set by the User
    :return: Returns with response
    """
    headers = {'content-type': 'application/json'}
    auth = {"email": email, "password": password}
    try:
        r = requests.post(url=url, headers=headers, data=json.dumps(auth))
        if r.status_code == 500:
            return {'error': 'Server Error'}
        return r.json()
    # The builtin ConnectionError never catches requests' connection
    # failures: requests raises requests.exceptions.ConnectionError,
    # which is not a subclass of the builtin.
    except requests.exceptions.ConnectionError:
        return {'error': 'Unable to contact Threatplaybook API server'}
|
a35046c699193a9a8132ef4b155eb922a70a88a1
| 71,860
|
from typing import Dict
def add_collection_info_to_data(
    pk: int, collection: str, result_data: Dict[int, dict]
) -> Dict[int, dict]:
    """Add collection info to product data.

    Adds the collection slug under the "collections__slug" header of the
    product entry for *pk*; if slugs already exist there, the new one is
    added to the existing set.  Returns the updated product data.
    A falsy *collection* leaves the data untouched.
    """
    if not collection:
        return result_data
    header = "collections__slug"
    product = result_data[pk]
    if header in product:
        product[header].add(collection)  # type: ignore
    else:
        product[header] = {collection}
    return result_data
|
77074d9a87c8289515de1df2f1bd7b65e3805c4d
| 71,861
|
def mean(iterable, length=None):
    """Return the arithmetic mean of the values in *iterable*.

    When *length* is None it is derived from the iterable (iterators are
    materialised first).  An empty input yields 0.0 rather than raising.
    """
    values = iterable
    if length is None:
        if not hasattr(values, "__len__"):
            values = list(values)
        length = len(values)
    # Divide by 1 when length is 0/falsy to avoid ZeroDivisionError.
    divisor = float(length) if length else 1.0
    return sum(values) / divisor
|
40111ca3f0cb63c9eda7169244c43f33c1316cf2
| 71,863
|
def fill_missing_data(df):
    """Return dataframe with missing values filled in and numeric columns as integers.

    The first row is zero-filled (it has no predecessor to forward-fill
    from); remaining gaps are forward-filled; all columns except the
    first are cast to int.
    """
    # fix top row
    df.iloc[0, :] = df.iloc[0, :].fillna(0)
    # ffill() replaces fillna(method="ffill"), which is deprecated since
    # pandas 2.1 and removed in pandas 3.0.
    df = df.ffill()
    df.iloc[:, 1:] = df.iloc[:, 1:].astype(int)
    return df
|
312392cd2e8ff7e5cdbbc9c1911506e817e37e18
| 71,866
|
import time
def date_to_TS(date, format='%y-%m-%d %H:%M:%S'):
    """ Convert a date to a timestamp.

    Parameters
    ----------
    date : int, str or date
        Date to convert to timestamp; ints are assumed to already be
        timestamps and returned unchanged.
    format : str
        Format of input date.

    Return
    ------
    int
        Timestamp of the date.

    Raises
    ------
    ValueError: for any type other than int or str.
    """
    if isinstance(date, int):
        return date
    if isinstance(date, str):
        parsed = time.strptime(date, format)
        return time.mktime(parsed)
    print('Date format not allowed')
    raise ValueError('Unknow format', type(date))
|
d68ddba720dbb46a14079d384c3090c4e3c354c2
| 71,867
|
import torch
def concat_mean_stats(inputs):
"""Add mean statistics to discriminator input.
GANs often run into mode collapse since the discriminator sees every
sample in isolation. I.e., it cannot detect whether all samples in a batch
do look alike.
A simple way to allow the discriminator to have access to batch statistics
is to simply concatenate the mean (across batch dimension) of all
discriminator samples to each sample.
Args:
inputs: The input batch to the discriminator.
Returns:
The modified input batch.
"""
stats = torch.mean(inputs, 0, keepdim=True)
stats = stats.expand(inputs.size())
return torch.cat([stats, inputs], dim=1)
|
cfc96450c355e0de6a4c9af0066a6881cbb0ba92
| 71,868
|
def unique_based_on_id(data):
    """Return a list with duplicates removed, using object identity.

    Uniqueness is judged by ``id()``, which is useful when the items are
    unhashable and a set therefore cannot be used.  First occurrence
    order is preserved.
    """
    seen_ids = set()
    unique_items = []
    for element in data:
        marker = id(element)
        if marker in seen_ids:
            continue
        seen_ids.add(marker)
        unique_items.append(element)
    return unique_items
|
6a5c5f9714e3a145824df9ca42799986cc191d94
| 71,869
|
def dict_merge(*dicts):
    """
    Merge all provided dicts into one dict; later dicts win on key clashes.

    *dicts : `dict`
        dictionaries to merge
    """
    return {key: value for mapping in dicts for key, value in mapping.items()}
|
574d031463e108b17b7fddc947af52431bc9ddb9
| 71,872
|
from typing import Tuple
def _get_physical_coordinates_of_z_plane(zrange: Tuple[float, float]):
    """Return the midpoint of the given (low, high) zrange."""
    low, high = zrange
    return low + (high - low) / 2
|
5db61f6cfb60c34fb00bd65905f7150f4a9fa727
| 71,877
|
def get_file_offset(vfp: int) -> int:
    """Convert a block compressed virtual file pointer to a file offset.

    The offset occupies the high bits: drop the low 16 bits, then keep
    48 bits.
    """
    ADDRESS_MASK = 0xFFFFFFFFFFFF  # 48-bit mask
    return (vfp >> 16) & ADDRESS_MASK
|
fbbca6bcf9fdcc95867d59a63ac6dbab877723f2
| 71,878
|
import struct
import socket
def ip_to_int(addr):
    """
    Converts an IPv4 address (string) to its numeric representation (int).

    Example:
    >>> ip_to_int('127.0.0.1')
    2130706433
    """
    packed = socket.inet_aton(addr)
    (as_int,) = struct.unpack("!I", packed)
    return as_int
|
60068feeb0092a984972c32382df14a32533fa15
| 71,885
|
import binascii
def _bytearray_to_b64_str(arr: bytearray) -> str:
    """Converts a bytearray to a base64-encoded ASCII string (no trailing newline)."""
    encoded = binascii.b2a_base64(bytes(arr))
    return encoded.decode().strip()
|
5019ff8fbcee41f9d302d549b06e95ff01f36817
| 71,893
|
def _temperature(temperature_register, farenheight=False):
    """Decode a raw temperature register value into degrees Celsius.

    The whole degrees live in bits 8-15 and eighths of a degree in bits
    5-7 (presumably per the sensor's datasheet — confirm against the
    hardware documentation).  If farenheight is True, return Fahrenheit.
    """
    whole_degrees = 0xFF & (temperature_register >> 8)
    eighths = (0x7 & (temperature_register >> 5)) / 8.0
    celsius = whole_degrees + eighths
    if not farenheight:
        return celsius
    return ((9 * celsius) / 5.0) + 32
|
6d70f12bf76a6bab1d19c8668941f569954d3282
| 71,900
|
def _gaps_from(intervals):
    """
    Return the gap sizes between consecutive intervals.

    Each interval is a (start, end) pair; the gap between interval i and
    interval i+1 is start_{i+1} - end_i.
    """
    return [nxt[0] - prev[1] for prev, nxt in zip(intervals, intervals[1:])]
|
b3be47684d992c3828bbf03444b1d8bb4fb1e84e
| 71,904
|
def create_features(data):
    """Extract per-observation features from an image stack.

    # Args
        data: Image (observation, height, width)

    # Returns
        For every observation, row 100 restricted to columns 100..200
        (described by the original author as the pixels on the radius of
        the image — confirm against the imaging geometry).
    """
    radius_pixels = data[:, 100, 100:201]
    return radius_pixels
|
267339e300971b9f87d4dce21e86f86eb9910f1f
| 71,911
|
def get_string(data):
    """Get string ending with '\0'.

    Args:
        data: Data containing string (an iterable of characters).

    Returns:
        String without '\0'.
    """
    chars = []
    # Collect characters and join once: the original built the result
    # with repeated `+=` concatenation, which is quadratic in the
    # string length.
    for c in data:
        if c == '\x00':
            break
        chars.append(c)
    return ''.join(chars)
|
97f66c89d4f1c282539c63bf31f83aa8e668925b
| 71,912
|
def get_summary_table(valid):
    """Pick the summary table to query for a given datetime.

    Args:
        valid (datetime with time zone): Datetime

    Returns:
        str table to query: the generic "summary" table for None or for
        dates near the year boundary (Dec 30-31, Jan 1-2), else the
        year-specific partition.
    """
    if valid is None:
        return "summary"
    near_year_boundary = (valid.month == 12 and valid.day >= 30) or (
        valid.month == 1 and valid.day < 3
    )
    return "summary" if near_year_boundary else f"summary_{valid.year}"
|
476950b7c1b2a03f3624a50ceb7450073550066b
| 71,915
|
def get_chain_tasks(chain_instance):
    """Return the list of tasks held by *chain_instance*."""
    tasks = chain_instance.tasks
    return tasks
|
f41af29455260a2b4fcb306448cab32750e4c3b8
| 71,917
|
from pathlib import Path
def is_directory_empty(dir_path: Path) -> bool:
    """Check if a directory is empty.

    Args:
        dir_path (Path): path to directory

    Returns:
        bool: True if empty, False otherwise

    Raises:
        FileNotFoundError: if the path does not exist.
        NotADirectoryError: if the path is not a directory.
    """
    # Validate with real exceptions: the original used ``assert``, which
    # is silently stripped when Python runs with -O.
    if not dir_path.exists():
        raise FileNotFoundError(
            f"The provided directory does not exist: '{dir_path}'."
        )
    if not dir_path.is_dir():
        raise NotADirectoryError(f"Provide a directory path, not '{dir_path}'.")
    return not any(dir_path.iterdir())
|
09e4a1a9d1536b2d081ba7e5776e0b1f03cf1d25
| 71,919
|
def str_to_hexstr(s):
    """Convert a string into a colon-separated hexadecimal representation.

    Non-str inputs are first converted with chr() (so an int codepoint
    becomes its character).
    """
    if type(s) is not str:
        s = chr(s)
    parts = ["{:02x}".format(ord(ch)) for ch in s]
    return ":".join(parts)
|
c3deb58d3c8951c45faf728240b418b4f78e151e
| 71,920
|
def _process_box(df):
    """ Perform basic processing on a box score - common to both methods
    Args:
        df (DataFrame): the raw box score df
    Returns:
        DataFrame: processed box score
    """
    # Flatten column labels to their second element — presumably the raw
    # df has two-level (MultiIndex) columns; confirm against the scraper.
    df.columns = list(map(lambda x: x[1], list(df.columns)))
    # Normalise the player/team column names.
    df.rename(columns = {'Starters': 'PLAYER'}, inplace=True)
    if 'Tm' in df:
        df.rename(columns = {'Tm': 'TEAM'}, inplace=True)
    # Drop the 'Reserves' separator row and renumber the index.
    # NOTE(review): assumes at least one row has PLAYER == 'Reserves';
    # otherwise .index[0] raises IndexError — confirm the table format.
    reserve_index = df[df['PLAYER']=='Reserves'].index[0]
    df = df.drop(reserve_index).reset_index().drop('index', axis=1)
    return df
|
9a5bd3468dcc4aba303faffcd00a1504ce9d1750
| 71,923
|
def split_competition_name(long_name):
    """
    Split a verbose competition name as a competition name and a weight.

    Return value
    ------------
    (str, str)
    """
    # maxsplit=1: split only on the LAST ": " so competition names that
    # themselves contain ": " stay intact.  The original used maxsplit=2,
    # which can yield three parts and make the unpacking raise ValueError.
    (name, weight) = long_name.rsplit(": ", 1)
    return (
        name.rstrip(),
        # Drop the trailing character (e.g. '%') from the weight.
        weight[:-1].strip()
    )
|
75db6331c652f4a49417436628cefaf212e815e6
| 71,924
|
def as_key(key):
    """Strip any / prefix and/or suffix from a supplied key (path) string.

    Most uses of the asset path as a key (other than validation by
    is_path_allowed) expect any leading and trailing / URL path
    separators to have been removed.
    """
    # strip('/') removes all leading AND trailing slashes in one call.
    return key.strip('/')
|
a7f0518ace628eb23208a7838c6fa10a5ad94260
| 71,926
|
def check_indent(cdline):
    """
    Count the leading indentation of a code line, in space-equivalents.

    A tab counts as 4 spaces; counting stops at the first character that
    is neither a space nor a tab.

    Args:
        cdline(str) : a single line of code from the source file
    Returns:
        int : the indent as a number of interpreted blankspaces
    """
    width = 0
    for ch in cdline:
        if ch == '\t':
            width += 4
        elif ch == ' ':
            width += 1
        else:
            break
    return width
|
2daddedd56aae5efa0622682643601da05dc4332
| 71,929
|
def extract_suffixed_keys(dictionary, suffix):
    """Return a dict of all entries whose key ends with *suffix*.

    The suffix is removed from each key in the result.
    """
    trim = len(suffix)
    return {
        key[:-trim]: value
        for key, value in dictionary.items()
        if key.endswith(suffix)
    }
|
1d5eba2f6d79d35a1189bdb9d16642ba30f92452
| 71,934
|
def append_list_to_list_or_item(items1, items2):
    """
    Append a list or a single item to a list or a single item.

    :param items1: The list (or item) to append to
    :param items2: The list (or item) to be appended
    :return: The combined list
    """
    def _as_list(value):
        # Wrap non-list values (exact ``list`` type check, as original).
        return value if type(value) is list else [value]
    return _as_list(items1) + _as_list(items2)
|
9e3be44178e15348c527332e7d18bdefd05fc409
| 71,935
|
def get_path_lines(path_obj):
    """
    Return the lines of the text file at *path_obj*, right-stripped.
    """
    with path_obj.open() as handle:
        return [raw.rstrip() for raw in handle.readlines()]
|
165f14d9f4d36c6216537ce748848c7a64ca2c2e
| 71,941
|
import time
def timed_message(message: str) -> str:
    """Prefix *message* with the current local time.

    Args:
        message (str): Message string to wrap in time information.

    Returns:
        str: "HH:MM:SS ---- message".
    """
    timestamp = time.strftime("%H:%M:%S", time.localtime())
    return f"{timestamp} ---- {message}"
|
5f266e8b8c2797fbac5f6b1b5f904041f9575ec7
| 71,944
|
from typing import Sequence
def reconstitute_einsum_formula(input_formulas: Sequence[str],
                                output_formula: str) -> str:
    """Joins einsum input formulas and output formula into a complete formula."""
    inputs_part = ','.join(input_formulas)
    return inputs_part + '->' + output_formula
|
3a29212bf0130eaf10e964faf819b28cc35cf11a
| 71,946
|
def assign_y_height_per_read(df, phased=False, max_coverage=100):
    """Assign height of the read in the per read traces
    Gets a dataframe of read_name, posmin and posmax.
    Sorting by position, and optionally by phase block.
    Determines optimal height (y coordinate) for this read
    Returns a dictionary mapping read_name to y_coord
    """
    # Sort reads left-to-right by start; ties broken by longest first
    # (posmax descending).  With phasing, group by haplotype (HP) first.
    if phased:
        dfs = df.sort_values(by=['HP', 'posmin', 'posmax'],
                             ascending=[True, True, False])
    else:
        dfs = df.sort_values(by=['posmin', 'posmax'],
                             ascending=[True, False])
    # heights[k] records the posmax of the last read placed on row k+1.
    heights = [[] for i in range(max_coverage)]
    y_pos = dict()
    for read in dfs.itertuples():
        # Greedy placement: use the first row that is empty or whose last
        # read ends before this read starts.
        for y, layer in enumerate(heights, start=1):
            if len(layer) == 0:
                layer.append(read.posmax)
                y_pos[read.Index] = y
                break
            if read.posmin > layer[-1]:
                layer.append(read.posmax)
                y_pos[read.Index] = y
                break
        # NOTE(review): a read overlapping all max_coverage rows gets no
        # y coordinate (silently omitted) — confirm this is intended.
    return y_pos
|
d2723371fda2ccff7822b5a1cec7ed88f474c370
| 71,947
|
from datetime import datetime
def find_weekday(time):
    """ Return the weekday number for a date string.

    Arg:
        time: date string like ``01/04/2010`` (month/day/year)

    Return:
        weekday: 1:Monday 2:Tuesday ... 7:Sunday
    """
    parsed = datetime.strptime(time, '%m/%d/%Y')
    # isoweekday() is exactly weekday() + 1.
    return parsed.isoweekday()
|
e7b7176bd71bbed650d908edaac3af11ff649a7a
| 71,952
|
def sdss2decam(g_sdss, r_sdss, i_sdss, z_sdss):
    """
    Converts SDSS magnitudes to DECam magnitudes.

    Args:
        [griz]_sdss: SDSS magnitudes (float or arrays of floats)

    Returns:
        g_decam, r_decam, z_decam

    Note: SDSS griz are inputs, but only grz (no i) are output
    """
    # Colour terms
    color_gr = g_sdss - r_sdss
    color_ri = r_sdss - i_sdss
    color_iz = i_sdss - z_sdss
    # - DESI-1788v1 equations 4-6
    g_decam = g_sdss + 0.01684 - 0.11169 * color_gr
    r_decam = r_sdss - 0.03587 - 0.14114 * color_ri
    z_decam = z_sdss - 0.00756 - 0.07692 * color_iz
    return g_decam, r_decam, z_decam
|
a0869eeb138c00eb30476dd34883eabe77066129
| 71,954
|
import glob
def load_games(src):
    """
    Read every file matching a glob pattern and return their contents.

    Args:
        src (str): A search string passed directly to ``glob.glob``,
            e.g. ``data/*.pgn``.

    Returns:
        list: A list of string representations of the matched files,
        or None when nothing matches.
    """
    filenames = glob.glob(src)
    if not filenames:
        return None
    games = []
    for fname in filenames:
        with open(fname, "r") as handle:
            games.append(handle.read())
    return games
|
4f909c901391362e51121f00d30866a8572abcdc
| 71,958
|
def pairs2map(pairs, key_col=0, value_col=1):
    """Converts a list of key value pairs to a dictionary.

    Later pairs overwrite earlier ones when keys repeat.

    @type pairs: array
    @param pairs: an array of key value pairs,
        representing a map as a list of tuples.
    @type key_col: mixed
    @param key_col: the column that contains the key (default: 0)
    @type value_col: mixed
    @param value_col: the column that contains the value (default: 1)
    @rtype: dictionary
    @return: the key value pairs in pairs.
    """
    return {row[key_col]: row[value_col] for row in pairs}
|
0e05988559669342eeaea3e68d3e6f329b6d36e8
| 71,963
|
from typing import Union
def isfloat(val: Union[int, float, str]):
    """
    Check whether *val* can be interpreted as a float.

    Accepts numbers directly and strings such as ``"1.5"`` or ``" 2.0 "``
    (``float()`` already tolerates surrounding whitespace, so no explicit
    strip is needed).

    :param val: Value to test.
    :return: True if ``float(val)`` succeeds, False otherwise.
    """
    try:
        float(val)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-numeric, non-string inputs (e.g. None, list),
        # which previously escaped as an uncaught exception.
        return False
|
075bf2227d3620904a038a5462b0ae9797ec6aa5
| 71,964
|
def intersection(*args):
    """Return the set-style intersection of any number of Python lists.

    Elements are returned in the order (and with the multiplicity) in which
    they appear in the first list. With a single list, that list is returned
    unchanged; with no arguments, an empty list is returned (the previous
    implementation raised IndexError in that case).

    Examples:
        intersection([1, 2, 3], [2, 3], [5, 3, 4]) == [3]
        intersection([(1, 2), (3, 4)], [(1, 2), (2, 2)]) == [(1, 2)]
    """
    if not args:
        return []
    if len(args) == 1:
        return args[0]
    first, *others = args
    # Keep each element of the first list that appears in every other list.
    # This matches the original pairwise-recursive behaviour, including
    # preserving duplicates from the first list.
    return [item for item in first if all(item in other for other in others)]
|
eacd713edb5487fddc765a5f720e3c62e8e60a57
| 71,969
|
def large_num_formatter(num, pos=None):
    """
    Format large tick values using suffixes for successive powers of 1000.

    Parameters:
        num: (int): The tick value to be formatted
        pos: (int): Position of the ticker (unused; kept for the
            matplotlib FuncFormatter callback signature)
    """
    value = num
    for suffix in ('', 'mil', 'Mi.', 'Bi.'):
        if abs(value) < 1000.0:
            return "%3.1f %s" % (value, suffix)
        value /= 1000.0
    # Anything that survived four divisions is in the trillions.
    return "%.1f %s" % (value, 'Tri.')
|
34e2918615d35365b78194eb34ab0b4aaacfb028
| 71,972
|
def _plot_ids(ax, x, y, size, marker='.', color=None, **kwargs):
"""Plot points, return the used color"""
# plot is faster than scatter, go figure...
return ax.plot(x, y, color=color, markersize=size, marker=marker,
linestyle='None', **kwargs)[0].get_color()
|
7e4dda2315005f576aff6d2d2746dd4dc3f8f807
| 71,983
|
# JPEG segment marker table. Most of these segment types are defined in
# Exif 2.3; additional segment types are described at
# http://www.ozhiker.com/electronics/pjmt/jpeg_info/app_segments.html
# Built once at import time instead of being re-created on every call.
_SEGMENT_DATA = {
    b'\xff\x01': {'name': 'ff01', 'has_data': False, 'has_meta': False},
    b'\xff\xe0': {'name': 'APP0', 'has_data': True, 'has_meta': True},
    b'\xff\xe1': {'name': 'APP1', 'has_data': True, 'has_meta': True},
    b'\xff\xe2': {'name': 'APP2', 'has_data': True, 'has_meta': True},
    b'\xff\xe3': {'name': 'APP3', 'has_data': True, 'has_meta': True},
    b'\xff\xe4': {'name': 'APP4', 'has_data': True, 'has_meta': True},
    b'\xff\xe5': {'name': 'APP5', 'has_data': True, 'has_meta': True},
    b'\xff\xe6': {'name': 'APP6', 'has_data': True, 'has_meta': True},
    b'\xff\xe7': {'name': 'APP7', 'has_data': True, 'has_meta': True},
    b'\xff\xe8': {'name': 'APP8', 'has_data': True, 'has_meta': True},
    b'\xff\xe9': {'name': 'APP9', 'has_data': True, 'has_meta': True},
    b'\xff\xea': {'name': 'APP10', 'has_data': True, 'has_meta': True},
    b'\xff\xeb': {'name': 'APP11', 'has_data': True, 'has_meta': True},
    b'\xff\xec': {'name': 'APP12', 'has_data': True, 'has_meta': True},
    b'\xff\xed': {'name': 'APP13', 'has_data': True, 'has_meta': True},
    b'\xff\xee': {'name': 'APP14', 'has_data': True, 'has_meta': True},
    b'\xff\xef': {'name': 'APP15', 'has_data': True, 'has_meta': True},
    b'\xff\xfe': {'name': 'COM', 'has_data': True, 'has_meta': False},
    b'\xff\xc4': {'name': 'DHT', 'has_data': True, 'has_meta': False},
    b'\xff\xdb': {'name': 'DQT', 'has_data': True, 'has_meta': False},
    b'\xff\xdd': {'name': 'DRI', 'has_data': True, 'has_meta': False},
    b'\xff\xd9': {'name': 'EOI', 'has_data': False, 'has_meta': False},
    b'\xff\xd0': {'name': 'RST0', 'has_data': False, 'has_meta': False},
    b'\xff\xd1': {'name': 'RST1', 'has_data': False, 'has_meta': False},
    b'\xff\xd2': {'name': 'RST2', 'has_data': False, 'has_meta': False},
    b'\xff\xd3': {'name': 'RST3', 'has_data': False, 'has_meta': False},
    b'\xff\xd4': {'name': 'RST4', 'has_data': False, 'has_meta': False},
    b'\xff\xd5': {'name': 'RST5', 'has_data': False, 'has_meta': False},
    b'\xff\xd6': {'name': 'RST6', 'has_data': False, 'has_meta': False},
    b'\xff\xd7': {'name': 'RST7', 'has_data': False, 'has_meta': False},
    b'\xff\xc0': {'name': 'SOF0', 'has_data': True, 'has_meta': False},
    b'\xff\xc2': {'name': 'SOF2', 'has_data': True, 'has_meta': False},
    b'\xff\xd8': {'name': 'SOI', 'has_data': False, 'has_meta': False},
    b'\xff\xda': {'name': 'SOS', 'has_data': True, 'has_meta': False},
}


def seginfo(segment_marker):
    """Convert a 2-byte JPEG segment marker to a dictionary of segment info.

    For example, b'\\xff\\xe1' is converted to a dictionary with these
    key/value pairs: name: 'APP1', has_data: True, has_meta: True

    has_data = whether this segment type has a data size in bytes 3-4 and
               a data payload starting at byte 5; if False, then this
               segment type is just a 2-byte segment marker (e.g., SOI/EOI)
    has_meta = whether this segment type contains metadata to be
               retrieved (i.e., Exif/JFIF/XMP/other metadata).

    Unknown markers yield a name of the marker's hex digits plus '?' with
    both flags False.
    """
    info = _SEGMENT_DATA.get(segment_marker)
    if info is not None:
        # Return a copy so callers may mutate the result without
        # corrupting the shared module-level table.
        return dict(info)
    # Unknown segment marker: report its hex representation.
    return {'name': segment_marker.hex() + '?',
            'has_data': False, 'has_meta': False}
|
ff4f9778b9e0191eabfd52bb76042ee6d7c0215e
| 71,988
|
def _cat(fs, bundle_name):
    """Dump the contents of a bundle
    :param fs: A Disdat file system handle.
    :param bundle_name: The name of the bundle
    :return: the presentable contents of the bundle, if such contents exists
    """
    # Thin wrapper: delegates directly to the file-system handle's cat().
    return fs.cat(bundle_name)
|
c038ff16adf752fe32fd49322e3477e0bbf663bc
| 71,989
|
def hex_distance(h1, h2):
    """Get hexagonal distance (manhattan distance) of two hexagon points
    given by the hexagonal coordinates h1 and h2.

    Parameters
    ----------
    h1 : int, int
        Hexagonal coordinates of point 1.
    h2 : int, int
        Hexagonal coordinates of point 2.

    Returns
    -------
    float
        distance
    """
    q1, r1 = h1
    q2, r2 = h2
    # Third cube coordinate is implied by q + r + s == 0.
    s1 = -q1 - r1
    s2 = -q2 - r2
    # Hex (cube) distance is half the L1 distance of the cube coordinates.
    return (abs(q1 - q2) + abs(r1 - r2) + abs(s1 - s2)) / 2
|
d9ed78ef2d93acfb74e0b1c69f4b4cf79da96976
| 71,992
|
import math
def area_triangle_sss(side1, side2, side3):
    """Returns the area of a triangle, given the lengths of its three sides."""
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)),
    # where s is the semi-perimeter.
    semi = (side1 + side2 + side3) / 2.0
    radicand = semi * (semi - side1) * (semi - side2) * (semi - side3)
    return math.sqrt(radicand)
|
b4f0ffd1646cbea000f446297cf3465d7ea9cd95
| 71,993
|
def get_diphone_wishlist(sentence_diphone_dict):
    """
    Build a wishlist of diphones: a dict mapping each diphone to the number
    of times it occurs across all sentences.

    :param sentence_diphone_dict: a dictionary mapping each sentence to the
        iterable of diphones that occur in that sentence
    :return: dict mapping diphone -> occurrence count
    """
    diphone_wishlist = {}
    # Only the diphone lists matter here, so iterate values() directly
    # (the original looped over items() and discarded the key).
    for diphones in sentence_diphone_dict.values():
        for diphone in diphones:
            # dict.get with a default replaces the if/else counting branch;
            # also avoids shadowing the builtin name `type`.
            diphone_wishlist[diphone] = diphone_wishlist.get(diphone, 0) + 1
    return diphone_wishlist
|
e8712738d7699d1b3447119a48887289236d5113
| 71,994
|
import torch
def cosine_similarity(feature):
    """
    Input:
        feature: source points, [B, N, C]
    Output:
        dist: per-point cosine_similarity distance, [B, N, N]
    """
    # Pairwise dot products between all points within each batch element.
    dots = torch.matmul(feature, feature.transpose(1, 2))  # [B, N, N]
    # Per-point L2 norms; the outer product of norms is the normaliser.
    norms = torch.sqrt(torch.sum(feature ** 2, -1, keepdim=True))  # [B, N, 1]
    denom = torch.matmul(norms, norms.transpose(1, 2))  # [B, N, N]
    return dots / denom
|
a32fb4a58633fc06a63b56bb3bfc88e9b859f05a
| 71,996
|
import math
def R_coat(delta_coat, k_coat, radius, Theta):
    """Thermal resistance due to conduction through the coating layer.

    Parameters
    ----------
    delta_coat: float
        thickness of the coating in m
    k_coat: float
        thermal conductivity of the coating in W/(mK)
    radius: float
        radius of drop in m
    Theta: float
        static contact angle in deg

    Returns
    ----------
    R_coat: float
        thermal resistance due to conduction through coating layer in K/W
    """
    # Conduction area is the circular contact patch: pi * (r * sin(Theta))^2.
    sin_theta = math.sin(math.radians(Theta))
    return delta_coat / (k_coat * math.pi * radius ** 2 * sin_theta ** 2)
|
fe60610ae93b41342fc107d471aa6aeb58e07797
| 72,001
|
def common_vector_root(vec1, vec2):
    """
    Return the longest common prefix of the two vectors.

    Args:
        vec1 (list/tuple): First vector.
        vec2 (list/tuple): Second vector.

    Usage example::
        >>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0])
        [1, 2]

    Returns:
        list: Common leading part of the two vectors, or an empty list.
    """
    prefix = []
    for left, right in zip(vec1, vec2):
        if left != right:
            break
        prefix.append(left)
    return prefix
|
a272f2daaecdb2d79a803a1cf02fd0da5bb9e463
| 72,006
|
def get_channel_descriptions(model, channel_name=None):
    """
    Returns channel descriptions for the given model.

    :param model: the model
    :param channel_name: the channel name; when falsy, all channel
        descriptions are returned
    :return: dictionary of results, or None
    """
    if channel_name:
        return model.get_channel_description(channel_name)
    return model.get_channel_descriptions()
|
0018495b8d3275b3df8ea95c9b47a8a00c0617ac
| 72,010
|
import socket
def scan_server(address, port):
    """Check whether a TCP port on an IP/host accepts connections.

    :param address: host name or IP address to probe
    :param port: TCP port number
    :return: True if a connection could be established, else False
    """
    # The `with` block guarantees the socket is closed on both the success
    # and failure paths; the previous version leaked the file descriptor
    # on every successful connection.
    try:
        with socket.socket() as s:
            s.connect((address, port))
            return True
    except socket.error:
        return False
|
8f484db99c5a49f105f449b6e0056589a57ac85f
| 72,013
|
import math
def distance(ref_point_1, ref_point_2):
    """
    Calculates straight line distance between two points (2d or 3d (tm))

    Falls back to planar (2-D) distance when either point has no third
    coordinate.

    Args:
        ref_point_1 (tm): Reference point 1
        ref_point_2 (tm): Reference point 2
    Returns:
        float: distance between points 1 and 2
    """
    dx = ref_point_2[0] - ref_point_1[0]
    dy = ref_point_2[1] - ref_point_1[1]
    # Only the optional third coordinate is guarded; the original bare
    # `except:` swallowed every exception (even KeyboardInterrupt) raised
    # anywhere in the 3-D computation.
    try:
        dz = ref_point_2[2] - ref_point_1[2]
    except (IndexError, TypeError, KeyError):
        dz = 0.0
    return math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
|
8a1d1881b2636a7686274675e78a8c15697af765
| 72,015
|
def encode_message(message, key=4):
    """Encode a message by shifting each character's Unicode code point.

    Each character is replaced by the character whose code point is ``key``
    greater, producing a simple Caesar-style cipher over Unicode.

    Parameters
    ----------
    message : string
        String to be converted to shifted code points.
    key : int
        Shift applied to each code point; a recipient with the same key
        can reverse the encoding.

    Return
    ---------
    encoded : string
        Result string containing the shifted characters.
    """
    return ''.join(chr(ord(symbol) + key) for symbol in message)
|
da5c0696836917f5d0f3a680144e48e75a3f04ee
| 72,018
|
def bit_read(data, position):
    """Returns the value of a single bit in a number.

    :param: data The number (bytearray) that contains the desired bit.
    :param: position The position of the bit in the number (bit 0 is the
        least-significant bit of the first byte).
    :return: The bit value (True or False).
    """
    byte_index = int(position // 8)  # which byte holds the bit
    bit_index = int(position % 8)    # bit offset within that byte
    # A non-zero masked value means the bit is set; bool() does the rest.
    return bool(data[byte_index] & (1 << bit_index))
|
1bc64d676bd9ee4786622d95141abd05a8266d8d
| 72,019
|
import torch
def arap_R(Si):
    """
    Given the covariance matrix Si, computes the ARAP rotation for point Pi.

    Parameters
    ----------
    Si : Tensor
        the (3,3,) tensor representing the covariance matrix of point Pi

    Returns
    -------
    Tensor
        a (3,3,) tensor representing the ARAP rotation matrix of point Pi
    """
    # ARAP rotation is assembled from the SVD factors as R = V @ U^T.
    u_mat, _, v_mat = torch.svd(Si)
    return v_mat @ u_mat.t()
|
7d266f16d6ce46ce04c8bcb3b074d089800423b5
| 72,021
|
def scalar_pass_through(value):
    """Identity function for single scalar values.

    Parameters
    ----------
    value: scalar
        Scalar cell value from a data frame row.

    Returns
    -------
    scalar
        The input, returned unchanged.
    """
    return value
|
d561b5b5a00c57cdf0a325204a346b77bc5a3e96
| 72,022
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.