content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def devices_to_string(devices):
    """Render a device specification as a comma-separated CUDA device string.

    :param devices: list of devices
    :type devices: int or str or list
    :return: string of device list
    :rtype: str
    """
    # A string is assumed to already be a valid device spec.
    if isinstance(devices, str):
        return devices
    device_list = [devices] if isinstance(devices, int) else devices
    return ', '.join('cuda:{}'.format(d) for d in device_list)
def dict_encode(dict, encoding='cp1251'):
    """Return a copy of *dict* with every value encoded to *encoding* (default: cp1251).

    NOTE(review): the parameter shadows the builtin ``dict``; renaming it
    would break keyword callers, so the name is kept.
    """
    return {key: value.encode(encoding) for key, value in dict.items()}
def convert_pyr_coeffs_to_pyr(pyr_coeffs):
    """Flatten a 'new pyramid' coefficient dict into a list (backwards compatibility).

    Parameters
    ----------
    pyr_coeffs : `dict`
        The `pyr_coeffs` attribute of a `pyramid`.  NOTE: the residual keys
        are popped, so this dict is mutated in place.

    Returns
    -------
    coeffs : `list`
        list of `np.array`, the pyramid coefficients per band, sorted by key
        (bottom of the pyramid to top, orientations in order)
    highpass : `np.array` or None
        the residual highpass from the pyramid, or None if absent
    lowpass : `np.array` or None
        the residual lowpass from the pyramid, or None if absent
    """
    residual_high = pyr_coeffs.pop('residual_highpass', None)
    residual_low = pyr_coeffs.pop('residual_lowpass', None)
    bands = sorted(pyr_coeffs.items(), key=lambda kv: kv[0])
    coeffs = [band for _, band in bands]
    return coeffs, residual_high, residual_low
def is_ganache(ethereum_network):
    """Return True when no ethereum network is configured (i.e. we're using ganache)."""
    if ethereum_network:
        return False
    return True
def format_resp(number, digits=4):
    """Format *number* in RESP style: left-justified, width 10, scientific
    notation with *digits* fractional digits.
    """
    # Build the format string first ("%-10.<digits>E"), then apply it.
    return ("%%-10.%dE" % digits) % number
import torch
def relative_time_to_absolute(batch, relative_lens, rate):
    """Convert SpeechBrain-style relative lengths to absolute durations.

    Operates on batch level.

    Arguments
    ---------
    batch : torch.tensor
        Sequences to determine the duration for.
    relative_lens : torch.tensor
        Relative length of each sequence; the longest sequence must be 1.0.
    rate : float
        Elements-per-second rate (sample rate for raw wavs, 1/frame_shift
        for features); unit 1/s.

    Returns
    -------
    torch.tensor
        Duration of each sequence in seconds.

    Example
    -------
    >>> batch = torch.ones(2, 16000)
    >>> relative_lens = torch.tensor([3./4., 1.0])
    >>> rate = 16000
    >>> print(relative_time_to_absolute(batch, relative_lens, rate))
    tensor([0.7500, 1.0000])
    """
    sequence_length = batch.shape[1]
    # absolute element count, rounded, divided by rate gives seconds
    return torch.round(relative_lens * sequence_length) / rate
def _escape(st):
    """Backslash-escape double quotes so *st* is safe for the Dot exporter."""
    return '\\"'.join(st.split('"'))
def get_good_qa_ints_to_keep_l2cloud():
    """Return the QA-flag integers considered 'good' for L2 cloud masking.

    Bit string layout (MSB first): land/water flag, thermal diff b25,
    thermal diff b45, thermal brightness, cloud flag, cloud-mask-determined
    flag.  See
    https://ecostress.jpl.nasa.gov/downloads/psd/ECOSTRESS_SDS_PSD_L2_ver1-1.pdf
    for the QA flag bit definitions.
    """
    # Every cloud/thermal test bit must be zero; land/water and the
    # mask-determined flag may be anything.  Pairs are listed in the same
    # order the original table enumerated them.
    good_ints_for_masking = []
    for landwater, determined in (("0", "0"), ("0", "1"), ("1", "1"), ("1", "0")):
        bits = landwater + "0000" + determined
        good_ints_for_masking.append(int(bits, 2))  # base-2 string -> int
    return good_ints_for_masking
def multiply(x, y):
    """Return the product of *x* and *y* (curried-friendly form of ``a * b``)."""
    product = x * y
    return product
from typing import List
def find_dup(numbers: List) -> int:
    """Return the first element of *numbers* that repeats an earlier one.

    Complexity:
        Time: O(n) -- single pass over the list
        Space: O(n) -- every distinct item may be stored

    Raises ``Exception`` when no duplicate exists.
    """
    seen = set()
    for value in numbers:
        if value in seen:
            return value
        seen.add(value)
    raise Exception("No duplicate found")
import argparse
import os
def get_args():
    """Parse command line arguments.

    Configures argparse.ArgumentParser with a single ``--path`` option
    (defaulting to the current working directory).

    Returns:
        An argparse.Namespace containing the parsed arguments.
    """
    parser = argparse.ArgumentParser()
    default_path = os.path.abspath(os.getcwd())
    parser.add_argument(
        "--path",
        default=default_path,
        help="path to the terraform configuration to get outputs from",
    )
    return parser.parse_args()
def conjugate(x: complex) -> complex:
    """Return the complex conjugate of *x* (real part kept, imaginary negated)."""
    return complex(x.real, -x.imag)
import math
def radians(degrees):
    """Convert *degrees* to radians, which is used in math functions.

    Delegates to :func:`math.radians` instead of hand-rolling the
    pi/180 multiplication.
    """
    return math.radians(degrees)
def to_lower_camel(s: str) -> str:
    """Convert a snake-case string into lower camel case."""
    first, *rest = s.split("_")
    return first + "".join(part.title() for part in rest)
def get_export_options(defaults=None):
    """Build the argparse option definitions for code-export commands.

    :param defaults: optional dict supplying per-option default values
        (keys: 'model', 'ensemble', 'language'); treated as {} when None
    :return: dict mapping option flag -> kwargs for ``parser.add_argument``
    """
    if defaults is None:
        defaults = {}
    options = {
        # ID of the model to generate a local model
        '--model': {
            'action': 'store',
            'dest': 'model',
            'default': defaults.get('model', None),
            'help': ("ID of the model to generate a local model.")},
        # ID of the ensemble to generate the local models in the ensemble
        '--ensemble': {
            'action': 'store',
            'dest': 'ensemble',
            'default': defaults.get('ensemble', None),
            'help': ("ID of the ensemble to generate the local models in"
                     " the ensemble.")},
        # target language for the generated prediction code
        '--language': {
            'action': 'store',
            'dest': 'language',
            'choices': ['python', 'javascript', 'tableau', 'mysql', 'r'],
            'default': defaults.get('language', 'javascript'),
            'help': ("Language to be used in code generation.")}}
    # If a BigML logistic regression is provided, the script will
    # use it to generate predictions
    # '--logistic-regression': {
    #     'action': 'store',
    #     'dest': 'logistic_regression',
    #     'default': defaults.get('logistic_regression', None),
    #     'help': "BigML logistic regression Id."}
    return options | c72288e49f673e42a44d1f4dc97bebbf2a636962 | 46,149 |
import logging
def _setup_text_logger(name, stream, level):
    """Create and configure a plain-text logger writing ``%(message)s`` to *stream*."""
    logger = logging.getLogger(name)
    stream_handler = logging.StreamHandler(stream)
    stream_handler.setFormatter(logging.Formatter(fmt="%(message)s"))
    logger.addHandler(stream_handler)
    logger.setLevel(level)
    return logger
def normalize_ipam_config_key(key):
    """Normalizes IPAM config keys returned by Docker API to match Ansible keys.

    Most keys are simply lower-cased; irregular ones are special-cased.

    :param key: Docker API key
    :type key: str
    :return Ansible module key
    :rtype str
    """
    if key == 'AuxiliaryAddresses':
        return 'aux_addresses'
    return key.lower()
import yaml
def fetch_mapping(filepath: str) -> dict:
    """Load and return the mapping stored in the YAML file at *filepath*."""
    with open(filepath, "r") as fh:
        return yaml.safe_load(fh)
def upcase(val: str) -> str:
    """Return *val* with every character converted to upper case."""
    result = val.upper()
    return result
from typing import Union
from pathlib import Path
import wave
def empty_wav(wav_path: Union[Path, str]) -> bool:
    """Return True when the wav file at *wav_path* holds zero audio frames."""
    with wave.open(str(wav_path), 'rb') as wav_file:
        frame_count = wav_file.getnframes()
    return frame_count == 0
def _blockdevice_volume_from_datasetid(volumes, dataset_id):
    """
    A helper to get the volume for a given dataset_id.

    :param list volumes: The ``BlockDeviceVolume`` instances to inspect for a
        match.
    :param UUID dataset_id: The identifier of the dataset the volume of which
        to find.
    :return: Either a ``BlockDeviceVolume`` matching the given ``dataset_id``
        or ``None`` if no such volume can be found.
    """
    matches = (v for v in volumes if v.dataset_id == dataset_id)
    return next(matches, None)
def kebab_case_to_human(word):
    """Title-case *word* with '-' turned into spaces; None for falsy input.

    Should NOT be used to unslugify, as '-' also replaces other special
    characters.
    """
    if not word:
        return None
    return word.replace("-", " ").title()
import os
import logging
def get_l3energy(job, par, bls=0):
    """
    Get the L3, single-point energies.
    This is not object oriented.

    :param job: base name of the quantum-chemistry job whose output is read
    :param par: parameter mapping; must contain 'single_point_qc' and the
        relevant single-point key entries
    :param bls: when truthy, use the barrierless-saddle single-point key
    :return: tuple (status, energy); (1, energy-in-Hartree) when found,
        otherwise (0, -1)
    """
    if bls:
        key = par['barrierless_saddle_single_point_key']
    else:
        key = par['single_point_key']
    if par['single_point_qc'] == 'molpro':
        if os.path.exists('molpro/' + job + '.out'):
            with open('molpro/' + job + '.out', 'r') as f:
                lines = f.readlines()
            # scan from the end of the file so the last printed value wins
            for index, line in enumerate(reversed(lines)):
                if ('SETTING ' + key) in line:
                    e = float(line.split()[3])
                    logging.info('L3 electronic energy for {} is {} Hartree.'.format(job, e))
                    return 1, e  # energy was found
    if par['single_point_qc'] == 'gaussian':
        # several naming conventions are tried for the Gaussian log file
        if os.path.exists('gaussian/' + job + '.log'):
            gaussname = 'gaussian/' + job + '.log'
        elif os.path.exists('gaussian/' + job + '_high.log'):
            gaussname = 'gaussian/' + job + '_high.log'
        elif os.path.exists('gaussian/' + job + '_well_high.log'):
            gaussname = 'gaussian/' + job + '_well_high.log'
        else:
            logging.info('L3 for {} is missing.'.format(job))
            return 0, -1  # job not yet started to run
        with open(gaussname) as f:
            lines = f.readlines()
        for line in reversed(lines):
            if (key) in line:
                words = line.split()
                wi = words.index(key) + 2
                # Gaussian prints Fortran-style exponents ('D'); convert for float()
                e = float(words[wi].replace('D', 'E'))
                logging.info('L3 electronic energy for {} is {} Hartree.'.format(job, e))
                return 1, e  # energy was found
    # if no file or no energy found
    logging.info('L3 for {} is missing.'.format(job))
    return 0, -1 | c142d4c91f06c77aeb085650ce76287a606af4ef | 46,159 |
import os
def count_files(root_folder):
    """Recursively count files under *root_folder* (progress messages in pt-BR).

    :param root_folder: directory to walk
    :return: total number of files found
    """
    print('Contagem de arquivos ... Aguarde')
    file_counter = sum(len(files) for _, _, files in os.walk(root_folder))
    print('Total de arquivos na busca: {}'.format(file_counter))
    return file_counter
import torch
def batch_diag1(A):
    """Extract the main diagonal of each matrix in a batch.

    A : (t, d, d)
    returns : (t, d)

    Uses a single vectorized ``torch.diagonal`` call instead of a Python
    loop of per-matrix ``torch.diag`` + ``torch.stack``.
    """
    # clone() so callers get an independent, contiguous tensor, matching
    # the previous stack-based behaviour (diagonal alone returns a view).
    return torch.diagonal(A, dim1=-2, dim2=-1).clone()
def parse_word_expression(expr):
    """
    Parses a word expression such as "thermoelectric - PbTe + LiFePO4" into positive and negative words
    :param expr: a string expression, with " +" and " -" strings separating the words in the expression.
    :return: Returns a tuple of lists (positive, negative)
    """
    # last_word accumulates characters of the current term; is_positive is
    # the sign in force when the current term started (first term positive)
    last_word, i, is_positive = "", 0, True
    positive, negative = [], []
    while i < len(expr):
        if expr[i:i+2] != " +" and expr[i:i+2] != " -":
            last_word += expr[i]
        else:
            # separator found: flush the accumulated term into its bucket
            if last_word.strip():
                positive.append(last_word.strip()) if is_positive else negative.append(last_word.strip())
            # record the new sign and skip past the two-character separator
            is_positive, last_word, i = expr[i:i+2] == " +", "", i + 1
        i += 1
    # flush the final term, if any
    if last_word.strip():
        positive.append(last_word.strip()) if is_positive else negative.append(last_word.strip())
    return positive, negative | 9872e05f7ce86f2973efec8c5e0d5306d718419a | 46,162 |
import re
def __modify_name_replace(file_name, string, replace_string, position):
    """
    Core method to replace a string inside the base name of a file.

    ``position`` selects where the match may occur: "any" (everywhere),
    "prefix" (anchored at the start) or "suffix" (anchored at the end).
    Any other value yields the empty string.
    """
    if position == "any":
        return file_name.replace(string, replace_string)
    if position == "prefix":
        return re.sub("^" + string, replace_string, file_name)
    if position == "suffix":
        return re.sub(string + "$", replace_string, file_name)
    return ""
import random
import string
def rand_string(count=12):
    """Return random string of length count with letters and numbers, mixed case. Uses Python randomness."""
    alphabet = string.ascii_letters + string.digits
    chars = [random.choice(alphabet) for _ in range(count)]
    return ''.join(chars)
from typing import Union
from typing import Sequence
from typing import Tuple
def parse_vspace(
    ncols: int,
    vspace: Union[Sequence[int], bool] = False,
) -> Tuple[bool, Sequence[int]]:
    """Normalise a vspace specification.

    :param ncols: number of columns used to build the default spacing
    :type ncols: int
    :param vspace: a sequence (used verbatim, spacing enabled) or an
        on/off flag (default spacing ``range(ncols)`` is generated)
    :return: tuple (add_vspace, vspace)
    :rtype: Tuple[bool, Sequence[int]]
    """
    if not isinstance(vspace, Sequence):
        return bool(vspace), list(range(ncols))
    return True, vspace
def setBits(lst):
    """
    Return an int with the bits in lst set to 1.
    Bits 18, 20, 22, 24, and 28 are always set (since they should always be set);
    all other bits are 0.
    """
    always_on = [18, 20, 22, 24, 28]
    value = 0
    for bit in lst:
        value |= 1 << bit
    for bit in always_on:
        value |= 1 << bit
    return value
def predicate(printer, ast):
    """Prints a predicate.

    :param printer: object exposing ``uppaal_c_printer.ast_to_string``
    :param ast: mapping whose 'expr' entry holds the expression AST
    :return: the printed expression as a string
    """
    return f'{printer.uppaal_c_printer.ast_to_string(ast["expr"])}' | bf46fe1b9d7b2dfd214e787ab1ae41b65fa621f3 | 46,170 |
import string
import random
def set_xrf():
    """
    Create XRF key used to prevent cross site request forgery.

    ``random.sample`` picks without replacement, so the 16 characters
    are all distinct (and this is not cryptographically secure).
    """
    pool = string.ascii_letters + string.digits
    picks = random.sample(pool, 16)
    return ''.join(picks)
from typing import List
from typing import Callable
def create_matcher(queries: List[str], query_type: str) -> Callable[[str], bool]:
    """
    Create a case-insensitive substring matcher for a list of queries.

    Parameters
    ----------
    queries : list
        List of queries
    query_type: str
        Type of query to run: ["or"|"and"]; "or" matches when any query
        occurs in the candidate, anything else requires all queries.

    Returns
    -------
    Matcher function
    """
    lowered = [query.lower() for query in queries]
    if query_type == "or":
        def matcher(candidate: str) -> bool:
            return any(q in candidate.lower() for q in lowered)
    else:
        def matcher(candidate: str) -> bool:
            return all(q in candidate.lower() for q in lowered)
    return matcher
def reverseSentence(s):
    """Reverse the word order: "i am a student." -> "student. a am i"."""
    words = s.split(' ')
    return ' '.join(words[::-1])
async def format_embed_description(records: dict, format_str: str):
    """Formats an embed description to use.

    Args:
        records (dict): records to convert to description
        format_str (str, optional): format string applied to each
            (key, value) pair; do not pass a F-String!

    Returns:
        str: the formatted description, wrapped in a code block
    """
    lines = [format_str.format(*item) for item in records.items()]
    return '```{}```'.format('\n'.join(lines))
def remove_suffix(s: str, suffix: str) -> str:
    """Remove the suffix from the string. I.e., str.removesuffix in Python 3.9."""
    # guard: suffix="" must not trigger s[:-0] (which would be empty)
    if suffix and s.endswith(suffix):
        return s[:-len(suffix)]
    return s
import re
def split_camel_cased(text: str) -> str:
    """Split camelCased elements with a space.

    Arguments:
        text:
            The text to be processed.

    Returns:
        The text with all camelCased elements split into different elements.
    """
    pattern = re.compile(r"(?!^)([A-Z][a-z]+)")
    return pattern.sub(r" \1", text)
from bs4 import BeautifulSoup
import re
def default_db_connection(publish_settings):
    """Extract credentials of the Azure 'defaultConnection' from PublishSettings XML.

    :param publish_settings: the PublishSettings document as a string
    :return: (username, password) parsed from the connection string; both
        empty strings when the regex does not match
    """
    username, password = '', ''
    # html.parser lower-cases attribute names, hence 'connectionstring' below
    soup = BeautifulSoup(publish_settings, 'html.parser')
    connections = soup.find_all('add')
    regex = 'Database=(.+);Data Source=(.+);User Id=(.+);Password=(.+)'
    # assumes exactly one <add name="defaultConnection"> entry exists -- TODO confirm
    db_connection = [conn for conn in connections if conn['name'] == 'defaultConnection'][0]
    matches = re.search(regex, db_connection['connectionstring'])
    if matches:
        username = matches.group(3)
        password = matches.group(4)
    return username, password | 37351f7f41c6dfd037fd33c5bc94faf18fa40fcd | 46,179 |
import torch
import pathlib
def migrate_180826_models(f_folder: str, f_glob: str) -> int:
    """
    inside <EXPERIMENT>/compressor/
    provide f_glob -> '*.torch'

    Rename the legacy '_decoder.codebooks' key to '_decoder.components'
    in every matching *.torch state dict; returns the number of files
    rewritten.
    """
    old_key = '_decoder.codebooks'
    new_key = '_decoder.components'
    migrated = 0
    for path in pathlib.Path(f_folder).glob(f_glob):
        fname = str(path)
        state_dict = torch.load(fname)
        if old_key in state_dict:
            state_dict[new_key] = state_dict.pop(old_key)
            torch.save(state_dict, fname)
            migrated += 1
        # every processed file must end up with the new key
        assert new_key in state_dict
    return migrated
import math
def triangle(v):
    """
    Return a value corresponding to a triangle wave.

    Period 1, range [-1, 1]; -1 at integers, peaks halfway between.
    """
    distance = abs(v - math.floor(v + 0.5))
    return 2 * distance - 1
def get_datatypes(xml_data_model, tag_name='nodeset', data_type='type', max_str=None):
    """
    :param xml_data_model: data model list read from ODK config (read_odk_data_model)
    :param tag_name: column name containing the tag names of the data model
    :param data_type: column name containing the data type belonging to tag in data model
    :param max_str: maximum string length used in tags in GIS file to check (can be limited to 10 in case of shapefile)
    :return: check_keys: dictionary of key, value = tag name, datatype
             check_json: same as check_keys but using a string representation of datatype
    """
    # Map datatype-name strings directly to Python types instead of the
    # previous string-building + eval() round-trip.
    type_map = {'str': str, 'int': int, 'float': float}
    check_keys = {}
    check_json = {}
    for data in xml_data_model:
        if not data.keys():
            continue  # skip empty rows
        try:
            # tag is the last path component, optionally truncated to max_str
            name = data.get(tag_name).split('/')[-1][0:max_str]
            datatype = data.get(data_type)
            if datatype in ('string', 'select1'):
                dtype = 'str'
            elif datatype == 'int':
                dtype = 'int'
            elif datatype == 'float':
                dtype = 'float'
            else:
                dtype = None  # unsupported datatypes are skipped
            if dtype is not None:
                check_keys[name] = type_map[dtype]
                check_json[name] = dtype
        except Exception:
            # best-effort: malformed rows (e.g. missing tag column) are
            # skipped, matching the original behaviour
            pass
    return check_keys, check_json
def filter_tagged_vocabulary(tagged_vocabulary, vocabulary, split="|"):
    """Filters tagged_vocabulary (tokens merged with tags) for tokens
    occurring in vocabulary.

    Parameters
    ----------
    tagged_vocabulary : collection
        vocabulary of tokens (can be merged with tags)
    vocabulary : collection
        target vocabulary of tokens without tags
    split : str
        string delimiting tags and tokens in tagged_vocabulary

    Returns
    -------
    set of tagged tokens whose bare token is in *vocabulary*
    """
    return {
        tagged for tagged in tagged_vocabulary
        if tagged.split(split)[0] in vocabulary
    }
def _f(X, y, clf):
    """Returns the flattened coefficients of a fitted classifier.

    This function exists at the module level instead of as an anonymous or
    subordinate function so that it is importable by `multiprocessing`
    (module-level functions are picklable; closures/lambdas are not).

    Parameters
    ----------
    X : array
        Design matrix.
    y : array
        Response vector.
    clf : sklearn.base.BaseEstimator
        A scikit-learn estimator with `fit` and `predict` methods and a
        `coef_` attribute.

    Returns
    -------
    coef : array
        The flattened coefficients of the fitted classifier.
    """
    # NOTE: fit() mutates clf in place before its coefficients are read
    return clf.fit(X, y).coef_.flatten() | 209d60f77168d39b541e3b091a8d342ed8c7fead | 46,185 |
def load_languages(language, languages_dict):
    """loads the language from the string-dict loaded from the config

    Args:
        language (str): selectbox choice for the language model from the user
        languages_dict (dict): dict containing to-evaluate strings of the language

    Returns:
        tfhub language or transformers language or whatlies language: language model to encode texts
    """
    # SECURITY: eval() executes arbitrary code taken from the config dict --
    # only safe when the configuration source is fully trusted.
    return eval(languages_dict[language]) | 3f7d945ab5154a44249755a067fc53d7ff7c4d9f | 46,187 |
def get_data_symptom(driver, options):
    """Return a record holding the selected symptom (second option's text).

    ``driver`` is unused but kept for interface compatibility with the
    other record extractors.
    """
    symptom_text = options[1].text.strip()
    return {"symptom": symptom_text}
def inputthis(question='-> ', expected_tuple=('Y', 'N'), error='ERRO! Resposta inesperada!'):
    """
    Prompt repeatedly until the (stripped) reply is one of the expected options.

    :param question: Input text
    :param expected_tuple: Tuple containing all the options from which the user should choose
    :param error: Error message for when the input isn't contained in the tuple
    :return: The user's answer
    """
    while True:
        x = str(input(question)).strip()
        # comparison is exact (case-sensitive) against expected_tuple
        if x in expected_tuple:
            return x
        else:
            print(error, '\n') | 36c3d47f0ba0c73d12a1323241e1d6173aecd621 | 46,189 |
def standardize_json_string(json_string):
    """
    Replace " with ' if they occur within square brackets
    eg {"key":"["Key":"value"]"} => {"key":"['Key':'value']"}
    """
    out_chars = []
    inside_brackets = False
    for ch in json_string:
        # the bracket characters themselves toggle the state first,
        # then are copied through unchanged
        if ch == '[':
            inside_brackets = True
        elif ch == ']':
            inside_brackets = False
        if inside_brackets and ch == '"':
            out_chars.append("'")
        else:
            out_chars.append(ch)
    # Note: json object cannot have python lists as keys
    # standard_json_string \
    #     = standard_json_string.replace("\"[","[").replace("]\"","]")
    return "".join(out_chars)
from typing import Any
import ctypes
def di(id_: int) -> Any:
    """
    Hacky inverse for id.

    HACK: relies on CPython ids being memory addresses; unsafe if the
    object has already been garbage collected.
    """
    boxed = ctypes.cast(id_, ctypes.py_object)
    return boxed.value
def get_default_model_settings():
    """Return some default settings for a DeepSphere model.

    :return: dict of naming, architecture, pooling and convolution defaults;
        callers may override individual entries before building the model
    """
    model_settings = {"pretrained_model_name": None,
                      "model_name_prefix": None,
                      "model_name": None,
                      "model_name_suffix": None,
                      # Architecture options
                      # - ConvBlock options
                      "kernel_size_conv": 3,
                      "bias": True,
                      "batch_norm": False,
                      "batch_norm_before_activation": False,
                      "activation": True,
                      "activation_fun": 'relu',
                      # - Pooling options
                      "pool_method": "Max",
                      "kernel_size_pooling": 4,  # half the resolution
                      # Convolution types
                      "conv_type": 'graph',
                      "graph_type": "knn",
                      "knn": 20,
                      # - Options for conv_type="image" when sampling="Equiangular
                      "periodic_padding": 'True',
                      }
    return model_settings | 68a63a06edd30cbc737e1d11b862ded10b7add03 | 46,193 |
def _find_match(needle: dict, haystack: list, keys: list):
    """Find a dictionary in a list of dictionaries based on a set of keys.

    Returns the first dict in *haystack* that agrees with *needle* on every
    key in *keys* (missing keys compare as None), or None when nothing matches.
    """
    for candidate in haystack:
        if all(candidate.get(key) == needle[key] for key in keys):
            return candidate
    return None
def _extract_params_from_i0(only_variable_parameters, parameters_with_variability, initial_conditions_with_variability):
    """
    Used within the distance/cost function to create the current kinetic parameter and initial condition vectors
    to be used during that interaction, using current values in i0.

    Fixed entries come straight from the (value, is_variable) pairs; the
    variable entries are consumed, in order, from *only_variable_parameters*
    (kinetic parameters first, then initial conditions).

    :param only_variable_parameters: `i0` list returned from `make_i0`
    :param parameters_with_variability: list of (value, is_variable) pairs for kinetic parameters
    :param initial_conditions_with_variability: list of (value, is_variable) pairs for initial conditions
    :return: tuple (complete_params, complete_initial_conditions)
    """
    variable_values = iter(only_variable_parameters)
    complete_params = [
        next(variable_values) if is_variable else value
        for value, is_variable in parameters_with_variability
    ]
    complete_initial_conditions = [
        next(variable_values) if is_variable else value
        for value, is_variable in initial_conditions_with_variability
    ]
    return complete_params, complete_initial_conditions
def handler(mocker, aws_request_id):
    """
    this fixture provides a way to call the function handlers so as
    to insert a canned context (which is assumed by the logger setup)

    :param mocker: pytest-mock fixture used to fabricate a Lambda context
    :param aws_request_id: request id injected into the (mock or real) context
    :return: callable (func_module, event, context=None) -> handler result
    """
    def _handler(func_module, event, context=None):
        if context is None:
            # no context supplied: fabricate one carrying the request id
            context = mocker.Mock(aws_request_id=aws_request_id)
        else:
            # real context supplied: overwrite its request id for consistency
            context.aws_request_id = aws_request_id
        return getattr(func_module, "handler")(event, context)
    return _handler | 55440de29bc5070ae111547db687c97be531a178 | 46,196 |
import os
def getOutputFileName(originalFileName, outputExtension, index=None):
    """
    Return a filename which is the same as C{originalFileName} except for the
    extension, which is replaced with C{outputExtension}.

    Note: C{outputExtension} is appended verbatim, so include the leading
    dot (e.g. '.quux') to get C{'/foo/bar.quux'} from C{'/foo/bar.baz'}.

    @type originalFileName: C{str}
    @type outputExtension: C{str}
    @param index: ignored, never passed.
    @rtype: C{str}
    """
    base, _old_ext = os.path.splitext(originalFileName)
    return base + outputExtension
def classify(instruction):
    """Classify instruction.

    Return name of instruction handler and arguments.

    :param instruction: 16-bit instruction word (CHIP-8-style opcode layout)
    :return: tuple (handler_name, *decoded_args); ("",) when unrecognised
    """
    # the two zero-argument instructions (0x00e0, 0x00ee) match verbatim
    if instruction in (0x00e0, 0x00ee):
        return f"op_{instruction:04x}",
    # high nibble selects the opcode family
    opcode = instruction >> 12
    if 0 <= opcode <= 2:
        return f"op_{opcode}nnn", instruction & 0x0fff
    if 3 <= opcode <= 4:
        x = instruction & 0x0f00
        kk = instruction & 0x00ff
        return f"op_{opcode}xkk", x >> 8, kk
    if opcode == 5:
        # if instruction & 0xf00f == 0x5000
        if instruction & 0x000f == 0:
            x = instruction & 0x0f00
            y = instruction & 0x00f0
            return "op_5xy0", x >> 8, y >> 4
    if 6 <= opcode <= 7:
        x = instruction & 0x0f00
        kk = instruction & 0x00ff
        return f"op_{opcode}xkk", x >> 8, kk
    if opcode == 8:
        # low nibble is the sub-function selector
        function = instruction & 0x000f
        x = instruction & 0x0f00
        y = instruction & 0x00f0
        if 0 <= function <= 7:
            return f"op_8xy{function}", x >> 8, y >> 4
        if function == 0xe:
            return f"op_8xye", x >> 8, y >> 4
    if opcode == 9:
        if instruction & 0x000f == 0:
            x = instruction & 0x0f00
            y = instruction & 0x00f0
            return "op_9xy0", x >> 8, y >> 4
    if 0xa <= opcode <= 0xb:
        return f"op_{opcode:1x}nnn", instruction & 0x0fff
    if opcode == 0xc:
        x = instruction & 0x0f00
        kk = instruction & 0x00ff
        return "op_cxkk", x >> 8, kk
    if opcode == 0xd:
        x = instruction & 0x0f00
        y = instruction & 0x00f0
        n = instruction & 0x000f
        return "op_dxyn", x >> 8, y >> 4, n
    if opcode == 0xe:
        function = instruction & 0x00ff
        x = instruction & 0x0f00
        if function == 0x9e:
            return "op_ex9e", x >> 8
        if function == 0xa1:
            return "op_exa1", x >> 8
    if opcode == 0xf:
        function = instruction & 0x00ff
        # only these sub-functions are defined for the 0xf family
        if function in (0x07, 0x0a, 0x15, 0x18, 0x1e, 0x29, 0x33, 0x55, 0x65):
            x = instruction & 0x0f00
            return f"op_fx{function:02x}", x >> 8
    # unrecognised instruction: empty handler name, no arguments
    return "", | 1e639c426221bda290ac1e8adfc03b36cab9af5c | 46,198 |
def get_cached_stickers(context, fuzzy=False):
    """Return one 50-item page of cached inline-query search results.

    The cache for this inline query lives in the bot's ``bot_data``;
    ``fuzzy`` selects the fuzzy result list (with its own offset) instead
    of the strict one.
    """
    cache = context.tg_context.bot_data[context.inline_query_id]
    if fuzzy:
        page, start = cache["fuzzy"], context.fuzzy_offset
    else:
        page, start = cache["strict"], context.offset
    return page[start : start + 50]
from typing import List
from typing import Tuple
def find_two_smallest(L: List[float]) -> Tuple[int, int]:
    """Return the indices of the two smallest values in *L*.

    The list is temporarily mutated (remove + insert) but restored before
    returning.
    """
    lowest = min(L)
    first_idx = L.index(lowest)
    L.remove(lowest)
    second_idx = L.index(min(L))
    L.insert(first_idx, lowest)
    # removal shifted everything at/after first_idx left by one slot
    if first_idx <= second_idx:
        second_idx += 1
    return (first_idx, second_idx)
def get_attribute(name):
    """Build an evaluator returning the element's attribute ``name`` (None when absent)."""
    return lambda element: element.attrib.get(name)
import requests
def get_attd_as_json():
    """
    We can fetch the attendance 4PM report via the
    SODA api using the requests library, as the
    SODA api is REST-ful. The data updates every
    day, and includes previous data, so we only
    need to fetch this on a daily basis after 4
    in the afternoon.

    NOTE(review): performs a blocking network call with no timeout or
    error handling; callers should check ``response.status_code``.

    :return request response object
    """
    return requests.get('https://data.cityofnewyork.us/resource/uzy6-icxe.json') | 382b8fa9389450a5211e5d4ac8ee8a0fc2b11264 | 46,203 |
from typing import Counter
def unique_words_by_tag(tokens, tag_value='N/A'):
    """Return a counter of the tokens with the given tag value.

    Words are lower-cased; a tag matches when it starts with *tag_value*,
    and the sentinel 'N/A' counts every token.
    """
    return Counter(
        word.lower()
        for word, tag in tokens
        if tag_value == 'N/A' or tag.startswith(tag_value)
    )
def total_per_person(single_plan):
    """Returns total cost per person.

    Parameter
        single_plan: single plan from database

    Returns: sum of each cost's per-person share (rounded to 2 decimals
        individually), or 0 when the plan has no costs
    """
    costs = single_plan.cost_set.all()
    if not costs:
        return 0
    return sum(round(cost.cost / cost.number_of_members, 2) for cost in costs)
def writeable(doc):
    """
    Return a writeable tuple for doc.

    A writeable tuple is any 2-tuple of `output_path`, `bytes`;
    `lettersmith.write` knows how to write these tuples to disk.
    """
    encoded = doc.content.encode()
    return doc.output_path, encoded
def coco_valid_joints(jointsx, jointsy, jointsv, dims):
    """
    coco_valid_joints: Function to decide which joints are valid. Each dataset
    has different criteria, so can't have a single function. Boo.

    jointsx: ndarray joints x cood
    jointsy: ndarray joints y coord
    jointsv: ndarray joint visibility flags; a joint is valid when > 0
    dims: (width, height)
    :return: boolean ndarray, same shape as jointsv, True where valid
    """
    assert(jointsx.shape==jointsy.shape)
    assert(jointsx.shape==jointsv.shape)
    val_joints=(jointsv>0)
    # This all seems unnecessary because no images "break the rules"
    # Can keep to check in future if new data added
    #inval_joints=(jointsv==0)
    #zerox=jointsx[inval_joints]
    #zeroy=jointsy[inval_joints]
    #if (zerox|zeroy).any():
    #    print('INVALID BUT NONZERO X/Y')
    #    print(jointsx)
    #    print(jointsy)
    #    print(jointsv)
    #if (jointsx>=dims[0]).any():
    #    print(f'BAD X BIG {index} {dims[0]}')
    #    print(dims)
    #    print(jointsx)
    #    print(jointsy)
    #if (jointsx<0).any():
    #    print(f'BAD X SMALL {index} {dims[0]}')
    #    print(dims)
    #    print(jointsx)
    #    print(jointsy)
    #if (jointsy>=dims[1]).any():
    #    print(f'BAD Y BIG {index} {dims[1]}')
    #    print(dims)
    #    print(jointsx)
    #    print(jointsy)
    #if (jointsy<0).any():
    #    print(f'BAD Y SMALL {index} {dims[1]}')
    #    print(dims)
    #    print(jointsx)
    #    print(jointsy)
    return val_joints | 5bd20838c4b309b854cf0c93da68929f8f4226b3 | 46,208 |
def fsm_submit_button(transition):
    """
    Render a submit button that requests an fsm state transition for a
    single state.

    :param transition: (fsm_field_name, button_value, transition_name) tuple
    :return: template context dict for the button
    """
    context_keys = ('fsm_field_name', 'button_value', 'transition_name')
    return dict(zip(context_keys, transition))
def get_attrs():
    """Return the default attrs mapping (feature library enabled)."""
    return {"enable_feature_library": True}
def pointOnTrianglePlane(P, A, B, C):
    """
    Projects the point P onto the plane containing triangle A, B, C.

    Vector arguments are assumed to provide ``-``, ``+``, scalar ``*``,
    ``cross``, ``dot`` and ``normalize`` -- TODO confirm against the
    project's vector type.
    """
    # unit normal of the triangle's plane
    N = (B - A).cross(C - A).normalize()
    # move P along the normal by minus its signed distance to the plane
    return P + (-(P - A).dot(N))*N | c70c02c31905208fa8a0942df7a12d33316f0afa | 46,212 |
def clean(code):
    """
    Removes characters other than eight language commands.

    Returns a lazy filter object, like the original implementation.
    """
    commands = {'.', ',', '[', ']', '<', '>', '+', '-'}
    return filter(lambda ch: ch in commands, code)
def get_analysis_type(normal, umi):
    """Return the analysis type: "paired" when a normal sample is present,
    otherwise "single".

    NOTE(review): ``umi`` is accepted for interface compatibility but is
    not used by this function.
    """
    if normal:
        return "paired"
    return "single"
import argparse
import os
def get_cleaner_ArgumentParser(clean_func):
    """Get an ArgumentParser instance for clean up functions.
    Parameters
    ----------
    clean_func : str
        The name of the cleaner function to get arguments for. Must be one of:
        "wrapper", "output", "logs".
    Returns
    -------
    ap : ArgumentParser instance
        A parser with the relevant options.
    Raises
    ------
    AssertionError
        This is raised if `clean_func` is not a valid option.
    """
    # Per-function program name and the noun used in the shared positional
    # argument's help text (dedupes three copy-pasted add_argument calls).
    settings = {
        "wrapper": ("clean_wrapper_scripts.py", "wrapper files"),
        "output": ("clean_output_files.py", "output files"),
        "logs": ("consolidate_logs.py", "log files"),
    }
    # check that function specified is a valid option
    if clean_func not in settings:
        raise AssertionError("clean_func must be one of {}".format(",".join(settings)))
    prog, noun = settings[clean_func]
    ap = argparse.ArgumentParser()
    ap.prog = prog
    # Every cleaner takes the same optional directory positional.
    ap.add_argument(
        "directory",
        type=str,
        nargs="?",
        default=os.getcwd(),
        help="Directory where {} reside. Defaults to current directory.".format(noun),
    )
    # Log consolidation additionally controls its combined-output file.
    if clean_func == "logs":
        ap.add_argument(
            "-o",
            "--output",
            default="mf.log",
            type=str,
            help="Name of output file. Default is 'mf.log'",
        )
        ap.add_argument(
            "--overwrite",
            action="store_true",
            default=False,
            help="Option to overwrite output file if it already exists.",
        )
        ap.add_argument(
            "--save_original",
            action="store_false",
            dest="remove_original",
            default=True,
            help="Save original log files once combined in output.",
        )
        ap.add_argument(
            "-z",
            "--zip",
            action="store_true",
            default=False,
            help="Option to zip resulting output file.",
        )
    return ap
def list_to_ol(input_list):
    """
    Render an ordered HTML list from the given items.
    @param input_list: list of items
    @return: html for list of items
    """
    items_html = '\n'.join('<li>%s</li>' % item for item in input_list)
    return '<ol>%s</ol>' % items_html
from typing import Dict
from typing import Tuple
from typing import List
from typing import Any
def _traverse_foreign_key_tree(
    tree: Dict[str, Dict[Tuple[str, ...], dict]], name: str, fields: Tuple[str, ...]
) -> List[Dict[str, Any]]:
    """Traverse foreign key tree.
    Walks the chain of foreign keys starting at resource `name` with local
    `fields`, following references transitively and translating each deeper
    key's fields back into the original local field names.
    Args:
        tree: Foreign key tree (see :func:`_build_foreign_key_tree`).
        name: Local resource name.
        fields: Local resource fields.
    Returns:
        Sequence of foreign keys starting from `name` and `fields`:
        * `fields` (List[str]): Local fields.
        * `reference['resource']` (str): Reference resource name.
        * `reference['fields']` (List[str]): Reference primary key fields.
    """
    # No outgoing foreign key for this (resource, fields) pair.
    if name not in tree or fields not in tree[name]:
        return []
    reference = tree[name][fields]
    result = [{"fields": list(fields), "reference": reference}]
    target = reference["resource"]
    if target not in tree:
        return result
    ref_fields = reference["fields"]
    for candidate in tree[target]:
        # Only follow keys fully contained in the fields we referenced.
        if not set(candidate) <= set(ref_fields):
            continue
        for deeper in _traverse_foreign_key_tree(tree, target, candidate):
            # Map the deeper key's fields back onto the local field names.
            local = [fields[ref_fields.index(field)] for field in deeper["fields"]]
            result.append({"fields": local, "reference": deeper["reference"]})
    return result
from typing import List
from typing import Dict
def generate_currency_endowments(
    agent_addresses: List[str], currency_ids: List[str], money_endowment: int
) -> Dict[str, Dict[str, int]]:
    """
    Compute the initial money amounts for each agent.
    Each agent now receives its own endowment dict. (Fixed: previously all
    agents shared one dict object, so mutating one agent's endowment
    silently mutated every agent's endowment.)
    :param agent_addresses: addresses of the agents.
    :param currency_ids: the currency ids.
    :param money_endowment: money endowment per agent.
    :return: the nested dict of currency endowments
    """
    return {
        agent_addr: {currency_id: money_endowment for currency_id in currency_ids}
        for agent_addr in agent_addresses
    }
def key_to_hump(key):
    """
    Convert a snake_case identifier to PascalCase, e.g. "my_var" -> "MyVar".
    :return: the PascalCase name
    """
    parts = key.lower().split("_")
    return "".join(part.title() for part in parts)
def constrained_domain(**namespace):
    """Return the constrained (e.g. periodic) domain; None means unconstrained."""
    return None
def sort_list_by_other(to_sort, other, reverse=True):
    """Sort one list using a second list as the sort key.

    Pairs are compared as (other[i], to_sort[i]) tuples, so ties in
    ``other`` fall back to comparing the values themselves.
    """
    paired = sorted(zip(other, to_sort), reverse=reverse)
    return [value for _key, value in paired]
import plistlib
def uid_convert(obj):
    """``json.dumps`` ``default=`` hook that serializes plistlib UIDs.
    :param obj: object the JSON encoder could not serialize natively.
    :return: "UID(<n>)" string for :class:`plistlib.UID` instances.
    :raises TypeError: for any other type, per the ``default`` hook
        contract. (Previously fell through and returned None, which made
        json silently serialize unknown objects as ``null``.)
    """
    if isinstance(obj, plistlib.UID):
        return "UID(" + str(obj.data) + ")"
    raise TypeError(
        "Object of type %s is not JSON serializable" % type(obj).__name__
    )
import random
def _uniq_id():
    """
    Create a random 64-bit signed integer appropriate
    for use as trace and span IDs.
    XXX: By experimentation zipkin has trouble recording traces with ids
    larger than (2 ** 56) - 1, so IDs are capped to 56 bits.
    """
    upper_bound = (2 ** 56) - 1
    return random.randint(0, upper_bound)
def update_analyze_button(disabled):
    """
    Return the CSS style dict for the analyze button.

    Enabled buttons are rendered green; disabled ones keep the default
    background.

    :param disabled: if the button is disabled
    """
    style = {"width": "100%", "text-transform": "uppercase",
             "font-weight": "700"}
    if not disabled:
        style.update({"background": "green", "outline": "green"})
    return style
import json
def add_environment_endpoint(app, config):
    """
    Register a GET /_environ endpoint on *app*.
    The endpoint returns a JSON document exposing the configuration
    variables loaded in the environment, for deployment sanity checks.
    :param app: Flask-style application providing a ``route`` decorator.
    :param config: configuration object providing ``to_json``.
    :return: None (the handler is registered as a side effect).
    """
    @app.route("/_environ", methods=["GET"])
    def environment():  # pylint: disable=unused-variable
        """
        Serialize the loaded configuration variables as JSON.
        """
        # NOTE(review): config.to_json(config) passes config as an explicit
        # self argument — presumably config is a class; verify upstream.
        return json.dumps({"app": {"environ": config.to_json(config)}})
from typing import Tuple
from pathlib import Path
import subprocess
def walksat(input_dimacs: str,
            solver_exe: str = "binary/walksat_linux",
            ) -> Tuple[bool, list, float]:
    """
    Run WalkSAT v56 (https://gitlab.com/HenryKautz/Walksat) on a formula.
    :param input_dimacs: Correctly formatted DIMACS file as string
    :param solver_exe: Absolute or relative path to solver executable
    :return: (satisfiable, solution in form [1, 2, -3, ...], elapsed seconds)
    """
    exe = str(Path(solver_exe).resolve())
    proc = subprocess.run([exe, "-solcnf", "-gsat", "-cutoff", "500K"],
                          input=input_dimacs,
                          stdout=subprocess.PIPE, universal_newlines=True)
    if proc.returncode != 0:
        raise RuntimeError("WalkSAT: Unexpected return code ", proc.returncode)
    lines = proc.stdout.strip().split("\n")
    # Elapsed time is reported as "total elapsed seconds = <float>".
    time_line = [line for line in lines if line.startswith("total elapsed seconds")][0]
    time_elapsed = float(time_line.strip().split(" = ")[-1])
    found = [line for line in lines if line == "ASSIGNMENT FOUND"]
    if len(found) > 1:
        raise RuntimeError("Only one ASSIGNMENT FOUND should be present in output")
    sat = bool(found)
    if not sat:
        return sat, [], time_elapsed
    # Assignment lines look like "v <literal>"; keep the trailing literal.
    solution = [int(line.strip().split(" ")[-1])
                for line in lines if line.startswith("v ")]
    return sat, solution, time_elapsed
def format_id(ident):
    """
    Convert a message ID to its canonical 16-digit uppercase hex form.
    """
    return format(ident, '016X')
import sys
def main_is_frozen():
    """
    Returns whether or not it is frozen in an executable
    #http://www.py2exe.org/index.cgi/HowToDetermineIfRunningFromExe
    :rtype : bool
    """
    # new py2exe / PyInstaller set sys.frozen; old py2exe sets sys.importers
    if hasattr(sys, "frozen") or hasattr(sys, "importers"):
        return True
    try:
        # ``imp`` is deprecated since 3.4 and removed in Python 3.12; import
        # it lazily so this module still loads on modern interpreters.
        import imp
    except ImportError:
        return False
    return imp.is_frozen("__main__")
def catch_all(path):
    """
    Fallback handler: serve the placeholder message for any path.
    """
    message = "we gotta help the pope"
    return message
def read_classes(classes_path):
    """Reads class names from a file, one per line.
    Args:
        classes_path (str):
            Path to file containing names of all classes.
    Returns:
        list: List containing names of all classes, whitespace-stripped.
    """
    with open(classes_path) as handle:
        return [line.strip() for line in handle]
import torch
def generate_sparse_one_hot(num_ents, dtype=torch.float32):
    """Create an (n, n) sparse COO tensor with ones on the diagonal
    (one-hot identity encoding for *num_ents* entities)."""
    diagonal = torch.arange(num_ents)
    return torch.sparse_coo_tensor(
        indices=torch.stack([diagonal, diagonal]),
        values=torch.ones(num_ents, dtype=dtype),
        size=(num_ents, num_ents))
def __is_prefix(pref, word):
    """
    SUMMARY
        returns whether a string is prefix of another
    PARAMETERS
        pref: the string we check whether is prefix
        word: the string we search the prefix in
    RETURNS
        boolean: True if <pref> is prefix of <word> otherwise False
    """
    # str.startswith expresses the intent directly (and, unlike find(),
    # does not scan the whole string when the prefix is absent).
    return word.startswith(pref)
from typing import Optional
from typing import List
from typing import Dict
def generate_info_json(
    prev_photo: Optional[str], next_photo: Optional[str], exif: List
) -> Dict:
    """Generate the content of the story metadata file.
    Args:
        prev_photo: id of the previous photo as a numeric string, or None.
        next_photo: id of the next photo as a numeric string, or None.
        exif: sequence of (date_taken, focal_length_35mm, exposure_time,
            f_number, iso); date_taken must provide ``isoformat()``.
    Returns:
        dict ready to be serialized as the story's metadata JSON.
    """
    date_taken, focal_length, exposure, f_number, iso = (
        exif[0], exif[1], exif[2], exif[3], exif[4])
    data = {
        # Independent per-language placeholder dicts for title/description.
        "title": {"en": "", "fi": "", "fr": ""},
        "description": {"en": "", "fi": "", "fr": ""},
        "story": "",
        "dateTaken": date_taken.isoformat(),
        "focalLength35mm": focal_length,
        "exposureTime": exposure,
        "fNumber": f_number,
        "iso": iso,
    }
    # Neighbor links are stored as ints and omitted at the ends of the story.
    if prev_photo is not None:
        data["prev"] = int(prev_photo)
    if next_photo is not None:
        data["next"] = int(next_photo)
    return data
def catGram(mp, categories):
    """ Select the minimal pairs whose grammatical categories (both cgram_1
    and cgram_2) are in *categories*. """
    both_in_categories = mp.cgram_1.isin(categories) & mp.cgram_2.isin(categories)
    return mp[both_in_categories]
def splev_wikipedia(k: int, x: int, t, c, p: int):
    """Evaluate the B-spline S(x) via de Boor's algorithm.
    Arguments
    ---------
    k: Index of knot interval that contains x.
    x: Position.
    t: Array of knot positions, needs to be padded as described above.
    c: Array of control points.
       We will look at c[k-p .. k]
    p: Degree of B-spline.
    """
    # Guard against indexing c outside [0, len(c)).
    if k - p < 0:
        raise Exception("c referenced out of min bounds")
    if k >= len(c):
        raise Exception("c referenced out of max bounds")
    # Working copy of the p+1 control points influencing this interval.
    d = [c[k - p + j] for j in range(p + 1)]
    for r in range(1, p + 1):
        for j in range(p, r - 1, -1):
            left = t[j + k - p]
            right = t[j + 1 + k - r]
            alpha = (x - left) / (right - left)
            d[j] = (1 - alpha) * d[j - 1] + alpha * d[j]
    return d[p]
def merge_sort(arr):
    """Sort *arr* (a list) with recursive, stable merge sort.

    Returns a new sorted list (the input itself when it has fewer than
    two elements); the input is never modified.
    """
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    return _merge_sorted(left, right)


def _merge_sorted(a, b):
    """Merge two pre-sorted lists into one sorted list.

    Stable: on ties the element from *a* is taken first.
    """
    merged = []
    i = j = 0
    while i < len(a) and j < len(b):
        if b[j] < a[i]:
            merged.append(b[j])
            j += 1
        else:
            merged.append(a[i])
            i += 1
    # One side is exhausted; append the remainder of the other in bulk.
    merged.extend(a[i:])
    merged.extend(b[j:])
    return merged
import os
def get_folder_path(folder_name):
    """
    Return the path of a temp folder named *folder_name* under the current
    directory, creating the folder if it does not exist.
    """
    folder_path = os.path.join(".", str(folder_name))
    # makedirs(exist_ok=True) avoids the check-then-create race of
    # os.path.exists() followed by os.mkdir().
    os.makedirs(folder_path, exist_ok=True)
    return folder_path
def split_string_by_fields(string, fields):
    """
    Helper function to split a string by a set of ordered strings,
    primarily used for Maccor metadata parsing.
    >>>split_string_by_fields("first name: Joey last name Montoya",
    >>>                       ["first name:", "last name"])
    [" Joey ", " Montoya"]
    Args:
        string (str): string input to be split
        fields (list): list of fields to split input string by.
    Returns:
        list: substrings corresponding to the split input strings
            (surrounding whitespace is preserved).
    """
    substrings = []
    # Text before the first field is discarded; each later field delimits
    # the value that belongs to the field preceding it.
    _, remainder = string.split(fields[0])
    for field in fields[1:]:
        before, remainder = remainder.split(field)
        substrings.append(before)
    substrings.append(remainder)
    return substrings
def footer(id1, id2=None):
    """
    Build SMT formula footer
    Args:
        id1 (str): ID of policy 1 in SMT formula
        id2 (str, optional): ID of policy 2 in SMT formula. Defaults to None.
    Returns:
        str: SMT footer
    """
    lines = ['(assert {}.allows)'.format(id1)]
    if id2:
        lines.append('(assert (or {0}.denies {0}.neutral))'.format(id2))
    lines.append('(check-sat)')
    lines.append('(get-model)')
    # Every statement ends with a newline, including the last one.
    return '\n'.join(lines) + '\n'
def area_square(length):
    """
    Calculates the area of a square.
    Parameters
    ----------
    length (float or int) length of one side of a square
    Returns
    -------
    area (float) - area of the square
    """
    area = length ** 2
    return area
def select(func):
    """
    select :: (a -> Bool) -> [a] -> [a]
    Curried filter: returns a mapper that keeps elements satisfying *func*.
    A non-list argument is treated as a single-element input.
    Similar to builtins.filter()
    """
    def apply(data):
        items = data if isinstance(data, list) else [data]
        return [item for item in items if func(item)]
    return apply
def create_final_conv(image_sizes, h_out=1, w_out=1):
    """
    Compute conv parameters that reduce the last layer's output to
    (h_out, w_out), using the shape formula from
    https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html#torch.nn.Conv2d
    ----------
    image_sizes : the h,w of the output of each convolution layer in a list
    Returns (h_out, w_out, padding, kernel, stride).
    """
    last_h = image_sizes[-1][0]
    padding = 0
    # Stride/kernel are derived from the height only (square inputs assumed
    # by the original — TODO confirm for non-square feature maps).
    stride = last_h // h_out
    kernel = last_h - stride * (h_out - 1)
    return h_out, w_out, padding, kernel, stride
def calculate_time(cents_per_kWh, wattage, dollar_amount):
    """ Returns the time (in hours) that it would take to reach the monetary price (dollar_amount)
    given the cost of energy usage (cents_per_kWh) and power (wattage) of a device using that energy.
    """
    # 1e5 converts cents->dollars (1e2) and W->kW (1e3) in one factor.
    dollars_per_kwh_inverse = 1 / cents_per_kWh * 1e5
    return dollars_per_kwh_inverse * dollar_amount / wattage
def flatten(filenames):
    """Takes a list which may contain other lists and returns a single,
    flattened list
    Args:
        filenames (list): list of iterables of filenames
    Returns:
        flattened list of filenames (one level of nesting removed)
    """
    flat_filenames = []
    for group in filenames:
        flat_filenames.extend(group)
    return flat_filenames
def vals_vec_from_lmfit(lmfit_params):
    """Return Python list of parameter values from LMFIT Parameters object."""
    return [param.value for param in lmfit_params.values()]
def _get_yaml_path(path, parameter):
    """Compose the parameter path following the YAML Path standard.

    Falsy ``path``/``parameter`` arguments are skipped entirely.

    Standard: https://github.com/wwkimball/yamlpath/wiki/Segments-of-a-YAML-Path#yaml-path-standard
    """
    segments = list(path) if path else []
    if parameter:
        segments.append(parameter)
    return ".".join(segments)
import os
import warnings
def filter_valid_images(df, img_directory, x_col, img_format):
    """Keep only dataframe rows whose image file exists on disk.
    # Arguments
        df: Pandas dataframe containing filenames in a column
        img_directory: directory holding the image files
        x_col: string, column in `df` that contains the filenames (without extension)
        img_format: image file extension, e.g. "png"
    # Returns
        the filtered dataframe (rows with missing files dropped)
    """
    def to_path(fname):
        return os.path.join(img_directory, f'{fname}.{img_format}')

    # Only file existence is checked; image-content validation was dropped
    # for performance (see the commented is_valid_image note upstream).
    exists_mask = df[x_col].map(to_path).apply(os.path.isfile)
    n_invalid = (~exists_mask).sum()
    if n_invalid:
        warnings.warn(f'Found {n_invalid} invalid image filename(s) in x_col="{x_col}".'
                      f' These filename(s) will be ignored.')
    return df[exists_mask]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.