content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def xf_screenname(name):
    """Ensure a user screen name is prefixed with '@'.

    Args:
        name (str): Screen name, with or without the leading '@'.

    Returns:
        str: The name, guaranteed to start with '@'.
    """
    # startswith also handles the empty string; the original indexed
    # name[0] and raised IndexError on ''.
    return name if name.startswith('@') else '@' + name
|
6229a22907c4b1c11b75f5cd0218e056d46111eb
| 14,343
|
import ast
def p_pyatom_dict(p, kv_pairs=2): # [key: value, ...], [:]
    """pyatom : LBRACKET dictents RBRACKET
    pyatom : LBRACKET COLON RBRACKET"""
    # NOTE: the docstring above is the grammar rule consumed by the parser
    # generator and must not be reworded.
    if kv_pairs == ':':
        # the empty-dict literal form "[:]"
        keys, values = [], []
    else:
        # unzip the (key, value) pairs into parallel lists
        keys, values = map(list, zip(*kv_pairs))
    return lambda: ast.Dict(keys, values)
|
81a50745298106c9919c080629851fb57b5f1d1f
| 14,344
|
import torch
def separation_loss_p(x, delta):
"""Computes the separation loss.
Args:
xyz: [batch, num_kp, 3] Input keypoints.
delta: A separation threshold. Incur 0 cost if the distance >= delta.
Returns:
The seperation loss.
"""
bs = x.size(0)
num_kp_p = x.size(1)
t1_p = x.repeat(1, num_kp_p, 1)
t2_p = x.repeat(1, 1, num_kp_p).view(t1_p.size())
diffsq_p = (t1_p - t2_p) ** 2
# -> [batch, num_kp ^ 2]
lensqr_p = torch.sum(diffsq_p, dim=2)
return torch.sum(torch.max(delta-lensqr_p, torch.zeros(lensqr_p.size()).float().cuda())) / (float(num_kp_p * bs * 2))
|
f277c84ecd14fe50b967937bb8b4f8d5f2b98118
| 14,347
|
def scan_object( uri, resource, partial_path ):
    """
    Recursively scans a resource object for a matching URI.

    Args:
        uri: The URI to locate
        resource: The dictionary to scan
        partial_path: List of strings; the path built up so far for the
            current resource (mutated in place while descending)

    Returns:
        True if there's a match; False otherwise
    """
    # Properties known to never contain subordinate resources
    skipped_properties = [ "Links", "PoweredBy", "CooledBy", "RelatedItem", "OriginOfCondition", "MaintenanceWindowResource", "RedundancySet", "OriginResources" ]
    for prop_name, prop_value in resource.items():
        # A matching @odata.id means this is the resource we want
        if prop_name == "@odata.id" and prop_value == uri:
            return True
        if prop_name in skipped_properties:
            continue
        # Descend into nested objects
        if type( prop_value ) is dict:
            partial_path.append( prop_name )
            if scan_object( uri, prop_value, partial_path ):
                return True
            # No match; back out of this path segment
            partial_path.pop()
        # Descend into each object contained in an array
        if type( prop_value ) is list:
            partial_path.append( prop_name )
            for element in prop_value:
                if type( element ) is dict:
                    if scan_object( uri, element, partial_path ):
                        return True
            # No match; back out of this path segment
            partial_path.pop()
    # Nothing in this object matched
    return False
|
f52d754b38da43ef345596353ae96009d9bf89fb
| 14,349
|
import os
def get_editor(repo):
    """
    Return the user's preferred editor.

    Resolution order: git's core.editor config, then the GIT_EDITOR,
    VISUAL and EDITOR environment variables, finally falling back to "vi".
    """
    for candidate in (repo.git.config("core.editor"),
                      os.environ.get("GIT_EDITOR"),
                      os.environ.get("VISUAL")):
        if candidate:
            return candidate
    return os.environ.get("EDITOR", "vi")
|
af29d1e52afea67d90460c0727a742b7d8146a78
| 14,351
|
def json_return(code, message='', data=None):
    """
    Build the unified dict format for API responses.

    :param code: status code of the response.
    :param message: human-readable message (default '').
    :param data: payload dict; a fresh empty dict when omitted.
    :return: dict with 'code', 'msg' and 'data' keys.
    """
    # Use None as the default instead of a mutable {} shared across calls.
    if data is None:
        data = {}
    return {'data': data, 'msg': message, 'code': code}
|
70134c28c54d910c634c0ffd14b2416072ec730a
| 14,352
|
def expected(player1, player2):
    """
    Return the expected score of player1 vs player2 under the Elo model.

    Args:
        player1 (BaseAgent): Agent for whom the score is being calculated.
        player2 (BaseAgent): Agent against whom player1 played.

    Returns:
        float: Expected score of the matchup, in (0, 1).
    """
    elo_gap = player2.elo - player1.elo
    return 1 / (1 + 10 ** (elo_gap / 400))
|
90551b04b15ce62a1d2c5d7022d9402b3efdba60
| 14,354
|
from warnings import warn
from functools import reduce
def rreduce(fn, seq, default=None):
    """'readable reduce': arity-dispatching wrapper around functools.reduce.

    With no ``default`` the two-argument form of reduce is used; otherwise
    ``default`` seeds the fold as the initializer.
    """
    warn(DeprecationWarning(
        "rreduce is deprecated and will be removed in future versions"))
    if default is not None:
        # three-argument form
        return reduce(fn, seq, default)
    # two-argument form
    return reduce(fn, seq)
|
bb82cdf712c87885103d61f7c37fa588e8bdac6b
| 14,355
|
def get_predictors(results):
    """Build the design matrix and targets for the stay/switch logistic regression.

    Each consecutive trial pair contributes one row: the intercept, the
    (signed) reward and transition of the earlier trial, their interaction,
    and whether the choice was repeated on the next trial.
    """
    assert len(results) == 200
    xs, ys = [], []
    earlier_rows = results.iloc[:-1].iterrows()
    later_rows = results.iloc[1:].iterrows()
    for (_, earlier), (_, later) in zip(earlier_rows, later_rows):
        transition = 2 * int(earlier.common) - 1
        reward = 2 * earlier.reward - 1
        xs.append([1, reward, transition, reward * transition])
        ys.append(int(earlier.choice == later.choice))
    return xs, ys
|
240a0d32221e32eda58b86568b5975cdcdd3c475
| 14,357
|
import re
def check_password_format(password):
    """Validate a password's format.

    The password must consist of 8-16 word characters ([A-Za-z0-9_]) and
    contain at least one digit and one lowercase letter.

    Args:
        password (str): password to validate.

    Returns:
        boolean: True when the format is acceptable, False otherwise.
    """
    # fullmatch enforces the 16-character upper bound; the original used
    # match() with an unanchored pattern, so any password longer than 16
    # characters was accepted as long as it began with 8+ word characters.
    if re.fullmatch(r'\w{8,16}', password):
        if re.search(r'[0-9]', password) and re.search(r'[a-z]', password):
            return True
    return False
|
b0923972f22b08298df8aa9c83314787ebefb8a8
| 14,358
|
def corpus(request):
    """
    Indirect-fixture helper: executes the underlying fixture logic.

    Args:
        request: The pytest indirect request object; its ``param`` holds
            the actual fixture callable.

    Returns:
        Whatever the wrapped fixture callable produces.
    """
    fixture_fn = request.param
    return fixture_fn()
|
3c6986296a17145bc3b40cc84e2b62acb5c8f00c
| 14,359
|
from math import floor, log10
def findSizeInt(number):
    """
    Return the number of decimal digits in a number.

    The sign is ignored and the value is truncated to an integer first;
    zero counts as a single digit.

    :param number: the number to be measured.
    :return: the count of digits of the number entered.
    """
    magnitude = abs(int(number))
    if magnitude == 0:
        return 1
    return floor(log10(magnitude)) + 1
|
8b174183520337f31f17bfb4163d5ed5ff90e896
| 14,360
|
import operator
def _get_operator(comp_str):
"""Returns the operator function corresponding to the given comparison.
Args:
comp_str: str. One of: '<', '<=', '=', '>=', '>'.
Returns:
callable. The binary operator corresponding to the comparison.
Raises:
ValueError. The comparison is not supported.
"""
if comp_str == '<':
return operator.lt
elif comp_str == '<=':
return operator.le
elif comp_str == '=':
return operator.eq
elif comp_str == '>=':
return operator.ge
elif comp_str == '>':
return operator.gt
else:
raise ValueError('Unsupported comparison operator: %s' % comp_str)
|
123ca1e2be8abf81387fb5d2ffa559082116b959
| 14,361
|
def clean_text_simple(string):
    """
    Strip simple color markup: every 0x19 marker byte and the single
    character following it (e.g. the '8' in a color code) is removed.
    """
    cleaned = string
    marker = cleaned.find('\x19')
    while marker != -1:
        # drop the marker together with its one-character argument
        cleaned = cleaned[:marker] + cleaned[marker + 2:]
        marker = cleaned.find('\x19')
    return cleaned
|
a067f164be5677f6fb2029d5b839849ca50a230e
| 14,362
|
import math
def knn(pnts, p, k):
    """
    Return the k points from ``pnts`` nearest to ``p`` (2-D Euclidean).

    :param pnts: list of points
    :param p: reference point
    :param k: amount of neighbours
    :return: list of the k nearest points, closest first
    """
    def dist_to_ref(pt):
        return math.sqrt((pt[0] - p[0]) ** 2 + (pt[1] - p[1]) ** 2)
    return sorted(pnts, key=dist_to_ref)[:k]
|
2da3d4481db78910548eee04e0532e701e4c4201
| 14,363
|
import requests
from bs4 import BeautifulSoup
def scrape(start, end, logging=True):
    """
    Scrape all reviews from dealerrater.com for the McKaig
    Chevrolet Buick dealership.

    Parameters:
        start: the first page of reviews to scrape
        end: the last page of reviews to scrape (inclusive)
        logging: when True, print per-page progress

    Returns:
        texts: a list of strings that are the reviews from the website
    """
    texts = []
    for page in range(start, end + 1):
        if logging:
            print("Scraping page" + str(page) + "...")
        url = "https://www.dealerrater.com/dealer/McKaig-Chevrolet-Buick-A-Dealer-For-The-People-dealer-reviews-23685/page" + str(page) + "/?filter=ALL_REVIEWS#link"
        response = requests.get(url)
        soup = BeautifulSoup(response.content, "html.parser")
        # Collect every review paragraph on this page
        texts.extend(paragraph.get_text() for paragraph in soup.select("p.review-content"))
    return texts
|
03dfd059a82c4c56dec47d772ef2e6ade905fac7
| 14,364
|
import os
import sys
import six
import gzip
import shutil
def maybe_download(filename, data_directory, source_url):
    """Download the data from source url, unless it's already here.

    Args:
        filename: string, name of the file in the directory.
        data_directory: string, path to the directory holding the data.
        source_url: url to download from if file doesn't exist.

    Returns:
        Path to the resulting (extracted) file.
    """
    if not os.path.exists(data_directory):
        print('Not found data directory, create directory ', data_directory)
        os.makedirs(data_directory)
    filepath = os.path.join(data_directory, filename)

    def download_progress(count, block_size, total_size):
        # single-line progress indicator for urlretrieve
        sys.stdout.write("\r>> Downloading %s %.1f%%" % (filename, float(count * block_size) / float(total_size) * 100.))
        sys.stdout.flush()

    if os.path.isfile(filepath):
        # already downloaded and extracted -- nothing to do
        print("file {} already download and extracted.".format(filename))
        return filepath
    elif os.path.isfile(filepath+'.gz'):
        # archive present but not yet extracted
        print("file {} already download, now extract it.".format(filepath+'.gz'))
    else:
        print('Not found {}, world downloaded from {}'.format(filepath, source_url))
        # BUG FIX: the original rebound `filepath` to urlretrieve's return
        # value (the '.gz' path), so the extraction below then opened
        # '<file>.gz.gz' and wrote the output over the archive path.
        # Download to '<filepath>.gz' and keep `filepath` as the
        # extraction target.
        six.moves.urllib.request.urlretrieve(source_url, filepath+'.gz', download_progress)
        print()
        print('Successfully Downloaded', filename)
    with gzip.open(filepath+'.gz', 'rb') as f_in, open(filepath, 'wb') as f_out:
        print('Extracting ', filename)
        shutil.copyfileobj(f_in, f_out)
    print('Successfully extracted')
    return filepath
|
a48989a2b8d21aa7d4c56bbf16b6731de75b5df1
| 14,365
|
import re
def multiple_replace(string, replacements):
    """
    Given a string and a dictionary of replacements in the format:
        { <word_to_replace>: <replacement>, ... }
    make all the replacements in a single pass and return the new string.

    From: http://stackoverflow.com/questions/2400504/
    """
    # An empty mapping would compile to the empty pattern, which matches
    # at every position and then fails the replacement lookup.
    if not replacements:
        return string
    # Escape the keys so words containing regex metacharacters ('.', '+',
    # '(' ...) are matched literally; the original compiled them raw,
    # which could crash or match the wrong text.
    pattern = re.compile('|'.join(map(re.escape, replacements.keys())))
    return pattern.sub(lambda m: replacements[m.group()], string)
|
bdaf05f2f9c5de2c0742c12219e2984ed3e7699e
| 14,366
|
import argparse
def get_args():
    """
    Parse command-line arguments for the LeNet-5 model generator/runner.

    Flags: --mode (train/test/deploy, required), --cores, --load_model,
    and the train-only options --save_top, --batch_size, --epoch,
    --learning_rate and --traning_percent.
    """
    parser = argparse.ArgumentParser(description='LeNet-5 model generator/runner')
    # (short flag, long flag) -> add_argument keyword options
    flag_specs = [
        (('-m', '--mode'), dict(help='choose between train/test/deploy', required=True)),
        (('-c', '--cores'), dict(help='select core count to use, default is 1', type=int, default=1)),
        (('-lm', '--load_model'), dict(help='path to pre-trained model')),
        (('-sb', '--save_top'), dict(help='selected number will be used for saving top <count> models while training, default is 3', type=int, default=3)),
        (('-b', '--batch_size'), dict(help='batch size for traing, default is 128', type=int, default=128)),
        (('-e', '--epoch'), dict(help='epoch size, default is 10', type=int, default=10)),
        (('-lr', '--learning_rate'), dict(help='learning rate for the model, default is 0.001', type=float, default=0.001)),
        # NOTE(review): '--traning_percent' is misspelled, but the resulting
        # attribute name is part of the public interface, so it is kept.
        (('-tp', '--traning_percent'), dict(help='give a percentage between 0-100 to split train/test data, default is 80', type=int, default=80)),
    ]
    for flags, options in flag_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
|
a508679f14943afd801b40562c0d9182326d5e44
| 14,370
|
from typing import Dict
from pathlib import Path
from typing import Optional
import csv
def write_results(
    result_dict: Dict[str, Dict[str, str]],
    trans_dict: Dict[str, str],
    input_filepath: Path,
    output_filepath: Path,
    write_csv: Optional[bool] = False,
) -> Dict[str, Dict[str, str]]:
    """
    Returns processed output by combining result_dict (predicted values) and trans_dict (compound IDs).
    Optionally writes results to a CSV file.

    NOTE(review): ``input_filepath`` is accepted but never used here —
    confirm whether callers rely on it.
    """
    # obtaining all possible column names (only the first 10**4 rows are
    # scanned; later rows are assumed to introduce no new columns)
    acd_columns = []
    counter = 0
    for key, value in result_dict.items():
        for col, value1 in value.items():
            if col not in acd_columns:
                acd_columns.append(col)
        counter += 1
        if counter == 10 ** 4:
            break
    # filling in missing columns so every row carries the full column set
    for key, value in result_dict.items():
        for col in acd_columns:
            if col not in value:
                result_dict[key][col] = "NaN"
    # translating ID back to original IDs as provided in input file
    trans_result_dict = {}
    for cp_id, props in result_dict.items():
        trans_result_dict[trans_dict[cp_id]] = props
    # writing to csv
    if write_csv is True:
        acd_columns.append("compound_id")
        with open(output_filepath, "w") as f:
            w = csv.DictWriter(f, acd_columns)
            w.writeheader()
            for k in trans_result_dict:
                # NOTE(review): "compound_id" is never a key of the row
                # dict, so .get() yields None and `or k` fills in the
                # compound ID for that column — but this also replaces any
                # falsy cell value (e.g. "" or 0) with the ID; confirm
                # that this substitution is intended.
                w.writerow(
                    {col: trans_result_dict[k].get(col) or k for col in acd_columns}
                )
    return trans_result_dict
|
a791f47c9d16b451db14cd599cf2f15f04f0637c
| 14,371
|
def mass_function_abc(m, a, b, c):
    """Parametrized suppression factor applied to the halo mass function."""
    ratio = a / m
    return (1 + ratio ** b) ** c
|
d4e0b1d39a67baa121a28fc644b75c9bfd714c5e
| 14,373
|
def upload_album(
    self,
    photos,
    caption=None,
    upload_id=None,
    from_video=False,
    options=None,
    user_tags=None,
):
    """Upload album to Instagram

    @param photos     List of paths to photo files (List of strings)
    @param caption    Media description (String)
    @param upload_id  Unique upload_id (String). When None, then
                      generate automatically
    @param from_video A flag that signals whether the photo is loaded from
                      the video or by itself (Boolean, DEPRECATED: not used)
    @param options    Object with difference options, e.g.
                      configure_timeout, rename (Dict). Defaults to an
                      empty dict. Designed to reduce the number of
                      function arguments! This is the simplest request
                      object.
    @param user_tags

    @return The API result on success, False otherwise
    """
    self.small_delay()
    # None default instead of the original mutable {} shared across calls.
    if options is None:
        options = {}
    result = self.api.upload_album(
        photos, caption, upload_id, from_video, options=options, user_tags=user_tags
    )
    if not result:
        self.logger.info("Photos are not uploaded.")
        return False
    self.logger.info("Photo are uploaded.")
    return result
|
8b9e0a92c973ea90018f1356c44fe6e9047229f3
| 14,374
|
def retain(foo):
    """Late-binding wrapper for bound methods, needed for live source code
    manipulation.

    Returns a callable that looks up ``foo``'s name on its instance at
    call time, so previously created references keep working after the
    method object has been edited (and therefore overridden) in Ryven.
    """
    def call_latest(*args, **kwargs):
        bound = getattr(foo.__self__, foo.__name__)
        return bound(*args, **kwargs)
    return call_latest
|
f615cd295648bc0ef266e9e1f25e60985cc1a1d5
| 14,375
|
import json
def loadMetadata(fn):
    """
    Load Metadata JSON File

    Parameters:
    -------------
    fn : str - filename

    Returns:
    -------------
    data : dict
    """
    # Context manager closes the handle deterministically; the original
    # passed an open() result straight to json.load and leaked it.
    with open(fn) as fh:
        return json.load(fh)
|
4fa9aceed53cab0c076d4bdc474a08ebccae5ef6
| 14,377
|
def update_tuple(origin_tuple, update_value, update_index):
    """Return a copy of a tuple/namedtuple with the element at
    ``update_index`` replaced by ``update_value``.

    Raises ValueError when the input is not a tuple or the index is out
    of range.
    """
    # Namedtuple inherits from tuple, so both are accepted here.
    if not isinstance(origin_tuple, tuple):
        raise ValueError("Only tuple/namedtuple supported. Origin_tuple type: "
                         "%s." % type(origin_tuple))
    if update_index >= len(origin_tuple):
        raise ValueError("Update index is out of range. Length of original tuple "
                         "%s, Update index: %s." %
                         (len(origin_tuple), update_index))
    new_values = [update_value if pos == update_index else element
                  for pos, element in enumerate(origin_tuple)]
    # A namedtuple is a direct subclass of tuple; rebuild through its
    # positional constructor so the field names are preserved.
    bases = type(origin_tuple).__bases__
    if len(bases) == 1 and bases[0] == tuple:
        return type(origin_tuple)(*new_values)
    return tuple(new_values)
|
1956777840fe30db01f1a4526e8e79d0ceac8534
| 14,378
|
def get_variation(variation_key, variations_dict, defaults_dict):
    """Convert a stored string such as '0 100' into a tuple of two integers.

    When the stored value does not parse into exactly two integers, the
    value from ``defaults_dict`` is returned instead. Currently used for
    the variation data of the multi-steps form A/B experiment.
    """
    try:
        # Anything that is not whitespace-separated integers should throw.
        raw = variations_dict.get(variation_key).strip()
        parsed = tuple(int(piece) for piece in raw.split())
        if len(parsed) != 2:
            raise ValueError('The format is incorrect. Expected "{int} {int}"')
        return parsed
    except Exception as e:
        print('Something went wrong with AB test configuration: {0}'.format(e))
        print('Falling back to default values.')
        return defaults_dict.get(variation_key)
|
24e069fabcb9bd4dbfbd3c1ea17e859f27fdcceb
| 14,379
|
def standardize(train_data, test_data):
    """
    Z-score both data sets using the *training* mean and standard
    deviation, so the test set sees no leakage of its own statistics.

    Returns the standardized (train_data, test_data) pair.
    """
    mu = train_data.mean()
    sigma = train_data.std()
    return (train_data - mu) / sigma, (test_data - mu) / sigma
|
b66a363cd37f7ebc2accd021866b7d34c415f528
| 14,380
|
import torch
def spread_feature(container, learned_uv, feature, mask1c):
    """
    Scatter per-pixel features into a UV container image.

    :param container: B,C,R,R feature container to write into
    :param learned_uv: B,2,H,W predicted uv coordinates
        (assumed to lie in [0, 1] -- the clamp below guards stragglers;
        TODO confirm upstream normalization)
    :param feature: B,C,H,W aligned with latent uv map
    :param mask1c: B,1,H,W used to mask latent uv and feature
    :return: container with the masked features scattered at their uv cells
    """
    # mask is expected to be binary / at most 1
    assert float(mask1c.max()) < (1.0 + 1e-9)
    assert container.shape[1] == feature.shape[1]
    c = container.shape[1]
    res = container.shape[2]
    # zero out uv coordinates and features wherever the mask is off
    _learned_uv = learned_uv * mask1c.repeat(1, 2, 1, 1)
    _feature = feature * mask1c.repeat(1, c, 1, 1)
    # quantize uv to integer grid indices in [0, res - 1]
    learned_uv = torch.clamp((_learned_uv * res).long(), 0, res - 1)
    learned_uv = learned_uv.reshape(learned_uv.shape[0], 2, -1)
    # flatten (u, v) index pairs into linear indices: u * res + v
    learned_uv = learned_uv[:, 0, :] * res + learned_uv[:, 1, :]  # B, R*R
    # replicate the index across the channel dimension for scatter
    learned_uv = learned_uv.unsqueeze(1).repeat(1, c, 1)  # B,C,R*R
    container = container.reshape(container.shape[0], container.shape[1], -1)
    # write the masked features into the flattened container cells
    container = container.scatter(2, learned_uv, _feature.reshape(feature.shape[0], c, -1))
    container = container.reshape(container.shape[0], container.shape[1], res, res)
    return container
|
38f7602124792bf3f0e3395cd8952aec428ab43e
| 14,381
|
def init(id, cfg):
    """Previous-version init hook; always reports success.

    ..note:: Still supported for backwards compatibility when the
             init_standard function is missing. When init_standard is
             present this function SHOULD be omitted to avoid confusing
             the reader.
    """
    return True
|
8076f694908bbe8edb8e0297d62a35b26fc1afcb
| 14,383
|
import math
def radiacion_solar_onda_larga(tmax, tmin, p_vapor_real, r_solar, r_solar_cielo_despejado):
    """
    Net outgoing longwave radiation in MJ/m2 per day.

    :param tmax: maximum temperature
    :param tmin: minimum temperature
    :param p_vapor_real: actual vapour pressure
    :param r_solar: solar (shortwave) radiation
    :param r_solar_cielo_despejado: clear-sky solar radiation
    """
    # Stefan-Boltzmann term averaged over the max/min absolute temperatures
    stefan_term = 4.903 * (10 ** -9) * ((tmax + 273.16) ** 4 + (tmin + 273.16) ** 4)
    # air humidity correction
    humidity_term = 0.34 - 0.14 * math.sqrt(p_vapor_real)
    # cloudiness correction from relative shortwave radiation
    cloud_term = 1.35 * (r_solar / r_solar_cielo_despejado) - 0.35
    return stefan_term * humidity_term * cloud_term / 2
|
5404500ff08e5a886f3e000fd8d301faec78cff7
| 14,384
|
def requirements(filename):
    """Read a requirements file, returning its non-blank lines stripped."""
    with open(filename) as fh:
        return [line.strip() for line in fh if line.strip()]
|
81e5fa3d2a11b9152be6f55ab879b1875fd0b07d
| 14,385
|
def branch_uptodate(branch, true_on_missing_origin=True):
    """Report whether ``branch`` is up to date with its upstream.

    A branch with no upstream counts as up to date when
    ``true_on_missing_origin`` is set, and as stale otherwise.
    """
    if not branch['upstream']:
        return bool(true_on_missing_origin)
    # up to date only when neither ahead of nor behind the upstream
    return not (branch['ahead'] or branch['behind'])
|
2c0db03ea469b6f75e94ab6a73ace360e73948a3
| 14,386
|
import hashlib
import base64
def encode(text):
    """URL-safe base64 encoder with an appended blake2s checksum.

    Args:
        text (str): String to be encoded.

    Returns:
        str: urlsafe base64 of "<text>::<8-byte blake2s hexdigest>".
    """
    checksum = hashlib.blake2s(f"{text}".encode("utf-8"), digest_size=8).hexdigest()
    payload = f"{ text }::{ checksum }".encode("utf-8")
    return base64.urlsafe_b64encode(payload).decode("utf-8")
|
f9da7bf0ba9455fd37277c8617332d78b11223bd
| 14,387
|
import argparse
def parse_arguments():
    """Parse command line arguments for pypgmon.

    Returns:
        argparse.Namespace carrying the ``logger`` and ``targets``
        configuration file paths.
    """
    parser = argparse.ArgumentParser(prog='pypgmon')
    parser.add_argument('-l', '--logger', default='pypgmon/conf/logger.yml')
    parser.add_argument('-t', '--targets', default='pypgmon/conf/targets.yml')
    return parser.parse_args()
|
d9f571335611b3170a78dfd03b349ecb56784157
| 14,388
|
def multiply_ith_dimension(Pi, i, X):
    """Multiply matrix Pi along the ith dimension of X and return the result."""
    # bring dimension i to the front so the matrix product applies to it
    moved = X.swapaxes(0, i)
    original_shape = moved.shape
    flattened = moved.reshape((original_shape[0], -1))
    product = Pi @ flattened
    # restore the trailing dimensions (the leading one may have changed
    # size if Pi is not square), then move dimension i back into place
    restored = product.reshape((Pi.shape[0], *original_shape[1:]))
    return restored.swapaxes(0, i)
|
98edc89389910d4361232350b0ad440fd785f74a
| 14,389
|
import six
import socket
import contextlib
def is_port_open(port_or_url, timeout=1):
    """Check if a TCP port is open.

    Accepts either a bare port number (probed on 127.0.0.1) or a URL
    string, from which host and port are extracted.
    """
    if isinstance(port_or_url, six.string_types):
        parsed = six.moves.urllib.parse.urlparse(port_or_url)
        host, port = parsed.hostname, parsed.port
    else:
        host, port = "127.0.0.1", port_or_url
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with contextlib.closing(sock):
        sock.settimeout(timeout)
        # connect_ex returns 0 on a successful connection
        return sock.connect_ex((host, port)) == 0
|
ad8d6e80cc2eaee6a5955f4f5bc0e69cac8f60b2
| 14,390
|
import sys
def get_name(category, gender, choice):
    """
    Resolve the mp3 filename for the requested category, gender and choice.

    :param category: Type of file. Can be "number", "reason", or "ending".
    :param gender: 'm' or 'f' (ignored for "number").
    :param choice: For "number", any digit '0'-'9'; for "reason"/"ending",
        a letter choice valid for the specified gender.
    :return: The name of the requested file as a string. On any invalid
        input an error is printed and the program exits.
    """
    def fail(message):
        # every invalid input prints a diagnostic and terminates
        print(message)
        sys.exit()

    if category == "number":
        if choice in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"):
            return choice + ".mp3"
        fail("Get Name Error! (Invalid number)")
    elif category in ("reason", "ending"):
        tables = {
            "reason": {
                "m": {'a': "m-r1-building.mp3", 'b': "m-r2-cracking_walnuts.mp3",
                      'c': "m-r3-polishing_monocole.mp3", 'd': "m-r4-ripping_weights.mp3"},
                "f": {'a': "f-r1-ingesting_old_spice.mp3", 'b': "f-r2-listening_to_reading.mp3",
                      'c': "f-r3-lobster_dinner.mp3", 'd': "f-r4-moon_kiss.mp3", 'e': "f-r5-riding_a_horse.mp3"},
            },
            "ending": {
                "m": {'a': "m-e1-horse.mp3", 'b': "m-e2-jingle.mp3", 'c': "m-e3-on_phone.mp3",
                      'd': "m-e4-swan_dive.mp3", 'e': "m-e5-voicemail.mp3"},
                "f": {'a': "f-e1-she_will_get_back_to_you.mp3", 'b': "f-e2-thanks_for_calling.mp3"},
            },
        }
        invalid_choice_msg = {
            "reason": "Get Name Error! (Invalid reason)",
            "ending": "Get Name Error! (Invalid ending)",
        }[category]
        by_gender = tables[category]
        if gender not in by_gender:
            fail("Get Name Error! (Invalid gender)")
        names = by_gender[gender]
        if choice in names:
            return names[choice]
        fail(invalid_choice_msg)
    else:
        fail("Get Name Error! (Invalid category)")
|
db939f515ea9696e68ae12524b1a8b8783d2f150
| 14,392
|
def should_sync_locations(last_sync, location_db):
    """
    Determine if any locations (already filtered to be relevant
    to this user) require syncing.

    Sync is required when no previous sync exists, or when any location
    lacks a modification stamp or was modified at/after the last sync.
    """
    if not (last_sync and last_sync.date):
        return True
    return any(
        not loc.last_modified or loc.last_modified >= last_sync.date
        for loc in location_db.by_id.values()
    )
|
d61612473f2842f69628d273198549a55abf66be
| 14,393
|
def get_feature_dimensions(parameters):
    """Return the dimensions (`int`s) of all node features.

    Returns the tuple (n_atom_types, n_formal_charge, n_numh, n_chirality).
    """
    n_atom_types = len(parameters["atom_types"])
    n_formal_charge = len(parameters["formal_charge"])
    # implicit-H counts are only featurized when hydrogens are neither
    # explicit nor ignored
    uses_numh = not parameters["use_explicit_H"] and not parameters["ignore_H"]
    n_numh = int(uses_numh) * len(parameters["imp_H"])
    n_chirality = int(parameters["use_chirality"]) * len(parameters["chirality"])
    return n_atom_types, n_formal_charge, n_numh, n_chirality
|
f1409d1dc2f4785bb2c2452b9bc08395a598f16d
| 14,394
|
def removeGenesWithExcessiveReplicationVariance(df_X, max_var=None):
    """
    Removes Genes with excessive variation in the variance of their
    trinary values. Assumes a single digit replication.

    Parameters
    ----------
    df_X: DataFrame
        index: str <instance>.replication-digit
            ex: T10.2
    max_var: float
        Threshold on the per-gene summed replicate standard deviation;
        genes above it are dropped. When None, no filtering is done.

    Returns
    -------
    DataFrame (Trinary features)
    """
    df = df_X.copy()
    if max_var is None:
        return df
    #
    # Strip the trailing ".<digit>" replication suffix so replicates of
    # the same instance group together (assumes exactly two trailing
    # characters -- single-digit replication).
    df.index = [i[0:-2] for i in df_X.index]
    df = df.sort_index()
    # Per gene (column): std across replicates within each instance,
    # summed over instances.
    ser = df.groupby(df.index).std().sum()
    ser = ser.sort_values()
    # Keep only the genes whose variability is within the allowed maximum.
    ser_sub = ser[ser <= max_var]
    columns = list(ser_sub.index)
    return df_X[columns]
|
e6b6950e2694792e03940caadbeda85c8e32717c
| 14,395
|
def _py_lazy_and(cond, b):
"""Lazy-eval equivalent of "and" in Python."""
return cond and b()
|
872f382ac72d8253c61043dfe146d05775b4748d
| 14,397
|
def parse_id(hardware_id):
    """Parse a Nuki hardware ID into its uppercase hex representation."""
    # take everything after the '0x' prefix of the hex literal
    return hex(hardware_id).partition("x")[2].upper()
|
483fb1c7a864242335288f53653c461a50eab638
| 14,399
|
from typing import Dict
from typing import Any
from typing import OrderedDict
def sort_dict(item: dict) -> Dict[str, Any]:
    """
    Recursively sort a (possibly nested) dict by key into an OrderedDict.

    Example: {"b": 1, "a": {"c": 1, "b": 2}} becomes
    OrderedDict([('a', OrderedDict([('b', 2), ('c', 1)])), ('b', 1)]).
    """
    sorted_pairs = []
    for key in sorted(item):
        value = item[key]
        if isinstance(value, dict):
            # sort nested dicts the same way
            value = sort_dict(value)
        sorted_pairs.append((key, value))
    return OrderedDict(sorted_pairs)
|
0f4c042df4ea2f00dbda249f9d2c7c3488824559
| 14,400
|
import json
def load_dict(filename):
    """
    Loads a dictionary stored in JSON format.

    :param filename: path of the JSON file to read.
    :return: the decoded object (typically a dict).
    """
    # Context manager closes the handle promptly; the original passed an
    # open() result straight to json.load and leaked the file object.
    with open(filename) as fh:
        return json.load(fh)
|
88f286417bbdd43d4499e83750c92843a9f6231a
| 14,401
|
def nWaveRiseTime(pmax, patm=101e3, csnd=341, lamb=6.8e-8):
    """
    Calculate N-wave rise time

    Parameters
    ----------
    pmax -- N-wave overpressure amplitude in Pa
    patm -- atmospheric pressure in Pa
    csnd -- speed of sound in m/s
    lamb -- air molecular mean free path in m

    Returns
    -------
    trise -- N-wave rise time in s
    """
    # mean free time of air molecules, scaled by the pressure ratio
    mean_free_time = lamb / csnd
    return mean_free_time * (patm / pmax)
|
cbcae9d8e3ddaeb6daab5a02a722cd7302ebf562
| 14,402
|
from typing import List
import inspect
def get_class_names(module) -> List[str]:
    """Return the sorted names of all classes exposed by ``module``."""
    return sorted(
        member_name
        for member_name, member in inspect.getmembers(module)
        if inspect.isclass(member)
    )
|
4ae36e8823f28d00693eac3d70affe7f0591a745
| 14,404
|
import os
def get_int_filename(filename):
    """ Return the integer value of a filename's stem, or 0 when the stem
    is not a valid integer. """
    stem, _ext = os.path.splitext(filename)
    try:
        return int(stem)
    except ValueError:
        return 0
|
b10d19ad016411a78454bbd010855c6ffb0dac71
| 14,405
|
def find_address_of_type(host_elem, type_):
    """Return the host's address of the given type, or `None` if there
    is no address element of that type.
    """
    query = './address[@addrtype="{}"]'.format(type_)
    match = host_elem.find(query)
    return match.get('addr') if match is not None else None
|
ff821703d076865da3c6efe0d2d760c4fcb2c997
| 14,407
|
import re
def IsValidAtom(atom):
    """Test if an atom conforms to the JHM definition of an atom:
    an optional identifier (letter or underscore, then letters, digits,
    underscores or hyphens). The empty string is accepted.

    Returns a truthy match object on success, None otherwise.
    """
    # The original used re.match() with an unanchored, fully-optional
    # group, which matched the empty prefix of ANY string and therefore
    # accepted every input. fullmatch requires the whole string to
    # conform.
    return re.fullmatch(r"([_a-zA-Z][_\-a-zA-Z0-9]*)?", atom)
|
218e846db1559bb91ab1ab47c5955d43b8274866
| 14,408
|
import re
def fuzzyfinder(user_input, collection):
    """
    Fuzzy matching, to obtain a fuzzy matched list.

    Items containing the input's characters in order (with anything in
    between) are returned, best matches first: shortest matched span,
    then earliest match position, then the item text itself.

    >>> fuzzyfinder("user", ["user_name", "api_user", "school", "email"])
    ['user_name', 'api_user']
    """
    # Escape each character so inputs containing regex metacharacters
    # ('.', '+', '(' ...) are treated literally; the original interpolated
    # them raw, which could crash re.compile or match the wrong items.
    pattern = ".*?".join(re.escape(ch) for ch in user_input)
    regex = re.compile(pattern)
    suggestions = []
    for item in collection:
        match = regex.search(item)
        if match:
            suggestions.append((len(match.group()), match.start(), item))
    return [item for _, _, item in sorted(suggestions)]
|
db01f13f3caf5dc9a21cfb180776c22b589f1a28
| 14,409
|
def bq_token_file_path_exists_mock(token_path):
    """
    Mock for bq_token_file_path_exists that always reports the token
    file as present.
    """
    return True
|
54958444c16c4244ab349d60b0add39723ba6d8b
| 14,410
|
def masa_strugotine(volumen_strugotine, gustoca_materijala):
    """
    Return the chip mass from chip volume and material density.

    NOTE(review): the 1e-9 scale factor presumably converts mm^3 times
    kg/m^3 into kg -- confirm against the callers' units.
    """
    return 1e-9 * volumen_strugotine * gustoca_materijala
|
06e65566ff2a9c13fabe1316a7a290c1145a741c
| 14,411
|
from datetime import datetime
def check_realistic_birth_date(date):
    """
    Return False when the inserted "DD/MM/YYYY" birth date lies in the
    future, True otherwise (today's date counts as realistic).
    """
    day, month, year = (int(part) for part in date.split("/"))
    now = datetime.now()
    # Future day within the current month and year
    future_day = day > now.day and month >= now.month and year == now.year
    # Future month within the current year
    future_month = month > now.month and year == now.year
    # Future year
    future_year = year > now.year
    return not (future_day or future_month or future_year)
|
3b92d322881b9f4dafac6678a1627cec90c8cd50
| 14,414
|
def getBoundingBoxCoordinates(box, height, width):
    """
    Convert normalized bounding-box fractions to pixel coordinates.

    :param box: a Tensor returned of tensorflow object detection prediction of 1 set of ymin, xmin, ymax, xmax
        (each expressed as a fraction of the image size)
    :param height: height of image
    :param width: width of image
    :return: ymin, xmin, ymax, xmax coordinates in image of bounding box
    """
    frac_ymin, frac_xmin, frac_ymax, frac_xmax = box.tolist()
    # scale the fractional coordinates to pixel positions
    return (int(frac_ymin * height), int(frac_xmin * width),
            int(frac_ymax * height), int(frac_xmax * width))
|
14f6e94b68d62893d3235c73c0c4943e6edaf4d2
| 14,415
|
def calc_dh(eta):
    """First derivative of the smooth interpolation function.

    Args:
        eta: the phase field

    Returns:
        dh evaluated at eta, i.e. 30 * eta^2 * (eta - 1)^2; it vanishes
        at both eta = 0 and eta = 1.
    """
    return 30 * eta ** 2 * (eta - 1) ** 2
|
557f3d932c0b380ad291f2af081d1ebe8fcd5baf
| 14,416
|
def parse_http_response(data):
    """ Parses HTTP response data into a tuple in the form (cmd, headers).

    @param data: HTTP response data
    @type data: string
    @return: (status-line fields, {lower-cased header name: value})
    @rtype: tuple
    """
    head, payload = data.split('\r\n\r\n')
    raw_lines = head.split('\r\n')
    # first line is the status line, split into its space-separated fields
    cmd = raw_lines[0].split(' ')
    headers = {}
    for line in raw_lines[1:]:
        # normalize "Name: value" to "Name:value" once, skip blank lines
        line = line.replace(': ', ':', 1)
        if len(line) == 0:
            continue
        parts = line.split(':', 1)
        headers[parts[0].lower()] = parts[1]
    return cmd, headers
|
da4680acd9dd429fdbffbe6927586ba23256286e
| 14,418
|
def isChildUri(parentUri, childUri):
    """Return True, if childUri is a child of parentUri.

    This function accounts for the fact that '/a/b/c' and '/a/b/c/' are
    children of '/a/b' (and also of '/a/b/'), while '/a/b/cd' is NOT a
    child of '/a/b/c'. Falsy inputs short-circuit to a falsy result.
    """
    if not (parentUri and childUri):
        return parentUri and childUri
    parent_prefix = parentUri.rstrip("/") + "/"
    return childUri.rstrip("/").startswith(parent_prefix)
|
a28a386c38a4d6722f6b7c8bcbeafb8092dbbe77
| 14,419
|
def size(b):
    """
    Returns the size in bytes of the first netstring in the provided
    bytes object: length prefix + ':' separator + payload + trailing ','
    (the constant 2 accounts for the separator and terminator).

    WARNING: This function doesn't check for netstring validity.

    THROWS:
        ValueError if cannot determine size
    """
    try:
        slen = b[:b.find(b':')].decode('ascii')
        return 2 + len(slen) + int(slen)
    except (AttributeError, TypeError, ValueError, UnicodeDecodeError) as exc:
        # Narrowed from the original bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; the cause is chained for debugging.
        raise ValueError("cannot determine netstring size") from exc
|
2b50a555e29c6d7cbdc419d3dfb336d60f907d0c
| 14,420
|
import torch
def get_output_size(model, input_shape=(1, 3, 224, 224), device="cpu", dtype='float32'):
    """
    Returns the number of elements of the flattened convolutional
    features in output to the model.

    Parameters
    ----------
    model : pytorch model,
        neural network model.
    input_shape: tuple of int,
        shape of the images in input to the model in the form (batch_size, channels, height, width).
        Defaults to (1, 3, 224, 224).
    device: string,
        device for the computation. Choose between "cpu" and "gpu:x", where x is the number of the GPU device.
    dtype: string,
        datatype for the model. Choose between 'float32' and 'float16'. Defaults to 'float32'.

    Return
    ------
    output_size : int,
        number of elements of the flattened convolutional features in output to the model.

    Note: it is not possible to run model(x) on CPU with float16, so the
    model is temporarily cast to float32 for this computation and then
    converted back to float16.
    """
    if dtype == "float16":
        # temporarily promote to float32 (see Note in the docstring)
        model.float()
    dummy_input = torch.ones(input_shape).to(device)
    # EfficientNet models expose their convolutional trunk through
    # extract_features; assumes the model carries a `name` attribute --
    # TODO confirm this holds for every model passed in.
    if model.name[0:12] == "efficientnet":
        output_size = model.extract_features(dummy_input).shape[1:].numel()
    else:
        output_size = model(dummy_input).shape[1:].numel()
    if dtype == "float16":
        # restore the requested half precision
        model.half()
    return output_size
|
f9c4e79e2c38a424c723cfed6fc73fc63ddc3142
| 14,421
|
def str_function(connectable) -> str:
    """__str__ helper shared by OutputBase and InputBase."""
    details = []
    if connectable.owner:
        details.append('owner: %s' % connectable.owner)
    details.append('connected' if connectable.connected else 'not connected')
    return '%s(%s)' % (type(connectable).__name__, ', '.join(details))
|
5ed2ba8575314c0fdf9046988f3977ab262ff0af
| 14,423
|
from typing import List
import os
import re
def find_all_matched_files(directory: str, patternStr: str) -> List[str]:
    """
    List the paths of files that match patternStr under the specified directory.

    :param directory: root directory, walked recursively.
    :param patternStr: glob-style pattern where '*' matches any run of
        characters ("*.*" is a historical special case matching every file).
    :return: sorted list of matching file paths.
    """
    if patternStr == "*.*":
        # Historical special case: match every file name.
        pattern = ".*"
    else:
        # Escape regex metacharacters BEFORE expanding '*'.  The original did
        # the replacements in the opposite order, so every '.*' it introduced
        # became '\.*' (zero-or-more literal dots), corrupting the match.
        pattern = re.escape(patternStr).replace(r"\*", ".*") + "$"
    matcher = re.compile(pattern)  # compile once, not per file
    fileList = []
    for root, _, files in os.walk(directory):
        for file in files:
            if matcher.search(file) is not None:
                fileList.append(os.path.join(root, file))
    fileList.sort()
    return fileList
|
362d74d199700a4c1934b1b6aad0423d327377e4
| 14,424
|
import ssl
def get_ssl_context():
    """Create an SSL context with certificate verification disabled.

    SECURITY NOTE: skipping verification permits man-in-the-middle attacks;
    only use this context for testing or trusted internal endpoints.
    """
    context = ssl.SSLContext()
    # check_hostname must be off before verify_mode may be set to CERT_NONE
    # (setting CERT_NONE with hostname checking enabled raises ValueError).
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    return context
|
182b18996632f335b1cdf4d73e8dd502881b0f73
| 14,425
|
def split_order_book(order_book_id: str):
    """
    Split a contract identifier into the underlying symbol and its
    trailing 4-character date part.

    :param order_book_id: contract id such as "RB2105"
    :return: tuple of (symbol, 4-char date suffix)
    """
    symbol = order_book_id[:-4]
    date_part = order_book_id[-4:]
    return symbol, date_part
|
4692e95eb814de50f9effe7ed289f94712e265d6
| 14,427
|
def get_template_from_path(path: str) -> str:
    """Convert a normal path back to its template representation."""
    # Normalize Windows separators to forward slashes.
    return "/".join(path.split("\\"))
|
1a62acca2c0531563fb4493bc06708671a601cda
| 14,429
|
import re
def check_email(email):
"""
Checks an email format against the RFC 5322 specification.
"""
# RFC 5322 Specification as Regex
regex = """(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\"
(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])
*\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:
(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1
[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a
\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])"""
if re.search(regex, email):
return True
return False
|
522547471730c6975246c301492f5b7676032c75
| 14,430
|
import pandas
def getCumSumForIntervalData(actualTimeSeries, predictedValuesOnLaggedData, intervalSize=365):
    """
    :param actualTimeSeries: Object of DataFrame TimeSeries
    :param predictedValuesOnLaggedData: Object of DataFrame TimeSeries
    :param intervalSize: Period or Seasonality
    :return: Cumulative Sum for given Interval (dtype will be Float please consider properly)

    NOTE(review): this function mutates ``predictedValuesOnLaggedData`` in
    place (each element is replaced by its seasonal running total); callers
    reusing the argument afterwards should pass a copy — TODO confirm this
    side effect is intended.
    """
    # Seed the per-slot running totals with the first `intervalSize` actuals.
    arrayOfSeasonal = pandas.Series(actualTimeSeries[:intervalSize], copy=True)
    for index, value in enumerate(predictedValuesOnLaggedData):
        # Add the current running total for this seasonal slot, then update
        # the slot so later periods accumulate on top of it.
        predictedValuesOnLaggedData[index] = value + arrayOfSeasonal[index % intervalSize]
        arrayOfSeasonal[index % intervalSize] = predictedValuesOnLaggedData[index]
    # Re-index the seasonal seed onto the actual series' index and add the
    # accumulated predictions; fill_value=0 keeps non-overlapping labels
    # instead of producing NaN.
    return pandas.Series(actualTimeSeries[:intervalSize], actualTimeSeries.index, copy=True).add(
        predictedValuesOnLaggedData,
        fill_value=0)
|
3bcbca64aff1652239b32f2c2454e897d8968334
| 14,431
|
def calc_check_digit(number):
    """Calculate the Luhn-style check digit for the number (0-9, A-Z)."""
    alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    total = 0
    # Walk from the rightmost symbol; every other position is doubled,
    # with a single -9 correction when the doubled weight exceeds 9.
    for position, symbol in enumerate(reversed(number)):
        weight = alphabet.index(symbol)
        if position % 2 == 0:
            weight *= 2
            if weight > 9:
                weight -= 9
        total += weight
    return str((10 - total) % 10)
|
967d9ac7eae3e45b42c6bb4e366345fd34207d0d
| 14,433
|
def calcul_positions_virages(coordonnees_du_personnage, orientation_du_personnage, profondeur_vision, liste_plan_du_labyrinthe):
    """
    Compute the positions of the openings (turns) to the left and right of the
    character, scanning forward up to ``profondeur_vision`` cells.

    :param coordonnees_du_personnage: [x, y] position of the character
    :param orientation_du_personnage: one of 'nord', 'est', 'sud', 'ouest'
    :param profondeur_vision: how far ahead the character can see
    :param liste_plan_du_labyrinthe: maze rows where '.' marks an open cell
    :return: (left-turn positions, right-turn positions) — each a list of
             forward distances, 0 being the character's own cell.
    """
    x_personnage = coordonnees_du_personnage[0]
    y_personnage = coordonnees_du_personnage[1]
    # Per orientation: forward step (dx, dy), then the offsets of the cell to
    # the character's left and to its right.
    deplacements = {
        'nord': ((0, -1), (-1, 0), (1, 0)),
        'est': ((1, 0), (0, -1), (0, 1)),
        'sud': ((0, 1), (1, 0), (-1, 0)),
        'ouest': ((-1, 0), (0, 1), (0, -1)),
    }
    liste_positions_virages_gauche = []
    liste_positions_virages_droite = []
    if orientation_du_personnage in deplacements:
        (dx, dy), (gx, gy), (drx, dry) = deplacements[orientation_du_personnage]
        for pas in range(profondeur_vision + 1):
            cx = x_personnage + dx * pas
            cy = y_personnage + dy * pas
            if liste_plan_du_labyrinthe[cy + gy][cx + gx] == '.':
                liste_positions_virages_gauche.append(pas)
            if liste_plan_du_labyrinthe[cy + dry][cx + drx] == '.':
                liste_positions_virages_droite.append(pas)
    return liste_positions_virages_gauche, liste_positions_virages_droite
|
1fd818452750e98b0f24a269a19be324484fe6fc
| 14,435
|
def unique_filename(name):
    """
    Returns unique file name based on "name" argument.

    NOTE(review): currently a pass-through stub — it returns ``name``
    unchanged and performs no uniqueness check; confirm whether callers rely
    on this or a real deduplication scheme was intended.
    """
    return name
|
1145ec89e9bbadf5944bc787ff1fda2805bfde1b
| 14,436
|
def left_child(node, new_node=None):
    """ Set left child: left_child(node, new_left_child); Get left node: left_child(node). """
    # node[1] holds the left child; passing None (the default) acts as a
    # plain getter, any other value overwrites the slot first.
    if new_node is None:
        return node[1]
    node[1] = new_node
    return node[1]
|
baabfbeb4acb7e8a2846b9a4a45b9d993619a1e4
| 14,437
|
from datetime import datetime, timezone
def prettify_timestamp(value: str) -> str:
    """
    Returns a pretty version of a UNIX timestamp, e.g. "Mar 05 2021".

    Current format:
    - %b short name of month like Mar, Jun
    - %d day of the month from 1 to 31
    - %Y year in 4 digit format

    :param value: seconds since the epoch, as a string (or anything float()
        accepts)
    """
    # datetime.utcfromtimestamp() is deprecated since Python 3.12; an aware
    # UTC datetime yields the same wall-clock fields.
    return datetime.fromtimestamp(float(value), tz=timezone.utc).strftime("%b %d %Y")
|
82faefee2f98db33665c81c309e531d6dacab099
| 14,438
|
def contains(text, pattern):
    """Return a boolean indicating whether pattern occurs in text.

    Runtime: O(n) substring scan (best case O(1) when the pattern sits at
    the front). Space: O(1).
    """
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    # Bug fix: the original message formatted `text` instead of `pattern`.
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    # `in` already yields a bool; no need for an if/else around it.
    return pattern in text
|
4eff4e8d69e7843e2a4e4043df29139d92ded772
| 14,439
|
def custom_subparser_builder(subparser, subparser_id, description, options_func, runner_func, setup_func, shutdown_func, error_handler_func):
    """
    Util to register a subcommand and its lifecycle callbacks.

    :param subparser: result of ArgumentParser.add_subparsers()
    :param subparser_id: subcommand name
    :param description: help text for the subcommand
    :param options_func: F(parser) -> None, adds args/options to the parser
    :param runner_func: F(pargs) -> int (exit code)
    :param setup_func: F(pargs, log) run before execution
    :param shutdown_func: F(exit_code, started_at, log) -> int, run afterwards
    :param error_handler_func: F(exit_code, started_at, log, exc) -> int for
        unhandled exceptions raised by runner_func
    Note: `pargs` above is the result of parse_args().
    """
    child = subparser.add_parser(subparser_id, help=description)
    options_func(child)
    # Stash the callbacks on the namespace so the dispatcher can pull them
    # back out of parse_args() results (a mildly hacky but common pattern).
    child.set_defaults(
        runner_func=runner_func,
        setup_func=setup_func,
        shutdown_func=shutdown_func,
        error_handler_func=error_handler_func,
    )
    return child
|
0d5914646e07be26792fae321c32f3338d6b467b
| 14,440
|
from typing import List
def one_minus(data: List[float]) -> List[float]:
    """
    Return a new list with each element replaced by 1 - element.
    """
    return [1 - value for value in data]
|
bb5c9a3d27866b408e519541778c7cdb8bd9ac13
| 14,441
|
def call_method(obj, methodName):
    """
    Execute a method of an object using previously passed arguments with setarg filter
    """
    bound = getattr(obj, methodName)
    try:
        saved_args = obj.__call_arguments
    except AttributeError:
        # No stashed arguments: plain zero-argument call.
        return bound()
    result = bound(*saved_args)
    # Consume the stash so the next call reverts to argument-less invocation.
    del obj.__call_arguments
    return result
|
7bd94cdf81f448a8b73bbf3bcdda47cf4f5580cd
| 14,442
|
def get_bracketing_pattern_feature(dataset):
    """
    Return bracketing_pattern feature (sorted tuple of adj_quantities seen).

    Rows quoted together (same tube_assembly_id/supplier/quote_date) share the
    sorted tuple of their adj_quantity values; singleton groups get ().
    """
    grouped = dataset.groupby(
        ['tube_assembly_id', 'supplier', 'quote_date'])
    bracketing_pattern = [None] * len(dataset)
    # .iteritems() was Python 2 only; .items() works on the groups mapping.
    for t_s_q, indices in grouped.groups.items():
        if len(indices) > 1:
            bracket = tuple(sorted(dataset.adj_quantity[indices].values))
        else:
            bracket = ()
        for index in indices:
            bracketing_pattern[index] = bracket
    return bracketing_pattern
|
9b262fcff8e227dc50ea769ac8953df0aeba62b7
| 14,443
|
def exercise_output(template):
    """Outputs the way specified in the exercise"""
    # Output label paired with the template key that supplies its value.
    labels = (
        ("Marca", "Brand"),
        ("Modelo", "Model"),
        ("Preco", "Price"),
        ("Motor", "Motor"),
        ("Ano", "Year"),
        ("Km", "Odometer"),
        ("Combustivel", "Fuel"),
        ("Cambio", "Gear"),
        ("Direcao", "Steering"),
        ("Cor", "Color"),
        ("Ar-cond", "Air-Conditioning"),
        ("Opcionais", "Optionals"),
    )
    # One "Label: value" line per field, each terminated by a newline.
    return "".join("{0}: {1}\n".format(label, template[key]) for label, key in labels)
|
0944b47d28c8b95c7eab32b799066af14b2b95ea
| 14,444
|
def get_table(tables, var, phi):
    """
    Table Searching function

    Linearly interpolates ``tables[var]`` at the lookup value ``phi`` using
    the sample points in ``tables['phi']``, clamping phi to 50.

    NOTE(review): assumes ``tables['phi']`` is sorted ascending and contains
    a value >= phi after clamping — otherwise the linear scan walks off the
    end; a phi below the first sample also makes ``idx-1`` wrap to -1.
    TODO confirm these input guarantees hold for all callers.
    """
    # Clamp the lookup to the tabulated upper bound.
    if phi>=50:
        phi = 50
    #First find phi index
    idx=0
    while(True):
        if tables['phi'][idx]>=phi:
            break
        idx+=1
    # Exact hit on the previous sample: return it without interpolating.
    if(tables['phi'][idx-1]==phi):
        return tables[var][idx-1]
    else:
        #Let's interpolate
        return (tables[var][idx]-tables[var][idx-1])/(tables['phi'][idx]-tables['phi'][idx-1])*(phi-tables['phi'][idx-1])+tables[var][idx-1]
|
55a0feae3d67c960f815a46bb0bc43f0ed7cf5a9
| 14,446
|
def _GetMetdataValue(metadata, key):
"""Finds a value corresponding to a given metadata key.
Args:
metadata: metadata object, i.e. a dict containing containing 'items'
- a list of key-value pairs.
key: name of the key.
Returns:
Corresponding value or None if it was not found.
"""
for item in metadata['items']:
if item['key'] == key:
return item['value']
return None
|
e4ec468fe1e79605d3d7199a703981bae02bfaa3
| 14,447
|
import re
def match_is_ipv4_address(value):
    """Match given value as a valid dotted-quad IPv4 address.

    Returns None when the value is valid, otherwise a human-readable
    error message describing the failure.
    """
    dotted_quad = r'^(\d+)\.(\d+)\.(\d+)\.(\d+)$'
    try:
        match = re.search(dotted_quad, value)
    except TypeError:
        # Non-string input cannot be matched at all.
        return u'{0} must be a string in IPv4 dotted-quad notation'.format(
            repr(value))
    if match is None:
        return u'"{0}" must be in IPv4 dotted-quad notation'.format(
            value)
    # Each captured octet must fit in a byte.
    ordinals = ['1st', '2nd', '3rd', '4th']
    for position, octet_text in enumerate(match.groups()):
        if int(octet_text) > 255:
            return '{0} octet of "{1}" exceeds 255'.format(
                ordinals[position], value)
    return None
|
d4573d5919d1811b83c26928f3e403d070c41f37
| 14,449
|
import math
def smooth(low, high, x):
    """Cosine-ease from low (at x=0) to high (at x=1)."""
    # Raised-cosine blend weight: 0 at x=0, 1 at x=1.
    blend = (1 + math.cos(math.pi * (1 - x))) / 2
    return low + (high - low) * blend
|
3278891277048a4d6a19f7a5450a0a8a5f1d0ad9
| 14,450
|
def _parse_header(path):
"""Parses all GAMMA header file fields into a dictionary"""
with open(path) as f:
text = f.read().splitlines()
raw_segs = [line.split() for line in text if ':' in line]
# convert the content into a giant dict of all key, values
return dict((i[0][:-1], i[1:]) for i in raw_segs)
|
93e512388f3c5c5f8ee0e9f0365fa5328b0ea864
| 14,452
|
def parse_trousers_input_args(s):
    """Parses Trspi family's input arguments.

    Splits a Trspi function's argument string into (type, name) tuples, e.g.
    "BYTE *s, unsigned *len" -> [("BYTE *", "s"), ("unsigned *", "len")].

    Args:
      s: String representation of the input arguments of a Trspi function.
    Returns:
      A list of tuples in form of (data type, variable name).
    """
    def _split_param(param):
        # The separator before the variable name is the last '*' if present,
        # otherwise the last space; a bare token is treated as all-type.
        param = param.strip()
        sep = param.rfind('*')
        if sep == -1:
            sep = param.rfind(' ')
        if sep == -1:
            sep = len(param)
        return param[:sep + 1].strip(), param[sep + 1:].strip()

    return [_split_param(token) for token in s.split(',')]
|
366edb8d3b8f1369b8c4b095e037a2192b056e0d
| 14,453
|
def encode_color(rgb):
    """Quantize an (r, g, b) triple to its 8-color palette index (0-7).

    Each channel contributes one bit, set when its value exceeds 127:
    red -> bit 2, green -> bit 1, blue -> bit 0.  This replaces the
    original's binary-string comparison loop with direct bit arithmetic.
    """
    return int(
        ((rgb[0] > 127) << 2)
        | ((rgb[1] > 127) << 1)
        | (rgb[2] > 127)
    )
|
81c04e6307eb3d36459cd24c1780e26d792ea6ab
| 14,455
|
from pathlib import Path
def make_unique_filename(name):
    """ Creates filename that does not collide with existing files.
    May add a numeric counter between name and extension to make it unique.
    Returns Path object.
    May return original name when it fails to construct a unique name.
    """
    original = Path(name)
    folder, stem, ext = original.parent, original.stem, original.suffix
    candidate = original
    # Try the plain name first, then "stem.N.ext" for N = 0..998; if every
    # candidate exists the last one is returned as-is.
    for attempt in range(1000):
        if not candidate.exists():
            break
        candidate = folder / "{0}.{1}{2}".format(stem, attempt, ext)
    return candidate
|
6da56fc220ca7cea6f4dec9ab1c9fb760c174178
| 14,456
|
def columnsBySubmission(submissions, columns):
    """ create map submissionName -> set(columnNames)
    """
    # Each submission maps to a template; expose that template's column names.
    return {
        name: set(columns[submissions[name]].keys())
        for name in submissions
    }
|
8364faef492941fb31008e5b31ee570bd2d29a04
| 14,457
|
def useScaleGlyph(unicodeValue, glyphList):
    """ Determines whether or not to use scaled glyphs for glyphs in passed glyph_list """
    def _matches(entry):
        # Tuples are inclusive (start, end) codepoint ranges; anything else
        # is compared as a single codepoint.
        if isinstance(entry, tuple):
            return entry[0] <= unicodeValue <= entry[1]
        return unicodeValue == entry

    return any(_matches(entry) for entry in glyphList)
|
37fc8ee07ccbf58520b558f0d48ba96f50ce3ae3
| 14,459
|
def is_password_valid_with_old_rules(dataset):
    """ Validate password according to the old rules """
    # The policy letter must appear between `first` and `last` times inclusive.
    occurrences = dataset['password'].count(dataset['letter'])
    return int(dataset['first']) <= occurrences <= int(dataset['last'])
|
99a63ece8e3b8520fd71028bd01bb3a1a7e677a7
| 14,462
|
import glob
import os
def get_img_path(dir_list):
    """
    get all images for style transferring, raw images are .jpg files
    :param dir_list: a list of paths like [training/a/abbey, training/a/airfield, ...]
    :return: a dict of raw image paths, keyed by image file name; each value
        is [source directory, full image path]
    """
    raw_images = {}
    for path in dir_list:
        for img in glob.glob(os.path.join(path, "*.jpg")):
            # os.path.basename is separator-aware; the original
            # img.split("/")[-1] broke on Windows-style paths.
            img_name = os.path.basename(img)
            raw_images[img_name] = [path, img]
    return raw_images
|
0db59a81fc58ef6320b57f084c4f8ba241c3ac0e
| 14,463
|
import os
def version():
    """
    Service version endpoint: (VERSION env value or None, HTTP 200).
    """
    payload = os.environ.get('VERSION')
    return payload, 200
|
24ebb860f5ae5fa0ab5617835d0107fd6e1db7c1
| 14,464
|
def _f(x, t, s, cls):
"""Return True if x equals t,
or x is an instance of cls and x equals cls(t),
or x.lower().startswith(s)"""
return x == t or \
(isinstance(x, cls) and x == cls(t)) or \
(isinstance(x, str) and x.lower().startswith(s))
|
91d9c518b58528f01034ca67738c220a17f7f40d
| 14,465
|
import threading
import functools
def synchronized(wrapped):
    """Simple synchronization decorator.

    Decorating a method like so:
    .. code-block:: python
       @synchronized
       def foo(self, *args):
           ...
    ensures that only one thread will execute the foo method at a time.
    """
    # One re-entrant lock per decorated callable, shared by all invocations.
    guard = threading.RLock()

    @functools.wraps(wrapped)
    def _locked(*args, **kwargs):
        guard.acquire()
        try:
            return wrapped(*args, **kwargs)
        finally:
            guard.release()

    return _locked
|
85af7bbd8b7d72f13bfaecc5f1df459745358ab5
| 14,466
|
def solve(lists, n):
    """
    Get the missing list.

    Elements whose total occurrence count across all lists is odd form the
    answer.

    Parameters
    ----------
    lists : list of lists
        2*n - 1 lists with integers
    n : int

    Returns
    -------
    strictly increasing list of n integers.
    """
    occurrences = {}
    for sequence in lists:
        for value in sequence:
            occurrences[value] = occurrences.get(value, 0) + 1
    missing = [value for value, count in occurrences.items() if count % 2 == 1]
    assert len(missing) == n
    return sorted(missing)
|
001645ff1a9ae00169ab3565eaed5cb7764f0c7a
| 14,467
|
def counter(countables):
    """
    Counter for counting the values inside a particular list or dict.
    This is just a scratch/vanilla version of collections.Counter.

    Args:
        countables: Iterable of hashable values to be counted.
    Returns:
        dict mapping each value to its occurrence count.
    """
    counts = dict()
    for k in countables:
        # dict.get replaces the original O(n) `k in list(counts.keys())`
        # membership scan per element.
        counts[k] = counts.get(k, 0) + 1
    return counts
|
42bcf22f000de70e0453f9f9078ea8d7c5f74db2
| 14,468
|
def get_alignment_pdb_id(alignment):
    """
    Returns a string of the four letter PDB ID found in alignment
    :param alignment: BLAST alignment whose ``hit_def`` starts with
        "<pdbid>_<chain>"
    :return: lower-cased pdb id string
    """
    # Bug fix: the original called .encode('ascii'), producing bytes, and
    # bytes.split('_') with a str separator raises TypeError on Python 3.
    pdb_chain_id = alignment.hit_def.split()[0]
    pdb_id = pdb_chain_id.split('_')[0].lower()
    return pdb_id
|
35a6abcd18e411328e5b7fb8c5a4f1dba8fe2391
| 14,470
|
def post_inner_loop_update(temp_storage, this_np):
    """
    This function updates the storage at the end of a step of the outer-loop, i.e., after all steps in the inner loop.
    :param temp_storage: (dictionary) the dictionary with all the information and storage required by the inner loop
    :param this_np: (NumPy) the numpy that is used to calculate the cumulative sum
    :return: the updated ``temp_storage`` dictionary
    """
    # Energy from driver: cumulative sum of this step's mean_de2 series,
    # offset by the running total carried over from the previous outer step.
    temp_storage["series"]["mean_cum_de2"] = temp_storage[
        "mean_cum_de2_previous"
    ] + this_np.cumsum(temp_storage["series"]["mean_de2"])
    # See if E_driver = E_f + E_e
    temp_storage["series"]["mean_t_plus_e2_minus_cum_de2"] = temp_storage["series"][
        "mean_T"
    ] + (temp_storage["series"]["mean_e2"] - temp_storage["series"]["mean_cum_de2"])
    # NOTE(review): despite the "_plus_cum_de2" name, this sum adds mean_de2
    # rather than mean_cum_de2 (unlike the "minus" series above) — confirm
    # whether that asymmetry is intentional.
    temp_storage["series"]["mean_t_plus_e2_plus_cum_de2"] = (
        temp_storage["series"]["mean_T"]
        + temp_storage["series"]["mean_e2"]
        + temp_storage["series"]["mean_de2"]
    )
    # Carry the final cumulative value forward for the next outer-loop step.
    temp_storage["mean_cum_de2_previous"] = temp_storage["series"]["mean_cum_de2"][-1]
    return temp_storage
|
f07b931ed999e18d73741020614d5faafb1664c3
| 14,472
|
import socket
def str_to_inet(ip: str) -> bytes:
    """
    Convert a textual IP address to its packed binary representation.

    :param ip: IPv4 like "123.45.67.89" or IPv6 like "::1"
    :return: 4-byte (IPv4) or 16-byte (IPv6) packed address
    """
    # Try IPv4 first; fall back to IPv6 (whose OSError propagates on failure).
    try:
        return socket.inet_pton(socket.AF_INET, ip)
    except OSError:
        pass
    return socket.inet_pton(socket.AF_INET6, ip)
|
3ab520701ec0271499d3512425a939803ac0adc9
| 14,473
|
import csv
def load_promoter_characterizations(filename, samples):
    """Load promoter characterization data from a tab-separated file.

    The file has one header row; each data row is
    (chromosome, promoter unit, sample, _, total_reads, performance).

    :param filename: path to the TSV file
    :param samples: collection of sample names to keep
    :return: (perf, full) nested dicts keyed sample -> chromosome -> promoter
        unit, holding the characterised increase and the total downstream
        reads respectively.
    """
    pro_data_perf = {}
    pro_data_full = {}
    # 'rU' universal-newline mode was removed in Python 3.11; the csv docs
    # recommend newline=''.  `with` also guarantees the handle is closed.
    with open(filename, 'r', newline='') as handle:
        data_reader = csv.reader(handle, delimiter='\t')
        next(data_reader)  # skip header
        for row in data_reader:
            cur_chrom, cur_pu, cur_sample = row[0], row[1], row[2]
            if cur_sample not in samples:
                continue
            # Characterised increase from promoter
            pro_data_perf.setdefault(cur_sample, {}).setdefault(cur_chrom, {})[cur_pu] = float(row[5])
            # Total downstream reads
            pro_data_full.setdefault(cur_sample, {}).setdefault(cur_chrom, {})[cur_pu] = float(row[4])
    return pro_data_perf, pro_data_full
|
2029c23d33768fb72bc0fb20e6a3dae7d0338887
| 14,474
|
import os
import fnmatch
def find(pattern, path):
    """
    Looks for files that match a pattern.

    Parameters
    ----------
    pattern: str;
        fnmatch-style pattern to look for.
    path: str;
        Root directory to search recursively.

    Returns
    -------
    result: list;
        Full paths of the files found.
    """
    matches = []
    for base, _, filenames in os.walk(path):
        # fnmatch.filter applies the same match as per-name fnmatch.fnmatch.
        matches.extend(
            os.path.join(base, filename)
            for filename in fnmatch.filter(filenames, pattern)
        )
    return matches
|
668765169cd11da033565a20fa193af4524e3702
| 14,475
|
def get_r77_id(cookie):
    """
    get a Room 77 id from a cookie
    """
    # Cookie format: "<id>%3B<rest>" (URI-encoded semicolon separator);
    # anything other than exactly two parts is malformed.
    parts = cookie.split('%3B')
    if len(parts) != 2:
        raise ValueError('Invalid cookie')
    return int(parts[0])
|
8513f1080eee73d342fcfee03df5f7927aca1197
| 14,476
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.