content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_html_name(form, name):
    """Return the name used in the HTML form for the given form instance and field name."""
    prefixed_name = form.add_prefix(name)
    return prefixed_name
|
8ae42f5abbcf9e8131b0edb6868414e1af5a29d8
| 18,388
|
def to_lower_camel_case(string):
    """
    Convert a snake_case string to lowerCamelCase.
    Args:
        string (str): input string in any case
    Returns:
        str: string converted to lower camel case
    Example:
        >>> to_lower_camel_case('snake_case_string')
        'snakeCaseString'
    """
    first, *rest = string.split('_')
    if first:
        # Lower-case only the first letter; the remainder is left untouched.
        first = first[0].lower() + first[1:]
    # Remaining components get a capitalized first letter.
    return first + ''.join(part.title() for part in rest)
|
6d8ba39e1de7fdc0453712d6bbc0221685163ad5
| 18,389
|
def parse_clf_kwargs(params):
    """Parse classifier constructor keyword arguments from "<key>=<value>" strings.

    These values are typically supplied at the command-line.  Each value is
    coerced in order: bool ("True"/"False"), int, float; anything else stays
    a string.

    Args:
        params: iterable of "<key>=<value>" strings.

    Returns:
        dict mapping each key to its parsed value.
    """
    def parse_val(s):
        # Booleans first -- "True"/"False" would not parse as numbers anyway.
        if s in ("True", "False"):
            return s == "True"
        for _type in (int, float):
            try:
                return _type(s)
            except ValueError:
                continue
        return s
    clf_kwargs = {}
    for p in params:
        # partition splits on the FIRST '=' only, so values containing '='
        # (e.g. "path=a=b") survive; the original split("=") raised
        # ValueError ("too many values to unpack") on such input.
        k, _, s = p.partition("=")
        clf_kwargs[k] = parse_val(s)
    return clf_kwargs
|
ad0f2925fc9be019c5edc1d97466316b77458c4b
| 18,390
|
def matrix_multiply(a, b):
    """
    Multiply two matrices of compatible dimensions.
    :param a: left matrix as list of lists (n x k)
    :param b: right matrix as list of lists (k x m)
    :return: product matrix (n x m) as list of lists
    """
    # zip(*b) yields the columns of b.
    b_columns = list(zip(*b))
    product = []
    for row in a:
        product.append([sum(x * y for x, y in zip(row, col)) for col in b_columns])
    return product
|
1357f93e91a511e2ecfb3b1c23aa08967c01220a
| 18,391
|
def ip_key(ip):
    """
    Return a dotted-quad IP address as a tuple of ints.
    Used as a sort key so addresses compare numerically, not lexically.
    """
    return tuple(map(int, ip.split('.')))
|
69082fe54aae5b060cbc95b5290d73fdb2bf275b
| 18,392
|
def get_ordering_field(view, method):
    """Return the ordering fields to expose for a LIST endpoint.

    Valid fields are those present both on the queryset's model and among the
    view's non write-only serializer fields.  Returns None when the method is
    not a LIST method or the view has no serializer_class.
    """
    if 'list' not in method or not view.serializer_class:
        return None
    model_fields = {field.name for field in view.queryset.model._meta.fields}
    serializer_fields = {
        field.source or field_name
        for field_name, field in view.serializer_class().fields.items()
        if not getattr(field, 'write_only', False)}
    return list(model_fields & serializer_fields)
|
2e83c1abc9a73551ad2402711da52d45ef3f7fac
| 18,393
|
def _ListCategories(node):
"""Returns the categories within a fetched (non-ndb) node."""
if node['categories']:
return node['categories'].replace(' ', '').split(',')
|
6ac7a7d59e0f3622ed1f12ccfb4e5867815bbb29
| 18,395
|
import os
def build_log_filenames(basename, path, max_number):
    """Create all of the rotated logfile names: base, base.1, ... base.(max_number-1)."""
    base = os.path.join(path, basename)
    return [base] + ["{}.{}".format(base, n) for n in range(1, max_number)]
|
5cc0058bd0c19ebf0caff992d16414a0c6e67520
| 18,396
|
def df_move_column(df, column_name, new_location):
    """Return a copy of *df* with *column_name* moved to integer index *new_location*.

    The input frame is not modified.
    """
    reordered = list(df.columns)
    reordered.remove(column_name)
    reordered.insert(new_location, column_name)
    return df.copy()[reordered]
|
2b978af20f9cc8d89c91450e136e46947028f741
| 18,400
|
def beta2_mu(four_mass2_over_q2_):
    """Calculate the squared muon velocity factor: β_μ² = 1 − 4m²/q²."""
    beta_squared = 1.0 - four_mass2_over_q2_
    return beta_squared
|
64770e7e023fe6fff7a3391a8d6385f0cddd6dde
| 18,402
|
def is_component(param):
    """Rule of thumb: decide whether a parameter's type annotation is a component.

    A component is anything exposing a ``parameters`` attribute, or anything
    callable (e.g. a class or factory).
    """
    annotation = param.annotation
    if hasattr(annotation, "parameters"):
        return True
    return callable(annotation)
|
c907a65144fec6b882d00367be2c8d6a8db5b5b7
| 18,405
|
import hashlib
def md5_for_file(filename, block_size=2**20):
    """
    Calculate the MD5 of the given file, reading it in chunks.
    :param filename: The file to read in
    :param block_size: How much of the file to read in at once (bytes).
                       Defaults to 1 MB
    :returns: The md5 hash object (call .hexdigest() for the digest string)
    """
    md5 = hashlib.md5()
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
    return md5
|
a972b43c7fc15e92c897101b9bff23e0392152be
| 18,406
|
import math
def similar_distance(data, name1, name2):
    """
    Compute a similarity score between two users from their movie scores.

    :param data: mapping user -> {movie: score}
    :param name1: first user
    :param name2: second user
    :return: 0 when the users share no rated movie; otherwise
        1 / (1 + euclidean_distance) over the union of both users' movies,
        with missing scores counted as 0.
    """
    ratings1 = data[name1]
    ratings2 = data[name2]
    # No movie in common -> no basis for similarity.
    shared = [movie for movie in ratings2 if movie in ratings1]
    if not shared:
        return 0
    all_movies = set(ratings1) | set(ratings2)
    squared_distance = 0
    for movie in all_movies:
        score_1 = ratings1.get(movie, 0)
        score_2 = ratings2.get(movie, 0)
        squared_distance = squared_distance + math.pow(score_1 - score_2, 2)
    return 1 / (1 + math.sqrt(squared_distance))
|
fda2cfd9ec30e7f0071af7e36ec9a6b1aa674f06
| 18,407
|
import os
def extract_info_from_annotation_filename(filename):
    """Extract info from an annotations filename.

    Args:
        filename (str) : path to annotations file or its short representation
            (e.g. golf_FC_1619074016_dynamic_objects_0)

    Returns:
        tuple : (vehicle_id, camera_name, camera_ts, sequence_id) where
            vehicle_id (str) is the vehicle ID, camera_name (str) the camera
            name, camera_ts (int) the camera frame unix timestamp, and
            sequence_id (int) the annotation sequence id.
            (The original docstring advertised a different, incorrect tuple.)

    Raises:
        ValueError: when the filename does not match the expected format.
    """
    fmt = ("<vehicle>_<camera_name>_<camera_unix_ts>"
           "_<annotation_project>_<sequence_id>")
    parts = os.path.basename(filename).split(".")[0].split("_")
    if len(parts) != 5:
        # Include the offending filename; the original f-strings contained no
        # placeholder and always printed the literal "(unknown)".
        raise ValueError(f"Filename {filename} does not fit the format {fmt}")
    try:
        vehicle_id = parts[0]
        camera_name = parts[1]
        camera_ts = int(parts[2])
        sequence_id = int(parts[4])
        return vehicle_id, camera_name, camera_ts, sequence_id
    except ValueError as error:
        raise ValueError(
            f"Filename {filename} does not fit the format {fmt}"
        ) from error
|
5bbccd456b8088e9fdd4bb509fda4e42d977a126
| 18,408
|
from pathlib import Path
def find_root_folder(start_file: Path):
    """
    Find the root package folder from a file (or folder) within the package.

    Raises ValueError when the starting location is not inside a package
    (no __init__.py present).
    """
    # Start from the containing folder when given a file.
    package_path = start_file if start_file.is_dir() else start_file.parent
    if not (package_path / "__init__.py").exists():
        raise ValueError("File not part of a package")
    # Climb while the parent is still a package.
    while (package_path.parent / "__init__.py").exists():
        package_path = package_path.parent
    return package_path
|
c3e7e2af6d7ec40359ca30443b0323044314fe32
| 18,409
|
import os
def list_subdir(a_dir):
    """List the names of the immediate subdirectories of a_dir."""
    # https://stackoverflow.com/a/800201
    subdirs = []
    for entry in os.listdir(a_dir):
        if os.path.isdir(os.path.join(a_dir, entry)):
            subdirs.append(entry)
    return subdirs
|
1bafea9eb25910b19d66be04c297fcdc2c430f80
| 18,410
|
from typing import List
def join_url_path(*components, join_empty: bool = False) -> str:
    """Join given URL path components and return an absolute path starting with '/'.

    Empty and '.' components are dropped.  A trailing slash is produced when
    the last component is '/', or when it is '' and join_empty is true.
    """
    cleaned: List[str] = [
        stripped
        for stripped in (comp.strip("/") for comp in components)
        if stripped not in ("", ".")
    ]
    last = components[-1] if components else None
    if last == "/" or (last == "" and join_empty):
        # An empty final element makes "/".join emit a trailing slash.
        cleaned.append("")
    return "/" + "/".join(cleaned)
|
70dcb0b941382d08158d2752bb5e94ffe33d456f
| 18,413
|
def paginator_page_list(number, page_range):
    """Return the window of page numbers to display in the paginator.

    number -- current page number (int or numeric string)
    page_range -- full list of page numbers

    At most 7 pages are shown, keeping the current page centred except near
    either end of the range.
    """
    window = 7
    number = int(number)
    total = len(page_range)
    if total <= window:
        return page_range
    if number <= 4:
        return page_range[:window]
    if total - number < 4:
        return page_range[total - window:]
    return page_range[number - 4:number + 3]
|
0d90bace05a9815c8222ca32994acd22514052e4
| 18,414
|
def praw_settings(settings, cassette_exists):
    """Settings needed to use the Api client.

    When a recorded cassette exists, point the reddit client at dummy local
    values so no live credentials are required; otherwise *settings* is left
    untouched.  Returns the (possibly mutated) settings object.
    """
    if cassette_exists:
        overrides = {
            "OPEN_DISCUSSIONS_REDDIT_CLIENT_ID": "client_id",
            "OPEN_DISCUSSIONS_REDDIT_SECRET": "secret",
            "OPEN_DISCUSSIONS_REDDIT_URL": "https://reddit.local",
            "OPEN_DISCUSSIONS_REDDIT_VALIDATE_SSL": False,
            "OPEN_DISCUSSIONS_CHANNEL_POST_LIMIT": 25,
        }
        for attr, value in overrides.items():
            setattr(settings, attr, value)
    return settings
|
ee3f71b902412b392510d59fe9cef7bd0b47d711
| 18,415
|
import numpy
def color_convert(orig, convert_fn):
    """
    Convert a 3 x n array of color points to a new color space.

    convert_fn accepts the 3 components of one point in the original color
    space and returns a 3-tuple in the new color space.
    """
    _, n = orig.shape
    converted = numpy.zeros((3, n))
    for i in range(n):
        c0, c1, c2 = orig[:, i]
        converted[:, i] = convert_fn(c0, c1, c2)
    return converted
|
e7768e35a766d7f7c4d80b511ac293b250914d92
| 18,416
|
import os
def generate_random_long(signed=True):
    """Generate a random 8-byte (64-bit) integer, optionally signed."""
    raw = os.urandom(8)
    return int.from_bytes(raw, byteorder='little', signed=signed)
|
28185c141012b4fc1f3c7b9b7fdfdacf5d1134b1
| 18,417
|
def keep_diversity(population, generate_chromosome, min_length_chromosome, max_length_chromosome, possible_genes,
                   repeated_genes_allowed, check_valid_chromosome):
    """Strongly promote diversity in the population.

    When an individual is repeated in the population it is substituted by a
    completely new, randomly generated individual.  The worst 25% of the
    population is likewise replaced by brand-new random individuals.  The
    population MUST already be sorted by fitness (best to worst) before
    calling this function.

    :param population: (list of Individuals) Sorted population by fitness (from best to worst).
    :param generate_chromosome: (function) Function to generate a new chromosome.
    :param min_length_chromosome: (int) Minimum allowed length of the chromosome.
    :param max_length_chromosome: (int) Maximum allowed length of the chromosome.
    :param possible_genes: (list of ...) List with all the possible values that the genes can take.
    :param repeated_genes_allowed: (bool) Whether genes may be repeated in a chromosome (1) or not (0).
    :param check_valid_chromosome: (function) Receives a chromosome and returns True if it creates a valid individual.
    :return:
        (list of chromosomes) List of chromosomes that will represent the next generation.
    """
    # Elitism: the best individual's chromosome is always kept.
    new_population = [population[0].chromosome] # List that will hold the new population
    list_chromosomes = [ind.chromosome for ind in population] # List with all the chromosomes
    copy_list_chromosomes = list_chromosomes.copy()
    try:
        # Sort each chromosome in place so that two chromosomes with the same
        # genes in different order compare equal in the duplicate check below.
        for i in range(len(list_chromosomes)):
            chrom = list_chromosomes[i]
            if len(chrom):
                if type(chrom[0]) == int or type(chrom[0]) == float or type(chrom[0]) == str or type(chrom[0]) == chr:
                    chrom.sort() # So repeated individuals are correctly eliminated
                elif type(chrom[0]) == dict:
                    # Dict genes: sort by (first key, first value) of each gene.
                    list_chromosomes[i] = sorted(chrom, key=lambda i: (list(i.keys())[0], list(i.values())[
                        0])) # Not 100% sure it will work and sort uniquely the individuals, but it may do the trick
    # NOTE(review): bare except silently swallows the real error; consider
    # narrowing to (TypeError, IndexError, KeyError).
    except:
        print('Sorting error in keep_diversity.')
        list_chromosomes = copy_list_chromosomes.copy() # If some error in the sorting, just copy the original list_chromosomes
    # Deduplicate within the best quarter: a chromosome already seen earlier
    # in the (sorted) population is replaced by a fresh random one.
    # NOTE(review): only indices 1..len/4 are examined here -- presumably
    # intentional, since the worst 25% is replaced wholesale below; confirm.
    for i in range(1, int(len(list_chromosomes) / 4)):
        if population[i].chromosome in list_chromosomes[:i]:
            new_ind = generate_chromosome(min_length_chromosome, max_length_chromosome, possible_genes,
                                          repeated_genes_allowed) # Generate a new individual
            while not check_valid_chromosome(new_ind):
                new_ind = generate_chromosome(min_length_chromosome, max_length_chromosome, possible_genes,
                                              repeated_genes_allowed) # Check if the new individual is valid
            new_population.append(new_ind)
        else:
            new_population.append(population[i].chromosome)
    while len(new_population) < len(
            population): # Add new randomly generated individuals in substitution to the worst 25%.
        new_ind = generate_chromosome(min_length_chromosome, max_length_chromosome, possible_genes,
                                      repeated_genes_allowed) # Generate a new individual
        if check_valid_chromosome(new_ind):
            new_population.append(new_ind)
    return new_population
|
bd115ce69d78a7c0e39fdd92d6685c096e394d1b
| 18,418
|
import importlib
def get_simulator_api(api, reload=False):
    """ Get the BioSimulators API for a simulator

    Args:
        api (:obj:`str`): module which implements the API for the simulator
        reload (:obj:`bool`, optional): whether to reload the API

    Returns:
        :obj:`types.ModuleType`: the imported (and optionally reloaded) module
    """
    module = importlib.import_module(api)
    # importlib.reload returns the same (refreshed) module object.
    return importlib.reload(module) if reload else module
|
088b129ce31d246af4d85800d0192ee3cf44092e
| 18,419
|
def compute_chain_x_axis(obj, bone_names):
    """
    Compute the x axis of all bones to be perpendicular
    to the primary plane in which the bones lie.
    """
    edit_bones = obj.data.edit_bones
    assert(len(bone_names) > 1)
    head_bone = edit_bones[bone_names[0]]
    tail_bone = edit_bones[bone_names[-1]]
    # Normal to the plane spanned by the first bone's y axis and the vector
    # from the chain's start to its end.
    chain_y_axis = tail_bone.tail - head_bone.head
    plane_normal = head_bone.y_axis.cross(chain_y_axis)
    # Degenerate (near-collinear) chain: fall back to the first bone's x axis.
    if plane_normal.length < head_bone.length / 100:
        return head_bone.x_axis.normalized()
    return plane_normal.normalized()
|
fc0ccc25c8e3ac964b5e9dedd5be4d752f08b5d3
| 18,420
|
def find_best_row(m):
    """
    Find the row of the matrix containing the most zeros, to
    make less calculus in the adjunct method.  Ties keep the earliest row.
    @params:
        m => a matrix (list of lists)
    @return:
        the index of the best row
    Alejandro AS
    """
    best_row, best_zeros = 0, 0
    for row_index, row in enumerate(m):
        zeros = sum(1 for value in row if value == 0)
        if best_zeros < zeros:
            best_row, best_zeros = row_index, zeros
    return best_row
|
b949c935c0917809f4938249e9f8d0f1b666c84d
| 18,421
|
from typing import Union
from typing import Dict
def make_rules(rules: Union[Dict[str, str], str]) -> Dict[str, str]:
    """Creates rules dict.

    A string input is whitespace-split; consecutive token pairs become
    key/value entries.  A dict input is returned unchanged.
    """
    if not isinstance(rules, str):
        return rules
    tokens = rules.split()
    return {key: value for key, value in zip(tokens[::2], tokens[1::2])}
|
0f6bc31d5a3e2c569d62fe4a64c03a5f8bf5bed6
| 18,422
|
import time
import socket
def recv_timeout(the_socket, timeout=1):
    """Read UTF-8 text from a non-blocking socket until it goes quiet.

    Collects data until *timeout* seconds have elapsed since the last byte
    arrived (or 2 * timeout with no data at all), then returns everything
    received joined into one string.
    """
    # Non-blocking mode: recv raises socket.error instead of waiting.
    the_socket.setblocking(0)
    total_data = []
    data = ''
    begin = time.time()
    while True:
        # if you got some data, then break after wait sec
        if total_data and time.time() - begin > timeout:
            break
        # if you got no data at all, wait a little longer
        elif time.time() - begin > timeout * 2:
            break
        try:
            data = the_socket.recv(1024)  # 8192
            if data:
                total_data.append(data.decode('utf-8'))
                # Reset the quiet-period timer on every successful read.
                begin = time.time()
            else:
                # recv returned b'': peer closed or nothing yet; back off briefly.
                time.sleep(0.1)
        except socket.error as e:
            # errno 11 is EAGAIN/EWOULDBLOCK on Linux ("no data right now").
            # NOTE(review): prefer errno.EAGAIN for portability -- on other
            # platforms 11 may be a different error.
            if not e.errno == 11:
                raise
    return ''.join(total_data)
|
c568ca203e550ada5e8b36809793e1b14e3dba70
| 18,425
|
def determine_travel_modes(drive_time, transit_time, plane_time):
    """Build the travel dict of reasonable modes from point-to-point times.

    Driving is kept when under 24 hours, transit when under 10 hours, and
    flying only when it takes more than 2 hours.
    """
    rules = (
        ("drive", drive_time, drive_time < 24),
        ("transit", transit_time, transit_time < 10),
        ("plane", plane_time, plane_time > 2),
    )
    return {mode: duration for mode, duration, reasonable in rules if reasonable}
|
c40e99d02a804fc963b8fe041505fcd648bbcf2d
| 18,426
|
def list_intersection(lst1, lst2):
    """
    Intersection of two lists.

    From:
    https://stackoverflow.com/questions/3697432/how-to-find-list-intersection

    For short inputs (both lists <= 10 items) a list comprehension is used,
    which preserves lst1's order and multiplicity; for longer inputs a set
    intersection is used (order not guaranteed, duplicates removed).

    Parameters
    ----------
    lst1, lst2 : list
        Python lists

    Returns
    -------
    list
        List with common elements of both input lists.

    Examples
    --------
    >>> list_intersection([4, 9, 1, 17, 11, 26], [9, 9, 74, 21, 45, 11])
    [9, 11]
    """
    if len(lst1) > 10 or len(lst2) > 10:
        return list(set(lst1) & set(lst2))
    return [item for item in lst1 if item in lst2]
|
bc9a416dd4fb4d143308c95407ef7f8b5ce52fc0
| 18,427
|
def deriv_lorenz(X, t, sigma, beta, rho):
    """Right-hand side of the Lorenz equations at state X = (x, y, z).

    *t* is unused; it is kept for odeint-style call signatures.
    """
    x, y, z = X
    # (dx/dt, dy/dt, dz/dt)
    return (-sigma*(x - y),
            rho*x - y - x*z,
            -beta*z + x*y)
|
5eb5aa5640223fe5e92667f5a87d53a5f55d7370
| 18,428
|
def checkSafe(board, x, y, turn):
    """Check whether a position is safe, returns true if safe.

    Scans outward from the 1-based square (x, y): the four diagonals for
    enemy bishops/queens (and, at distance 1, kings and attacking pawns),
    the four ranks/files for enemy rooks/queens (and adjacent kings), then
    every knight ('H') move.

    board: 2D list where board[row][col] is 'OO' for an empty square or a
        two-char string <color><piece> (e.g. 'WQ').  Coordinates are
        1-based; the len(board)-1 bounds suggest the last row/col is a
        border -- TODO confirm against the board builder.
    turn: the color ('W' or 'B') whose king-safety is being tested; pawn
        attack direction depends on it.
    """
    tmp=True
    count=1
    while count<len(board)-1-y and count<len(board)-1-x and tmp==True:#Checks SE
        if board[y-1+count][x-1+count]!='OO':
            if board[y-1+count][x-1+count][0]!=turn:
                if board[y-1+count][x-1+count][1] in 'BQ':
                    tmp=False
                elif count==1:
                    # Adjacent squares only: enemy king, or a pawn attacking
                    # diagonally (direction depends on whose turn it is).
                    if board[y-1+count][x-1+count][1]=='K':
                        tmp=False
                    elif turn=='B' and board[y-1+count][x-1+count][1]=='P':
                        tmp=False
            # Any piece (friend or foe) blocks the ray beyond this square.
            break
        count+=1
    count=1
    while count<len(board)-1-y and count<x and tmp==True:#Checks SW
        if board[y-1+count][x-1-count]!='OO':
            if board[y-1+count][x-1-count][0]!=turn:
                if board[y-1+count][x-1-count][1] in 'BQ':
                    tmp=False
                elif count==1:
                    if board[y-1+count][x-1-count][1]=='K':
                        tmp=False
                    elif turn=='B' and board[y-1+count][x-1-count][1]=='P':
                        tmp=False
            break
        count+=1
    count=1
    while count<y and count<len(board)-1-x and tmp==True:#Checks NE
        if board[y-1-count][x-1+count]!='OO':
            if board[y-1-count][x-1+count][0]!=turn:
                if board[y-1-count][x-1+count][1] in 'BQ':
                    tmp=False
                elif count==1:
                    if board[y-1-count][x-1+count][1]=='K':
                        tmp=False
                    elif turn=='W' and board[y-1-count][x-1+count][1]=='P':
                        tmp=False
            break
        count+=1
    count=1
    while count<y and count<x and tmp==True:#Checks NW
        if board[y-1-count][x-1-count]!='OO':
            if board[y-1-count][x-1-count][0]!=turn:
                if board[y-1-count][x-1-count][1] in 'BQ':
                    tmp=False
                elif count==1:
                    if board[y-1-count][x-1-count][1]=='K':
                        tmp=False
                    elif turn=='W' and board[y-1-count][x-1-count][1]=='P':
                        tmp=False
            break
        count+=1
    count=1
    # Straight-line rays: rooks and queens ('RQ'), plus adjacent kings.
    while count<x and tmp==True:#Checks W
        if board[y-1][x-1-count]!='OO':
            if board[y-1][x-1-count][0]!=turn:
                if board[y-1][x-1-count][1] in 'RQ':
                    tmp=False
                elif count==1:
                    if board[y-1][x-1-count][1]=='K':
                        tmp=False
            break
        count+=1
    count=1
    while count<len(board)-1-x and tmp==True:#Checks E
        if board[y-1][x-1+count]!='OO':
            if board[y-1][x-1+count][0]!=turn:
                if board[y-1][x-1+count][1] in 'RQ':
                    tmp=False
                elif count==1:
                    if board[y-1][x-1+count][1]=='K':
                        tmp=False
            break
        count+=1
    count=1
    while count<y and tmp==True:#Checks N
        if board[y-1-count][x-1]!='OO':
            if board[y-1-count][x-1][0]!=turn:
                if board[y-1-count][x-1][1] in 'RQ':
                    tmp=False
                elif count==1:
                    if board[y-1-count][x-1][1]=='K':
                        tmp=False
            break
        count+=1
    count=1
    while count<len(board)-1-y and tmp==True:#Checks S
        if board[y-1+count][x-1]!='OO':
            if board[y-1+count][x-1][0]!=turn:
                if board[y-1+count][x-1][1] in 'RQ':
                    tmp=False
                elif count==1:
                    if board[y-1+count][x-1][1]=='K':
                        tmp=False
            break
        count+=1
    # Knight ('H') check: offsets (+-1, +-2) and (+-2, +-1); the
    # xCount != yCount filter removes the (+-1,+-1)/(+-2,+-2) diagonals
    # but NOT e.g. (1, -1) -- TODO confirm those are intended knight squares.
    xCount=-2
    while xCount<=2 and tmp==True:#Checks horses
        if xCount!=0:
            yCount=-2
            while yCount<=2 and tmp==True:
                if yCount!=0 and xCount!=yCount and y+yCount in range(1, len(board)) and x+xCount in range(1, len(board)):
                    if board[y-1+yCount][x-1+xCount][0]!=turn and board[y-1+yCount][x-1+xCount][1]=='H':
                        tmp=False
                yCount+=1
        xCount+=1
    return tmp
|
217697cebad58bddde3c3f083b296dcce096d8a0
| 18,429
|
def inc(self, register: int) -> int:
    """
    Name: Increment index register.
    Function: The 4-bit content of the designated index register is
              incremented by 1; the register wraps to zero on overflow
              (handled by increment_register).
    Syntax: INC <register>
    Assembled: 0110 <RRRR>
    Symbolic: (RRRR) + 1 --> RRRR
    Execution: 1 word, 8-bit code, 10.8 usec.
    Side-effects: The carry bit is not affected.

    Returns the new value of the register.
    """
    self.increment_register(register)
    self.increment_pc(1)
    new_value = self.REGISTERS[register]
    return new_value
|
727d3b5ae8a2f8a8c31c1ea8ec9cdb0af799d4f3
| 18,430
|
def hook_get_load_tx_query(table: str) -> str:
    """Returns the query that loads the transactions to aggregate.

    The returned query is expected to read every customer transaction from
    *table* (BQ_LTV_ALL_PERIODIC_TX_TABLE with the suffix corresponding to a
    specific date, usually multiple rows per customer) and aggregate them
    into a single row per customer, ready for prediction.  A typical
    implementation selects the per-transaction columns (date, time,
    clientId, device, product, price/revenue, campaign fields, ...) with
    ``FROM `{0}```.format(table).

    Args:
        table: full path of the BQ table where the periodic transactions are
            located.

    Returns:
        A string with the query; the default hook is a no-op and returns ''.
    """
    del table  # Unused by the default (no-op) implementation.
    return ""
|
940582234455274e2583599863d925f128c80982
| 18,432
|
def longest_in_list(l: list) -> str:
    """
    Return the string form of the longest item inside a list.

    Items are compared by the length of their ``str()`` representation;
    ties keep the earliest item.  Returns '' for an empty list.
    """
    longest: str = ''
    for item in l:
        # Bug fix: the original assigned the raw item (e.g. an int) to
        # `longest`, so the next iteration's len(longest) raised TypeError
        # on mixed-type lists; keep the string form, as the docstring says.
        text = str(item)
        if len(text) > len(longest):
            longest = text
    return longest
|
43de1bd7a237b336cdb5cb90eb7ecaa6663be2de
| 18,433
|
import logging
def add_console_handler(formatter, level=logging.INFO):
    """Create and return a StreamHandler to the console at the given level."""
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    handler.setLevel(level)
    return handler
|
2bfdfe427ea32ed206f36f78a105437b98014f67
| 18,434
|
from typing import Dict
def create_query_string(query_dict: Dict[str, str]):
    """Create a query string ('?k=v&...') from a dictionary; '' for an empty dict.

    Note: keys and values are not URL-encoded.
    """
    pairs = ['{}={}'.format(key, value) for key, value in query_dict.items()]
    if not pairs:
        return ''
    return '?' + '&'.join(pairs)
|
a83ded8a5cc516ea79547c161e84fc4caec4fe71
| 18,435
|
def root():
    """
    (0, 1, 2) The most basic web app in the world.
    """
    greeting = "Hello world"
    return greeting
|
ff36b8ac37bab9adfd72f9fd476d5ede0d419411
| 18,436
|
def PosNegZero(x):
    """Trichotomy test on a single variable: return a string saying whether
    the integer is positive, negative, or zero.  int -> str"""
    prefix = str(x)
    if x > 0:
        return prefix + " e positivo"
    if x < 0:
        return prefix + " e negativo"
    return prefix + " e zero"
|
3c363bdaa3618b670c3388b97f7b23f6fb2c5b59
| 18,439
|
from typing import Any
from contextlib import suppress
def cursorless_surrounding_pair(m) -> dict[str, Any]:
    """Expand to containing surrounding pair."""
    # Fall back to "any" when no explicit pair kind was captured.
    delimiter = getattr(m, "cursorless_surrounding_pair_scope_type", "any")
    scope_type = {
        "type": "surroundingPair",
        "delimiter": delimiter,
    }
    # The force-direction capture is optional.
    with suppress(AttributeError):
        scope_type["forceDirection"] = m.cursorless_delimiter_force_direction
    return {
        "type": "containingScope",
        "scopeType": scope_type,
    }
|
3e4f8013386bbc36a26b31f3ea50aa6209fe3d02
| 18,441
|
def change_coords(h, coords):
    """
    Update home coords [x, y] in place for one elf direction character.

    '^'/'v' move y by +1/-1; '>'/'<' move x by +1/-1; any other character
    leaves coords unchanged.  Returns the same list.
    """
    # direction char -> (axis index, delta)
    moves = {'^': (1, 1), '>': (0, 1), 'v': (1, -1), '<': (0, -1)}
    if h in moves:
        axis, delta = moves[h]
        coords[axis] += delta
    return coords
|
d576a10072d1fa792baccb35795134e1faf4df9e
| 18,443
|
def expected(decorator, func):
    """ Decorate ``func`` with ``decorator`` if ``func`` is not wrapped yet
    (wrapped functions carry an ``_api`` attribute). """
    if hasattr(func, '_api'):
        return func
    return decorator(func)
|
b2ea15907529245e50f5f0181e30b12706fa5f1e
| 18,444
|
def pytest_ignore_collect(path, config):
    """ Only load tests from .toml feature definition files. """
    # Ignore (return True for) everything that is not a .toml file.
    return path.ext != ".toml"
|
80d193ff28a7f2f903ec5d4dd09d13973e066dcf
| 18,446
|
def _resolve_role(current_user, profile):
"""What is the role of the given user for the given profile."""
if current_user:
if profile.user == current_user:
return "OWNER"
elif current_user.is_authenticated:
return "ADMIN"
else:
return "ANONYMOUS"
else:
return "SYSTEM"
|
627f39af34bab4a1fab386496042261a45af77d8
| 18,447
|
from typing import Optional
import warnings
def process_max_id(max_id: Optional[int], num_embeddings: Optional[int]) -> int:
    """Normalize max_id, accepting the deprecated num_embeddings as fallback.

    Raises ValueError when neither is given, or when both are given with
    different values.
    """
    if max_id is not None:
        if num_embeddings is not None and num_embeddings != max_id:
            raise ValueError("Cannot provide both, 'max_id' over 'num_embeddings'")
        return max_id
    if num_embeddings is None:
        raise ValueError("Must provide max_id")
    warnings.warn("prefer using 'max_id' over 'num_embeddings'", DeprecationWarning)
    return num_embeddings
|
44d09cd6101cb9037529b831f82969da6c40ce63
| 18,448
|
import math
def circle_touching_line(center, radius, start, end):
    """ Return true if the given circle intersects the given segment. Note
    that this checks for intersection with a line segment, and not an actual
    line.

    Solves the quadratic |A + t(B - A) - C|^2 = R^2 and accepts the segment
    when at least one root t lies in (or straddles) [0, 1].

    :param center: Center of the circle.
    :type center: Vector
    :param radius: Radius of the circle.
    :type radius: float
    :param start: The first end of the line segment.
    :type start: Vector
    :param end: The second end of the line segment.
    :type end: Vector
    """
    cc, r = center, radius
    p1, p2 = start, end
    a = (p2.x - p1.x)**2 + (p2.y - p1.y)**2
    b = 2 * (p2.x - p1.x) * (p1.x - cc.x) \
        + 2 * (p2.y - p1.y) * (p1.y - cc.y)
    c = cc.x**2 + cc.y**2 + p1.x**2 + p1.y**2 \
        - 2 * (cc.x * p1.x + cc.y * p1.y) - r**2
    discriminant = b**2 - 4 * a * c
    if discriminant < 0:
        # Line misses the circle entirely.
        return False
    if discriminant == 0:
        u = v = -b / float(2 * a)
    else:
        root = math.sqrt(discriminant)
        u = (-b + root) / float(2 * a)
        v = (-b - root) / float(2 * a)
    # Both intersection parameters on the same side outside [0, 1] -> miss.
    if u < 0 and v < 0:
        return False
    if u > 1 and v > 1:
        return False
    return True
|
51dd2c4d9f07bb68e326a7ea1d2c25e65fe93513
| 18,453
|
def file_loader(filepath):
    """Read the file at *filepath* and return its full text contents."""
    with open(filepath, "r") as handle:
        contents = handle.read()
    return contents
|
6890f239b0a24864893a1e8673326e8297ec2476
| 18,455
|
import functools
import operator
def prod(collection):
    """Return the product of all elements in the collection.

    An empty collection yields 1 (the multiplicative identity); the original
    raised TypeError on empty input because reduce had no initializer.
    """
    return functools.reduce(operator.mul, collection, 1)
|
225c9d437e1ade873de26bb6ef6b157daa3545a0
| 18,457
|
def user_defined_descriptions(path):
    """Returns a dict consisting of (unicode_char, description) tuples.

    Each line of the file is "<char> <description>"; the first whitespace run
    separates the key from the rest of the line.  Returns an empty dict when
    the file does not exist.
    """
    try:
        # 'with' closes the file handle; the original open() leaked it.
        with open(path) as fh:
            return dict(line.rstrip().split(maxsplit=1) for line in fh)
    except FileNotFoundError:
        return dict()
|
b79e54ead84b0e3ccae08d1e6efaf18924122e63
| 18,458
|
async def infer_type_return_(engine, x):
    """Infer the return type of return_ by awaiting the node's 'type' slot."""
    inferred = await x['type']
    return inferred
|
10b9041df4b79160df5f6604f8017eb1d60c952e
| 18,459
|
def relative_article_path_from(article_root, absolute_path_to_article) -> str:
    """Do three things: strip out the article root, remove the preceding slash,
    and remove the extension.
    """
    without_root = absolute_path_to_article.replace(article_root, "")
    relative = without_root.lstrip("/")
    return relative.rsplit(".", 1)[0]
|
13afb29a3c91f60b358a3c471c3f4b9fbb861185
| 18,460
|
import re
def get_tendermint_version():
    """Extracts the current Tendermint version from version/version.go.

    Returns the version string from the first TMCoreSemVer assignment found,
    or None when no line matches.
    """
    pattern = re.compile(r"TMCoreSemVer = \"(?P<version>([0-9.]+)+)\"")
    with open("version/version.go", "rt") as version_file:
        for line in version_file:
            match = pattern.search(line)
            if match:
                return match.group('version')
    return None
|
8147eb5e26c0b6675087d2801f5b7ad2b64df9bf
| 18,463
|
def dummy_database_injection_manager(dummy_database_injection_bindings, injection_manager):
    """Fixture for context manager with client and db injection."""
    def _inner(client):
        bindings = dummy_database_injection_bindings(
            {"bindings": {}, "constructor_bindings": {}})
        return injection_manager(bindings)
    return _inner
|
c49166208f85bfe22374e5eba51b62910866f4e3
| 18,464
|
import torch
def to_device(obj, device, non_blocking=False):
    """Recursively copy all tensors in a nested structure to *device*.

    Dicts, lists, and tuples are rebuilt with their tensor leaves moved.
    Any other object (str, int, None, ...) is returned unchanged -- the
    original silently returned None for such leaves, dropping data.
    """
    if isinstance(obj, torch.Tensor):
        return obj.to(device, non_blocking=non_blocking)
    if isinstance(obj, dict):
        return {k: to_device(v, device, non_blocking=non_blocking)
                for k, v in obj.items()}
    if isinstance(obj, list):
        return [to_device(v, device, non_blocking=non_blocking)
                for v in obj]
    if isinstance(obj, tuple):
        return tuple(to_device(v, device, non_blocking=non_blocking)
                     for v in obj)
    # Non-tensor, non-container leaf: pass through unchanged.
    return obj
|
c1128db2473000b03e95ec80d57168b0d56de0ba
| 18,465
|
def step(request):
    """step keyword argument for rolling window operations."""
    value = request.param
    return value
|
19633e9cbc493f96b898a880486595551fbcfccb
| 18,466
|
def is_increasing(channel_indices):
    """Check if a list of indices is sorted in ascending order.

    If not, the caller will have to convert it to a numpy array before
    slicing, which is a rather expensive operation.
    Returns: bool
    """
    previous = channel_indices[0]
    for current in channel_indices[1:]:
        if current < previous:
            return False
        previous = current
    return True
|
5ca1656169d24d2427ac0d52bf467162bed41f58
| 18,467
|
def beautify_name(name: str):
    """Return a human readable, title-cased version of a snake_case name."""
    words = name.replace("_", " ")
    return words.title()
|
7b50a9bf63cca7bbf343f354f4e3123b49204d03
| 18,468
|
import os
import sys
import subprocess
def rm(pool_path, name, network=False):
    """
    Remove a container and clean its namespace
    :param pool_path: The path to the pool where the container is stored
    :param name: The name of the container
    :param network: Whether the container has a network interface attached
    :return: True if the container was successfully removed; False when the
        container is missing or a cleanup command fails.
        NOTE(review): the chown failure path returns None instead of False --
        callers testing `result is False` will miss it; confirm intended.
    """
    container_path = os.path.join(pool_path, name)
    if not os.path.exists(container_path):
        print("Error: This container could not be found in path %s" % container_path, file=sys.stderr)
        return False
    # namespace.pid holds the PID whose namespaces the container runs in.
    namespace_path = os.path.join(container_path, "namespace.pid")
    with open(namespace_path, 'r') as f:
        namespace_pid = f.readline().strip()
        f.close()
    # Enter the container's user+mount (and optionally net) namespaces so the
    # chown below sees the container's view of the filesystem.
    if network:
        namespace_command = "/usr/bin/nsenter -t %s --user --mount --net sh -c" % namespace_pid
    else:
        namespace_command = "/usr/bin/nsenter -t %s --user --mount sh -c" % namespace_pid
    scope_command = "/usr/bin/systemd-run --user --scope --quiet %s" % namespace_command
    args = scope_command.split(" ")
    args.append("chown -R root:root %s" % container_path)
    output = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=sys.stderr)
    if output.returncode != 0:
        print("Error: while trying change ownership of container files", file=sys.stderr)
        return None
    # Killing the namespace PID tears down the container's namespaces.
    command = "kill %s" % namespace_pid
    output = subprocess.run(args=command.split(" "), stdout=subprocess.PIPE)
    if output.returncode != 0:
        print("Error: While cleaning namespace", file=sys.stderr)
        return False
    command = "rm -rf %s" % container_path
    output = subprocess.run(args=command.split(" "), stdout=subprocess.PIPE)
    if output.returncode != 0:
        print("Error: While cleaning rootfs", file=sys.stderr)
        return False
    return True
|
693c632cfd2b630ee9e99e21c1ac8c49a3ed5145
| 18,469
|
def limit(self, start_or_stop=None, stop=None, step=None):
    """
    Create a new table with fewer rows.
    See also: Python's builtin :func:`slice`.
    :param start_or_stop:
        If the only argument, then how many rows to include, otherwise,
        the index of the first row to include.
    :param stop:
        The index of the last row to include.
    :param step:
        The size of the jump between rows to include. (`step=2` will return
        every other row.)
    :returns:
        A new :class:`.Table`.
    """
    # NOTE(review): truthiness check means stop=0 (or step=0) falls through
    # to the single-argument form -- confirm stop=0 is never meaningful here.
    if stop or step:
        s = slice(start_or_stop, stop, step)
    else:
        # slice with one argument treats it as the *stop* bound, i.e.
        # "first N rows", matching the docstring.
        s = slice(start_or_stop)
    rows = self._rows[s]
    # Keep row names aligned with the selected rows, when present.
    if self._row_names is not None:
        row_names = self._row_names[s]
    else:
        row_names = None
    # _fork presumably carries over column metadata -- see its definition.
    return self._fork(rows, row_names=row_names)
|
b101ed9eba1b5771b7acbd555ae41c4365cea1d3
| 18,471
|
import importlib
def load_encoder(name):
    """
    Create an instance of the encoder implemented by module *name*.

    The module must expose a class called ``Encoder``; an encoder converts a
    message from one format to another.
    """
    encoder_module = importlib.import_module(name)
    encoder_class = encoder_module.Encoder
    return encoder_class()
|
7ea1125dc5be2387eefaf3690386e317f5f4558f
| 18,474
|
def format_datetime_iso(obj):
    """Render a datetime as an ISO-8601 string (YYYY-MM-DDTHH:MM:SS)."""
    return '{:%Y-%m-%dT%H:%M:%S}'.format(obj)
|
88d87a81d387f7dab906b2a9775208a1b82b3fce
| 18,475
|
def _verbosity_from_log_level(level):
"""Get log level from verbosity."""
if level == 40:
return 0
elif level == 20:
return 1
elif level == 10:
return 2
|
da3333c218eebdb380dc3df6869d299c485a428e
| 18,476
|
def _hasprefix(line, prefixes):
""" helper prefix test """
# if not isinstance(prefixes, tuple):
# prefixes = [prefixes]
return any(line == p or line.startswith(p + ' ') for p in prefixes)
|
bef141696e6385261545d30da527db8bfc4ad7cb
| 18,477
|
import os
import subprocess
def gen_func_anat_xfm(func_, ref_, xfm_, interp_):
    """Transform functional file (std dev) into anatomical space.

    Parameters
    ----------
    func_ : string
        functional scan
    ref_ : string
        path to reference file
    xfm_ : string
        path to transformation mat file
    interp_ : string
        interpolation measure string

    Returns
    -------
    new_fname : string
        path to the transformed scan (written to the current working directory)

    Raises
    ------
    subprocess.CalledProcessError
        if the ``applywarp`` command exits with a non-zero status
    """
    new_fname = os.path.join(os.getcwd(), 'std_dev_anat.nii.gz')
    cmd = ['applywarp', '--ref={0}'.format(ref_), '--in={0}'.format(func_),
           '--out={0}'.format(new_fname), '--premat={0}'.format(xfm_),
           '--interp={0}'.format(interp_)]
    # check_output already raises CalledProcessError on a non-zero exit; the
    # previous 'retcode' variable actually held stdout and was never used,
    # so the bare call is both sufficient and less misleading.
    subprocess.check_output(cmd)
    return new_fname
|
8cdd35eca9d8a57fbb2f65c0a6efd2c1ddac8962
| 18,478
|
import requests
def get_external_ip():
    """Get your external IP address as string.
    Uses httpbin(1): HTTP Request & Response Service
    """
    # httpbin echoes the caller's address under the "origin" key; .get()
    # returns None (rather than raising) if that key is ever absent.
    # NOTE(review): no timeout is set — the call can hang indefinitely if
    # httpbin.org is unreachable.
    return requests.get("http://httpbin.org/ip").json().get('origin')
|
525e23d76d44a94f3dce155dade4146c3d6d0b24
| 18,479
|
import tempfile
def _nipype_execution_config(
stop_on_first_crash=False, stop_on_first_rerun=False, crashdumpTempDirName=None
):
"""
This Function takes in...
:param stop_on_first_crash:
:param stop_on_first_rerun:
:param crashdumpTempDirName:
:return:
"""
stop_crash = "false"
stop_rerun = "false"
if stop_on_first_crash:
stop_crash = "true"
if stop_on_first_rerun:
# This stops at first attempt to rerun, before running, and before deleting previous results
stop_rerun = "true"
if crashdumpTempDirName is None:
crashdumpTempDirName = tempfile.gettempdir()
print("*** Note")
print((" Crash file will be written to '{0}'".format(crashdumpTempDirName)))
return {
"stop_on_first_crash": stop_crash,
"stop_on_first_rerun": stop_rerun,
"hash_method": "timestamp", # default
"single_thread_matlab": "true", # default # Multi-core 2011a multi-core for matrix multiplication.
# default # relative paths should be on, require hash update when changed.
"use_relative_paths": "false",
"remove_node_directories": "false", # default
"remove_unnecessary_outputs": "true", # remove any interface outputs not needed by the workflow
"local_hash_check": "true", # default
"job_finished_timeout": 25,
"crashdump_dir": crashdumpTempDirName,
}
|
224394b2b15cbdd7b41bcc041faa4f592049dbad
| 18,481
|
def convert_BSoup__to_text(soup):
    """
    Concatenate every text node of a BeautifulSoup object into one string.
    """
    pieces = (str(node) for node in soup.findAll(text=True))
    return ''.join(pieces)
|
74a07110e0bbad0ee4a19354037f75816fc0e2e3
| 18,482
|
import os
def first_launch():
    """Return True when no diary database file exists yet (first launch)."""
    return not os.path.exists('diary.db')
|
5c217725be2f72214280e200a64c694b086409a5
| 18,483
|
def interp_azimuth(az, az_0, az_1, dat_0, dat_1, fillvalue=-999.):
    """Linearly interpolate between two azimuth (or range) samples.

    When one of the two data values equals *fillvalue*, the other value is
    returned as-is; otherwise a standard linear interpolation at position
    *az* between *az_0* and *az_1* is performed.
    """
    if dat_0 == fillvalue:
        return dat_1
    if dat_1 == fillvalue:
        return dat_0
    return ((az_1 - az)*dat_0 + (az - az_0) * dat_1)/(az_1 - az_0)
|
45676cebc7ebe37d4dd176ff86811b53bdee723d
| 18,484
|
import json
import random
import time
from typing import Union
import pathlib
def save_json_file(json_file: Union[str, pathlib.Path], dictionary_to_save: dict, retries: int = 3) -> None:
    """
    Write a dictionary to disk as JSON. If the file exists, it is overwritten.

    :param json_file: JSON file to write into
    :param dictionary_to_save: data to serialize
    :param retries: if the file is locked for any reason, retry writing this number of times
    :raises PermissionError: if the file is still locked after all retries
    :return: None
    """
    assert isinstance(retries, int), "Retries parameter must be an integer"
    assert retries >= 0, "Retries must be a positive integer"
    assert isinstance(json_file, (str, pathlib.Path)), "json_file must be a pathlib.Path() or a string path"
    file_path = pathlib.Path(json_file)
    while retries >= 0:
        retries -= 1
        try:
            # Explicit UTF-8: with ensure_ascii=False the payload may contain
            # non-ASCII characters, which would fail (or be mangled) under a
            # non-UTF-8 platform default encoding such as cp1252 on Windows.
            with file_path.open("w", encoding="utf-8") as file:
                return json.dump(dictionary_to_save, file, ensure_ascii=False)
        except PermissionError:
            # Back off for a random sub-second interval before retrying.
            wait_time = random.random()
            time.sleep(wait_time)
    raise PermissionError(f"Permission issue while writing JSON: {file_path}")
|
6145c3b8d68bcdeaa5db9ec7771eebcdb65461ab
| 18,485
|
def remove_stop_words(lines, stop_words):
    """
    Remove stop words from segments.

    :param lines: list of segments [id, word, word..]
    :param stop_words: iterable of stop words (e.g. from spacy)
    :return: list of segments with stop words removed; segments shorter than
        two elements are kept unchanged
    """
    # Build a set once so each membership test is O(1) instead of
    # O(len(stop_words)) per token.
    stop_set = set(stop_words)
    filtered_lines = []
    for line in lines:
        if len(line) < 2:
            print('empty line')
            filtered_lines.append(line)
            continue
        # The first element is the segment id, never treated as a token.
        # (Also renamed from 'id', which shadowed the builtin; the leftover
        # '404e' debug print was removed.)
        segment_id = line[0]
        tokens = line[1:]
        filtered_lines.append([segment_id] + [t for t in tokens if t not in stop_set])
    return filtered_lines
|
a97f69a159ba5f7c5ea247e8d136f497f578349c
| 18,486
|
def convert_empty_value_to_none(event, key_name):
    """Normalise "empty" values under *key_name* to None (NULL in the database).

    Empty or whitespace-only strings, the literal strings "{}" and "[]",
    empty lists/dicts — and any other falsy value — all become None.

    :param event: A dictionary
    :param key_name: The key for which to check for empty values
    :return: The same (mutated) dictionary

    Examples:
    .. code-block:: python
        convert_empty_value_to_none({'a_field': '  '}, 'a_field')  # -> {'a_field': None}
        convert_empty_value_to_none({'a_field': '{}'}, 'a_field')  # -> {'a_field': None}
        convert_empty_value_to_none({'a_field': {}}, 'a_field')   # -> {'a_field': None}
    """
    if key_name not in event:
        return event
    value = event[key_name]
    if type(value) is str and (value.strip() == '' or value in ('{}', '[]')):
        event[key_name] = None
    # Any remaining falsy value (empty list/dict, 0, False, None) also maps to None.
    if not event[key_name]:
        event[key_name] = None
    return event
|
075b6fb14f22e201392539623454e0166b4c7448
| 18,490
|
def build_env_file(conf):
    """
    Render a mapping as newline-separated key=value lines.

    Parameters
    ----------
    conf : dict
        The key/value pairs.

    Returns
    -------
    str
        One "key=value" entry per line.
    """
    entries = []
    for key, value in conf.items():
        entries.append('{}={}'.format(key, value))
    return "\n".join(entries)
|
aa6dca869becec055392fef010e504559f276bb7
| 18,492
|
import os
def setup_entries_map(username):
    """
    Build a map of genre -> giveaways previously entered in that genre.

    Crawls the user's existing log files (creating the expected
    ``logs/<username>/{successful,failure}`` layout if missing) and collects
    the giveaway name recorded at the start of each comma-separated line.

    :param username: The current user.
    :return: A dict mapping genre name (log file name minus its 4-char
        extension) to the list of entered giveaways.
    """
    user_dir = os.path.join("logs", username)
    # Ensure the expected directory layout exists before scanning it
    # (exist_ok also covers the top-level "logs" directory).
    for folder in (user_dir,
                   os.path.join(user_dir, "successful"),
                   os.path.join(user_dir, "failure")):
        os.makedirs(folder, exist_ok=True)
    entries_map = {}
    for log_folder in os.listdir(user_dir):
        folder_path = os.path.join(user_dir, log_folder)
        for log_file in os.listdir(folder_path):
            genre = log_file[:-4]  # strip the ".txt"-style extension
            # 'with' ensures the handle is closed (the original leaked it).
            with open(os.path.join(folder_path, log_file)) as handle:
                for line in handle:
                    line = line.replace("\n", "")
                    giveaway = line[:line.find(",")]
                    # BUG FIX: the original initialised a brand-new genre
                    # with an empty list, silently dropping its first
                    # giveaway; setdefault records every entry.
                    entries_map.setdefault(genre, []).append(giveaway)
    return entries_map
|
79171346a5c6e42c0620bc1ad06d572cf1b15a45
| 18,494
|
import hashlib
def get_md5(str_):
    """
    Hash a string with MD5 (UTF-8 encoded).

    :param str_: origin str
    :return: hex digest
    """
    return hashlib.md5(str_.encode('utf-8')).hexdigest()
|
fb905d673ac7407fcaa3f70a822620dd38dbb5e6
| 18,495
|
def jsontopath_metric(path_metric):
    """Extract the relevant accumulated metrics from a JSON path-metric list.

    Returns a tuple of (SNR@0.1nm, SNR@bandwidth, OSNR@0.1nm,
    reference power, path bandwidth).
    """
    def pick(metric_type):
        # Accumulative value of the first entry of the requested type.
        return next(e['accumulative-value']
                    for e in path_metric if e['metric-type'] == metric_type)

    # OSNR-bandwidth is present in the data but intentionally unused.
    return (pick('SNR-0.1nm'),
            pick('SNR-bandwidth'),
            pick('OSNR-0.1nm'),
            pick('reference_power'),
            pick('path_bandwidth'))
|
e2fadb9e089b727ee99c6aea2a9819396f088dfa
| 18,496
|
def stringify_value(value):
    """Convert any value to its text form.

    ``None`` becomes the empty string; objects exposing ``isoformat()``
    (dates/times) are rendered in ISO format; everything else via ``str``.
    """
    if value is None:
        return u''
    if hasattr(value, 'isoformat'):
        value = value.isoformat()
    return type(u'')(value)
|
47c3939f06a667eb8e5f8951be827ea1dff325b7
| 18,497
|
def add_to_config(d, config):
    """Set each key of *d* as an attribute on *config*.

    Single-element list values are unwrapped to their sole element.

    :param d: mapping of attribute names to values
    :param config: object (typically a config class) to set attributes on
    :return: the same *config*, for chaining
    """
    # .iteritems() was Python-2-only and raises AttributeError on Python 3.
    for key, value in d.items():
        if isinstance(value, list) and len(value) == 1:
            value = value[0]
        setattr(config, key, value)
    return config
|
85ea7a17266ab996644b70cc9093cba21fb2f427
| 18,498
|
def _anz_spalt(self):
    """Return the number of columns (German: Spaltenanzahl)."""
    return self.cols
|
e570afdf074d4e71bcb3deb418acdeb606dfba51
| 18,499
|
def create_windows(data):
    """Derive an 'Infissi' (window/glazing) feature in place.

    Each row's 'Altre_caratteristiche' list is scanned for entries that
    mention 'Infissi'; the glazing type is classified as 'doppio' or
    'triplo', defaulting to 'singolo' when neither word appears.
    """
    infissi_entries = data['Altre_caratteristiche'].apply(
        lambda features: str([f for f in features if 'Infissi' in f]))
    glazing = infissi_entries.str.extract('(doppio|triplo)', expand=False)
    data['Infissi'] = glazing.fillna('singolo')
    return data
|
baef67bdfd9b4e98db1716b4d12b4924724c7a20
| 18,501
|
def make_linear_function(p1, p2):
    """
    Return the linear function passing through points p1 and p2.

    For example make_linear_function((1, 3), (2, 5)) returns the
    function f(x) = 2x + 1.
    """
    x1, y1 = p1
    x2, y2 = p2
    slope = (y2 - y1) / (x2 - x1)
    intercept = y1 - x1 * slope

    def line(x):
        return x * slope + intercept
    return line
|
a358e1757cfa1cd8e7e0a729027659349ec22985
| 18,502
|
from typing import Tuple
from typing import Any
from typing import Callable
from typing import List
def expand_params_factory(
    expander: Tuple[Any, ...],
) -> Callable[..., List[Tuple[Any, ...]]]:
    """Factory to generate a function which can expand testing parameters.

    Args:
        expander (Tuple[Any]): a tuple of values to expand ``params`` by

    Returns:
        engine (callable): engine to add all items in ``expander`` to ``params``.
    """
    def engine(*params: Any) -> List[Tuple[Any, ...]]:
        combos: List[Tuple[Any, ...]] = []
        for extra in expander:
            for param in params:
                # Lists/tuples are flattened into the result tuple; scalars wrapped.
                base = tuple(param) if isinstance(param, (list, tuple)) else (param,)
                combos.append(base + (extra,))
        return combos
    return engine
|
7955c10d7d4dbde922921898ee3fd9518107f10b
| 18,504
|
def _squash_devicegroup(device_group, device_group_hierarchy_children):
"""Recursive function for determining all of a device group's child device groups"""
result = [device_group]
if device_group in device_group_hierarchy_children:
for child_dg in device_group_hierarchy_children[device_group]:
result += _squash_devicegroup(child_dg, device_group_hierarchy_children)
return sorted(result)
|
7c95a8373764eb920ab799d2618f98eea2af311c
| 18,505
|
import os
def _replace_file_suffix(file_name, suffix):
"""Replace the .XXX suffix in file_name with suffix."""
return os.path.splitext(file_name)[0] + suffix
|
f0b94ee666d81e6d6dd09d7231ec37609f87b22a
| 18,506
|
def plot_perm(filey):
    """Return the largest permutation value found in *filey*.

    Each line is tab-separated; the second field is parsed as a float and
    the maximum seen (floored at 0.0) is returned as the cutoff.

    :param filey: path to the permutation results file
    :return: the cutoff (float)
    """
    cutoff = 0.0
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filey) as handle:
        for raw_line in handle:
            fields = raw_line.rstrip().split("\t")
            value = float(fields[1])
            if value > cutoff:
                cutoff = value
    return cutoff
|
6bdd65faa2c25543ca422488e343eb01fc6ea3f4
| 18,507
|
def time2mins(time_taken):
    """Convert a duration from seconds to minutes.

    Parameters
    ----------
    time_taken : float
        Time in seconds

    Returns
    -------
    float
        Minutes
    """
    seconds_per_minute = 60.
    return time_taken / seconds_per_minute
|
11877ff009e010f6fa633abf0aff87aabbd44ce0
| 18,509
|
def osim_vector_to_list(array):
    """Convert a SimTK::Vector to a plain Python list."""
    return [array[index] for index in range(array.size())]
|
a7aa2cd14ebf8c94a52a90a7add541dfa6db4cde
| 18,510
|
import sympy
import six
def _coprime_density(value):
    """Return the asymptotic density (> 0) of integers coprime to *value*.

    Computed as the product over the distinct prime factors p of *value*
    of (1 - 1/p).
    """
    factors = sympy.factorint(value)
    density = 1.0
    # factorint returns a {prime: multiplicity} dict; iterating it directly
    # yields the primes, so the Python-2 compat six.iterkeys() is redundant.
    for prime in factors:
        density *= 1 - 1 / prime
    return density
|
1017464c175a68e1ae510a8edf3d2f4ee4b74ba5
| 18,511
|
def get_binary_representation(n, num_digits):
    """
    Zero-padded binary representation of *n*, exactly *num_digits* wide.

    Used as a helper by combinations() when constructing power sets.

    Parameters:
        n and num_digits are non-negative ints
    Returns:
        a num_digits str that is a binary representation of n
    Raises:
        ValueError when n needs more than num_digits binary digits
    """
    # n == 0 maps to the empty string (then fully zero-padded), matching
    # the original digit-by-digit loop.
    bits = format(n, 'b') if n > 0 else ''
    if len(bits) > num_digits:
        raise ValueError('not enough digits')
    return bits.zfill(num_digits)
|
2b2d1e8bc4f964d805e48a8dd0525ee19aa7ab4e
| 18,514
|
def voltageDivision(v_in, r_list_ordered, showWork=False):
    """
    Divide *v_in* across series resistors in direct proportion to their
    resistance: the larger the resistance, the larger the voltage drop.
    """
    r_total = sum(r_list_ordered)
    voltages = []
    for resistance in r_list_ordered:
        voltages.append(resistance / r_total * v_in)
    if showWork:
        print("Resistor ordered voltage division: ", voltages)
        print("Adjust directions as necessary after getting result.")
    return voltages
|
19bd294e0c5444365e5f3c27267938947ace460c
| 18,517
|
import os
def get_files(rootdir, suffix):
    """Collect the file paths carrying a given suffix.

    Parameters
    ----------
    rootdir : str
        The root directory to look under (symlinks are followed)
    suffix : str
        The file suffix (without the dot) of the files to keep

    Returns
    -------
    fps : list, str
        List of matching file paths, in os.walk order
    """
    wanted_ending = ".%s" % suffix
    matches = []
    for dirpath, _dirnames, filenames in os.walk(rootdir, followlinks=True):
        matches.extend(os.path.join(dirpath, name)
                       for name in filenames if name.endswith(wanted_ending))
    return matches
|
134a1fb44d5d4034546cf8a4eb3f47f7b6aa43e2
| 18,520
|
def merge(list1, list2):
    """
    Merge two sorted lists.

    Returns a new sorted list containing those elements that are in
    either list1 or list2. Iterative, because recursion would generate
    too many calls for reasonably sized lists.

    When elements compare equal, the one from list2 is taken first,
    matching the original behaviour.
    """
    merged_list = []
    i = j = 0
    # Index-based walk: the original popped from the front of list copies,
    # making each pop O(n) and the whole merge O(n^2); this is O(n) total.
    while i < len(list1) and j < len(list2):
        if list1[i] >= list2[j]:
            merged_list.append(list2[j])
            j += 1
        else:
            merged_list.append(list1[i])
            i += 1
    # Append whatever remains of the list that was not exhausted.
    merged_list.extend(list1[i:])
    merged_list.extend(list2[j:])
    return merged_list
|
c4398faf890337d10400f9c71b49f2de7376f826
| 18,521
|
def parse_named_columns(output):
    """Return rows from a table string `output` as a sequence of dicts.

    The first row should contain whitespace delimited column names; each
    subsequent row contains whitespace delimited column values:

        col1_name      col2_name     ...
        col1_row1_val  col2_row1_val ...
        ...

    Returns [ {col1_name: col1_row1_val, col2_name: col2_row1_val, ...}, ... ]

    Each dict also maps "_" to itself, so it is directly usable as a
    namespace for eval().
    """
    lines = output.splitlines()
    column_names = lines[0].split()
    rows = []
    for row_line in lines[1:]:
        row = dict(zip(column_names, row_line.split()))
        row["_"] = row  # self-reference for eval() namespaces
        rows.append(row)
    return rows
|
8feefde468c94bdd61cc8b4ec8be9ce54a128355
| 18,522
|
import os
def load_camera_params(hf, base_path):
    """Load h36m camera parameters.
    Args:
        hf (file object): HDF5 open file with h36m cameras data
        base_path (str): Path or key inside hf to the camera we are interested in.
    Returns:
        R (numpy.array): 3x3 Camera rotation matrix.
        T (numpy.array): 3x1 Camera translation parameters.
        f (numpy.array): 2x1 Camera focal length.
        c (numpy.array): 2x1 Camera center.
        k (numpy.array): 3x1 Camera radial distortion coefficients.
        p (numpy.array): 2x1 Camera tangential distortion coefficients.
        name (str): String with camera id.
    """
    R = hf[os.path.join(base_path, "R")][:]
    # NOTE(review): R appears to be stored transposed in the file and is
    # undone here — confirm against the dataset's storage convention.
    R = R.T
    T = hf[os.path.join(base_path, "T")][:]
    f = hf[os.path.join(base_path, "f")][:]
    c = hf[os.path.join(base_path, "c")][:]
    k = hf[os.path.join(base_path, "k")][:]
    p = hf[os.path.join(base_path, "p")][:]
    name = hf[os.path.join(base_path, "Name")][:]
    # "Name" is stored as an array of character codes; rebuild it as a str.
    name = "".join(chr(item) for item in name)
    return R, T, f, c, k, p, name
|
0c4f802f84ff51b6e7d8dd26603cea9712d45d51
| 18,523
|
def string_address(address):
    """Render a 7-element address (6 octet bytes + type flag) as a string.

    Produces "aa:bb:cc:dd:ee:ff" followed by "  random " or "  public "
    depending on the flag byte; returns None when fewer than 7 elements
    are supplied.
    """
    if len(address) < 7:
        return None
    octets = ':'.join(format(address[i], '02x') for i in range(6))
    kind = ' random ' if address[6] else ' public '
    return octets + ' ' + kind
|
23995bca8ce57ae341113eb9273ab2fcca7fbe96
| 18,525
|
from typing import Callable
def combine_predicates(*predicates: Callable[..., bool]) -> Callable[..., bool]:
    """
    Combine multiple predicates into a single one that is true only when
    every constituent predicate is satisfied (vacuously true when empty).
    """
    def check_all(*args, **kwargs) -> bool:
        for predicate in predicates:
            if not predicate(*args, **kwargs):
                return False
        return True
    return check_all
|
5805c4bb884dc7c2797353d258bf29c35104b95d
| 18,527
|
def get_centers(img_coordinates):
    """
    Return [center_row, center_col, row_radius, col_radius] for a
    (min_row, min_col, max_row, max_col) bounding box, truncated to ints.
    """
    min_row, min_col, max_row, max_col = img_coordinates
    return [
        int((max_row + min_row) / 2),
        int((max_col + min_col) / 2),
        int((max_row - min_row) / 2),
        int((max_col - min_col) / 2),
    ]
|
4e70d1c95e0a705fb03bf2ea1a109cfb276a0fa4
| 18,528
|
def getLineAndColumnFromSyntaxItem(syntaxItem):
    """
    Return the (line, column) tuple of a tree node, walking up through its
    parents until a node carrying position information is found. Either
    value is None when no ancestor provides it.
    """
    line = column = False
    node = syntaxItem
    # Climb the parent chain only while NEITHER coordinate has been found,
    # using False as the "not found" sentinel (0 is a valid position).
    while line is False and column is False and node:
        line = node.get("line", False)
        column = node.get("column", False)
        node = node.parent if node.hasParent() else None
    return (None if line is False else line,
            None if column is False else column)
|
9094b11865b7d8a477df7b5f480673c06c1981c7
| 18,530
|
def PolsAccHosp(t):
    """Number of policies: Accidental Hospitalization.

    Always 0 for any time index *t* in this model.
    """
    return 0
|
3116844ee9170f283b17b1d834b55594535b63ea
| 18,531
|
def get_rdml_lib_version():
    """Return the version string of the RDML library.

    Returns:
        The version string of the RDML library.
    """
    rdml_version = "1.0.0"
    return rdml_version
|
f851f3a51cc341010e993aac04bf8ffc40168e74
| 18,532
|
import fnmatch
def fnmatch_mult(name, patterns):
    """Return True if *name* matches at least one shell-style pattern."""
    for pattern in patterns:
        if fnmatch.fnmatch(name, pattern):
            return True
    return False
|
d2edf50c42405c4231d075f232b4b9ac7d3a180a
| 18,533
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.