content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def needs_escaping(text: str) -> bool:
    """Check whether ``text`` contains a character that needs escaping.

    A character needs escaping when it would have to be written as an
    escape sequence inside a double-quoted string literal: the C0
    control escapes (\\a \\b \\f \\n \\r \\t \\v), the double quote and
    the backslash.

    :param text: the string to inspect
    :return: True if any character requires escaping, False otherwise
    """
    # Set membership replaces the original nine-branch elif chain
    # (which also carried a dead `else: pass` arm).
    escape_set = {"\a", "\b", "\f", "\n", "\r", "\t", "\v", '"', "\\"}
    return any(character in escape_set for character in text)
|
43110dacad107ab835e76997edc56bba113694b8
| 695,854
|
import torch
def _update_mem(inp_tokens, memory):
    """Append the newest predicted tokens to the decoding memory.

    Called once per decoding step in transformer searches: the tokens
    predicted at the previous step are concatenated onto the running
    memory of all predictions so far.

    Arguments:
    -----------
    inp_tokens : tensor
        Predicted token of the previous decoding step.
    memory : tensor or None
        All previously predicted tokens (None on the first step).
    """
    step_tokens = inp_tokens.unsqueeze(1)
    if memory is None:
        return step_tokens
    return torch.cat([memory, step_tokens], dim=-1)
|
ef4dc9ed0fd32d207cf584a3c180bf0bb3d6082f
| 695,856
|
def CalculateBoxSize(nmol, molwt, density):
    """
    Calculate the edge length of a cubic solvent box.

    Parameters
    ----------
    nmol : int
        Number of molecules desired for the box
    molwt : float
        Molecular weight in g/mol
    density : float
        Estimated density in kg/m3 (this should be about 40-50% lower
        than the real liquid density)

    Returns
    -------
    float
        Length of a cubic solvent box in nm.
    """
    avogadro = 6.022e23
    # Total mass of the box in kg (molwt is g/mol, hence the /1000).
    total_mass_kg = nmol * molwt / 1000 / avogadro
    volume_m3 = total_mass_kg / density
    # Cube-root gives the edge in metres; /1e-9 converts to nm.
    return volume_m3**(1./3)/1e-9
|
9508b740c07edd78e33be9cf6f30414e32ab8953
| 695,858
|
def tril_count_from_matrix_dim(matrix_dim: int):
    """Number of entries on or below the diagonal of a square matrix
    of shape `(matrix_dim, matrix_dim)`.

    Args:
        matrix_dim (int): Dimension of square matrix.
    Returns:
        int: Count of lower-triangular terms, matrix_dim*(matrix_dim+1)//2.
    """
    # (d**2 - d)//2 + d simplifies to the triangular number d*(d+1)//2.
    return matrix_dim * (matrix_dim + 1) // 2
|
d1e350986a09c239959de77b821003b5189f6c98
| 695,859
|
def get_bound_indices(str1, str2):
    """Return the free (gap) and bound (non-gap) indices of str1 and str2.

    An index is "bound" when the character at that position is not the
    gap symbol "-", and "free" otherwise.

    :param str str1: the first string to align
    :param str str2: the second string to align
    :return: the free indices of both strings followed by their bound indices
    :rtype: tuple(list, list, list, list)
    """
    free_str1_indices = [i for i, ch in enumerate(str1) if ch == "-"]
    bound_str1_indices = [i for i, ch in enumerate(str1) if ch != "-"]
    free_str2_indices = [i for i, ch in enumerate(str2) if ch == "-"]
    bound_str2_indices = [i for i, ch in enumerate(str2) if ch != "-"]
    return (
        free_str1_indices,
        free_str2_indices,
        bound_str1_indices,
        bound_str2_indices,
    )
|
8734e23aff50c855cf88044106f501bc90b7e5dc
| 695,860
|
def sgd(l_rate, parameters, grads):
    """
    Stochastic Gradient Descent: build (param, updated_param) pairs.

    Parameters
    ----------
    :type l_rate: theano.tensor.scalar
    :param l_rate: Initial learning rate
    :type parameters: theano.shared
    :param parameters: Model parameters to update
    :type grads: Theano variable
    :param grads: Gradients of cost w.r.t. parameters
    """
    # One (param, param - lr * grad) pair per parameter/gradient pair.
    return [(param, param - l_rate * grad)
            for param, grad in zip(parameters, grads)]
|
aaa1d11788669801b4edd89aca29b38258043ff1
| 695,861
|
def formatResponse(subject, response):
    """Format the server/client response for display.

    Parameters
    ----------
    subject : str
        Identifies whether the message is from CLIENT or SERVER.
    response : str
        Raw response, e.g. "Acertou navio 3 A 5" (result, ship type,
        remaining hits, row, column) or "Errou x 4 B 7".

    Returns
    -------
    formatedResponse : str
        Human-readable, multi-line version of the response.
    """
    # Fixes: removed the unused `global clientHitsCounter` statement and
    # the backslash line-continuations *inside* the string literals,
    # which silently embedded the source indentation into the output.
    parts = response.split(" ")
    if parts[0] == "Acertou":
        # Hit: parts = [result, ship_type, remaining, row, col]
        formatedResponse = ("\n{}: \n{} o tiro \nTipo navio acertado: {} "
                            "\nTiro do Server: {} {} \nFaltam {} acertos").format(
            subject, parts[0], parts[1], parts[3], parts[4], parts[2]
        )
    else:
        # Miss: parts = [result, _, remaining, row, col]
        formatedResponse = ("\n{}: \n{} o tiro "
                            "\nTiro do Server: {} {} \nFaltam {} acertos").format(
            subject, parts[0], parts[3], parts[4], parts[2]
        )
    return formatedResponse
|
a87baba551ee8afe684c11b9f3be4a33c4033a90
| 695,863
|
import os
import subprocess
def createThumbnail(originPath, destinationPath, size='200x200') :
    """Create a thumbnail of ``originPath`` at ``destinationPath``.

    Shells out to ImageMagick's ``convert``; ``size`` is a geometry
    string (maxWidth x maxHeight, default 200x200) and the aspect ratio
    is preserved. The destination directory is created if it does not
    already exist. Returns the ``subprocess.CompletedProcess`` result.
    """
    print('Create thumbnail -> %s' % destinationPath)
    # Ensure the destination directory exists before convert writes to it.
    destDirPath = os.path.split(destinationPath)[0]
    if not os.path.exists(destDirPath) :
        os.makedirs(destDirPath)
    # -auto-orient honours EXIF rotation; -thumbnail resizes within the
    # bounds while keeping ratio. timeout=3 guards against convert hanging.
    return subprocess.run(['convert', originPath, '-auto-orient', '-thumbnail', size, destinationPath], timeout=3)
|
da9691d9fb02cf6b98e9be911d96a5844315ac20
| 695,864
|
def nearest(last, unvisited, D):
    """Return the node in ``unvisited`` that is closest to ``last``.

    ``D`` is indexable as ``D[a, b]`` giving the distance between nodes.
    ``unvisited`` must be non-empty.
    """
    best = unvisited[0]
    best_dist = D[last, best]
    for candidate in unvisited[1:]:
        candidate_dist = D[last, candidate]
        if candidate_dist < best_dist:
            best = candidate
            best_dist = candidate_dist
    return best
|
8b9ad31fbcba52ee9b9bfbf7c2b0caa78959e6dc
| 695,865
|
import logging
import sys
def config_logging():
    """Configure base logging: root logger at INFO, output to stdout.

    Returns:
        logging.Logger -- the configured root logger
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_format = ("[%(asctime)s][%(levelname)s] %(name)s "
                  "%(filename)s:%(funcName)s:%(lineno)d | %(message)s")
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(log_format))
    logger.addHandler(handler)
    return logger
|
1c1ffff361912b6f4d0875813998a8f2a24c4ad3
| 695,866
|
import hashlib
def md5_file_hash(file_path):
    """
    Compute the MD5 hash of a file's contents, streamed in 4 KiB chunks.

    :param file_path: file's path (with extension) to open for reading
    :return: hex representation of the md5 hash
    """
    digest = hashlib.md5()
    with open(file_path, "rb") as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
|
be8241fd0c254bbfc4d0e1e4330cf36228e0a040
| 695,868
|
def line_coeff_from_endpoints(p1, p2):
    """Coefficients (a, b, c) of the line a*x + b*y + c = 0 through two points.

    The points only need ``.x`` and ``.y`` attributes.
    """
    delta_y = p1.y - p2.y
    delta_x = p2.x - p1.x
    # Substitute p1 into a*x + b*y + c = 0 and solve for c.
    offset = -delta_y * p1.x - delta_x * p1.y
    return delta_y, delta_x, offset
|
fdbed65cbd3dbabd920817367005c8f05c7dadaf
| 695,869
|
import math
def butterfly(theta: float) -> float:
    """Evaluate the butterfly function e**sin(theta) - 2*cos(4*theta)."""
    growth = math.e ** math.sin(theta)
    ripple = 2 * math.cos(4 * theta)
    return growth - ripple
|
5d48df3df396fd666770f61e841b3dee2e967ab9
| 695,870
|
from typing import List
from typing import Optional
from typing import Set
def create_exp_name(flags_and_values: List[str],
                    flag_skip_set: Optional[Set[str]] = None,
                    skip_paths: bool = False) -> str:
    """
    Creates an experiment name based on the command line arguments (besides dataset and paths).
    Example: "--dataset multinews --sinkhorn" --> "dataset=multinews_sinkhorn"

    NOTE(review): value-less (boolean) flags are rendered with an
    explicit "=True" below, so the example would actually produce
    "dataset=multinews_sinkhorn=True" — confirm which form is intended.

    :param flags_and_values: The command line flags and their values.
    :param flag_skip_set: A set of flags to skip.
    :param skip_paths: Whether to skip paths (i.e. any flag with a value with a "/" in it).
    :return: An experiment name created based on the command line arguments.
    """
    # Remove "-" from flags so "--flag" and "flag" compare equal.
    flag_skip_set = flag_skip_set or set()
    flag_skip_set = {flag.lstrip('-') for flag in flag_skip_set}
    # Extract flags and values; a value is attributed to the most recent
    # non-skipped flag. Values following a skipped flag are dropped.
    args = {}
    current_flag = None
    for flag_or_value in flags_and_values:
        if flag_or_value.startswith('-'):
            flag = flag_or_value.lstrip('-')
            current_flag = flag if flag not in flag_skip_set else None
            if current_flag is not None:
                args[current_flag] = []
        elif current_flag is not None:
            args[current_flag].append(flag_or_value)
    # Drop any flag whose values contain a path-like "/" when requested.
    # list() snapshot allows deletion during iteration.
    if skip_paths:
        for key, values in list(args.items()):
            if any('/' in value for value in values):
                del args[key]
    # Flags without values are treated as booleans set to True.
    for key, values in args.items():
        if len(values) == 0:
            values.append('True')
    exp_name = '_'.join(f'{key}={"_".join(values)}' for key, values in args.items())
    if exp_name == '':
        exp_name = 'default'
    return exp_name
|
e8c6c114a62146a5a3200e88315b1921c28b258f
| 695,871
|
def extract_plotly_plot_from_html(html_string, div_id):
    """
    extract_plotly_plot_from_html
    =============================
    Extract the "Plotly.newPlot(...)" javascript call from an html string
    created by the python plotly ``.to_html()`` function and re-target it
    at ``div_id``. The rest of the html string is discarded.

    Parameters:
    -----------
    1) html_string: (str) An html string create for a plotly plot using the python plotly function '.to_html'
    2) div_id: (str) A div id to use for the extracted plot

    Returns:
    +++++++
    1) (str) A string that represents the plotly javascript script for a plot with an updated div id
    """
    plot_string = "Plotly.newPlot("
    end_brace = ")"
    ## Take the text after the Plotly.newPlot( marker, then the chunk
    ## before the call's final ")".
    ## NOTE(review): split(end_brace)[-2] assumes the structure of
    ## .to_html() output (the newPlot call followed by exactly one
    ## trailing ")" segment) — verify against current plotly output.
    plotly_plot_string = html_string.strip().split(plot_string)[1].split(end_brace)[-2]
    ## Replace the first argument (everything up to the first comma,
    ## i.e. the original div id) with the caller-provided div id.
    plotly_plot_string = plotly_plot_string.replace(
        plotly_plot_string[0 : plotly_plot_string.find(",")], "'" + div_id + "'"
    )
    ## Reassemble the call: marker + patched arguments + closing brace.
    return plot_string + plotly_plot_string + end_brace
|
eb78d196411151fed84921261de218d0c1f432ea
| 695,872
|
def bisect_left(func, val, low, high):
    """
    Like bisect.bisect_left, but works on functions.
    Return the leftmost index i in [low, high] such that all indices
    before i yield values < val and all from i on yield values >= val.
    >>> bisect_left([1,2,3,3,4].__getitem__, 3, 0, 4)
    2
    >>> bisect_left([1,2,3,3,4].__getitem__, 4, 0, 4)
    4
    >>> bisect_left([1,2,3,6,8].__getitem__, 4, 0, 4)
    3
    """
    lo, hi = low, high
    while lo < hi:
        mid = (lo + hi) // 2
        # Strictly smaller values lie left of the insertion point.
        if func(mid) < val:
            lo = mid + 1
        else:
            hi = mid
    return lo
|
de7b72585657c183176b4cd1c6b5301e0f837a01
| 695,873
|
def is_shared(resource):
    """Return True when the resource's object is flagged as shared.

    Missing 'shared' keys default to False.
    """
    obj = resource['object']
    return obj.get('shared', False)
|
dd54a631cff0e79b00942ca4b1e43b1fb2a70c04
| 695,874
|
def _escape(message):
    """Escape some characters in a message. Make them HTML friendly.

    Positional arguments:
    message -- the string to process.

    Returns:
    Escaped string.
    """
    # NOTE(review): the replacement values in the source dump were
    # corrupted (the HTML entities were decoded back to the literal
    # characters, leaving no-op replacements and a broken "'''" token).
    # Restored to the entities this function evidently intended.
    translations = {
        '"': '&quot;',
        "'": '&#39;',
        '`': '&lsquo;',
        '\n': '<br>',
    }
    for original, escaped in translations.items():
        message = message.replace(original, escaped)
    return message
|
f92b394fcefc9cc9859f4bf8df2c5ea959ddac93
| 695,875
|
def three_digits_palindrome():
    """Return all palindromes that are products of two three-digit numbers.

    The result is sorted; a product reachable as a*b with a != b appears
    once per ordered pair (i.e. twice), matching the exhaustive search.

    Returns:
        list[int]: sorted palindromic products.
    """
    palindromes = []
    # Three-digit numbers are 100..999 inclusive; the original
    # range(100, 999) stopped at 998, silently dropping any product
    # involving 999 (off-by-one bug).
    for first in range(100, 1000):
        for second in range(100, 1000):
            product = first * second
            text = str(product)
            if text == text[::-1]:
                palindromes.append(product)
    return sorted(palindromes)
|
7438e3952ee089d5f1b8c4c0c1562a63dc1a0ef5
| 695,877
|
def mongo_query(**kwargs):
    """Create a MongoDB query dict from a set of optional conditions.

    Recognised keyword arguments:
    start_date -- inclusive lower bound on CreationDate ($gte)
    end_date -- exclusive upper bound on CreationDate ($lt)
    exclude_closed -- value copied verbatim into the 'Closed' field
    """
    query = {}
    if 'start_date' in kwargs:
        query.setdefault('CreationDate', {})['$gte'] = kwargs['start_date']
    if 'end_date' in kwargs:
        query.setdefault('CreationDate', {})['$lt'] = kwargs['end_date']
    if 'exclude_closed' in kwargs:
        query['Closed'] = kwargs['exclude_closed']
    return query
|
6a6636c8d44eb267f6e33fafbe46b8ccf438e37b
| 695,878
|
def h1(curr_state, goal_dict):
    """
    Manhattan-distance heuristic from the current state to the goal.

    Parameters:
        curr_state(np.ndarray): A 3x3 numpy array with each cell containing unique elements
        goal_dict(dict[int, tuple[int, int]]): A mapping of cell contents to a tuple
            containing its goal indices

    Returns:
        h(int): Heuristic value (sum of per-cell Manhattan distances)
    """
    rows, cols = curr_state.shape
    total = 0
    for r in range(rows):
        for c in range(cols):
            goal_r, goal_c = goal_dict[curr_state[r][c]]
            total += abs(r - goal_r) + abs(c - goal_c)
    return total
|
e7d353dcfe5dacee5319dc7b8c4fbae43294acd3
| 695,879
|
import importlib
def plugin_import(plugin):
    """Import and return a plugin module given its dotted-path name.

    :param plugin: Python import in dot notation.
    :type plugin: String
    :returns: Object -- the imported module
    """
    # The package anchor only matters for relative (leading-dot) paths.
    module = importlib.import_module(plugin, package="directord")
    return module
|
147c1c053eda10935c1f597cbde1a2d71451f843
| 695,880
|
def arrayManipulation(n, queries):
    """
    Apply range-add queries to a zero array and return its maximum.

    Args:
        n (int): len of zero arr.
        queries (list): 2d list of [start, end, amount] rows (1-based,
            inclusive range).

    Returns:
        int: max element after all queries."""
    # Difference array: add at the range start, subtract just past the
    # end, so one prefix-sum pass reconstructs every element.
    diff = [0] * (n + 1)
    for start, end, amount in queries:
        diff[start - 1] += amount
        diff[end] -= amount
    best = 0
    running = 0
    for delta in diff:
        running += delta
        if running > best:
            best = running
    return best
|
14ace75fd238fd0d7086dca9b973b399831e94cd
| 695,881
|
def to_selector(labels):
    """
    Transform a label mapping into a "k=v,k=v" selector string.
    """
    return ",".join("{0}={1}".format(key, value)
                    for key, value in labels.items())
|
fe17b745f99fb2a5452bbe31264b69c206ece14a
| 695,882
|
def inconsistent_info2():
    """
    Test fixture: weighted (u, v, weight) edges plus label assignments.

    Also ensures that b and c are very likely to be different.

    Returns a tuple ``(uvw_list, pass_values, fail_values)``.
    NOTE(review): the exact semantics of the pass/fail rows are defined
    by the consuming test — inferred from the names only.
    """
    # Edge list: near-1 weights bind pairs together, near-0 pull apart.
    uvw_list = [
        ('a', 'b', 0.8),
        ('c', 'd', 0.8),
        ('b', 'c', 0.001),
        ('a', 'c', 0.2),
        ('a', 'd', 0.2),
        ('b', 'd', 0.99999),
    ]
    # Label assignments (one value per node) expected to be accepted.
    pass_values = [
        [0, 1, 1, 1],
        [1, 1, 0, 1],
        [1, 1, 1, 1],
        [0, 1, 2, 1],
    ]
    # Assignments expected to be rejected (b == c here).
    fail_values = [[0, 0, 1, 1]]
    return uvw_list, pass_values, fail_values
|
36fa9ec414449f375817cb6ebd17e2b7410a26ae
| 695,883
|
def read_unitig_order_file(unitig_order_file):
    """Parse a unitig order CSV into {unitig: (start, end, direction)}.

    Each line reads "name,start,end,direction" where start/end may be
    the literal string "None" and direction is "True"/"False".
    """
    unitig_order = {}
    with open(unitig_order_file) as handle:
        for raw_line in handle:
            tokens = raw_line.rstrip().split(',')
            # "None" marks a missing coordinate.
            start = int(tokens[1]) if tokens[1] != "None" else None
            end = int(tokens[2]) if tokens[2] != "None" else None
            dirn = tokens[3] == "True"
            unitig_order[tokens[0]] = (start, end, dirn)
    return unitig_order
|
a2dad25d5ed119fcdde6291fb734c23b9b6d637d
| 695,884
|
def __get_odata_parameter(top=0, skip=0, format="", orderby="", filter=""):
    """Build the querystring for the standard OData parameters.

    Keyword Arguments:
        top {int} -- number of records to return (default: {0})
        skip {int} -- number of leading records to skip (default: {0})
        format {str} -- response format, json or xml (default: {""})
        orderby {str} -- sort order, a response field name (default: {""})
        filter {str} -- filter condition (default: {""})

    Returns:
        str -- the assembled OData parameter querystring
    """
    # Only emit the parameters the caller actually set.
    pieces = []
    if top > 0:
        pieces.append("&$top={0}".format(top))
    if skip > 0:
        pieces.append("&$skip={0}".format(skip))
    if orderby:
        pieces.append("&$orderby={0}".format(orderby))
    if format:
        pieces.append("&$format={0}".format(format))
    if filter:
        pieces.append("&$filter={0}".format(filter))
    return "".join(pieces)
|
93820d1ac8c16574694331c6bfc60662fc7d89f0
| 695,885
|
def search_escape(url):
    """Double up '{' and '}' so the URL survives str.format templating.

    Will obviously trash a properly-formatted qutebrowser URL.
    """
    doubled_open = url.replace('{', '{{')
    return doubled_open.replace('}', '}}')
|
ad0a1d3ff499cbf2706838c44f03df60bd40e1a9
| 695,886
|
def Edges_Exist_Via(G, p, q):
    """Helper for del_gnfa_states
    ---
    If G has a direct edge p--edgelab-->q, return the matching edge
    label(s); else return the string "NOEDGE". We maintain the invariant
    of at-most one such edge for any p,q in the GNFA.

    NOTE(review): despite the wording "return edgelab", the code returns
    the *list* of matching labels (length 1 under the invariant), not a
    bare label — confirm callers expect a list before changing this.
    """
    # Collect labels of edges leaving p whose target set contains q.
    edges = [ edge
              for ((x, edge), States) in G["Delta"].items()
              if x==p and q in States ]
    if len(edges) == 0:
        return "NOEDGE"
    else:
        return edges
|
53af339eb5317321a8f125a289215bff89a95b5d
| 695,887
|
import csv
import requests
def get_words(min_length=5,max_length=5,capitalization='lower',use_file=''):
    """Get a list of English words of a desired length from instructables.

    Args:
        min_length (int, optional): Keep words of this length or longer. Defaults to 5.
        max_length (int, optional): Keep words of this length or shorter. Defaults to 5.
        capitalization (string, optional): Capitalization of the returned
            words ('lower', 'upper', 'title'). Defaults to 'lower'.
        use_file (str, optional): When non-empty, read from the local file
            'Docs/{max_length}Words.csv' instead of fetching over HTTP.
            NOTE(review): only the truthiness of use_file is consulted —
            the actual path is derived from max_length; confirm intended.

    Returns:
        List: words meeting the length requirements.
    """
    WordList = []
    if len(use_file) > 0:
        # Local CSV: one word per row, first column.
        # NOTE(review): the local file is assumed pre-filtered — the
        # min_length and capitalization rules are NOT applied here.
        with open(f'Docs/{max_length}Words.csv', newline='') as f:
            for row in csv.reader(f):
                WordList.append(row[0])
    else:
        # Network fetch of a plain-text word list, one word per line.
        InitialList = requests.get("https://content.instructables.com/ORIG/FLU/YE8L/H82UHPR8/FLUYE8LH82UHPR8.txt").text
        InitialList = str.splitlines(InitialList)
        for word in InitialList:
            if len(word) >= min_length and len(word) <= max_length:
                if capitalization.lower() == 'upper':
                    WordList.append(word.upper())
                elif capitalization.lower() == 'title':
                    WordList.append(word.title())
                else:
                    WordList.append(word.lower())
    return WordList
|
ceeacd7772ced20c86d3a66cb966cb25ea286d85
| 695,888
|
import hashlib
import binascii
def multipart_etag(digests):
    """
    Compute the S3-style etag for a multipart upload.

    :type digests: list of hex-encoded md5 sums (string)
    :param digests: The list of digests for each individual chunk.
    :rtype: string
    :returns: The etag computed from the individual chunks.
    """
    # The combined etag is the md5 of the concatenated raw chunk
    # digests, suffixed with the chunk count.
    combined = hashlib.md5()
    for chunk_digest in digests:
        combined.update(binascii.a2b_hex(chunk_digest))
    return f"'{combined.hexdigest()}-{len(digests)}'"
|
1d6d13d3f28cdbae6a56fe903329bd8f91b53000
| 695,889
|
def Tsorties_echangeur(Te1,Te2,mf1,mf2,Cp1,Cp2,eff):
    """
    Compute the outlet temperatures of a heat exchanger.

    Parameters
    ----------
    Te1 : hot-fluid inlet temperature
    Te2 : cold-fluid inlet temperature
    mf1 : hot-fluid mass flow rate
    mf2 : cold-fluid mass flow rate
    Cp1 : hot-fluid specific heat capacity
    Cp2 : cold-fluid specific heat capacity
    eff : exchanger effectiveness

    Returns
    -------
    Ts1 : hot-fluid outlet temperature
    Ts2 : cold-fluid outlet temperature
    """
    hot_capacity = mf1 * Cp1
    cold_capacity = mf2 * Cp2
    if hot_capacity <= cold_capacity:
        # Hot side has the minimum capacity rate: it experiences the
        # full effectiveness; the cold side follows from energy balance.
        Ts1 = Te1 - eff * (Te1 - Te2)
        Ts2 = Te2 + (hot_capacity / cold_capacity) * (Te1 - Ts1)
    else:
        Ts2 = Te2 + eff * (Te1 - Te2)
        Ts1 = Te1 + (cold_capacity / hot_capacity) * (Te2 - Ts2)
    return Ts1, Ts2
|
ebae4e1f99bc0eea1941e85dbd5087f825bb5105
| 695,890
|
import json
def load_schema(filename):
    """Load a table schema from a JSON file.

    Parameters
    ----------
    filename : str
        The path to your file.

    Returns
    -------
    schema : dict
        A dictionary containing the schema for your table.
    """
    with open(filename) as handle:
        return json.load(handle)
|
55b475a4cc7bfb184c0f2db3e41d6e3408b888e6
| 695,891
|
def define_path(use_jaad=True, use_pie=True, use_titan=True):
    """
    Return the annotation and image paths for the selected datasets.

    Each use_* flag includes the corresponding dataset in the returned
    (anns_paths, image_dir) dict pair.
    """
    all_anns_paths = {'JAAD': {'anns': '../../DATA/annotations/JAAD/JAAD_DATA.pkl',
                               'split': '../../DATA/annotations/JAAD/splits'},
                      'PIE': {'anns': '../../DATA/annotations/PIE/PIE_DATA.pkl'},
                      'TITAN': {'anns': '../../DATA/annotations/TITAN/titan_0_4',
                                'split':'../../DATA/annotations/TITAN/splits' }
                      }
    all_image_dir = {'JAAD': '../../DATA/JAAD/images/',
                     'PIE': '../../DATA/PIE/images/',
                     'TITAN': '../../DATA/TITAN/images_anonymized/'
                     }
    selected = [name for name, wanted in
                (('JAAD', use_jaad), ('PIE', use_pie), ('TITAN', use_titan))
                if wanted]
    anns_paths = {name: all_anns_paths[name] for name in selected}
    image_dir = {name: all_image_dir[name] for name in selected}
    return anns_paths, image_dir
|
7113772b29ded254059451b8f5317c91ea4c2e18
| 695,892
|
import inspect
def get_linenumbers(functions, module, searchstr='def {}(image):\n'):
    """Returns a dictionary which maps function names to line numbers.

    Args:
        functions: a list of function names
        module: the module to look the functions up
        searchstr: the exact source line to search for, with a ``{}``
            placeholder for the function name

    Returns:
        A dictionary with functions as keys and their 1-based line
        numbers as values (0 when the function is not found).
    """
    lines = inspect.getsourcelines(module)[0]
    line_numbers = {}
    for function in functions:
        try:
            # list.index is 0-based; +1 converts to editor line numbers.
            line_numbers[function] = lines.index(
                searchstr.format(function)) + 1
        except ValueError:
            # The search is an exact line match, so a different
            # signature or spacing will miss; report 0 in that case.
            print(r'Can not find `{}`'.format(searchstr.format(function)))
            line_numbers[function] = 0
    return line_numbers
|
b2cc1bc104cdae6bbbfb3680ac940540396db08a
| 695,893
|
def cron_to_dict(cron_keys, time_data):
    """
    Pair crontab-style time fields with their key names.

    :param cron_keys: iterable of cron field key names
    :param time_data: str, crontab-style time specification
    :return: dict mapping each key to its whitespace-separated field
    """
    fields = time_data.split()
    return dict(zip(cron_keys, fields))
|
1a75999198fdb7383fd87e3b6f15e5900648cdb5
| 695,894
|
def get_lrs(optimizer):
    """Return the learning-rates in optimizer's parameter groups."""
    rates = []
    for group in optimizer.param_groups:
        rates.append(group['lr'])
    return rates
|
6e32c90e42321d070cc1f444a6f117b72ad59adb
| 695,895
|
def highlight_min(s):
    """
    Style helper for a Pandas dataframe series: yellow background on
    the minimum value(s), empty style elsewhere.
    """
    minima = s == s.min()
    return ["background-color: yellow" if flag else "" for flag in minima]
|
ee74a19721fc7312744847b0c4d6de9255f312b0
| 695,896
|
import torch
def Rotz(t):
    """
    Rotation about the z-axis.
    np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
    -- input t shape B x 1
    -- return B x 3 x 3

    NOTE(review): the stated "B x 1" input shape looks inconsistent with
    the element-wise assignments below (Rz[:, i, 0] expects values of
    shape (B,)); a 1-D tensor of shape (B,) appears to be what the code
    handles — confirm against callers.
    """
    B = t.shape[0]
    # Build the 9 matrix entries in a flat (B, 9, 1) buffer, then
    # reshape into (B, 3, 3) row-major rotation matrices.
    Rz = torch.zeros((B, 9, 1), dtype=torch.float)
    c = torch.cos(t)
    s = torch.sin(t)
    ones = torch.ones(B)
    Rz[:, 0, 0] = c      # row 0: [cos, -sin, 0]
    Rz[:, 1, 0] = -s
    Rz[:, 3, 0] = s      # row 1: [sin, cos, 0]
    Rz[:, 4, 0] = c
    Rz[:, 8, 0] = ones   # row 2: [0, 0, 1]
    Rz = Rz.reshape(B, 3, 3)
    return Rz
|
6a078e033f6ad6b497da05999433ef745c76d2ed
| 695,897
|
def __get_edge_dict(uid, source, target, color, edge_type):
    """
    Assemble the dictionary describing one graph edge.

    :param uid: unique edge id
    :param source: source node id
    :param target: target node id
    :param color: display colour
    :param edge_type: semantic type of the edge
    :return: dict with keys id/source/target/color/edge_type
    """
    return dict(id=uid,
                source=source,
                target=target,
                color=color,
                edge_type=edge_type)
|
3ef239f3f19d584b9157b8e320074a7d4bf6f630
| 695,898
|
def calc_Flesh_Kincaid_Grade_rus_adapted(n_syllabes, n_words, n_sent, X, Y, Z):
    """Parameterised Flesch-Kincaid grade metric adapted for Russian.

    X, Y and Z are the linear coefficients (the classic adaptation uses
    roughly 0.59, 6.2 and 16.59). Returns 0 for empty text.
    """
    # Guard against division by zero on empty input.
    if n_words == 0 or n_sent == 0:
        return 0
    words_per_sentence = float(n_words) / n_sent
    syllables_per_word = float(n_syllabes) / n_words
    return X * words_per_sentence + Y * syllables_per_word - Z
|
a3fd7cc72bf701f8b8f2ff47bb123eca855dba5a
| 695,899
|
import sys
def is_booted_volume():
    """
    Returns True if the script / package (pkg) is being executed on
    the booted volume, or False if being executed on a targeted volume.
    """
    # sys.argv will only have more than 3 arguments when running within a pkg
    # (which means scripts are always targeting the booted volume)
    if len(sys.argv) < 4:
        return True
    # grab the target destination
    target = sys.argv[3]
    local_disk = "/"
    # Bug fix: compare string *values* with ==, not identity with `is`
    # (identity only worked by accident of CPython string interning).
    return target == local_disk
|
534c857c42a93f0eeb36bfc67771f8b6679398c7
| 695,900
|
import tokenize
import codecs
def read_pyfile(filename):
    """Read and return the contents of a Python source file (as a
    string), honouring the file's declared encoding."""
    # First pass: sniff the PEP 263 coding declaration (or BOM).
    with open(filename, "rb") as raw:
        encoding = tokenize.detect_encoding(raw.readline)[0]
    # Second pass: decode the whole file with that encoding.
    with codecs.open(filename, "r", encoding=encoding) as pyfile:
        return pyfile.read()
|
a6fce0f2acdb5156872ef572ab7e77ba6507873d
| 695,901
|
def parse_version_string(version):
    """
    Returns a tuple containing the major, minor, revision integers.

    Dotted components beyond the third are ignored.
    """
    components = version.split(".")
    major = int(components[0])
    minor = int(components[1])
    revision = int(components[2])
    return major, minor, revision
|
a8d50804ebe82541e57c83a813843d255b3b6fbb
| 695,902
|
def interpc(coef, lat):
    """Linear interpolation over latitude (15-degree bands).

    ``coef`` has 5 columns; results are clamped to the first/last
    column outside the interpolation range.
    """
    band = int(lat/15.0)
    if band < 1:
        return coef[:, 0]
    if band > 4:
        return coef[:, 4]
    # Fractional position within the band drives the linear blend.
    frac = lat/15.0 - band
    return coef[:, band - 1] * (1.0 - frac) + coef[:, band] * frac
|
f5ae562febeacbe51aaba5f9c187f47591a936fc
| 695,903
|
def choosePokemon():
    """Prompt until the user picks charmander, squirtle or bulbasaur.

    Input is lower-cased so capitalisation doesn't matter; the chosen
    (lower-case) name is returned.
    """
    valid_choices = ("charmander", "squirtle", "bulbasaur")
    Pokemon = ""
    # Keep asking while the (lower-cased) answer is not a valid starter.
    while Pokemon not in valid_choices:
        Pokemon = input("Which Pokemon would you like to choose (Charmander or Squirtle or Bulbasaur): ")
        Pokemon = Pokemon.lower()
    return Pokemon
|
ce18f020ed12a4e8b1a78f4ebbdb750775925b5d
| 695,904
|
import os
def greet():
    """
    Greet our user with a fixed message plus the container hostname
    (None when the HOSTNAME environment variable is unset).
    """
    hostname = os.environ.get('HOSTNAME')
    return {'message': 'Hi there fella', 'hostname': hostname}
|
63068865c91504c6fc408ff22ecf0329d22b464c
| 695,905
|
def autocomplete(s: str, d: list):
    """
    Return the words in ``d`` whose prefix equals ``s``, in order.

    :param s: query prefix
    :param d: candidate words
    :return: matching words
    """
    matches = []
    for word in d:
        if word[:len(s)] == s:
            matches.append(word)
    return matches
|
b23df7fcf1bab63e0f4bda3a064bd9da40416949
| 695,906
|
import functools
def dual_decorator(func):
    """This is a decorator that converts a paramaterized decorator for no-param use.

    Wraps a decorator *factory* so that both ``@deco`` and
    ``@deco(args)`` work.
    source: http://stackoverflow.com/questions/3888158.
    """
    @functools.wraps(func)
    def inner(*args, **kw):
        # Bare usage: a single positional callable and no kwargs means
        # we were applied directly to the target — call the factory with
        # no arguments and decorate. Exception classes are callable too,
        # hence the explicit subclass guard to treat them as parameters.
        if ((len(args) == 1 and not kw and callable(args[0]) and
             not (type(args[0]) == type and issubclass(args[0], BaseException)))):
            return func()(args[0])
        else:
            # Parameterized usage: forward the arguments to the factory.
            return func(*args, **kw)
    return inner
|
145c640fd9a6a7fd4dd9b0fba4b0c9e42b30b734
| 695,907
|
def get_building_coords(town):
    """
    Generates a dictionary of all (x,y) co-ordinates that are within buildings
    in the town, where the keys are the buildings' numbers (or "pub" for the
    pub) and the values are lists of co-ordinates associated with the building.
    Data must have 25 houses (numbered as multiples of 10 from 10 to 250) and
    1 pub (cell value 1).

    Parameters
    ----------
    town : list
        List (cols) of lists (rows) representing raster data of the town.

    Returns
    -------
    building_coords : dict
        Keys are the buildings' numbers (or "pub" for the pub) and the values
        are lists of all co-ordinates that are within the building.
    """
    # Fixes: (1) removed the unreachable code that followed the return
    # statement; (2) replaced 26 full raster scans (one per building)
    # with a single pass.
    # Map raster cell values to dictionary keys; pub (1) comes first to
    # preserve the original key insertion order.
    names = {1: "pub"}
    names.update({n: n for n in range(10, 260, 10)})
    # Pre-create every key so buildings absent from the raster still map
    # to empty lists, exactly like the original per-building scans.
    building_coords = {name: [] for name in names.values()}
    for y, row in enumerate(town):
        for x, cell in enumerate(row):
            name = names.get(cell)
            if name is not None:
                building_coords[name].append((x, y))
    return building_coords
|
085c95d40d9d84569180155f5b0b150334dbc526
| 695,908
|
import inspect
def is_simple_scheduler(scheduler_class):
    """
    Determine whether a scheduler is "simple", i.e. it does not define
    the full feedback interface ("before_request", "after_request" and
    "next") used to consider feedback from the runner.
    """
    required = ("before_request", "after_request", "next")
    defined = {name for name, _ in
               inspect.getmembers(scheduler_class, inspect.isfunction)}
    return not all(method in defined for method in required)
|
c7ee776ad9305c52d851e2f02c934f161e5b49ec
| 695,909
|
import os
def get_document_assets_data(current_version):
    """
    Return the asset data of the current version of a document
    registered in Kernel.

    Returns a tuple ``(assets_data, assets_grouped_by_id)``:
    ``assets_data`` is a flat list of ``{"asset_id", "uri"}`` dicts and
    ``assets_grouped_by_id`` groups alternatives of the same asset by
    filename prefix.
    """
    LAST_VERSION = -1
    # Group items that represent the same asset.
    assets_by_prefix = {}
    assets_data = []
    assets = current_version.get("assets") or {}
    for asset_id, asset in assets.items():
        # Alternatives of one asset share a prefix: strip the extension
        # and any ".thumbnail" marker from the id.
        prefix, ext = os.path.splitext(asset_id)
        prefix = prefix.replace(".thumbnail", "")
        assets_by_prefix[prefix] = assets_by_prefix.get(prefix) or []
        try:
            # asset is presumably a list of versions whose entries hold
            # the URI at index 1 — TODO confirm the Kernel schema.
            _uri = asset[LAST_VERSION][1]
        except IndexError:
            _uri = None
        uri = {
            "asset_id": asset_id,
            "uri": _uri,
        }
        assets_by_prefix[prefix].append(uri)
        assets_data.append(uri)
    # Build the list of digital-asset groups, one entry per prefix.
    assets_grouped_by_id = []
    for prefix, asset_alternatives in assets_by_prefix.items():
        assets_grouped_by_id.append(
            {
                "prefix": prefix,
                # Only alternatives that actually resolved to a URI.
                "uri_alternatives": [
                    alternative["uri"]
                    for alternative in asset_alternatives
                    if alternative["uri"]
                ],
                "asset_alternatives": asset_alternatives,
            }
        )
    return assets_data, assets_grouped_by_id
|
53b2da615cea5eba926e56a1656fbea11e637d30
| 695,910
|
def promptConfirm(message: str) -> bool:
    """ Prompts confirming a message. Defaults to "no" (False).
    :param message: Message to prompt.
    :return: Whether the prompt was confirmed.
    """
    answer = input(message + " (y/N): ").strip().lower()
    return answer in ("y", "yes")
|
4d0ba40150231939571915676740a1e6b5857f0d
| 695,912
|
def IntegerConversionFunction(character):
    """
    Helper function for file reading: map ' '/'+'/'#' to 0/1/2.

    Any other character yields None, matching the original fall-through
    behaviour of the branch chain.
    """
    mapping = {' ': 0, '+': 1, '#': 2}
    return mapping.get(character)
|
6d2bf6a32cd78405b95722cc043bd529b77da0c4
| 695,913
|
def _check_state(monomer, site, state):
    """ Check that ``state`` is a valid state for ``site`` on ``monomer``.

    Raises ValueError for an invalid state; returns True otherwise.
    """
    valid_states = monomer.site_states[site]
    if state not in valid_states:
        raise ValueError(
            "Invalid state choice '{}' in Monomer {}, site {}. Valid "
            "state choices: {}".format(state, monomer.name, site, valid_states)
        )
    return True
|
8f61c91ddae5af378503d98377401446f69c37db
| 695,914
|
def get_merged_areas_recursion(merge, areas):
    """
    Merge nodes recursively according to area superposition.

    ``merge`` is consumed (mutated) while merged or unmergeable areas
    accumulate into ``areas``; the final ``areas`` list is returned.
    Each entry appears to be a pair of coordinate collections,
    presumably ((x values), (y values)) — TODO confirm with callers.
    """
    for index, n1 in enumerate(merge):
        if len(merge) != 1:
            n2 = merge[index + 1]
            # Overlap test: the next area starts before this one ends on
            # the first axis...
            if min(n2[0]) <= max(n1[0]):
                # ...and their second-axis ranges intersect.
                if min(n1[1]) <= min(n2[1]) <= max(n1[1]) or min(n1[1]) <= max(n2[1]) <= max(n1[1]):
                    # Replace the overlapping pair with their bounding
                    # box and restart the recursion on the shrunken list.
                    merge.insert(index, ((min(n1[0] + n2[0]), max(n1[0] + n2[0])),
                                         (min(n1[1] + n2[1]), max(n1[1] + n2[1]))))
                    merge.pop(index + 1)
                    merge.pop(index + 1)
                    return get_merged_areas_recursion(merge, areas)
        # No merge happened for the head element: commit it and recurse
        # on the remainder of the list.
        areas.append(n1)
        merge.pop(0)
        if merge:
            return get_merged_areas_recursion(merge, areas)
    return areas
|
9ab0c392ab3dc824ce9b6445ec0648da4e32ec7e
| 695,915
|
import unicodedata
def unicode_to_ascii(s):
    """
    Strip combining marks from a unicode string, yielding its ASCII-ish
    equivalent (e.g. 'café' -> 'cafe').

    :param s: String of unicode
    :return: String with combining marks removed
    """
    # NFD splits accented characters into base + combining mark ('Mn'),
    # so dropping the 'Mn' category removes the accents.
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)
|
03781e1286aef5a67ea389dad4fcf59cc9317f23
| 695,916
|
def read_value(value):
    """Asynchronously call read() on a Calculator::Value and return a
    promise for the plain result. (In the future, the generated code
    might include something like this automatically.)"""
    def unwrap(result):
        # The read() promise resolves to an object carrying .value.
        return result.value
    return value.read().then(unwrap)
|
6a4176950210cec9b13aff1d88deb420894a5971
| 695,917
|
def wrap_fn_hessian(fn, i, j, **kwargs):
    """
    A wrapper for the QCA batch downloader for Hessians.

    ``i`` and ``j`` identify the batch and are kept for interface
    compatibility; only ``fn(**kwargs)`` is consumed. Returns the
    fetched objects as a list.
    """
    return list(fn(**kwargs))
|
d46626c8f5d01f66f397a5688826cefa8af5501b
| 695,918
|
def check_form(form_data):
    """
    Perform a small check on submitted form data.

    ``form_data`` is iterated as (key, value) pairs; returns None when
    any value is the empty string, otherwise the data unchanged.
    """
    for _, value in form_data:
        if value == "":
            return None
    return form_data
|
366c5edcf37e453b2f10b29610c898cac6adc3a3
| 695,919
|
import subprocess
def cli(path):
    """
    Run a test coverage report by shelling out to py.test.

    Args:
        path (str): test coverage path
    :return: Subprocess call result (the py.test process exit status)
    """
    cmd = 'py.test {0}'.format(path)
    # NOTE(review): shell=True with an interpolated path is a shell
    # injection risk if `path` can come from untrusted input — confirm
    # callers only pass trusted, static paths.
    return subprocess.call(cmd, shell=True)
|
03188c67ccd38ec7f93922f01b7a6f7f54a076c8
| 695,920
|
def make_stun(action):
    """Return a new action method that does nothing.

    action -- An action method of some Bee (ignored; kept for interface)
    """
    # BEGIN Problem EC
    def stunned_action(colony):
        # A stunned bee takes no action this turn.
        return None
    return stunned_action
    # END Problem EC
|
fbabcedc4a325f7557fa4ab13e784612086a6276
| 695,921
|
def building_block(case_data):
    """
    Return the :class:`.BuildingBlock` instance held by ``case_data``.
    """
    block = case_data.building_block
    return block
|
5619f1363919072012123db906a84b9dfc744b77
| 695,922
|
def image_layers(state):
    """Get the names of all image layers in the state.

    Parameters
    ----------
    state : dict
        Neuroglancer state as a JSON dict

    Returns
    -------
    names : list
        List of layer names whose type is "image"
    """
    names = []
    for layer in state["layers"]:
        if layer["type"] == "image":
            names.append(layer["name"])
    return names
|
6c843855e01957386871f90d54eee69800f08c37
| 695,923
|
import numpy
def fill_with_zeros(evecs, from_res, to_res, width):
    """
    Zero-pad eigenvectors out to the full dihedral width.

    In the IC case the initial residue contributes only one angle, hence the
    ``2*r - 1`` index arithmetic below (two angles per residue, minus one).

    Returns a numpy array with one zero-padded row per input eigenvector.
    """
    # Number of leading zeros: everything before the first covered residue.
    left_len = 2 * (from_res - 1) - 1
    # Number of trailing zeros: full width minus the last covered index.
    right_len = (2 * width - 1) - (2 * (to_res - 1) + 1)
    left = [0.] * left_len
    right = [0.] * right_len
    padded = [left + list(evec) + right for evec in evecs]
    return numpy.array(padded)
|
45cf3ecc1b5fe86f40d56547969ec076d099ec29
| 695,924
|
def station_name_filter(name: str, filter_dict: dict[str, str]) -> str:
    """Apply a rename filter to a station name and append ' station'.

    Hyphens in the (possibly renamed) station name are replaced with
    en-dashes before the suffix is added.

    filter_dict: mapping of raw station names to their canonical names
    returns: modified station name
    """
    canonical = filter_dict.get(name, name)
    return canonical.replace("-", "–") + " station"
|
b5ac0dc011d07021b299b28f10bda413063f8536
| 695,925
|
def task_compile_prod_requirements():
    """Generate prod requirements for the service (doit task definition)."""
    deps = ["../requirements.prod.in"]
    actions = ["../venv/bin/pip-compile -o ../requirements.txt ../requirements.prod.in"]
    return {"file_dep": deps, "actions": actions}
|
90a3b91c58eab31f997e948e2462f694963f7328
| 695,926
|
import requests
def coordinate_finder(result: requests.Response) -> tuple:
    """
    Filter function for send_get_request: extract a (lat, lon) tuple from a
    Wikipedia GeoData API response (props=coordinate).
    More information found: https://www.mediawiki.org/wiki/Extension:GeoData
    """
    payload = result.json()
    pages = list(payload['query']['pages'].values())
    first_coords = pages[0]['coordinates'][0]
    return (first_coords['lat'], first_coords['lon'])
|
3069e7c3782d292fc708f79a7d178a4b4f40ebb7
| 695,928
|
import argparse
import sys
def Get_Arguments():
    """
    Parse command-line arguments using argparse.

    Prints help and exits with status 1 when the script is invoked with no
    command-line options at all.

    Returns:
        argparse.Namespace: parsed arguments (attributes: dir, outfile, popmap).
    """
    parser = argparse.ArgumentParser(
        description="Converts directory of FASTA files to input file for CLADES for species delimitation",
        add_help=False,
    )
    required_args = parser.add_argument_group("Required Arguments")
    optional_args = parser.add_argument_group("Optional Arguments")

    # Required options.
    required_args.add_argument(
        "-d", "--dir", type=str, required=True,
        help="Specify directory containing only input FASTA files.")
    required_args.add_argument(
        "-o", "--outfile", type=str, required=True,
        help="String; Specify output CLADES filename")
    required_args.add_argument(
        "-p", "--popmap", type=str, required=True,
        help="String; Specify two-column tab-delimited popmap file: IndID\tPopID; no header line.")

    # Optional options (help is added manually because add_help=False above).
    optional_args.add_argument(
        "-h", "--help", action="help", help="Displays this help menu")

    # If no arguments were given at all, print help and die.
    if len(sys.argv) == 1:
        print("\nExiting because no command-line options were called.\n")
        parser.print_help(sys.stderr)
        sys.exit(1)

    return parser.parse_args()
|
163bccb6c0b1b64e4e7fb185f11b3d7dfd90538c
| 695,929
|
def filter_merge_clusters(clusters, max_block_size_multi=5, min_block_pop=50, buffer_amount=150):
    """
    The vectors created by create_clusters() are a single square for each raster pixel.
    This function does the follows:
    - Remove overly large clusters, caused by defects in the input raster.
    - Remove clusters with population below a certain threshold.
    - Buffer the remaining clusters and merge those that overlap.
    Parameters
    ----------
    clusters: geopandas.GeoDataFrame
        The unprocessed clusters created by create_clusters()
    max_block_size_multi: int, optional
        Remove clusters that are more than this many times average size. Default 5.
    min_block_pop: int, optional
        Remove clusters with below this population. Default 50.
    buffer_amount: int, optional
        Distance in metres by which to buffer the clusters before merging. Default 150.
        NOTE(review): the buffer distance is only metres if the GeoDataFrame is in a
        projected (metric) CRS -- confirm upstream.
    Returns
    -------
    clusters: geopandas.GeoDataFrame
        The processed clusters.
    """
    # remove blocks that are too big (basically artifacts)
    clusters['area_m2'] = clusters.geometry.area
    clusters = clusters[clusters['area_m2'] < clusters['area_m2'].mean() * max_block_size_multi]
    # remove blocks with too few people
    # (raster_val holds the population count per cluster at this point)
    clusters = clusters[clusters['raster_val'] > min_block_pop]
    # buffer outwards so that nearby blocks will overlap
    clusters['geometry'] = clusters.geometry.buffer(buffer_amount)
    # and dissolve the thousands of blocks into a single layer (with no attributes!)
    clusters['same'] = 1
    clusters = clusters.dissolve(by='same')
    # To get our attributes back, we convert the dissolves polygon into singleparts
    # This means each contiguous bubble becomes its own polygon and can store its own attributes
    # NOTE(review): `crs` is captured but unused here; it was needed by the
    # commented-out legacy re-wrapping path below and is retained for reference.
    crs = clusters.crs
    clusters = clusters.explode()
    clusters = clusters.reset_index()
    # no longer needed in GeoPandas >= 0.4.0
    # clusters['geometry'] = clusters[0]
    # clusters = gpd.GeoDataFrame(clusters)
    # clusters.crs = crs
    clusters = clusters.drop(columns=['same', 'level_1', 'raster_val'])  # raster_val is no longer meaningful
    # And then add the polygon's area back to its attributes
    clusters["area_m2"] = clusters['geometry'].area
    return clusters
|
8b6091baeb55e0c72c468aa6eb4300c4db40ecbd
| 695,931
|
import re
def remove_special_phrases(s):
    """
    Strip known release/encoding tags (resolution, codec, source, release
    group, ...) out of a media title string.

    Each phrase is removed together with its surrounding whitespace (replaced
    by a single space), and the result is stripped after every substitution.
    The order of the phrase list matters because earlier removals can expose
    or alter later matches, so it is preserved exactly (including duplicates).

    :param s: the raw title string
    :return: the cleaned string
    """
    special_phrases = ('480p', '720p', '1080p', 'H.264', 'H265', 'Ac3',
                       'Aac2', 'Dl', 'Sa89', 'Web', 'Amzn', 'D1', 'Dd',
                       'Dd5', 'Dvd', 'Dvdrip', 'Dvdrip.xvid', 'Dubbed',
                       'Ep', 'Episode', 'Xvid', 'Ffndvd', 'X264', '0.x264',
                       'X265', '0.x265', 'Dd2', 'Trollhd', 'B3 bomber',
                       'Rs2', 'Vk007', 'Bluray', 'Bluray+web',
                       'BlueRay.x264', 'D1x265', 'Hevc', 'D3g', 'Ntb',
                       'Shortbrehd', 'Deimos', 'D3fil3r', 'Dhd', 'Ositv',
                       'D3g', 'Ctrlhd', 'Ffndvd')
    for phrase in special_phrases:
        pattern = r'\s*' + re.escape(phrase) + r'\s*'
        s = re.sub(pattern, ' ', s).strip()
    return s
|
632838e9d0436ddca48122a7a047b7349717c78e
| 695,932
|
def makeShapeTranslation():
    """Build a character translation table for the 'shape' feature.

    Lowercase letters map to 'x', uppercase letters to 'X', and digits to
    'd'. The result is a ``{ord(src): ord(dst)}`` dict suitable for
    ``str.translate``.
    """
    lowercase = "aeiou" + "bcdfghjklmnpqrstvwxyz"
    uppercase = "AEIOU" + "BCDFGHJKLMNPQRSTVWXYZ"
    digits = "0123456789"
    source = lowercase + uppercase + digits
    target = "x" * 26 + "X" * 26 + "d" * 10
    return {ord(src): ord(dst) for src, dst in zip(source, target)}
|
9c2be5953c6db36f2f510f89dee1cff62548c017
| 695,933
|
import pickle
def load(path):
    """
    Load pickled pandas object (or any other pickled object) from the specified
    file path.

    Parameters
    ----------
    path : string
        File path

    Returns
    -------
    unpickled : type of object stored in file
    """
    # A context manager guarantees the file is closed even if unpickling
    # raises, replacing the original manual try/finally bookkeeping.
    with open(path, 'rb') as f:
        return pickle.load(f)
|
6ed0b0ae944fa8bdaacfd4cbd8cdb3864f1adb47
| 695,934
|
from datetime import datetime
def totimestamp(datetime_object):
    """Convert a naive datetime to seconds since a fixed reference point.

    Python 2's datetime has no ``timestamp()`` method, so this computes the
    value in a py2/py3-compatible way via subtraction.

    NOTE(review): the reference ``datetime(1969, 12, 31, 20, 0)`` looks like
    the Unix epoch shifted by -4 hours (US/Eastern DST?) -- confirm the
    intended timezone assumption with callers.
    """
    epoch = datetime(1969, 12, 31, 20, 0)
    delta = datetime_object - epoch
    return delta.total_seconds()
|
68b6e19aac974a17a7c0f9980c6fcec347e8a861
| 695,935
|
def get_y_indicator_variable_index(i, j, m, n):
    """
    Map the (i, j) indices to the sequential indicator variable index for
    the y_{ij} variable.

    This is the standard row-major 'array equation': MiniSat+ indexes
    variables sequentially, so y_{ij} becomes y_r for 0 <= r < m*n.

    Parameters:
       i, j - indices for y indicator variable
       m    - order of tableau a (0 <= i,k < m); unused in the arithmetic,
              kept for interface symmetry
       n    - order of tableau b (0 <= j,l < n)

    Return value:
       index r of indicator variable y_{r} corresponding to y_{ij}
    """
    row_offset = i * n
    return row_offset + j
|
8c6dc999ebe3120084ae741403f15acdd900e783
| 695,936
|
def truncate_string(s, length):
    """Return *s* truncated to at most *length* characters.

    Python slicing already clamps the stop index to the string length, so
    the original ``s[0:min(length, len(s))]`` reduces to a plain slice;
    strings shorter than *length* are returned unchanged.
    """
    return s[:length]
|
9b44cf31c7905109497e485d0ffa707dada9d67b
| 695,937
|
import base64
def base64_encode(input_val):
    """
    Base64-encode the given bytes.

    :param input_val: bytes to be base64 encoded
    :type input_val: buffer or str
    :return: the base64-encoded bytes
    """
    encoded = base64.b64encode(input_val)
    return encoded
|
ba0b7d72b6ceda9ad9687caa88ee9e566d15562a
| 695,938
|
import torch
def inverse_sigmoid(x, eps=1e-5):
    """Inverse of the sigmoid function (the logit).

    Args:
        x (Tensor): Input tensor; values are clamped into [0, 1] first.
        eps (float): Small constant guarding both the numerator and the
            denominator against log(0) / division overflow. Defaults 1e-5.

    Returns:
        Tensor: log(x / (1 - x)) elementwise, same shape as the input.
    """
    clamped = x.clamp(min=0, max=1)
    numerator = clamped.clamp(min=eps)
    denominator = (1 - clamped).clamp(min=eps)
    return torch.log(numerator / denominator)
|
01d02c9f04d4a9318f0ec0d4bb8cf7301181c8f5
| 695,939
|
def keypress_to_dispatch_key(key, scancode, codepoint, modifiers):
    """Convert key_down event data into a single dispatch string.

    The scancode and codepoint are accepted for interface compatibility but
    unused; only the key number and the (sorted) modifiers contribute.

    :returns: The dispatch key in format ``109+alt,shift`` -- key number,
        ``+``, and the modifiers joined by commas; just the key number when
        there are no modifiers.
    """
    if not modifiers:
        return '{0}'.format(key)
    return '{0}+{1}'.format(key, ','.join(sorted(modifiers)))
|
16dd835511b3e31eeb2278013b6a194172aa2e06
| 695,940
|
def thermo_data_text(species_list, note, input_type='included'):
    """Returns thermodynamic data in Chemkin-format file.

    Parameters
    ----------
    species_list : list of cantera.Species
        List of species objects
    note : dict
        Maps each species name to a note string; the first line becomes the
        inline note next to the species name, remaining lines are emitted as
        '!'-prefixed comment lines. May be falsy to omit notes entirely.
    input_type : str, optional
        'included' if thermo will be printed in mech file, 'file' otherwise
    """
    # The header differs only in the THERMO keyword; the second line holds
    # the default low/common/high temperatures.
    if input_type == 'included':
        thermo_text = ['THERMO ALL\n' +
                       ' 300.000 1000.000 6000.000\n']
    else:
        thermo_text = ['THERMO\n' +
                       ' 300.000 1000.000 6000.000\n']
    # write data for each species in the Solution object
    for species in species_list:
        # Element symbol (2 chars) + atom count (3 chars) pairs, e.g. 'C   1H   4'.
        composition_string = ''.join([f'{s:2}{int(v):>3}'
                                      for s, v in species.composition.items()
                                      ])
        # first line has species name, space for notes/date, elemental composition,
        # phase, thermodynamic range temperatures (low, high, middle), and a "1"
        # total length should be 80
        # attempt to split note and comment: single-line notes go inline next
        # to the name; multi-line notes put the first line inline and the rest
        # above the entry as '!' comments.
        if not note:
            comment, comment_str, note_str = '', '', ''
        elif len(note[species.name].split('\n', 1)) == 1:
            comment = ''
            comment_str = ''
            note_str = note[species.name]
        else:
            comment = '!\n'
            note_str, comment_str = note[species.name].split('\n', 1)
            # If name + note would overflow the 24-char field, demote the
            # note into the comment block instead.
            if len(f'{species.name} {note_str}') > 24:
                comment_str += '\n' + note_str
                note_str = ''
            comment_str = comment_str.replace('\n', '\n! ')
            comment = f'{comment}! {comment_str}'
        name_and_note = f'{species.name} {note_str}'
        # NOTE(review): coeffs[0] appears to be the common (mid-range)
        # temperature of the two-range polynomial, with coeffs[1:8] the
        # high-T and coeffs[8:15] the low-T coefficients -- confirm against
        # the cantera NasaPoly2 layout.
        species_string = (comment + '\n' +
                          f'{name_and_note:<24}' +  # name and date/note field
                          f'{composition_string:<20}' +
                          'G' +  # only supports gas phase
                          f'{species.thermo.min_temp:10.3f}' +
                          f'{species.thermo.max_temp:10.3f}' +
                          f'{species.thermo.coeffs[0]:8.2f}' +
                          6*' ' +  # unused atomic symbols/formula, and blank space
                          '1\n'
                          )
        # second line has first five coefficients of high-temperature range,
        # ending with a "2" in column 79
        species_string += (
            ''.join([f'{c:15.8e}' for c in species.thermo.coeffs[1:6]]) +
            '    ' +
            '2\n'
            )
        # third line has the last two coefficients of the high-temperature range,
        # first three coefficients of low-temperature range, and "3"
        species_string += (
            ''.join([f'{c:15.8e}' for c in species.thermo.coeffs[6:8]]) +
            ''.join([f'{c:15.8e}' for c in species.thermo.coeffs[8:11]]) +
            '    ' +
            '3\n'
            )
        # fourth and last line has the last four coefficients of the
        # low-temperature range, and "4"
        species_string += (
            ''.join([f'{c:15.8e}' for c in species.thermo.coeffs[11:15]]) +
            19*' ' +
            '4\n'
            )
        thermo_text.append(species_string)
    if input_type == 'included':
        thermo_text.append('END\n\n\n')
    else:
        thermo_text.append('END\n')
    return ''.join(thermo_text)
|
600be4eb4a2420d1c4ebb8f2285f5ece0ad1b9a9
| 695,941
|
def get_best_sales_rep(df):
    """Return a tuple of the name of the sales rep with the highest total
    sales and the total of his/her sales.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'Rep' and 'Total' columns.

    Returns
    -------
    tuple
        (rep_name, total_sales)
    """
    totals = df.groupby(['Rep'])['Total'].sum()
    # Compute idxmax once instead of twice; also drops the unreachable
    # `pass` that followed the original return statement.
    best_rep = totals.idxmax()
    return (best_rep, totals.loc[best_rep])
|
e4313cf517ecb513918944fdbb55caea3f20fb0c
| 695,942
|
def _calc_time_threshold(seg_mean: float, seg_std: float) -> float:
"""
Auxiliary function for calculating the threshold based on the
mean and standard deviation of the time transitions between
adjacent places on discrete MoveDataFrame.
Parameters
----------
seg_mean : float
The time mean between two local labels (segment).
seg_std : float
The time mean between two local labels (segment).
Return
------
float
The threshold based on the mean and standard deviation
of transition time for the segment.
"""
threshold = seg_std + seg_mean
threshold = float('{:.1f}'.format(threshold))
return threshold
|
82e4a605327f20e39794e4a23ab6c9d422a06274
| 695,943
|
def _solveX(L, U, b):
"""Use forward and backwards substitution to calculate the x vector to solve the linear system of equations.
Parameters
----------
L: numpy.arrays
Lower triangular matrix
U: numpy.arrays
Upper triangular matrix
b: numpy.array
Column vector of constant terms
Returns
-------
x: numpy.array
Solution vector
"""
m, n = L.shape
# Forward Substitution
y = list()
y.insert(0, b[0]/L[0][0])
for i in range(1, m):
summ = 0
for k in range(0, i):
summ += L[i][k]*y[k]
y.insert(i, (b[i]-summ)/(L[i][i]))
# Backwards Substitution
x = [0]*m
x[m-1] = y[m-1] / U[m-1][m-1]
for i in range(m - 2, -1, -1):
summ = 0
for k in range(i+1, n):
summ += U[i][k]*x[k]
x[i] = (y[i] - summ)/U[i][i]
return x
|
997b472ea45796268a1d87c5ade3de4ab66115a0
| 695,944
|
def nearest_square(num):
    """
    Return the largest perfect square that does not exceed num
    (0 for any num below 1).
    """
    root = 0
    while (root + 1) * (root + 1) <= num:
        root += 1
    return root * root
|
53b899958a053c8bfe6383e240d3b8ddb7d291c1
| 695,945
|
import socket
def create_rawsock(iface):
    """Creates a new raw socket object.

    The socket sends/receives data at the link layer (TCP/IP model)/data-link
    layer (OSI model). AF_PACKET sockets are Linux-specific.

    Args:
        iface: A string specifying the name of the network interface to which
            the raw socket should be bound. For example "eth0".

    Returns:
        A socket object.
    """
    # NOTE(review): socket.SOCK_RAW (== 3) is passed here as the *protocol*
    # argument; ETH_P_ALL also happens to be 0x0003, so this works, but the
    # intent looks like it should be htons(ETH_P_ALL) -- confirm.
    sock = socket.socket(socket.AF_PACKET,
                         socket.SOCK_RAW,
                         socket.htons(socket.SOCK_RAW))
    # NOTE(review): the second element of the bind address is likewise the
    # protocol number (3), reusing the SOCK_RAW constant -- confirm intent.
    sock.bind((iface, socket.SOCK_RAW))
    return sock
|
ea56408403ada6b9750265547677028c197ae933
| 695,946
|
import math
def atan(x):
    """
    x is a number.

    Return an angle in radians whose tangent is x
    (thin wrapper around math.atan).
    """
    return math.atan(x)
|
a11a046f4017678d5276adb776018ba7904b7148
| 695,947
|
def vector_mul(k, a):
    """Multiplication of the vector *a* by the scalar *k*.

    The original doctest had the arguments swapped (``vector_mul((1, 2), 2)``),
    which would raise a TypeError; the example below matches the signature.

    >>> vector_mul(2, (1, 2))
    (2, 4)
    """
    return tuple(k * x for x in a)
|
cdc289430ab87ac70e8387d4dd807fb4dfd1e1da
| 695,949
|
def fetch_annotations(blast_results, ref_file):
    """
    Fetch protein annotations from a reference file for the proteome of
    SfMNPV 3AP2.

    ``ref_file`` is a CSV whose header is skipped; column 0 is the reference
    id and column 1 the annotation. ``blast_results`` maps record ids to
    reference ids. Returns a dict mapping each record id to its annotation
    (records whose hit is not in the reference file are omitted).
    """
    # Load reference id -> annotation (the header line is discarded).
    with open(ref_file, 'r') as fh:
        next(fh)
        references = {}
        for line in fh:
            fields = line.split(",")
            references[fields[0]] = fields[1]
    # A membership test replaces the original O(n*m) nested scan.
    annotation = {}
    for record in blast_results:
        hit = blast_results[record]
        if hit in references:
            annotation[record] = references[hit]
    return annotation
|
6e951fc8d6251209e4e8b78ce87e9ec2d60003c0
| 695,950
|
def floret_vectors_vec_str():
    """Return a fastText ``.vec``-format string: a ``<rows> <dims>`` header
    followed by the top 10 rows (token + 10-dimensional vector each).

    Used to verify that the spaCy floret vectors are equivalent to the
    fastText static vectors (presumably produced with the floret settings
    defined elsewhere in this test module -- see surrounding fixtures).
    """
    return """10 10
, -5.7814 2.6918 0.57029 -3.6985 -2.7079 1.4406 1.0084 1.7463 -3.8625 -3.0565
. 3.8016 -1.759 0.59118 3.3044 -0.72975 0.45221 -2.1412 -3.8933 -2.1238 -0.47409
der 0.08224 2.6601 -1.173 1.1549 -0.42821 -0.097268 -2.5589 -1.609 -0.16968 0.84687
die -2.8781 0.082576 1.9286 -0.33279 0.79488 3.36 3.5609 -0.64328 -2.4152 0.17266
und 2.1558 1.8606 -1.382 0.45424 -0.65889 1.2706 0.5929 -2.0592 -2.6949 -1.6015
" -1.1242 1.4588 -1.6263 1.0382 -2.7609 -0.99794 -0.83478 -1.5711 -1.2137 1.0239
in -0.87635 2.0958 4.0018 -2.2473 -1.2429 2.3474 1.8846 0.46521 -0.506 -0.26653
von -0.10589 1.196 1.1143 -0.40907 -1.0848 -0.054756 -2.5016 -1.0381 -0.41598 0.36982
( 0.59263 2.1856 0.67346 1.0769 1.0701 1.2151 1.718 -3.0441 2.7291 3.719
) 0.13812 3.3267 1.657 0.34729 -3.5459 0.72372 0.63034 -1.6145 1.2733 0.37798
"""
|
5b9321ba6cae72760ced79085073de812b1c8070
| 695,951
|
import random
def crossover(chromosome1, chromosome2):
    """
    Produce an offspring chromosome by uniform crossover of single weights.

    For each position, the gene comes from the father (chromosome1) or the
    mother (chromosome2) with 50% probability each. Alternatives would be to
    swap the weights of a whole neuron, or an entire layer.
    """
    offspring = []
    for locus in range(len(chromosome1)):
        # Draw first, then index -- same order of operations as before.
        parent = chromosome1 if random.randrange(100) < 50 else chromosome2
        offspring.append(parent[locus])
    return offspring
|
04d1f50b91d56d993154afc4381e14682268e1fd
| 695,952
|
def null_compare(path_test, path_ref, rtol, atol):
    """A null comparison. This is used when a result file is required to be
    produced by a test, but it is not possible to compare to a benchmark.

    All arguments (test path, reference path, relative/absolute tolerances)
    are accepted for interface compatibility with real comparison functions
    but ignored; the comparison unconditionally succeeds.
    """
    return True
|
66e7706950435828776ea67c0b6940cc32bad85c
| 695,953
|
import math
def replace_invalid_values(row):
    """Replace float values that are not available in BigQuery.

    Replaces inf, -inf and NaN with None. The original membership test
    (``x not in [inf, -inf, nan]``) only caught the ``math.nan`` object
    itself via the list identity shortcut -- any other NaN (for example
    ``float('nan')``) compares unequal to everything, including itself, and
    slipped through. ``math.isnan`` handles every NaN.

    Args:
        row: List of values to insert into BigQuery.

    Returns:
        List, `row` with invalid values replaced with `None`.
    """
    def _clean(x):
        # Only floats can be infinite/NaN; leave all other types untouched.
        if isinstance(x, float) and (math.isinf(x) or math.isnan(x)):
            return None
        return x
    return [_clean(x) for x in row]
|
c07c16780a52870f9d0954b3f8bba5a91baf6b58
| 695,954
|
def abbreviate(words):
    """
    Return an acronym for *words*: the uppercased first letter of the string
    plus every character that follows a space or a hyphen.
    """
    initials = [ch for pos, ch in enumerate(words)
                if pos == 0 or words[pos - 1] in " -"]
    return "".join(initials).upper()
|
605082359b57deba84e8d1524a4543c798cfc587
| 695,955
|
import logging
def getLogger():
    """
    Get the root :class:`logging.Logger` instance.

    :return: the root logger
    :rtype: :class:`logging.Logger`
    """
    root_logger = logging.getLogger()
    return root_logger
|
a0e5b7cb8f8ffe1ffc8b18eb6bbacc5e50b3fc5b
| 695,956
|
import os
def _python_files_in_dir(builder, root_dir):
"""Return a list of the ``*.py`` files in the specified directory.
This includes subdirectories.
Arguments:
builder (FileBuilder): The ``FileBuilder``.
root_dir (str): The directory.
Returns:
list<str>: The files.
"""
python_files = []
for dir_, subdirs, subfiles in builder.walk(root_dir):
for subfile in subfiles:
if subfile.endswith('.py'):
python_files.append(os.path.join(dir_, subfile))
return python_files
|
2b10602b73f79d128a77f29222d113c00f9f04cc
| 695,957
|
def interp_from_u(idx, w, y):
    """
    Compute Wy, where W is given in sparse form.

    idx selects, per output row, which entries of y participate; w holds the
    matching weights. Shapes: idx/w are (n, u) and y is (u,); the result is
    the weighted row sums, shape (n,).
    """
    gathered = y[idx]
    weighted = gathered * w
    return weighted.sum(axis=1)
|
b0312063699a16a75307774dbf54cb758082d678
| 695,959
|
def parse_offered(course_description):
    """Parses which quarters a course is offered from a course description.

    Args:
        course_description: The course description text.

    Returns:
        The quarters the course is offered, as a list drawn from
        ['A', 'W', 'Sp', 'S']; an empty list when no offering info is found.
    """
    # Guard against 'Offered:' appearing without a trailing space: the
    # original checked for 'Offered:' but split on 'Offered: ', so
    # parts[1] raised IndexError in that case. Now we return [] instead.
    parts = course_description.split('Offered: ')
    if len(parts) < 2:
        return []
    offered_text = parts[1]
    # Ordered longest-first within each leading quarter so that e.g.
    # 'AWSpS.' is matched before its substrings 'AW.' or 'A.'.
    patterns = [
        ('AWSpS.', ['A', 'W', 'Sp', 'S']),
        ('AWSp.', ['A', 'W', 'Sp']),
        ('AWS.', ['A', 'W', 'S']),
        ('AW.', ['A', 'W']),
        ('ASpS.', ['A', 'Sp', 'S']),
        ('ASp.', ['A', 'Sp']),
        ('AS.', ['A', 'S']),
        ('A.', ['A']),
        ('WSpS.', ['W', 'Sp', 'S']),
        ('WSp.', ['W', 'Sp']),
        ('WS.', ['W', 'S']),
        ('W.', ['W']),
        ('SpS.', ['Sp', 'S']),
        ('Sp.', ['Sp']),
        ('S.', ['S']),
    ]
    for pattern, quarters in patterns:
        if pattern in offered_text:
            return quarters
    return []
|
75005e4eae61717e64a7cfa821e6f772d4a93420
| 695,960
|
def A004767(n: int) -> int:
    """Return a(n) = 4*n + 3 (OEIS sequence A004767)."""
    return n * 4 + 3
|
5f97cccc4f540b46029e57c11d1ab718a59e227c
| 695,961
|
def convertXML(imagePath, labels):
    """
    Convert imagePath and labels into a single annotation line.

    Args:
        imagePath: path to the image
        labels: iterable of label sequences; every element is flattened in
            order onto the line

    Return:
        outString: ``"<imagePath> <v1> <v2> ...\\n"`` — the path followed by
        every label value, space-separated, newline-terminated.
    """
    parts = [imagePath]
    for label in labels:
        parts.extend(str(value) for value in label)
    return ' '.join(parts) + '\n'
|
21a1154b6202131a3fea51156590c299fd540069
| 695,962
|
import warnings
def test_pqtb_values(p, q, T, b_interslice):
""" Tests that p, q have valid values; prints warnings if necessary; and returns valid values.
"""
# Convert parameter ints to floats
if isinstance(p,int):
p = float(p)
if isinstance(q,int):
q = float(q)
if isinstance(T,int):
T = float(T)
if isinstance(b_interslice,int):
b_interslice = float(b_interslice)
# Check that p, q, T, b_interslice are floats
if not isinstance(q, float):
q = 2.0
warnings.warn("Parameter q is wrong type; Setting q = 2.0")
if not isinstance(p, float):
p = 1.2
warnings.warn("Parameter q is wrong type; Setting q = 2.0")
if not isinstance(T, float):
T = 1.0
warnings.warn("Parameter T is wrong type; Setting T = 1.0")
if not isinstance(b_interslice, float):
b_interslice = 1.0
warnings.warn("Parameter T is wrong type; Setting b_interslice = 1.0")
# Check that q is valid
if not (1.0 <= q <= 2.0):
q = 2.0
warnings.warn("Parameter q not in the valid range of [1,2]; Setting q = 2.0")
# Check that p is valid
if not (p >= 1.0):
p = 1.0
warnings.warn("Parameter p < 1; Setting p = 1.0")
if not (p <= 2.0):
p = 2.0
warnings.warn("Parameter p > 2; Setting p = 2.0")
# Check that p and q are jointly valid
if not (p < q):
p = q
warnings.warn("Parameter p > q; Setting p = q.0")
return p, q, T, b_interslice
|
c82af66ea91048b10edb8246895642cb9e91be6f
| 695,964
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.