| content (string, lengths 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k) |
|---|---|---|
def load_file(path):
"""Load contents of a file"""
with open(path) as inf:
data = inf.read()
return data
|
9398d26379532d6c3ed407335ade608e9f52d18a
| 23,041
|
from operator import add
def neighbour(c, direction):
"""
    Get the neighbour of a cube coordinate in the given direction.
    :param c: A cube coord (x, z, y).
    :param direction: The direction of the neighbour.
    :return: The neighbouring cube coord in the specified direction.
    """
    # operator.add on bare tuples would concatenate them; map applies it component-wise.
    return tuple(map(add, c, direction))
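# Minimal usage sketch (the direction tuple is illustrative, not from the
# original source): step one unit from a coordinate.
step = (1, -1, 0)
assert neighbour((0, 0, 0), step) == (1, -1, 0)
assert neighbour((2, 0, -2), step) == (3, -1, -2)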
|
0da5e9e037ad996b495412b96b6b2880c0c7e479
| 23,042
|
import string
import random
def strings(n, chars=string.ascii_letters):
""" Return random string of N characters, sampled at random from `chars`.
"""
return ''.join([random.choice(chars) for i in range(n)])
|
bc7e2cab22b4d0a98b3e93a7199c1ec2b326ee68
| 23,043
|
def search_caches(key, cache_list, raise_error=True):
"""Find UUID if it is in the cache_list dicts
Parameters
----------
key : str
the UUID we're looking for
cache_list : mapping or list of mapping
caches that the objects are stored in (will be searched in order of
the list). Mapping is {uuid: object}
raise_error : bool
whether to raise a KeyError if UUID not found; default True. If
False, object not found returns None
Returns
-------
object or None
the object with the given UUID, or ``None`` if the object is not
found and ``raise_error`` is ``False``.
"""
if key is None:
return None # some objects allow UUID to be None
if not isinstance(cache_list, list):
cache_list = [cache_list]
obj = None
for cache in cache_list:
if key in cache:
obj = cache[key]
break
if obj is None and raise_error:
raise KeyError("Missing key: " + str(key))
return obj
|
e2996e37604e26c937a046dc5f88674060aba738
| 23,045
|
def grayscale(rgb):
"""Converts image to grayscale.
"""
return rgb.dot([0.299, 0.587, 0.114])
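# Minimal usage sketch; assumes `rgb` is an (H, W, 3) NumPy array (numpy is
# an assumed dependency, the original snippet does not import it).
import numpy as np
img = np.ones((2, 2, 3))   # a white 2x2 image
gray = grayscale(img)      # shape (2, 2); the BT.601 weights sum to 1.0
assert gray.shape == (2, 2) and np.allclose(gray, 1.0)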
|
baf64556a5cdce8ad49023e96271f1358f618db7
| 23,048
|
import re
def get_name_slug(name: str) -> str:
"""Get the stub of the organization's name.
Arguments:
name {str} -- Organization name.
Returns:
        str -- Organization name slug.
"""
return '-'.join(re.split(r'\W', name.lower()))
|
ef3fce6346a7aabfcebcc6a6e72d1e718e0ed4d2
| 23,049
|
import os
def _object_exists(uri: str):
"""Checks whether an object exists at specified directory."""
data_exists = os.path.isfile(os.path.join(uri, "data"))
meta_exists = os.path.isfile(os.path.join(uri, "meta"))
return all((os.path.isabs(uri), os.path.isdir(uri), data_exists, meta_exists))
|
40ca99d491de7760ccfd4be2e3826fc496f380fc
| 23,051
|
def is_task_list(fn):
"""Check if a function is a task list.
Return:
boolean: if a function is a task list.
"""
    return bool(getattr(fn, '__garcon__', {}).get('list'))
|
6546df08c4b6736bc3b08a77e0064191ff64efe7
| 23,052
|
import os
from glob import glob
def can_folderise(folder):
"""
Check if corpus can be put into folders
"""
if os.path.isfile(folder):
return False
fs = glob(os.path.join(folder, '*.txt'))
if len(fs) > 1:
if not any(os.path.isdir(x) for x in glob(os.path.join(folder, '*'))):
return True
return False
|
a6e6a8ca9febd192ea0ca505169fe58891725986
| 23,053
|
def _mutateCommandline(context, commandline):
"""Adjust runscript to set a different value to the LLVM_PROFILE_FILE
environment variable for each execution."""
profilefile = context.tmpBase + ".profraw"
prefix = "env LLVM_PROFILE_FILE=%s " % profilefile
context.profilefiles.append(profilefile)
return prefix + commandline
|
d31a180c292dc087a6c6b7e6d7316c2cbba08fdf
| 23,055
|
import re
def get_used_words(file_path, count):
"""
Count unique words in file, and return 'count' most used
Args:
file_path (str): path to file
count (int): number of words to return
Returns:
        list(tuple(str, int)): the 'count' most used words, each with its usage count
"""
words = dict()
with open(file_path, mode='r', encoding='utf-8') as fd:
for line in fd:
for word in re.findall(r'\b(\d*[a-zA-Z\']+\d*\w*)\b', line):
words[word.lower()] = words.get(word.lower(), 0) + 1
return sorted(words.items(), key=lambda w: w[1], reverse=True)[0:count]
|
b42a03525893f5ddde57c92ce17a4b9dc45ac892
| 23,056
|
import pandas
import logging
def user_specify_time(times, voltages, end_time):
""" Cuts off all time and voltage data that occurs after the user-specified
end time. If the user does not specify an end time, this function will
default to keeping the time array untrimmed.
:param times: List of time data
:param voltages: List of voltage data
:param end_time: Time (in seconds) at which the data should end
:return: Trimmed time and voltage lists
"""
try:
if pandas.isnull(end_time) or type(end_time) is bool:
raise ValueError
end_time = float(end_time)
if end_time < 0 or end_time > max(times):
raise ValueError
except ValueError:
logging.warning("End time not valid: {}".format(end_time))
logging.warning("Using default end time by not trimming data at all.")
return times, voltages
ret_times = []
ret_voltages = []
for index, time in enumerate(times):
if time <= end_time:
ret_times.append(time)
ret_voltages.append(voltages[index])
else:
break
return ret_times, ret_voltages
|
22f119e704161876dee1a0d917a179b13b1fcc51
| 23,057
|
import sys
def get_size(ds):
"""
Gets the sum of sizes of each object in vars(ds).
"""
return sum(sys.getsizeof(getattr(ds, it)) for it in vars(ds).keys())
|
12c026e8c3a706dada38e5cd752582e6a6fd41e6
| 23,059
|
def reverseCompliment(sequence):
"""
    Returns the reverse complement of the DNA sequence
"""
complimentMatrix = {'A':'T', 'T':'A',
'G':'C', 'C':'G'}
complimentArray = []
DNAarray = list(sequence.upper())
for bp in reversed(DNAarray):
complimentArray.append(complimentMatrix[bp])
compliment = ''.join(complimentArray)
return compliment
|
24fd3cf55a62988d2d942eb53ca33f910cd635e0
| 23,061
|
import torch
def squash(s, dim=-1, constant=1, epsilon=1e-8):
"""
    Drives the length of a large vector to near 1 and of a small vector to 0.
    :param s: N-dim tensor
    :param dim: the dimension to squash
    :param constant: scaling constant in (0, 1]
    :param epsilon: small constant to avoid division by zero
    :return: a tensor of the same shape as `s`
"""
norm_2 = torch.norm(s, p=2, dim=dim, keepdim=True)
scale = norm_2**2 / (constant + norm_2**2) / (norm_2 + epsilon)
return scale * s
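# Minimal usage sketch: squashing preserves direction while mapping vector
# length into (0, 1); long rows end up near 1, short rows near 0.
v = torch.tensor([[10.0, 0.0], [0.01, 0.0]])
lengths = torch.norm(squash(v), dim=-1)   # ~0.990 for the long row, ~0.0001 for the short row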
|
2c457a6782143061814cedab70420738f5b8c469
| 23,062
|
from typing import Dict
def count_vertically_most_common_bits(numbers) -> Dict[int, Dict[str, int]]:
"""
    For each bit position in a list of binary numbers, count the 0s and 1s.
    The result is saved into a nested dictionary.
Eg. [11, 01] -> {0: {'0': 1, '1': 1}, 1: {'0': 0, '1': 2}}
Args:
numbers (list): list of numbers in binary format
Returns:
Dict[int, Dict[str, int]]: keys are numbered positions,
values are dictionaries, with 2 keys ('0' and '1') and counts as values.
"""
bits_in_lines = {}
    for digit in range(len(numbers[0])):
bits_in_lines[digit] = {"0": 0, "1": 0}
for number in numbers:
for i, bit in enumerate(number):
bits_in_lines[i][bit] += 1
return bits_in_lines
|
9b7b1b4ea56091294d49f60212b30d0fd819f2d3
| 23,064
|
def has_permission_to_view(page, user):
"""
Check whether the user has permission to view the page. If the user has
any of the page's permissions, they have permission. If the page has no set
permissions, they have permission.
"""
if page.permissions.count() == 0:
return True
for perm in page.permissions.all():
perm_label = '%s.%s' % (perm.content_type.app_label, perm.codename)
if user.has_perm(perm_label):
return True
return False
|
a61337ac64e7f300a1439fb04c55ab5d89d6303d
| 23,065
|
def guessseat(seatids):
""" Our seat is the seat which id is not in the least but which previous & next one
is in.
"""
for seat in seatids:
if seat + 1 not in seatids and seat + 2 in seatids:
return seat + 1
|
eaee3147223ec1f177694dc6b6d32191df589cd7
| 23,066
|
import torch
def get_v_coords(p_range, v_size, v_indices):
"""
Args:
p_range: [x_min, y_min, z_min, x_max, y_max, z_max]
v_size: [vx, vy, vz]
v_indices : [M, 4] -> [bs, z_i, y_i, x_i]
Returns:
v_coords: [M, 4] -> [bs, x, y, z]
"""
with torch.no_grad():
v_size = torch.tensor(v_size).unsqueeze(0).to(v_indices.device)
min_range = torch.tensor(p_range[0:3]).unsqueeze(0).to(v_indices.device)
        v_xyz_idx = v_indices[:, [3, 2, 1]]
        v_bs = v_indices[:, [0]].float()
        v_xyz = (v_xyz_idx.float() + 0.5) * v_size + min_range
v_coords = torch.cat([v_bs, v_xyz], dim = 1)
return v_coords
|
9d2870720f902d343577a85d797b72a277fe7673
| 23,067
|
def split_data(arrays, test_size=0.2):
"""
"""
splitted_arrays = {
'train': {},
'test': {}
}
for name, array in arrays.items():
t_steps = array.shape[0]
cut_point = int(t_steps * test_size)
splitted_arrays['train'][name] = array[:-cut_point, :]
splitted_arrays['test'][name] = array[-cut_point:, :]
return splitted_arrays
|
ae27085fb22c2351d1b2481ff59f707431223374
| 23,068
|
def alternate(seq):
"""
Splits *seq*, placing alternating values into the returned iterables
"""
return seq[::2], seq[1::2]
|
ef4ed2b352e411f0fb3af1774e066c74f2ff9c28
| 23,069
|
from typing import Any
def measure_overlap(lhs: Any, rhs: Any) -> float:
"""
Given two objects with "start" and "end" attributes, return the % of their overlapped time
with regard to the shorter of the two spans.
."""
lhs, rhs = sorted([lhs, rhs], key=lambda item: item.start)
overlapped_area = lhs.end - rhs.start
if overlapped_area <= 0:
return 0.
dur = min(lhs.end - lhs.start, rhs.end - rhs.start)
return overlapped_area / dur
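# Minimal usage sketch (Span is a hypothetical stand-in for any object
# with start/end attributes):
from collections import namedtuple
Span = namedtuple("Span", ["start", "end"])
assert measure_overlap(Span(0, 10), Span(5, 20)) == 0.5   # 5 of the shorter 10-unit span
assert measure_overlap(Span(0, 5), Span(6, 10)) == 0.0    # disjoint spans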
|
760df7f8ec3f4ab59063ded4e9b4508c170c9898
| 23,070
|
def get_identity_credentials(aip):
"""Returns a dictionary containing a mapping from publickey to identity"""
agent_map = aip.get_agent_identity_to_uuid_mapping()
agent_credential_map = {}
for agent in agent_map:
agent_credential = aip.get_agent_keystore(agent_map[agent]).public
agent_credential_map[agent_credential] = agent
return agent_credential_map
|
3201345db020d3f8e88c83c330900627a0b9bdb4
| 23,071
|
def find_file_in_content_dict(content_dict, file_name):
"""
Check if a file exists in the content dictionary
"""
file_name_search_key = file_name.upper()
    for uber_file_name in content_dict:
        vs_filter_dict = content_dict[uber_file_name]
        for vs_filter_name in vs_filter_dict:
source_files = vs_filter_dict[vs_filter_name]
for source_file in source_files:
if source_file.upper() == file_name_search_key:
return True
# Handle the (posix) case if file_name is in a different folder than the context root
if source_file.upper().endswith('/'+file_name_search_key):
return True
# Handle the (dos) case if file_name is in a different folder than the context root
if source_file.upper().endswith('\\'+file_name_search_key):
return True
return False
|
8f030d88f1fb3f3d219000e4265b5d620d7584c0
| 23,072
|
def clut8_rgb565(i):
"""RBG565 CLUT for wasp-os.
This CLUT implements the same palette as :py:meth:`clut8_888` but
outputs RGB565 pixels.
.. note::
This function is unused within this file but needs to be
maintained alongside the reference clut so it is reproduced
here.
:param int i: Index (from 0..255 inclusive) into the CLUT
:return: 16-bit colour in RGB565 format
"""
if i < 216:
rgb565 = (( i % 6) * 0x33) >> 3
rg = i // 6
rgb565 += ((rg % 6) * (0x33 << 3)) & 0x07e0
rgb565 += ((rg // 6) * (0x33 << 8)) & 0xf800
elif i < 252:
i -= 216
rgb565 = (0x7f + (( i % 3) * 0x33)) >> 3
rg = i // 3
rgb565 += ((0x4c << 3) + ((rg % 4) * (0x33 << 3))) & 0x07e0
rgb565 += ((0x7f << 8) + ((rg // 4) * (0x33 << 8))) & 0xf800
else:
i -= 252
gr6 = (0x2c + (0x10 * i)) >> 2
gr5 = gr6 >> 1
rgb565 = (gr5 << 11) + (gr6 << 5) + gr5
return rgb565
|
1f651c381e1995ac80300f60bd686b296abcdb17
| 23,073
|
import threading
import atexit
def call_repeatedly(func, interval, *args, **kwargs):
"""
Call a function at interval
Returns both the thread object and the loop stopper Event.
"""
    main_thread = threading.current_thread()
stopped = threading.Event()
def loop():
        while not stopped.wait(interval) and main_thread.is_alive(): # the first call is in `interval` secs
func(*args, **kwargs)
return
timer_thread = threading.Thread(target=loop, daemon=True)
timer_thread.start()
atexit.register(stopped.set)
return timer_thread, stopped.set
|
65c33039212463f13dd2f72e0be41f921bf98d0c
| 23,074
|
def beautifulTriplets(d, arr):
"""
Returns the number of triplets in arr where
i < j < k
and
a[j] - a[i] = a[k] - a[j] = d
"""
beautiful_count = 0
# if the array contains any values where the value plus d and plus 2 * d are
# also in the array, the value is beautiful. This approach yields an average
# time complexity of O(n ** 2) due to the "in" operator.
    for value in arr:
        if value + d in arr and value + 2 * d in arr:
            beautiful_count += 1
return beautiful_count
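# A hedged O(n) variant (not from the original source): one precomputed set
# makes each membership test O(1) on average instead of a linear scan.
def beautiful_triplets_fast(d, arr):
    values = set(arr)
    return sum(1 for a in arr if a + d in values and a + 2 * d in values)

assert beautiful_triplets_fast(3, [1, 2, 4, 5, 7, 8, 10]) == 3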
|
6484b9fde450e2c5d3ec4814e8213b225f120112
| 23,078
|
def _to_space_separated_string(l):
"""
Converts a container to a space-separated string.
INPUT:
- ``l`` -- anything iterable.
OUTPUT:
String.
EXAMPLES::
sage: import sage.geometry.polyhedron.misc as P
sage: P._to_space_separated_string([2,3])
'2 3'
"""
    return ' '.join(repr(x) for x in l)
|
1a14fee5bdc86c52c1c9687f922c0d077bf859bb
| 23,079
|
def _obtain_weights_CSVLogger_filenames(body_backbone_CNN, image_backbone_CNN):
"""Obtains the polished filenames for the weights and the CSVLogger of the model.
# Arguments
model_name: String to declare the name of the model
# Returns
Two strings that will serve as the filenames for the weights and the CSVLogger respectively.
"""
prefix= 'trained_models/emotic_vad_'
suffix= '_weights_tf_dim_ordering_tf_kernels.h5'
weights_filename = prefix + body_backbone_CNN + suffix
CSVLogger_filename = 'emotic_vad_'+body_backbone_CNN+'_training.csv'
return weights_filename, CSVLogger_filename
|
98815be219066eee4db7aaa6788fad79db0a1193
| 23,081
|
def build_from_clause(sources):
"""Given a list of table names, connects them with JOINs"""
from_clause = [sources[0]]
for join_to in sources[1:]:
from_clause.append('JOIN {} ON ({}. = {}.)'.format(join_to, sources[0],
join_to))
return '\n'.join(from_clause)
|
8d07ad5ae09a75bd6d56467f8e24e86825fa884d
| 23,082
|
def build_grid(input_lines, size):
"""Builds grid"""
grid = []
for y in range(size):
row = []
for x in range(size):
row.append('.')
grid.append(row)
input_length = len(input_lines[0])
    start_x = start_y = (size // 2) - (input_length // 2)
for y in range(input_length):
for x in range(len(input_lines[y])):
grid[start_y + y][start_x + x] = input_lines[y][x]
return grid
|
d7ad2fffb039c8bb90c7edee0088d685d45e9a76
| 23,083
|
def download_boiler(info):
"""
Boiler plate text for On-Demand Info for downloads
:param info: values to insert into the boiler plate
    :type info: dict
:return: formatted string
"""
boiler = ('\n==========================================\n'
' {title}\n'
'==========================================\n'
'Total number of ordered scenes downloaded through ESPA order interface order links: {tot_dl}\n'
'Total volume of ordered scenes downloaded (GB): {tot_vol}\n')
return boiler.format(**info)
|
ccdfa8c27590634413a8ff0e9c8572c8f00fbd76
| 23,084
|
from typing import Tuple
def intersectCmp(a: dict, b: dict) -> Tuple[bool, str]:
"""Return true if the common keys have the same values.
E.g.: Taking {"foo": 0, "bla": "test"} and
{"foo": 1, "bar": true, "bla": "test"}, the intersected keys are
"foo" and "bla", and the values are not equal.
If there are no common keys, returns False.
The same comparison applies to nested dictionaries
Args:
a, b: Dictionaries to intersect and compare.
Returns:
A tuple with the comparison result and a diff text, if different.
"""
common_keys = a.keys() & b.keys()
if not common_keys:
return False, "got %s, wanted %s" % (a.keys(), b.keys())
for k in common_keys:
x = a[k]
y = b[k]
if isinstance(x, dict) and isinstance(y, dict):
cmp, diff = intersectCmp(x, y)
if not cmp:
return False, diff
else:
if x != y:
return False, "key %s: %s != %s" % (k, x, y)
return True, ""
|
f03b888d9c0a833e440f7ed4f178edb9aa2da52c
| 23,086
|
def _invalidWin32App(pywinerr):
"""
Determine if a pywintypes.error is telling us that the given process is
'not a valid win32 application', i.e. not a PE format executable.
@param pywinerr: a pywintypes.error instance raised by CreateProcess
@return: a boolean
"""
# Let's do this better in the future, but I have no idea what this error
# is; MSDN doesn't mention it, and there is no symbolic constant in
# win32process module that represents 193.
return pywinerr.args[0] == 193
|
302b32f350267c13ebe992f980b941d1cd638769
| 23,088
|
def month_to_number(month_string):
"""Converts the name of a month to the month number"""
    months = {
        'JAN': '01', 'JANUARY': '01',
        'FEB': '02', 'FEBRUARY': '02',
        'MAR': '03', 'MARCH': '03',
        'APR': '04', 'APRIL': '04',
        'MAY': '05',
        'JUN': '06', 'JUNE': '06',
        'JUL': '07', 'JULY': '07',
        'AUG': '08', 'AUGUST': '08',
        'SEP': '09', 'SEPTEMBER': '09',
        'OCT': '10', 'OCTOBER': '10',
        'NOV': '11', 'NOVEMBER': '11',
        'DEC': '12', 'DECEMBER': '12',
    }
    # Unknown names fall back to the empty string, as before.
    return months.get(month_string, '')
|
daebf08cd2109fa35af1e3256cde26ae7f964f33
| 23,090
|
def illuminant_scotopic_luminance(L_A, CCT):
"""
Returns the approximate scotopic luminance :math:`L_{AS}` of the
illuminant.
Parameters
----------
L_A : numeric
Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.
CCT : numeric
Correlated color temperature :math:`T_{cp}` of the illuminant.
Returns
-------
numeric
Approximate scotopic luminance :math:`L_{AS}`.
Examples
--------
>>> illuminant_scotopic_luminance(318.31, 6504.0) # doctest: +ELLIPSIS
769.9376286...
"""
    L_AS = 2.26 * L_A * ((CCT / 4000) - 0.4) ** (1 / 3)
    return L_AS
|
33f2969722ab34d823d5aa59a7f8e72e2f5c625a
| 23,091
|
def read_iteration():
"""
    Reads the "number of shifts" used when computing the autocorrelation
    coefficient.
    Output
    ------
    iteration : number of shifts
    Raises
    ------
    Exits when something other than a number is entered for iteration.
    """
    print("Enter the number of iterations: ", end="")
    iteration = input()
    try:
        iteration = int(iteration)  # anything non-numeric goes to except
    except ValueError:
        print("Please enter a number for the iteration count.")
        exit()
    return iteration
|
2c3bc4eec925f1c9bb3bed87d06994aac9de3756
| 23,093
|
def evaluateBoard(currentBoard, symbolToEvaluate, currentPlayer):
"""Takes as parameters: a board, a symbol to evaluate and the current symbol moving.
Evaluate the board and returns 1 if the board is a win for the symbol to evaluate,
returns 0 if the board is a draw or returns -1 if the board is a loss.
If it is neither of these options, plays all available moves and recursively calls itself
to evaluate resulting boards."""
nextPlayer = 'o' if currentPlayer == 'x' else 'x'
winner = currentBoard.hasWinner()
#Evaluate if game was win (1), loss(-1) or draw(0)
if winner:
if winner[0] == symbolToEvaluate:
return 1
else:
return -1
elif len(currentBoard.availableTiles()) == 0:
return 0
    #If evaluation is not immediate, use recursion to make all possible moves and evaluate those
else:
evaluations = list()
for tile in currentBoard.availableTiles():
gameBoardCopy = currentBoard.makeCopy()
gameBoardCopy.makeMove(tile,currentPlayer)
evaluations.append(evaluateBoard(gameBoardCopy,symbolToEvaluate,nextPlayer))
if symbolToEvaluate == currentPlayer:
return max(evaluations)
else:
return min(evaluations)
|
1bf1055d1ef7cd56aaad92f4c66a131fc9a7ff0b
| 23,094
|
import math
def f_arenstorf(x, y, rpar):
"""The system of differential equations.
"""
amu, amup = rpar
r1 = (y[0]+amu)**2+y[1]**2
r1 = r1*math.sqrt(r1)
r2 = (y[0]-amup)**2+y[1]**2
r2 = r2*math.sqrt(r2)
f2 = y[0]+2*y[3]-amup*(y[0]+amu)/r1-amu*(y[0]-amup)/r2
f3 = y[1]-2*y[2]-amup*y[1]/r1-amu*y[1]/r2
return [y[2], y[3], f2, f3]
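# Minimal integration sketch with SciPy; the mass parameter and the classic
# initial conditions for a periodic Arenstorf orbit are standard textbook
# values, not taken from the original source.
from scipy.integrate import solve_ivp
amu = 0.012277471                         # normalized Moon mass
y0 = [0.994, 0.0, 0.0, -2.00158510637908]
sol = solve_ivp(lambda t, y: f_arenstorf(t, y, (amu, 1.0 - amu)),
                (0.0, 17.0652), y0, rtol=1e-10, atol=1e-10)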
|
d32d6ba08150546cba0d8fd0b4230a48d8eff3a2
| 23,096
|
def linearization(X, Jfun, P):
"""Transform a covariance matrix via linearization
Arguments:
X: the point to linearize about, (n,) numpy array
Jfun: function which takes the state and returns the (n x n) Jacobian of
the function, f, which we want to transform the covariance by. It
should return an (n x n) matrix
df1dx1 df1dx2 ... df1dxn
df2dx1 df2dx2 ... df2dxn
... ... ... ...
dfndx1 dfndx2 ... dfndxn
P: covariance matrix to transform
Returns:
P_prime: transformed covariance matrix
"""
A = Jfun(X)
P_prime = A.dot(P.dot(A.T))
return P_prime
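# Minimal usage sketch (numpy assumed): for a linear map f(x) = A x the
# Jacobian is constant, so the transform reduces to A P A^T exactly.
import numpy as np
A = np.array([[1.0, 0.5], [0.0, 1.0]])
P = np.eye(2)
assert np.allclose(linearization(np.zeros(2), lambda x: A, P), A @ P @ A.T)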
|
c24c4d0815842cc70ac31f0ba9f505bc0d743036
| 23,097
|
def check_guess(life_point, input_guess, current_guess):
"""
    Checking whether the new guess alphabet can be found in the latest guess, and deducting 1 point if it's not.
:param life_point: int, the current number of guess left.
:param input_guess: str, the new guess alphabet input.
:param current_guess: str, the replaced hint of latest guess.
:return: the latest number of guess left.
"""
if current_guess.find(input_guess) != -1:
print("You are correct!")
return life_point
print("There is no " + input_guess + "'s in the word.")
return life_point - 1
|
8b483662fae5d6167d6aa40fd565b2b276025352
| 23,098
|
import tempfile
import os
import atexit
def temp_file(suffix=""):
"""
Get a temporary file path that will be cleaned up on exit.
    Simpler than NamedTemporaryFile: just a file path, no open mode or anything.
    :return: the temporary file path
"""
f = tempfile.mktemp(suffix=suffix)
def permissive_ignore(file_):
if os.path.exists(file_):
os.remove(file_)
atexit.register(permissive_ignore, f)
return f
|
7ecb5dd3ddfb79163dbf900d82678c16ad4b4bb0
| 23,100
|
from math import floor
def _special_round(num):
"""
    Returns the rounded number:
    - values below 1 round up to 1
    - else if the decimal part is equal or higher than .85 it is rounded up
    - else it is rounded down.
"""
num_int = floor(num)
decimal = num - num_int
if num_int < 1:
return 1
else:
if decimal >= 0.85:
return num_int + 1
else:
return num_int
|
d55691f781a53f4e4227caeac9f1c04b7edeb3be
| 23,102
|
def ensure_bytes(value):
"""Helper function to ensure all inputs are encoded to the proper value utf-8 value regardless of input type"""
if isinstance(value, bytes):
return value
return value.encode('utf-8')
|
12a0f933c0db1d01c8682dc2c8f73db53d816880
| 23,104
|
import collections
import itertools
def string2(fml):
""" convert formula dictionary to formula string with ones when appropriate
"""
fml = collections.OrderedDict(sorted(fml.items()))
fml_str = ''.join(map(str, itertools.chain.from_iterable(fml.items())))
return fml_str
|
8ed4f1e4996038f4cc2bb28d7f7d2737e6fae0c3
| 23,105
|
def number_unique(row: int, column: int):
"""return number of unique shortest paths from start to finish"""
if row == 1 or column == 1: # base case
return 1
else:
return number_unique(row - 1, column)\
+ number_unique(row, column - 1)
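# The plain recursion recomputes overlapping subproblems; a hedged memoized
# sketch (the count equals the binomial coefficient C(row+column-2, row-1)):
from functools import lru_cache

@lru_cache(maxsize=None)
def number_unique_cached(row: int, column: int):
    if row == 1 or column == 1:
        return 1
    return number_unique_cached(row - 1, column) + number_unique_cached(row, column - 1)

assert number_unique_cached(3, 3) == number_unique(3, 3) == 6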
|
51a6b372f7a74ffaa64fc058e7d9f05e3dca8feb
| 23,106
|
def construction_formule(dep,indep):
"""
    Builds a model formula from a list of variables
    """
    return dep + " ~ " + " + ".join(indep)
|
f3ac730378e1437dce6ecef3bbe73005eab0d60e
| 23,108
|
from typing import Union
from datetime import datetime
def datetime_to_iso_8601(obj: Union[datetime, str, int]) -> str:
"""
Convert a datetime to ISO 8601. For use when serializing the credentials dict.
:param obj: a datetime object
:return: the ISO 8601 representation of the datetime
"""
if isinstance(obj, datetime):
return obj.isoformat()
raise TypeError(f"{type(obj)} is not serializable")
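# Minimal usage sketch: pass the function as the `default` hook so that
# json.dumps can serialize datetime values it cannot handle natively.
import json
creds = {"token": "abc", "expiry": datetime(2023, 1, 2, 3, 4, 5)}
assert json.dumps(creds, default=datetime_to_iso_8601) == \
    '{"token": "abc", "expiry": "2023-01-02T03:04:05"}'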
|
13cdd05c9dda84405c650c1275b0bad41f5eddc1
| 23,114
|
def index(request):
"""Just a help page; the action is at static/index.html"""
return {}
|
a131861f587e23ec86281f2bca4472ec974b7a0c
| 23,115
|
def getFootprintByReference(board, reference):
"""
    Return the footprint with the given reference
"""
for f in board.GetFootprints():
if f.GetReference() == reference:
return f
raise RuntimeError(f"Footprint with reference '{reference}' not found")
|
e0bf71531834cf6318aaa37e5bc70a4c17c1363b
| 23,116
|
from typing import List
def fmt_filter(items: List[str]) -> str:
"""Filters use comma-separated values,
but these are encoded as query-value, and we are lazy, so we directly encode the commas"""
return "%2C".join(items)
|
1b5ebb69a3ee00b64a7bca4ada9e148fcb7ca20d
| 23,117
|
import pandas
def write_output_spreadsheet(
quotes,
primer_sequences,
fragment_quotes,
errors,
part_sequences,
construct_parts,
construct_sequences,
target="output.xlsx",
):
"""Write the result of DNA construction plan computations as a spreadsheet.
    :param quotes: The dictionary of DNAWeaver quotes
    :param primer_sequences: The dictionary of primer sequences
    :param fragment_quotes: The dictionary of PCR fragment quotes
:param errors: List of errors as a dictionary
:param part_sequences: The sequences of all the parts
:param construct_parts: The assembled parts sequences
:param construct_sequences: The assembled sequence
:param target: The path to the output file
:type quotes: dict
:type primer_sequences: dict
:type fragment_quotes: dict
:type errors: dict
:type part_sequences: dict
:type construct_parts: dict
:type construct_sequences: dict
:type target: str
:rtype: None
:return: None
"""
# HELPER FUNCTIONS
def list_to_spreadsheet(spreadsheet_name, column_names, mylist):
"""Writes the provided list as a sheet of an Excel spreadsheet.
"""
records = [dict(zip(column_names, row)) for row in mylist]
dataframe = pandas.DataFrame.from_records(
records, index=column_names[0], columns=column_names
)
dataframe.to_excel(writer, sheet_name=spreadsheet_name)
def quote_components_ids(quote):
"""Return the list of ids of all fragments or primers in a quote."""
def _subquote_to_id(subquote):
"Return the ID of either the quote or the re-used sequence"
if subquote.source.operation_type == "library":
return subquote.metadata["part_name"]
else:
return subquote.id
return [
_subquote_to_id(subquote)
for loc, subquote in quote.assembly_plan.items()
]
# WRITE THE CONSTRUCTS PARTS SPREADSHEET
writer = pandas.ExcelWriter(target)
parts_per_construct = [
(name, " + ".join(parts)) for name, parts in construct_parts.items()
]
list_to_spreadsheet(
"construct_parts", ["construct", "parts"], parts_per_construct
)
# WRITE THE CONSTRUCT SEQUENCES SPREADSHEET
list_to_spreadsheet(
"construct_sequences",
["construct", "sequence"],
construct_sequences.items(),
)
# WRITE THE PRIMERS SEQUENCES SPREADSHEET
list_to_spreadsheet(
"primer_sequences",
["primer", "sequence"],
sorted(primer_sequences.items()),
)
# WRITE THE PARTS SEQUENCES SPREADSHEET
list_to_spreadsheet(
"part_sequences", ["part", "sequence"], sorted(part_sequences.items())
)
# WRITE THE PCR_EXTENSIONS SPREADSHEET
fragments_list = [
(
fragment,
quote.metadata["subject"],
" + ".join(quote_components_ids(quote)),
quote.sequence,
)
for fragment, quote in fragment_quotes.items()
]
list_to_spreadsheet(
"fragment_extensions",
["fragment_id", "part", "primers", "fragment_sequence"],
fragments_list,
)
# WRITE THE ASSEMBLY PLAN SPREADSHEET
assembly_plan = [
(construct, quote.source.name, " + ".join(quote_components_ids(quote)))
for construct, quote in quotes.items()
]
list_to_spreadsheet(
"assembly_plan", ["construct", "method", "fragments"], assembly_plan,
)
# WRITE THE ERRORED CONSTRUCTS SPREADSHEET
list_to_spreadsheet("errors", ["construct", "error"], list(errors.items()))
writer.close()
|
1bf94f9c9eada0021c69bd56cfee3505ae586b97
| 23,118
|
def checkNodeInObstacle(node, img):
"""
To check the color of the image at a particular Node
:param node: node to check
:type node: Node type
:param img: the image to check in
:type img: np.array
:return: Boolean of True or False
:rtype: Boolean
"""
if img[node.y, node.x][0] == 0 and img[node.y, node.x][1] == 0 and img[node.y, node.x][2] == 0:
return True
return False
|
0701fef8bbe1dba57486ea50e0eab22b3aef51cb
| 23,121
|
def reciprocal_rank(predicted_bin_list, k):
""" Reciprocal rank = 1/rank of first 'hit', i.e. first 1 in predicted_bin_list[:k]. If there is no hit,
it is 0."""
predicted_bin_list_k = predicted_bin_list[:k]
# Keep only 1s and 0s, discard 2s (2s are useful only for dcg).
predicted_bin_list_k = [1 if entry>0 else 0 for entry in predicted_bin_list_k]
# Get the index of the first 1
try:
# +1 as index starts with 0.
rr = 1 / (predicted_bin_list_k.index(1) + 1)
return rr
except ValueError:
return 0
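# Worked example: the first hit sits at index 2, i.e. rank 3, so RR = 1/3;
# a list with no hits in the top k yields 0.
assert reciprocal_rank([0, 0, 1, 0, 1], 5) == 1 / 3
assert reciprocal_rank([0, 0, 0], 3) == 0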
|
38f2ad4f40225c7ed75f39f79bc0ff56e4c70862
| 23,122
|
def span(text):
"""
Wraps text around formatting tag
(That's how the web editor handles font sizes for some reason)
"""
return '''<span style="font-size: 16px;">'''+text+"</span>"
|
1c82dda9907879ad5367b8bc281b983b50cef747
| 23,124
|
def naive_max_perm(M, A=None):
"""
>>> M = [2, 2, 0, 5, 3, 5, 7, 4]
>>> print(naive_max_perm(M))
{0, 2, 5}
"""
if A is None:
A = set(range(len(M)))
if len(A) == 1:
return A
B = {M[i] for i in A}
C = A - B
if C:
A.remove(C.pop())
return naive_max_perm(M, A)
return A
|
f265e83a304d328adaadd4b1bd407ee6ddc06df0
| 23,125
|
def seqToGenbankLines(seq):
""" chunk sequence string into lines each with six parts of 10bp, return as a list
>>> seqToGenbankLines("aacacacatggtacacactgactagctagctacgatccagtacgatcgacgtagctatcgatcgatcgatcgactagcta")
['aacacacatg gtacacactg actagctagc tacgatccag tacgatcgac gtagctatcg', 'atcgatcgat cgactagcta']
"""
# first chunk into 10bp parts
parts = [seq[i:i+10] for i in range(0, len(seq), 10)]
# put into lines of 6*10 bp
lines = []
for i in range(0, len(parts), 6):
lines.append(" ".join(parts[i:i+6]))
return lines
|
f0e290cf3d666980edc18acc50523f45ab18e24a
| 23,126
|
import math
def recreate_2DFES(FES, icount, coords, xinc, xmin, xmax, yinc, ymin, ymax, E):
"""
    Receives and returns the arrays that recreate the FES.
Parameters:
-----------
FES : Array of floats
Energy values corresponding to x location on x dimension
icount : Array of integers
Stores number of counts sampled at each location
    coords : list of floats
        x and y location of the walker
xinc : float
increment of grid
xmin : float
minimum value in grid
xmax : float
maximum value in grid
yinc : float
increment of grid
ymin : float
minimum value in grid
ymax : float
maximum value in grid
E : float
Energy value to be stored
Returns:
--------
FES : Array of floats
Energy values corresponding to x location on x dimension
(updated)
icount : Array of integers
Number of counts sampled at each location (updated)
"""
xindex = int(round((round(coords[0],
int(abs(math.log10(xinc)))) +
(0 - xmin)) / xinc))
yindex = int(round((round(coords[1],
int(abs(math.log10(yinc)))) +
(0 - ymin)) / yinc))
if (coords[0] > xmin and coords[0] < xmax and
coords[1] > ymin and coords[1] < ymax):
FES[yindex, xindex] = ((FES[yindex, xindex] *
(icount[yindex, xindex]) + E) /
(icount[yindex, xindex] + 1))
icount[yindex, xindex] = icount[yindex, xindex] + 1
return (FES, icount)
|
ad5f0938903a32c9fdf264a9a42c6d1eb316adf2
| 23,128
|
def magic_index(array: list)-> int:
"""binary search + tweak
left search: take the min(m-1, array[m])
right search: take the max(m+1, array[m])
"""
l,r = 0, len(array)-1
while l<=r:
m = (l+r)//2
if array[m] == m: return m
if array[m] < m:
l = max(m+1, array[m])
else:
r = min(m-1, array[m])
raise KeyError('Not found')
|
d1166784e51ad8d56f7a7af0b55396a988f19e77
| 23,129
|
def _get_from_email_(message: dict) -> str:
"""
    Returns the email address the message was sent from
    :param message: a dict that represents a message
    :return: a string containing the email or an empty string
"""
if message.get("@msg_from"):
email = message["@msg_from"].get("emailAddress")
else:
email = ""
return email
|
14c364470cb3ad0ed46d3d6e19f81e0e0d7dffd5
| 23,130
|
def get_poem_title(poem_container_soup):
"""Read in a soup object containing a poem and return the poem's title"""
poem_title = ""
title_soup = poem_container_soup.findAll("span", { "class" : "mw-headline" } )[0]
title = ''.join(title_soup.findAll(text=True))
return title
|
e6fff5425d052c09dead9d1865f5c3652c0c8f6b
| 23,131
|
from typing import List
def get_task_names(path: str, use_compound_names: bool = False) -> List[str]:
"""
Gets the task names from a data CSV file.
:param path: Path to a CSV file.
:param use_compound_names: Whether file has compound names in addition to smiles strings.
:return: A list of task names.
"""
index = 2 if use_compound_names else 1
with open(path) as f:
task_names = f.readline().strip().split(',')[index:]
return task_names
|
f97af21dbb2f8cdeb6e24de25f529ad875310135
| 23,132
|
def calculate_mem_limit(machine_data, host_data):
"""Calculate real memory limit for container"""
mem_limit = host_data["spec"]["memory"]["limit"]
mem_limit_host = machine_data["memory_capacity"]
if mem_limit > mem_limit_host:
mem_limit = mem_limit_host
return mem_limit
|
bf7df27ba12364b159446f4a83fa124cffd27ed4
| 23,133
|
from typing import List
def is_doubled(arr: List[str]) -> bool:
"""
Checks whether a segment array of strings is doubled. That is,
the first half contains the same elements as the second half.
:param arr: List of strings.
:return: True if array is doubled, False otherwise.
"""
if len(arr) % 2 != 0:
return False
first = 0
second = int(len(arr) / 2)
while second < len(arr):
if arr[first] != arr[second]:
return False
first += 1
second += 1
return True
|
3e13dc4c035fa31136e20f30e9c3913c10f90a26
| 23,135
|
def get_weight_op(weight_schedule):
"""Returns a function for creating an iteration dependent loss weight op."""
return lambda iterations: weight_schedule(iterations)
|
c3f4a01159a6a4b3ed309bf094b1821a542ada32
| 23,137
|
import base64
def get_file_content_chrome(browser, uri):
""" Use selenium [browser] to download blob [uri].
- Source https://stackoverflow.com/a/47425305/
"""
result = browser.execute_async_script("""
var uri = arguments[0];
var callback = arguments[1];
var toBase64 = function(buffer){for(var r,n=new Uint8Array(buffer),t=n.length,a=new Uint8Array(4*Math.ceil(t/3)),i=new Uint8Array(64),o=0,c=0;64>c;++c)i[c]="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charCodeAt(c);for(c=0;t-t%3>c;c+=3,o+=4)r=n[c]<<16|n[c+1]<<8|n[c+2],a[o]=i[r>>18],a[o+1]=i[r>>12&63],a[o+2]=i[r>>6&63],a[o+3]=i[63&r];return t%3===1?(r=n[t-1],a[o]=i[r>>2],a[o+1]=i[r<<4&63],a[o+2]=61,a[o+3]=61):t%3===2&&(r=(n[t-2]<<8)+n[t-1],a[o]=i[r>>10],a[o+1]=i[r>>4&63],a[o+2]=i[r<<2&63],a[o+3]=61),new TextDecoder("ascii").decode(a)};
var xhr = new XMLHttpRequest();
xhr.responseType = 'arraybuffer';
xhr.onload = function(){ callback(toBase64(xhr.response)) };
xhr.onerror = function(){ callback(xhr.status) };
xhr.open('GET', uri);
xhr.send();
""", uri)
    if isinstance(result, int):
raise Exception("Request failed with status %s" % result)
return base64.b64decode(result)
|
284663653a94206775bca09b5fee643fd33e3dce
| 23,138
|
def get_average_quality(qualities):
""" Calculates average quality as Phred quality score
Parameters
----------
qualities: str
Read qualities for a certain position
Returns
-------
float
Average quality
"""
sum_quality = 0
for q in qualities:
sum_quality += 1 - 10 **-((ord(q) - 33) / 10.0)
return sum_quality / len(qualities)
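# Worked example: 'I' is ASCII 73, so Q = 73 - 33 = 40 and the per-base
# accuracy is 1 - 10**-4 = 0.9999; a run of 'I's averages to the same value.
assert abs(get_average_quality("III") - 0.9999) < 1e-9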
|
821f4353d7371ba8a2f1e4f749978a75cb6bd851
| 23,139
|
def calc_lcoe_om(fom, vom, cf=1):
"""
    :param fom: Fixed operation and maintenance costs as CURR/KWY
    :param vom: Variable cost in the form of CURR/KWH
    :param cf: Capacity factor assumed for the plant, default is 1
    :return: LCOE O&M component in CURR per KWh
    """
    # 8760 hours per year convert the fixed cost per kW-year to per kWh.
    fixed = fom / (cf * 8760)
om_lcoe = fixed + vom
return om_lcoe
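# Worked example (after the 8760 h/year fix): 87.6 CURR/kW-year at cf = 1
# spreads to 0.01 CURR/kWh, plus a 0.02 CURR/kWh variable cost.
assert abs(calc_lcoe_om(87.6, 0.02) - 0.03) < 1e-12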
|
b847a8cdbfbffb4d8270fea7703cf3250cbff491
| 23,141
|
def extractAll(text, startText, endText):
"""
    Extract all occurrences of a string within text that start with startText and end with endText
    Parameters:
    text: the text to be parsed
    startText: the starting token
    endText: the ending token
    Returns an array containing all occurrences found, with tabs and newlines removed and leading whitespace removed
"""
result = []
start = 0
pos = text.find(startText, start)
while pos != -1:
start = pos + startText.__len__()
end = text.find(endText, start)
result.append(text[start:end].replace('\n', '').replace('\t', '').lstrip())
pos = text.find(startText, end)
return result
|
2357f7c0f0d1b35a20a23408f55ce88d3bb2d6ea
| 23,142
|
def package_names_first(dependency_names, package_names):
""" Put package names first. """
dep_packages = sorted(
[x for x in dependency_names if x in package_names])
dep_projects = sorted(
[x for x in dependency_names if x not in package_names])
dep_packages.extend(dep_projects)
return dep_packages
|
365fefa9b0855776baf9cee982c45c8a85bb6926
| 23,143
|
def get_float_from_str(str):
"""
    Convert a string to a float; an empty string yields 0.
"""
r = 0 if str == "" else float(str)
return r
|
0695e8214373ea72c5da0f86f5dc33aa198cf271
| 23,145
|
def logistic_rhs(t, x, r=2., k=2.):
"""
RHS evaluation of logistic ODE,
returns
f(t, x) = r * x * (1 - x/k)
"""
return r * x * (1. - x / k)
|
eb65d625a3ae1b544032e6f0efe0eb500aa256cf
| 23,146
|
import re
def convert_to_api_url(url: str, netloc: str, api_netloc: str) -> str:
"""Support both regular and raw URLs"""
match = re.search(f"https://{netloc}/(.*?)/(.*?)/(?:raw|src)/(.*)", url)
if match:
user, repo, path = match.groups()
else:
raise ValueError("Not a valid bitbucket URL: {url}")
return f"https://{api_netloc}/2.0/repositories/{user}/{repo}/src/{path}"
|
a6835f3b7b1dcdaf6901ad5644a189dfc902caf8
| 23,147
|
def _get_criteria_interactions(xmltree, criteria_allowed):
"""In the returned dict 'interactions', the most outer key designates
direction of the interaction effect (i.e. which criterion is affected),
which is significant in case of 'antagonistic' interaction.
"""
interaction_types_allowed = ['strengthening', 'weakening', 'antagonistic']
path = 'criteriaValues[@mcdaConcept="criteriaInteractions"]/criterionValue'
interactions = {}
cvs = xmltree.xpath(path)
if not cvs:
raise RuntimeError("Wrong or missing definitions for criteria "
"interactions.")
for cv in cvs:
interaction_type = cv.attrib.get('mcdaConcept')
if interaction_type not in interaction_types_allowed:
raise RuntimeError("Wrong interaction type '{}'."
.format(interaction_type))
criteria_involved = cv.xpath('.//criterionID/text()')
if len(criteria_involved) != 2:
raise RuntimeError("Wrong number of criteria for '{}' interaction."
.format(interaction_type))
for criterion in criteria_involved:
if criterion not in criteria_allowed:
raise RuntimeError("Unknown criterion '{}' for '{}' interaction."
.format(criterion, interaction_type))
interaction_value = float(cv.find('./value//').text)
if ((interaction_value > 0 and interaction_type == 'weakening') or
(interaction_value < 0 and interaction_type in ('strengthening','antagonistic')) or
(interaction_value == 0)):
raise RuntimeError("Wrong value for '{}' interaction."
.format(interaction_type))
if interaction_type == 'strengthening' and 'weakening' in interactions.keys():
for i in interactions['weakening']:
if set(i[:2]) == set(criteria_involved):
raise RuntimeError("'strengthening' and 'weakening' "
"interactions are mutually exclusive.")
elif interaction_type == 'weakening' and 'strengthening' in interactions.keys():
for i in interactions['strengthening']:
if set(i[:2]) == set(criteria_involved):
raise RuntimeError("'strengthening' and 'weakening' "
"interactions are mutually exclusive.")
c1, c2 = criteria_involved
try:
interactions[interaction_type].append((c1, c2, interaction_value))
except KeyError:
interactions.update({interaction_type: [(c1, c2, interaction_value)]})
return interactions
|
b29baa719f55ac4eabd1319531b7b15b7092b586
| 23,150
|
def get_descendants(manager, resource, pathway_id, pathway_name):
"""Generate d3 dendrogram structure by using BFS starting from the starting from a parent (root) node to the last descendants.
:param manager: ComPath manager
:param str resource: resource name
:param str pathway_id: pathway identifier in the resource
:param str pathway_name: pathway name
:return: parent-children data structure
:rtype: list[dict]
"""
# Create the entry dictionary of the pathway (node).
d3_dendrogram = dict(
children=[],
name=pathway_name.replace(' - Homo sapiens (human)', ''), # Replace KEGG Suffixes
pathway_id=pathway_id,
resource=resource
)
    # Get direct descendants for the pathway.
descendent_mappings = manager.get_decendents_mappings_from_pathway_with_is_part_of_relationship(
resource,
pathway_id,
pathway_name
)
    # Return the entry dict with no children if the node has no descendants.
if not descendent_mappings:
return d3_dendrogram
    # Make the recursive call for each child.
for mapping in descendent_mappings:
pathway = mapping.get_complement_mapping_info(resource, pathway_id, pathway_name)
d3_dendrogram["children"].append(
get_descendants(
manager,
pathway[0],
pathway[1],
pathway[2]
)
)
return d3_dendrogram
|
ad94c144a78e180b8a06c3af851cae8c2a1aea2f
| 23,151
|
def _get_offset_ann_map(ann_objs, restrict_types=None, ignore_types=None):
"""Helper function for search.
Given annotations, returns a dict mapping offsets in text into the
set of annotations spanning each offset.
"""
# treat None and empty list uniformly
restrict_types = [] if restrict_types is None else restrict_types
ignore_types = [] if ignore_types is None else ignore_types
offset_ann_map = {}
for ann_obj in ann_objs:
for t in ann_obj.get_textbounds():
if t.type in ignore_types:
continue
if restrict_types != [] and t.type not in restrict_types:
continue
for t_start, t_end in t.spans:
for o in range(t_start, t_end):
if o not in offset_ann_map:
offset_ann_map[o] = set()
offset_ann_map[o].add(t)
return offset_ann_map
|
577b85fb627278c090658f594047fb0e1e3c7781
| 23,152
|
from datetime import datetime
def createTrunk(name):
"""
Creates a trunk name for data in data_vault corresponding
to the current date.
Arguments:
name (str) : the name of the client.
Returns:
(*str) : the trunk to create in data_vault.
"""
date = datetime.now()
trunk1 = '{0:d}_{1:02d}_{2:02d}'.format(date.year, date.month, date.day)
trunk2 = '{0:s}_{1:02d}:{2:02d}'.format(name, date.hour, date.minute)
return ['', str(date.year), '{:02d}'.format(date.month), trunk1, trunk2]
|
c4b1967468159cc13a551afcb142b05a510174ad
| 23,153
|
def PERDA_DESLIZAMENTO_ANCORAGEM(P_IT0, SIGMA_PIT0, A_SCP, L_0, DELTA_ANC, E_SCP):
"""
    This function determines the prestress loss caused by slip of the tendon
    in the anchorage.
    Input:
    P_IT0      | Initial prestressing force                | kN    | float
    SIGMA_PIT0 | Initial prestressing stress               | kN/m² | float
    A_SCP      | Total area of prestressed reinforcement   | m²    | float
    L_0        | Length of the prestressing bed            | m     | float
    DELTA_ANC  | Expected slip of the anchorage system     | m     | float
    E_SCP      | Young's modulus of the prestressing steel | kN/m² | float
    Output:
    DELTAPERC  | Percentage prestress loss                 | %     | float
    P_IT1      | Final prestressing force                  | kN    | float
    SIGMA_PIT1 | Final prestressing stress                 | kN/m² | float
    """
    # Pre-elongation of the tendon
    DELTAL_P = L_0 * (SIGMA_PIT0 / E_SCP)
    # Strain reduction in the prestressed reinforcement
    DELTAEPSILON_P = DELTA_ANC / (L_0 + DELTAL_P)
    # Prestress losses
DELTASIGMA = E_SCP * DELTAEPSILON_P
SIGMA_PIT1 = SIGMA_PIT0 - DELTASIGMA
DELTAP = DELTASIGMA * A_SCP
P_IT1 = P_IT0 - DELTAP
DELTAPERC = (DELTAP / P_IT0) * 100
return DELTAPERC, P_IT1, SIGMA_PIT1
|
6218ea2e328e05e56e9a8c13828152067f988550
| 23,154
|
from typing import List
def get_possible_stride_sizes(image_size: int, padding_size: int, filter_size: int) -> List[int]:
"""Get possible stride sizes as list of Integers
Args:
image_size (int): input image size
padding_size (int): padding size
filter_size (int): filter/kernel size
Returns:
        List[int]: all possible stride sizes
    """
    resultant_strides = []
    total_value = image_size + 2 * padding_size - filter_size
    # A stride is valid when it evenly divides (W + 2P - F).
    for i in range(1, total_value):
        if total_value % i == 0:
            resultant_strides.append(i)
    return resultant_strides
|
4b9f1413b11ad4430bcebdc3df4b82fab07f849b
| 23,155
|
def build_error_msg(error, code=10):
"""Builds an error message to add to the queue"""
msg = {
'jsonrpc': '2.0',
'error': {
'code': code,
'message': f'{error}'
},
'id': 'app_error'
}
return msg
|
2e4f9be31defe675738d7c2cbaa591a65f5abdcf
| 23,158
|
import re
def remove_state_keys(state, keys_regex):
"""Remove keys from state that match a regex"""
regex = re.compile(keys_regex)
return {k: v for k, v in state.items() if not regex.findall(k)}
|
d1ec89b5da23f866cb8bec8d03a14cc2deba8b5f
| 23,160
|
import requests
def list_measurement_defintion(measurement=None):
"""
    Given a measurement type from one of the available GLOS stations, return its
    definition (i.e. what it is) to the user.
"""
response = requests.get(
"https://seagull-api.glos.org/api/v1/parameter-configurations"
)
data_types = {}
for i in response.json():
data_types[i["display_name"]["en"]] = i
    if measurement is not None:
return data_types[measurement]
return data_types
|
6d6c7cdebd8fe4af054fe152faf329a1489300bf
| 23,162
|
def get_value_assigned_to_user(user_data, key):
"""
Try to retrieve (attribute) value assigned to an user.
    In practice it will be a certificate or key. KeyError will be raised in case
of improper payload format or when the attribute for given key does not
exist.
"""
d = user_data["user"]
return d[key]
|
f6e5155612e048ae3d52aa6759e96bb823ebbaca
| 23,163
|
def fib(n):
"""
input: positive integer 'n'
    returns the n-th fibonacci term, indexing by 0
"""
# precondition
assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
tmp = 0
fib1 = 1
ans = 1 # this will be return
for i in range(n-1):
tmp = ans
ans += fib1
fib1 = tmp
return ans
|
89ec5eeecf80a2f802297198de147ff94f750e87
| 23,164
|
import base64
def read_file(file: str) -> str:
"""
Reads the given file and returns the contents as a base64 string
@param file: Path to a file
@return: base64 string containing the file bytes
"""
with open(file, "rb") as f:
return base64.b64encode(f.read()).decode()
|
08ce5bec02c55aa1a4934605bd27c8f083308591
| 23,165
|
import csv
def load_labels(abs_path):
"""
    loads the tab-separated file at the given path as a list of rows
    Args:
        abs_path: absolute path
    Returns list of rows, each a list of column values
    """
    with open(abs_path) as label_tsv:
        labels = list(csv.reader(label_tsv, delimiter="\t"))
    return labels
|
091fe644d8d9a8aef26a1905497dcc8d5e7921ce
| 23,168
|
def user_roles(value):
"""List all roles for a given user"""
return list(value.groups.all().values_list("communityrole__role_name", flat=True))
|
76bafdc492742734fe65cbb5668dd8a05f1c8e59
| 23,169
|
def addMortonCondition(mortonRanges, mortonColumnName):
"""
Composes the predicate with the morton ranges.
"""
elements = []
for mortonRange in mortonRanges:
elements.append('(' + mortonColumnName + ' between ' + str(mortonRange[0]) + ' and ' + str(mortonRange[1]) + ')')
if len(elements) == 1:
return elements[0]
elif len(elements) > 1:
return '(' + ' OR '.join(elements) + ')'
return None
|
8157268dd9d051ddd767c8b5093771f0b5d79710
| 23,170
|
import os
def path(filename):
"""
    :param filename: name of a file relative to this module's directory
    :return: the absolute path to that file
"""
return os.path.join(os.path.dirname(__file__), filename)
|
6b22edbdd914d683dcf021798f3f126875b02e76
| 23,171
|
import pandas as pd
def merge_dfs(dfs):
"""
Merge a list of dataframes where each contains one row
showing all unique region-region pairs.
Parameters
----------
dfs : list
list of dataframes where each contains one row
showing all unique region-region pairs
Returns
-------
out_df : pandas.core.frame.DataFrame
merged dataframe where each row represents a unique scan
"""
out_df = pd.concat(dfs, copy=False, ignore_index=True)
headers = list(out_df.columns.values)
    # if any of these columns exist in the dataframe, move them to the front;
    # inserting in this order leaves subject_id first and variant_id last
    for col in ['variant_id', 'space_id', 'run_id', 'rec_id', 'acq_id',
                'task_id', 'session_id', 'subject_id']:
        if col in headers:
            headers.insert(0, headers.pop(headers.index(col)))
out_df = out_df[headers]
return out_df
|
e47cae67a51bb356b4e03bf22a8e7c81593ee909
| 23,172
|
def get_end_year(season):
"""
Returns the end year given a season in YYYY-YY format
"""
second_part = season.split("-")[1]
first_part = "20" if second_part == "00" else season.split("-")[0][:2]
year = f"{first_part}{second_part}"
return int(year)
|
13b1094c47bbfdd388a2df627f3565bc3c326a04
| 23,173
|
def format_name(name):
"""Remove non alphanumeric/whitespace characers from user input or
restaurant data
"""
    return ''.join(ch for ch in name if ch.isalnum() or ch.isspace())
|
edbeaa643f1b721287aa722cd491d61a53bf7dff
| 23,175
|
from typing import Dict
from typing import OrderedDict
def generate_panelist_vs_panelist_results(panelists: Dict,
panelist_appearances: Dict,
show_scores: Dict
) -> Dict:
"""Generate panelist vs panelist results"""
pvp_results = OrderedDict()
for _, panelist_a in panelists.items():
panelist_a = panelist_a["slug"]
pvp_results[panelist_a] = OrderedDict()
for _, panelist_b in panelists.items():
panelist_b = panelist_b["slug"]
if panelist_a != panelist_b:
panelist_a_appearances = panelist_appearances[panelist_a]
panelist_b_appearances = panelist_appearances[panelist_b]
a_b_intersect = list(set(panelist_a_appearances) & set(panelist_b_appearances))
a_b_intersect.sort()
pvp_results[panelist_a][panelist_b] = OrderedDict()
wins = 0
draws = 0
losses = 0
for show in a_b_intersect:
panelist_a_score = show_scores[show][panelist_a]
panelist_b_score = show_scores[show][panelist_b]
if panelist_a_score > panelist_b_score:
wins = wins + 1
elif panelist_a_score == panelist_b_score:
draws = draws + 1
else:
losses = losses + 1
pvp_results[panelist_a][panelist_b]["wins"] = wins
pvp_results[panelist_a][panelist_b]["draws"] = draws
pvp_results[panelist_a][panelist_b]["losses"] = losses
pvp_results[panelist_a][panelist_b]["total"] = wins + draws + losses
return pvp_results
|
34dd140e28526479c149b517f01e02654ac4a390
| 23,178
|
def emulator_default_visibility(emulator):
"""The default visibility for this emulator."""
return emulator["default_visibility"]
|
d279920f4c401b8bf68bf1224432badec4658ebe
| 23,179
|
def local_path(epic, c):
"""
Returns file directory and filename of K2 target pixel files
"""
    # zero-pad the campaign number to two digits
    c_str = str(c).zfill(2)
    # get path to fits in the lightkurve cache
    dir_path = '~/.lightkurve-cache/mastDownload/K2/ktwo%s-c%s_lc/'%(epic, c_str)
fname = 'ktwo%s-c%s_lpd-targ.fits.gz'%(epic, c_str)
return dir_path, fname
|
6111406ee934b0cafbd1c91742d30f16fd70167c
| 23,180
|
def replace_special_signs(str_to_change: str) -> str:
"""Replace special signs in given string"""
replacements = {
'*': '_',
'.': '_',
',': '_',
'-': '_',
'__': '_',
' ': ''
}
replaced = str(str_to_change)
for key, value in replacements.items():
replaced = replaced.strip(key).replace(key, value)
return replaced
|
5ad7830f0a05dd52d8cb7027dd321083406c2f38
| 23,181
|
def get_Q_hs_max_CS_d_t(Q_hs_max_C_d_t, SHF_dash_d_t):
"""(25)
Args:
        Q_hs_max_C_d_t: Maximum hourly cooling output of the heat source unit at time t on date d (MJ/h)
        SHF_dash_d_t: Corrected sensible heat factor of the cooling load at time t on date d (-)
    Returns:
        Maximum hourly sensible cooling output of the heat source unit at time t on date d (MJ/h)
"""
return Q_hs_max_C_d_t * SHF_dash_d_t
|
13005580427e38382d9d632a0cd1bbab56b68ff6
| 23,183
|
def is_even(n_digits) -> bool:
"""
Check if the number is even or not
:param n_digits:
:return: Bool
"""
    return n_digits % 2 == 0
|
3ada9b181ab3283fd8d5c3a67fde5004129cfc4c
| 23,184
|
def predict_with_neuron(model_class, X_train, y_train, X_valid,
lrate, epochs, on_epoch_end_callback=None):
"""
Args:
X_train (np.ndarray of shape (N, m): Features of the training set
y_train (np.ndarray of shape (N,): Target values of the training set
X_valid (np.ndarray of shape (V, m): Features of the validation set
lrate (float): learning rate
epochs (int): number of epochs
on_epoch_end_callback (function): A function that accepts as an
argument an integer (the index of current iterator) and a
LinearNeuron object. This function can be used as a callback in
order to perform some action at the end of every epoch
Returns:
The predictions of the trained neuron for X_valid and a np.ndarray
vector with the parameters of the trained neuron.
"""
dims = X_train.shape[1]
model = model_class(dims)
model.fit(X_train, y_train, lrate, epochs, on_epoch_end_callback)
predictions = model.predict(X_valid)
parameters = model.theta
return predictions, parameters
|
7c7b36474e46b4b4b8ee152f97e82b2e8e4770e9
| 23,185
|
def is_str(var):
"""
is this a string?
"""
# python2:
# return isinstance(var, basestring)
return isinstance(var, str)
|
91516d2d24dab235e374e0d77f184cdd59fc82ec
| 23,186
|