content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def _check_schema_within_schema(la, lb, dir=None, verbose=False):
""" Check is a Two-Symbol schemata is covered by another.
This is used to simplify the number of TS schematas returned.
The arguments for this function are generated by `_expand_ts_logic`.
Args:
tsa (list) : A list of :math:`F'` schematas that a Two-Symbol :math:`F''` schemata can cover.
tsb (list) : A list of :math:`F'` schematas that a Two-Symbol :math:`F''` schemata can cover.
dir (string) : The direction to check, either ``a`` or ``b`` is in the other.
Defaults to both directions.
"""
a_in_b, b_in_a = None, None
#
if dir != 'b':
a_in_b = all([(xa in lb) for xa in la])
if verbose:
print('%s in %s : %s' % (la, lb, a_in_b))
if dir != 'a':
b_in_a = all([(xb in la) for xb in lb])
if verbose:
print('%s in %s : %s' % (lb, la, b_in_a))
#
return a_in_b, b_in_a | 03a42f6d868f030a670cb97b79bed9136ec76a1d | 47,778 |
def minute_to_clock(m):
    """Format a minute count as an "hour:minute" string.

    No zero padding is applied, e.g. 65 -> "1:5".
    """
    hours = int(m / 60)
    minutes = m % 60
    return "%s:%s" % (hours, minutes)
def process_arguments(app, what, name, obj, options, signature,
                      return_annotation):
    """
    Sphinx autodoc hook: strip the argument signature and return
    annotation from class entries in the documentation; everything else
    is left unchanged (None means "no change" to autodoc).
    """
    if what != 'class':
        return None
    return (None, None)
def vocab_from_file(path):
    """Read all words of a vocabulary from a file.

    Each non-empty line is expected to start with a word; any further
    whitespace-separated fields (e.g. frequencies) are ignored.  Indices
    are 1-based and assigned in file order.

    Parameters
    ----------
    path : string
        Path of vocabulary file.

    Returns
    -------
    index2word : dict
        Dictionary that contains indices as keys, and words as values.
    word2index : dict
        Dictionary that contains words as keys, and indices as values.
    """
    index2word = {}
    word2index = {}
    # Context manager closes the handle (the original leaked it), and
    # iterating the file replaces the manual readline/break loop.
    with open(path, 'r') as fin:
        for line in fin:
            fields = line.split()
            if not fields:
                # Skip blank lines; the original would raise IndexError.
                continue
            word = fields[0]
            word2index[word] = len(word2index) + 1
            index2word[len(index2word) + 1] = word
    return (index2word, word2index)
import torch
def _compute_idxs_v2(vals, counts):
    """Fast vectorized version of index computation.

    Given a run-length encoding of a dense binary sequence (``vals`` holds
    the run values, ``counts`` the run lengths), return the indices at
    which the dense sequence equals 1 -- without ever materializing the
    dense sequence.

    Args:
        vals: 1D tensor of run values (0 or 1).
        counts: 1D tensor of run lengths, same length as ``vals``.

    Returns:
        1D int64 tensor of the nonzero indices.

    NOTE(review): assumes at least one run with value 1; otherwise
    ``start_idxs[0]`` below would index an empty tensor -- confirm that
    callers guarantee this.
    """
    # Consider an example where:
    # vals = [0, 1, 0, 1, 1]
    # counts = [2, 3, 3, 2, 1]
    #
    # These values of counts and vals mean that the dense binary grid is:
    # [0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]
    #
    # So the nonzero indices we want to return are:
    # [2, 3, 4, 8, 9, 10]
    # After the cumsum we will have:
    # end_idxs = [2, 5, 8, 10, 11]
    end_idxs = counts.cumsum(dim=0)
    # After masking and computing start_idx we have:
    # end_idxs = [5, 10, 11]
    # counts = [3, 2, 1]
    # start_idxs = [2, 8, 10]
    mask = vals == 1
    end_idxs = end_idxs[mask]
    counts = counts[mask].to(end_idxs)
    start_idxs = end_idxs - counts
    # We initialize delta as:
    # [2, 1, 1, 1, 1, 1]
    delta = torch.ones(counts.sum().item(), dtype=torch.int64)
    delta[0] = start_idxs[0]
    # We compute pos = [3, 5], val = [3, 0]; then delta is
    # [2, 1, 1, 4, 1, 1]
    pos = counts.cumsum(dim=0)[:-1]
    val = start_idxs[1:] - end_idxs[:-1]
    delta[pos] += val
    # A final cumsum gives the idx we want: [2, 3, 4, 8, 9, 10]
    idxs = delta.cumsum(dim=0)
    return idxs
from typing import Union
def atoi(s: str) -> Union[int, str]:
    """Convert a :class:`str` to an :class:`int` if possible; otherwise
    return the string lower-cased."""
    if s.isdigit():
        return int(s)
    return s.lower()
def _CheckNoBaseTimeCalls(input_api, output_api):
    """Checks that no files call base::Time::Now() or base::TimeTicks::Now()."""
    # Matches direct calls to either clock's Now().
    pattern = input_api.re.compile(
        r'(base::(Time|TimeTicks)::Now)\(\)',
        input_api.re.MULTILINE)
    files = []
    for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
        # Only autofill sources are checked; this presubmit script itself is
        # exempt (it necessarily mentions the pattern in its own text).
        if (f.LocalPath().startswith('components/autofill/') and
            not f.LocalPath().endswith("PRESUBMIT.py")):
            contents = input_api.ReadFile(f)
            if pattern.search(contents):
                files.append(f)
    if len(files):
        # Emitted as a prompt warning (not an error): the author may still
        # proceed after acknowledging it.
        return [ output_api.PresubmitPromptWarning(
            'Consider to not call base::Time::Now() or base::TimeTicks::Now() ' +
            'directly but use AutofillClock::Now() and '+
            'Autofill::TickClock::NowTicks(), respectively. These clocks can be ' +
            'manipulated through TestAutofillClock and TestAutofillTickClock '+
            'for testing purposes, and using AutofillClock and AutofillTickClock '+
            'throughout Autofill code makes sure Autofill tests refers to the '+
            'same (potentially manipulated) clock.',
            files) ]
    return []
def adjective_negations(token):
    """Find all negated adjectives in a sentence.

    :param token: negation token to handle
    :type token: :class:`spacy.token`
    :return: list of negated adjectives
    :rtype: [string]
    """
    negated_adjectives = []
    # "This color is not pretty" case: the negation's own head is the
    # (conjoined or modifying) adjective.
    if token.head.dep_ == "conj" or token.head.dep_ == "amod":
        negated_adjectives.append(token.head.text)
    # always looking to the right -- not handling sarcasm (e.g., "Beautiful
    # table, not!")
    for tok in token.head.rights:
        # Conjuncts, adjectival modifiers and adjectival complements to the
        # right of the head are treated as negated.
        if tok.dep_ == "conj" or tok.dep_ == "amod" or tok.dep_ == "acomp":
            negated_adjectives.append(tok.text)
        # noun phrase case "This is not a terrible table": descend one level
        # into the children of an attribute.
        if tok.dep_ == "attr":
            for n_tok in tok.children:
                if (
                    n_tok.dep_ == "conj"
                    or n_tok.dep_ == "amod"
                    or n_tok.dep_ == "acomp"
                ):
                    negated_adjectives.append(n_tok.text)
    return negated_adjectives
def CheckChangeHasNoCrAndHasOnlyOneEol(input_api, output_api,
                                       source_file_filter=None):
    """Runs both CheckChangeHasNoCR and CheckChangeHasOnlyOneEOL in one pass.

    It is faster because it is reading the file only once.
    """
    cr_files = []
    eof_files = []
    for f in input_api.AffectedSourceFiles(source_file_filter):
        # NOTE(review): ReadFile(..., 'rb') suggests bytes, but the
        # comparisons below use str literals -- this is Python-2-era
        # Chromium code; confirm before running under Python 3.
        contents = input_api.ReadFile(f, 'rb')
        if '\r' in contents:
            cr_files.append(f.LocalPath())
        # Check that the file ends in one and only one newline character:
        # flag files with no trailing newline, or with two or more.
        if len(contents) > 1 and (contents[-1:] != '\n' or contents[-2:-1] == '\n'):
            eof_files.append(f.LocalPath())
    outputs = []
    if cr_files:
        outputs.append(output_api.PresubmitPromptWarning(
            'Found a CR character in these files:', items=cr_files))
    if eof_files:
        outputs.append(output_api.PresubmitPromptWarning(
            'These files should end in one (and only one) newline character:',
            items=eof_files))
    return outputs
from typing import List
from typing import Optional
def create_patient_resource(patient_identifier: List[dict],
                            gender: str,
                            communication: Optional[List[dict]] = None) -> dict:
    """
    Create a patient resource following the FHIR format
    (http://www.hl7.org/implement/standards/fhir/patient.html)

    Args:
        patient_identifier: FHIR identifier entries for the patient.
        gender: One of the FHIR gender codes "male", "female", "other",
            "unknown".
        communication: Optional FHIR communication entries; only added to
            the resource when non-empty.

    Returns:
        dict: The FHIR Patient resource.

    Raises:
        ValueError: If ``gender`` is not a valid FHIR gender code.
            (Previously enforced with ``assert``, which is silently
            stripped when Python runs with ``-O``.)
    """
    gender_codes = {"male", "female", "other", "unknown"}
    if gender not in gender_codes:
        raise ValueError(
            f"Gender must be one of these gender codes: {gender_codes}")
    patient_resource = {
        "resourceType": "Patient",
        "identifier": patient_identifier,
        "gender": gender
    }
    if communication:
        patient_resource["communication"] = communication
    return patient_resource
def minimum(key):
    """
    Curried version of the built-in min.

    >>> Stream([[13, 52], [28, 35], [42, 6]]) >> minimum(lambda v: v[0] + v[1])
    [42, 6]
    """
    def _apply(stream):
        return min(stream, key=key)
    return _apply
from datetime import datetime
def pairs_hook(pairs):
    """object_pairs_hook for json.load: parse the value of the "time" key
    into a datetime (via fromisoformat, Python >= 3.7), leaving every
    other key/value pair untouched."""
    return {
        key: datetime.fromisoformat(value) if key == 'time' else value
        for key, value in pairs
    }
import math
def solar_apparent_longitude(solar_true_longitude, julian_century):
    """Return the solar apparent longitude (degrees), obtained from the
    solar true longitude by subtracting the constant and the periodic
    correction term that depends on the Julian century."""
    result = solar_true_longitude
    result -= 0.00569
    result -= 0.00478 * math.sin(math.radians(125.04 - 1934.136 * julian_century))
    return result
def squareDistance(p1, p2):
    """Return the squared distance between two 2D points -- useful for
    relative comparisons without paying for the sqrt."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return dx * dx + dy * dy
def test_odd(value):
    """Jinja-style test: return True when the variable is odd."""
    remainder = value % 2
    return remainder == 1
def index_acl(acl):
    """Return an ACL as a dictionary indexed by the 'entity' values of the ACL.

    We represent ACLs as lists of dictionaries, which keeps them easy to
    serialize as JSON.  When changing them we want exactly one element per
    `entity`, so a dict keyed by `entity` is more convenient.  As with the
    original loop, a later duplicate entity overwrites an earlier one.

    :param acl: list of dict
    :return: the ACL indexed by the entity of each entry.
    :rtype: dict
    """
    return {entry["entity"]: entry for entry in acl}
import re
def find(str_: str, re_: str) -> list:
    """Like str.find(), but the needle is a regular expression; returns
    the start index of every match, scanning left to right."""
    positions = []
    search_from = 0
    for matched_text in re.findall(re_, str_):
        idx = str_.find(matched_text, search_from)
        search_from = idx + 1
        positions.append(idx)
    return positions
def payload_string(nibble1, nibble2, nibble3, nibble4):
    """Returns the string representation of a payload: four nibbles
    rendered as upper-case hex digits with an 0x prefix."""
    return f'0x{nibble1:X}{nibble2:X}{nibble3:X}{nibble4:X}'
def node_list2node_dict(node_list):
    """
    Convert per-node attribute information from list form to dict form.

    Args:
        node_list: list of Node objects covering every node.
    Return:
        Dict keyed by each node's name; each value is a dict holding that
        node's href, x, y and is_dummy attributes, e.g.::

            {"f": {"href": "example.html", "x": 0, "y": 2, "is_dummy": false}, ...}
    """
    return {
        node.name: {
            "href": node.href,
            "x": node.x,
            "y": node.y,
            "is_dummy": node.is_dummy,
        }
        for node in node_list
    }
import six
def to_ascii(s):
    """Compatibility function for converting between Python 2.7 and 3 calls.

    Text is returned unchanged; binary input is decoded as UTF-8; any
    other type is passed through as-is.
    """
    if isinstance(s, six.text_type):
        return s
    elif isinstance(s, six.binary_type):
        # The original rebuilt the decoded string char-by-char with
        # "".join(map(chr, map(ord, ...))), which is an identity transform
        # on the decoded text; decode directly instead.
        return s.decode(encoding='UTF-8')
    return s
def create_operation(cls, a, b):
    """Create an operation between a and b, merging operand lists when
    either side is already an operation of the same class."""
    def _flatten(operand):
        # An existing instance of cls contributes its operands; anything
        # else contributes itself.
        return list(operand.operands) if isinstance(operand, cls) else [operand]
    return cls(*(_flatten(a) + _flatten(b)))
def LFRS(x: list) -> bool:
    """Logical reliability function of the system: given each component's
    failure-free working time, decide whether the whole series/parallel
    system keeps running for a full year.

    :param x: list of per-component working durations in hours
    :return: bool
    """
    T = 8760  # hours the system must run (one year)
    up = [t > T for t in x]
    # Block 1: components 0 and 1 in series, paralleled by component 2.
    block_a = (up[0] and up[1]) or up[2]
    # Block 2: components 3 and 4 in series.
    block_b = up[3] and up[4]
    # Block 3: three parallel branches of two components in series.
    block_c = (up[5] and up[6]) or (up[7] and up[8]) or (up[9] and up[10])
    return block_a and block_b and block_c
import torch
import time
def time_encode_decode(model, dataframe, verbose=False):
    """Time the model's encode and decode functions.

    Parameters
    ----------
    model : torch.nn.Module
        The model to evaluate; must expose ``encode`` and ``decode``.
    dataframe : pandas.DataFrame-like
        Data to encode and decode; ``.values`` is converted to a float
        tensor and one row is treated as one jet.
    verbose : bool
        If True, print the per-jet timings.

    Returns
    -------
    tuple
        (encode_time_per_jet, decode_time_per_jet), in seconds.
    """
    data = torch.tensor(dataframe.values, dtype=torch.float)
    # perf_counter is monotonic and higher-resolution than time.time(),
    # so short encode/decode calls are timed more reliably.
    start_encode = time.perf_counter()
    latent = model.encode(data)
    encode_time = time.perf_counter() - start_encode
    start_decode = time.perf_counter()
    _ = model.decode(latent)
    decode_time = time.perf_counter() - start_decode
    n_jets = len(dataframe)
    encode_time_per_jet = encode_time / n_jets
    decode_time_per_jet = decode_time / n_jets
    if verbose:
        print('Encode time/jet: %e seconds' % encode_time_per_jet)
        print('Decode time/jet: %e seconds' % decode_time_per_jet)
    return encode_time_per_jet, decode_time_per_jet
def ngrams(text, n=3):
    """Return the set of lower-cased character n-grams of size n.

    (The previous docstring said "list"; a set has always been returned,
    so duplicate n-grams are collapsed.)
    """
    return {text[i:i + n].lower() for i in range(len(text) - n + 1)}
def convert_rgb_tuple(tuple_256):
    """Convert R,G,B Decimal Code from 8-bit integers to [0, 1] floats.

    E.g. (255, 247, 0) -> (1., 0.9686..., 0.) representing a specific
    yellow colour, namely #FFF700 in Hex(ademical).
    """
    scaled = [channel / 255 for channel in tuple_256]
    return tuple(scaled)
def version():
    """Return the version string stored in dynamicdns/version.

    Falls back to a placeholder when the file is missing or unreadable,
    e.g. before deployment has generated it.
    """
    try:
        with open("dynamicdns/version") as file:
            return file.read()
    except OSError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and could hide real bugs.
        return "[Version will be generated during deployment]"
def SubTypeProb(Deck):
    """Determine subtype probability.

    Returns [small_item_pct, food_drink_pct, clothing_pct, art_pct] --
    the percentage of the deck's cards with each subtype, each rounded
    to two decimal places.
    """
    tallies = {"Small Item": 0, "Food and Drink": 0, "Clothing": 0, "Art": 0}
    for card in Deck.card_list:
        if card.subtype in tallies:
            tallies[card.subtype] += 1
    total = Deck.number_of_cards
    return [
        round((tallies[subtype] / total) * 100, 2)
        for subtype in ("Small Item", "Food and Drink", "Clothing", "Art")
    ]
def chop_into_panels(all_info_in_pdf):
    """
    Parse the text from the pdf into name and institution, what panel they're on,
    what sub-panel, their role, etc.

    :param all_info_in_pdf: all the text in the pdf separated into lines
    :return: parsed_ref_panel: a dict containing all the data on each REF panelist
    """
    parsed_ref_panel = {}
    # Running state: updated whenever a panel/role header line appears,
    # then attached to every subsequent panelist line.
    current_main_panel = None
    current_role = None
    current_sub_panel = None
    roles = {'Chair', 'Deputy Chair', 'Members', 'Observers', 'Secretariat',
             'Additional assessment phase members'}
    titles = {'Pr', 'Dr', 'Mr', 'Mi', 'Ms'}
    for line in all_info_in_pdf:
        if 'Main Panel' in line:
            current_main_panel = line
            current_sub_panel = 'Main panel'
        if 'Sub-panel' in line:
            current_sub_panel = line
        if line in roles:
            current_role = line
        if line[:2] in titles:
            # A '*' in the name marks an interdisciplinary member.
            interdisciplinary = True if '*' in line else ''
            name = line.replace('*', '')
            parsed_ref_panel[name] = {
                'main panel': current_main_panel,
                'sub-panel': current_sub_panel,
                'role': current_role,
                'interdisciplinary': interdisciplinary,
            }
    return parsed_ref_panel
import pickle
def get_sample(data_path):
    """Get the sample to show.

    Parameters:
        data_path: Path to a pickled dict keyed by (modulation, snr) pairs.
    Returns:
        samples: One sample (record index 786) per kept modulation at SNR 0.
        mods: The kept modulation names, sorted; AM-DSB, QAM16, 8PSK and
            WBFM are removed.
    """
    # SECURITY NOTE: pickle.load executes arbitrary code from the file --
    # only use on trusted data files.
    orig_data = pickle.load(open(data_path, 'rb'), encoding='iso-8859-1')
    mode_snr = list(orig_data.keys())
    # Split the (modulation, snr) key pairs into sorted, de-duplicated lists.
    mods, snrs = [sorted(list(set(x[i] for x in mode_snr))) for i in [0, 1]]
    mods.remove('AM-DSB')
    mods.remove('QAM16')
    mods.remove('8PSK')
    mods.remove('WBFM')
    # Magic values: SNR 0 and record index 786 -- presumably a
    # representative sample; TODO confirm why this index was chosen.
    snr = 0
    samples = [orig_data[(mods[i], snr)][786] for i in range(len(mods))]
    return samples, mods
from datetime import datetime
def get_lead_item(tabs):
    """Return the index of the item to place in the lead slot on the
    index page, rotating through the 'top' candidates by weekday."""
    candidates = tabs['top']
    weekday = datetime.today().weekday()
    return weekday % len(candidates)
def getDepletableNuclides(activeNuclides, obj):
    """Get nuclides in this object that are in the burn chain."""
    common = set(activeNuclides).intersection(obj.getNuclides())
    return sorted(common)
import torch
def masked_zero(tensor, mask):
    """Zero out the entries of ``tensor`` where ``mask`` is 0.

    ``mask`` is broadcast by appending trailing singleton dimensions
    until it has the same rank as ``tensor``, then cast to ``tensor``'s
    dtype and multiplied in.

    Args:
        tensor (torch.Tensor): Input values.
        mask (torch.Tensor): 0/1 mask over the leading dimensions.

    Returns:
        torch.Tensor: ``tensor`` with masked-out entries set to zero.
    """
    while mask.dim() < tensor.dim():
        mask = mask.unsqueeze(-1)
    # Cast by dtype instead of matching concrete CPU tensor classes
    # (torch.FloatTensor etc.), which missed CUDA/double/bool tensors;
    # for the previously handled dtypes the result is identical.
    return tensor * mask.to(tensor.dtype)
def axis_slice(value, axis):
    """Creates a slice tuple for pandas indexing along the given axis
    (0 = rows, 1 = columns); any other axis yields None."""
    if axis == 0:
        return (value, slice(None))
    if axis == 1:
        return (slice(None), value)
    return None
def node2str(net, node):
    """Converts a node into a string

    The result will be written as the junction of the roads that the node is
    connected to.

    :param net: The network (adjacency mapping with a per-edge 'name' attr)
    :param node: The node id

    NOTE(review): uses dict.iteritems(), a Python-2-era (old networkx)
    API -- on Python 3 this raises AttributeError; confirm the runtime.
    """
    # Distinct names of the edges incident to the node.
    edge_names = set(
        attrs['name'] for _, attrs in net[node].iteritems()
    )
    if len(edge_names) == 1:
        prefix = 'end point of '
    else:
        prefix = 'junction of '
    return prefix + ' and '.join(edge_names)
def split_sequential(data, nsplits, td_per_split=None):
    """Split a 2D dataset into ``nsplits`` sequential row blocks.

    Parameters
    ----------
    data : ndarray, shape (n_rows, n_cols)
        The dataset to split; must be 2D.
    nsplits : int
        Number of consecutive blocks to produce.
    td_per_split : int, optional
        Rows per block.  When omitted, the rows are divided evenly and
        any remainder rows at the end are dropped.

    Returns
    -------
    list of ndarray
        ``nsplits`` consecutive row slices of ``data``.

    Raises
    ------
    ValueError
        If ``data`` is not 2D.

    Examples
    --------
    >>> import numpy as np
    >>> data = np.arange(100).reshape(10, 10)
    >>> [part.shape for part in split_sequential(data, 2)]
    [(5, 10), (5, 10)]
    >>> [part.shape for part in split_sequential(data, 3, 3)]
    [(3, 10), (3, 10), (3, 10)]
    """
    if len(data.shape) != 2:
        raise ValueError("Needs 2D dataset. Must reshape before use")
    if td_per_split is None:
        # Drop trailing remainder rows so the split is even.
        # (Renamed the block length from the ambiguous `l`, and merged the
        # duplicated floor-division branches.)
        remainder = data.shape[0] % nsplits
        if remainder != 0:
            data = data[0:-remainder]
        rows_per_split = data.shape[0] // nsplits
    else:
        rows_per_split = td_per_split
    return [data[i * rows_per_split:(i + 1) * rows_per_split]
            for i in range(nsplits)]
def rename(i,j,iteration,k,finalDict):
    """Choose a pair of letter labels for positions i and j.

    On the first iteration the first label is the iteration's alphabet
    letter ('A'), lower-cased when ``i == 1``; on later iterations it is
    reused from the k-th key of ``finalDict``.  The second label is the
    next alphabet letter for this iteration, lower-cased when ``j == 1``.

    NOTE(review): the meaning of the lower-casing of position 1 and the
    reliance on ``finalDict``'s key order are inferred from the code
    alone -- confirm against the caller.

    Args:
        i (int): position index of the first element (1 == first).
        j (int): position index of the second element (1 == first).
        iteration (int): 1-based round number.
        k (int): index into ``finalDict``'s keys, used after round 1.
        finalDict (dict): dict whose keys are previously assigned labels.

    Returns:
        tuple(str, str): the two labels.
    """
    # check if it is the first round
    if iteration==1:
        letrai =chr(ord('A')+iteration-1)
        # check if its the first position
        if i==1:
            letrai = letrai.lower()
    else:
        letrai = list(finalDict.keys())[k]
    if j==1:
        letraj = chr(ord('A')+iteration).lower()
    else:
        letraj = chr(ord('A')+iteration)
    return letrai,letraj
import json
def get_category_map(json_path):
    """
    Parse the category JSON file provided by Kakao and return, for each
    category level, a code->name dictionary (the file stores name->code).

    :param json_path: path to the category file, e.g. "../data/raw/cate1.json"
    :return: dict with keys "scateid", "bcateid", "mcateid", "dcateid",
        each mapping category code -> category name.

    example
        >>> cate_map = get_category_map(json_path="../data/raw/cate1.json")
    """
    # Context manager closes the handle (the original open(...).read()
    # leaked it), and json.load avoids reading the text twice.
    with open(json_path, encoding='utf-8') as f:
        data = json.loads(f.read())
    level_keys = {"scateid": "s", "bcateid": "b", "mcateid": "m", "dcateid": "d"}
    return {
        out_key: {code: name for name, code in data[level].items()}
        for out_key, level in level_keys.items()
    }
def _flat(obj):
    """Normalize ``obj`` into an iterable of dependencies.

    :param obj: an object exposing ``js_dependencies``, an existing
        list/tuple/set, or a single bare item.
    :return: ``obj.js_dependencies`` as a list when present; ``obj``
        itself when it is already a list/tuple/set; otherwise a
        one-element tuple wrapping ``obj``.
    """
    if hasattr(obj, "js_dependencies"):
        return list(obj.js_dependencies)
    if isinstance(obj, (list, tuple, set)):
        return obj
    return (obj,)
import torch
def categorical_mean_average_precision(preds, targets, pred_name='output',
                                       target_name='map'):
    """
    Computes the mean average precision from predictions given as logits, for a
    categorical segmentation task.

    With a single ground-truth class per position, average precision
    reduces to the reciprocal rank of the true class, which is what is
    computed here; averaging over the batch happens outside this function.
    """
    preds = preds[pred_name]
    gt = targets[target_name]
    # order classes by predicted probability
    top_predictions = torch.argsort(preds, -1, descending=True)
    # find the correct class in there
    top_correct = (top_predictions == gt[..., None])
    # determine the rank of the correct class (requires conversion from bool)
    rank = top_correct.byte().argmax(-1)
    # the average of the inverse ranks is the mean average precision
    # (but averaging over the batch items happens outside of this)
    return (1. / (1 + rank))
def make_orbit_codes(text_file):
    """Read a text file with one "ASK)DGE"-style orbit code per line and
    return the stripped codes as a list."""
    # Context manager closes the handle (the original never closed it).
    with open(text_file) as fin:
        return [line.strip() for line in fin]
def np_split(array, nrows, ncols):
    """Split a 2D matrix into (nrows x ncols) sub-matrices.

    Blocks are returned in row-major block order with shape
    (n_blocks, nrows, ncols).  Both dimensions of ``array`` must be
    divisible by the corresponding block size.
    """
    rows, cols = array.shape
    # Bug fix: the first reshape dimension must be driven by the row
    # count (shape[0]); the original used shape[1], which only worked
    # for square matrices.
    return (array.reshape(rows // nrows, nrows, -1, ncols)
            .swapaxes(1, 2)
            .reshape(-1, nrows, ncols))
def get_overlap(a, b, c, d):
    """
    Given 2 segments (a, b) and (c, d), return the tuple
    (overlap_length, overlap_start, overlap_end).  The length is clamped
    to 0 when the segments do not intersect (start > end in that case).
    """
    start = max(a, c)
    end = min(b, d)
    length = max(0, end - start)
    return length, start, end
def resolve_name(name):
    """
    Resolve a dotted name to some object (usually class, module, or function).

    Supported naming formats include:

    1. path.to.module:method
    2. path.to.module.ClassName

    >>> resolve_name('coilmq.store.memory.MemoryQueue')
    <class 'coilmq.store.memory.MemoryQueue'>

    >>> t = resolve_name('coilmq.store.dbm.make_dbm')
    >>> import inspect
    >>> inspect.isfunction(t)
    True

    >>> t.__name__
    'make_dbm'

    @param name: The dotted name (e.g. path.to.MyClass)
    @type name: C{str}

    @return: The resolved object (class, callable, etc.).  Note that an
        unresolvable name raises ImportError or AttributeError rather
        than returning None.
    """
    if ':' in name:
        # Normalize foo.bar.baz:main to foo.bar.baz.main
        # (since our logic below will handle that)
        name = '%s.%s' % tuple(name.split(':'))
    name = name.split('.')
    used = name.pop(0)
    found = __import__(used)
    for n in name:
        used = used + '.' + n
        try:
            found = getattr(found, n)
        except AttributeError:
            # Not available as an attribute yet -- import the submodule
            # and fetch it off the (now populated) parent package.
            __import__(used)
            found = getattr(found, n)
    return found
import string
def count_punctuation(text: str) -> float:
    """Return the percentage of non-space characters that are punctuation.

    This is one of the features.

    The ratio is rounded to 3 decimals before scaling to percent
    (matching the original behaviour).  Returns 0 for empty input and
    for input consisting only of spaces -- the latter previously raised
    ZeroDivisionError.
    """
    if not text:
        return 0
    non_space = len(text) - text.count(" ")
    if non_space == 0:
        # All-space input: nothing to measure; avoid dividing by zero.
        return 0
    count = sum(1 for char in text if char in string.punctuation)
    return round(count / non_space, 3) * 100
from typing import Any
def _maybe_float_to_int(value: Any) -> Any:
"""Convert floats that are integers"""
if isinstance(value, float) and value.is_integer():
return int(value)
return value | 01c956a37cb3b2c44afcdeb97112ac05be573007 | 47,847 |
def _orelse(exp1, exp2):
"""Generates an SQLite expression that evaluates to exp1 if exp1 is
non-null and non-empty or exp2 otherwise.
"""
return ('(CASE {0} WHEN NULL THEN {1} '
'WHEN "" THEN {1} '
'ELSE {0} END)').format(exp1, exp2) | 098d29a63eeec23cf52f6280700e45bdeb8536f3 | 47,848 |
import functools
import time
def process_time(logger):
    """
    Decorator factory that logs the start, finish and elapsed wall-clock
    time of the wrapped callable via ``logger``.
    """
    def decorator_wrapper(func):
        @functools.wraps(func)
        def timed(*args, **kwargs):
            logger.info("Process {} STARTED.".format(func.__name__))
            started = time.perf_counter()
            result = func(*args, **kwargs)
            elapsed = time.perf_counter() - started
            # (The "Ellapsed" spelling is kept for log-format compatibility.)
            logger.info("Process {0} FINISHED. Ellapsed time: {1:.4f}".format(func.__name__, elapsed))
            return result
        return timed
    return decorator_wrapper
def specific_heat_ratio(Cp: float, Cv: float) -> float:
    """Return the specific heat ratio (dimensionless).

    Args:
        Cp (float): Specific heat at constant pressure [J/kg*K].
        Cv (float): Specific heat at constant volume [J/kg*K].

    Returns:
        float: Cp / Cv.
    """
    gamma = Cp / Cv
    return gamma
import requests
def mw_search(server, query, num):
    """
    Searches the specified MediaWiki server for the given query, and returns
    the specified number of results.

    Returns a list of page titles, or None when the API response has no
    'query' key (e.g. on an API error).

    NOTE(review): `query` is appended to the URL without percent-encoding
    and the request uses plain http -- confirm that callers pre-encode
    the query and that the target wiki accepts http.
    """
    search_url = ('http://%s/w/api.php?format=json&action=query'
                  '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'
                  '&srsearch=') % (server, num)
    search_url += query
    # `query` is rebound here: it now holds the decoded JSON response.
    query = requests.get(search_url).json()
    if 'query' in query:
        query = query['query']['search']
        return [r['title'] for r in query]
    else:
        return None
def normalise_number(a):
    """ Split the power part of a number and its float value normalised to 1
    Example
    -------
    >>> normalise_number(1.433364345e9)
    (0.1433364345, 10)
    >>> normalise_number(14e-6)
    (0.13999999999999999, -4)
    >>> normalise_number(-14e-6)
    (-0.13999999999999999, -4)"""
    if a == 0.:
        # NOTE(review): both members come back as floats here (0., 0.),
        # while every other path returns an int power -- confirm callers
        # tolerate this inconsistency.
        return 0., 0.
    mantissa = a
    power = 0
    if abs(mantissa) < 1:
        # Scale small magnitudes up into [0.1, 1).
        while abs(mantissa) < 0.1:
            mantissa *= 10.
            power -= 1
    elif abs(mantissa) > 1:
        # Scale large magnitudes down to at most 1.
        while abs(mantissa) > 1:
            mantissa /= 10.
            power += 1
    return mantissa, power
def remove_xml(rid):
    """
    Remove a trailing '.xml' from a resource or spec id, if present.
    """
    return rid[:-4] if rid.endswith('.xml') else rid
from typing import Type
def isgeneric(class_type: Type) -> bool:
    """Checks if a class type is a generic type (List[str] or Union[str, int])"""
    try:
        class_type.__origin__
    except AttributeError:
        return False
    return True
def _read_vector(instream, structure, name):
"""Reads a vector from the instream and returns it as a tuple."""
try:
v = instream.read(structure.size)
vector = structure.unpack(v)
return vector
except Exception as e:
raise RuntimeError("Failed to read vector " +
"{} // {}".format(name, str(e))) | 46ce645feef6cf0d8b3425a216bd25c374fab9af | 47,858 |
import sys
def read_data():
    """
    Read the file named by the first command-line argument and return its
    lines with surrounding whitespace stripped.

    If no input file is given, print usage to stderr and exit with
    status 1.
    """
    if len(sys.argv) < 2:
        print("usage: python3 assembler.py input-file [output-file]", file=sys.stderr)
        exit(1)
    path_in = sys.argv[1]
    # Context manager closes the handle (the original did so manually).
    with open(path_in) as fin:
        return [line.strip() for line in fin]
def get_project_title_html(project):
    """Return HTML version of the full project title including parents"""
    if not project.get_parents():
        return project.title
    # Ancestor part = full title minus the last segment.
    ancestor_part = ' / '.join(project.full_title.split(' / ')[:-1])
    return ancestor_part + ' / ' + project.title
def get_kernel(kernel_name, **kernel_params):
    """
    Look up a kernel class by case-insensitive name among this module's
    globals and instantiate it with ``kernel_params``.

    Examples
    --------
    >>> get_kernel('rbf', gamma=2.)
    1.0**2 * RBF(gamma=2.0)
    """
    wanted = kernel_name.lower()
    for global_name, candidate in globals().items():
        if global_name.lower() == wanted:
            return candidate(**kernel_params)
    raise ValueError("invalid kernel name '{0}'".format(kernel_name))
import hashlib
def hash_path(path: str) -> str:
    """Create a UTF-8 SHA1 hex digest of an input string.

    Arguments:
        path: The string value to hash.

    Return:
        UTF-8 SHA1 hash of `path`.
    """
    # Returned directly; the old local variable shadowed builtin hash().
    return hashlib.sha1(path.encode('utf-8')).hexdigest()
def check_input(user_input):
    """
    Check whether input meets the required shape: four alphabetic
    characters separated by single blanks (e.g. "a b c d").

    :param user_input: str, can be anything
    :return: bool, whether input passed the check
    """
    if len(user_input) != 7:
        return False
    letters_ok = all(user_input[i].isalpha() for i in (0, 2, 4, 6))
    spaces_ok = all(user_input[i] == ' ' for i in (1, 3, 5))
    return letters_ok and spaces_ok
import os
def confirm_dir(base, *path):
    """
    Join ``base`` with the ``path`` components and make sure the
    resulting directory exists, creating intermediate directories as
    needed.  Returns the joined path.
    """
    target = os.path.join(base, *path)
    try:
        os.makedirs(target)
    except FileExistsError:
        # Already present (possibly created concurrently) -- fine.
        pass
    return target
def is_less_than_thresh(t_ref, t_comp):
    """Check that t_comp is effectively below t_ref inflated by the
    allowed congestion level (max_threshold, in percent)."""
    max_threshold = 100  # percent
    allowed = t_ref + max_threshold * t_ref / 100
    return t_comp < allowed
import torch
def _make_pytorch_shapes_consistent(output, labels):
    """Try to make inputs have the same shape by adding dimensions of size 1.

    Whichever of ``output``/``labels`` has the higher rank keeps its
    shape; the other gains trailing singleton dimensions -- but only when
    the higher-rank tensor's extra trailing dimensions are all of size 1.

    Args:
        output: Model output tensor.
        labels: Label tensor.

    Returns:
        tuple: (output, labels) with equal rank.

    Raises:
        ValueError: If the ranks differ and the extra trailing dimensions
            are not all 1.
    """
    shape1 = output.shape
    shape2 = labels.shape
    len1 = len(shape1)
    len2 = len(shape2)
    if len1 == len2:
        return (output, labels)
    shape1 = tuple(shape1)
    shape2 = tuple(shape2)
    # output has extra trailing size-1 dims: pad labels up to its rank.
    if len1 > len2 and all(i == 1 for i in shape1[len2:]):
        for i in range(len1 - len2):
            labels = torch.unsqueeze(labels, -1)
        return (output, labels)
    # labels has extra trailing size-1 dims: pad output up to its rank.
    if len2 > len1 and all(i == 1 for i in shape2[len1:]):
        for i in range(len2 - len1):
            output = torch.unsqueeze(output, -1)
        return (output, labels)
    raise ValueError("Incompatible shapes for outputs and labels: %s versus %s" %
                     (str(shape1), str(shape2)))
import os
def broken_symlink(file_path):
    """Return True if the path is a broken symlink: the link itself
    exists (lexists) but its target does not (exists)."""
    if not os.path.lexists(file_path):
        return False
    return not os.path.exists(file_path)
def createYangHexStr(textString):
    """ Convert a plain hex string into yang:hex-string -- colon-separated
    byte pairs, e.g. "0a1b2c" -> "0a:1b:2c". """
    # Join two-character chunks in one pass; the original concatenated in
    # a loop (quadratic).  A trailing odd character is kept as-is,
    # matching the original behaviour.
    pairs = (textString[i:i + 2] for i in range(0, len(textString), 2))
    return ':'.join(pairs)
def cached(oldMethod):
    """Decorator for making a no-argument method cache its result on the
    instance (one slot per method name)."""
    import functools
    storageName = f'_cached_{oldMethod.__name__}'
    # functools.wraps preserves the wrapped method's name/docstring,
    # which the original version lost.
    @functools.wraps(oldMethod)
    def newMethod(self):
        if not hasattr(self, storageName):
            setattr(self, storageName, oldMethod(self))
        return getattr(self, storageName)
    return newMethod
def _label_to_path(label):
"""
Given a Label, turn it into a path by keeping the directory part of the
name, and attaching that to the package path.
"""
base = label.package
name = label.name
last_slash = name.rfind("/")
if last_slash >= 0:
base = "{}/{}".format(base, name[0:last_slash])
return base | 3e822402d5c91aba7a4d7b9d21ce302f19fc9939 | 47,874 |
def train_list_filename(split_id):
    """Build the JSON file name for the train list of the given split."""
    return 'train_{}.json'.format(split_id)
def human_size(size: int) -> str:
    """Converts size in bytes to a human-friendly format.

    Thresholds are exclusive (decimal units): exactly 1_000 bytes still
    renders as bytes, and so on for each unit.
    """
    if size <= 1_000:
        return f"{size} B"
    if size <= 1_000_000:
        return f"{size // 1_000} KB"
    if size <= 1_000_000_000:
        return f"{size // 1_000_000} MB"
    return f"{size / 1_000_000_000:.1f} GB"
def line_side(start_vector, end_vector, position_vector):
    """
    Find out what side a position_vector is on given a line defined by start_vector and end_vector.

    Args:
        start_vector: vector-like object with .x and .y attributes
            (the previous docstring said list, but the code reads
            attributes).
        end_vector: vector-like object with .x and .y attributes.
        position_vector: vector-like object with .x and .y attributes.

    Returns:
        bool: True when the 2D cross product is positive (point on one
        side of the line); False for the other side or for a point
        exactly on the line.
    """
    # 2D cross product (end - start) x (position - start).
    return ((end_vector.x - start_vector.x)*(position_vector.y - start_vector.y) - (end_vector.y - start_vector.y)*(position_vector.x - start_vector.x)) > 0
import configparser
def load_config(path, defaults=None):
    """Load a config file, seeding its DEFAULT section with ``defaults``
    (an empty mapping when omitted)."""
    cfg = configparser.ConfigParser()
    cfg['DEFAULT'] = defaults or {}
    cfg.read(path)
    return cfg
def major_axis_equiv_diam_ratio(prop, **kwargs):
    """Return the ratio of the major axis length to the equivalent diameter.

    Args:
        prop (skimage.measure.regionprops):
            The property information for a cell returned by regionprops
        **kwargs:
            Arbitrary keyword arguments (unused; kept for a uniform metric API)

    Returns:
        float:
            major axis length / equivalent diameter
    """
    axis_length = prop.major_axis_length
    diameter = prop.equivalent_diameter
    return axis_length / diameter
import typing
def convert_defaults_to_param_grid(defaults: typing.List[typing.Dict[str, typing.Union[str, int, bool, float]]]) \
        -> typing.List[typing.Dict[str, typing.List]]:
    """
    Convert configurations from the default format into the parameter grid
    format required by sklearn.model_selection.GridSearchCV.

    Parameters
    ----------
    defaults: list[dict[str, mixed]]
        A list of dictionaries, each representing a single configuration.

    Returns
    -------
    list[dict[str, list]]
        The same configurations with every value wrapped in a one-element list.
    """
    return [{key: [value] for key, value in config.items()} for config in defaults]
import codecs
def _detect_encoding(data):
"""Detect which UTF codec was used to encode the given bytes.
The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is
accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big
or little endian. Some editors or libraries may prepend a BOM.
:param data: Bytes in unknown UTF encoding.
:return: UTF encoding name
"""
head = data[:4]
if head[:3] == codecs.BOM_UTF8:
return 'utf-8-sig'
if b'\x00' not in head:
return 'utf-8'
if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE):
return 'utf-32'
if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
return 'utf-16'
if len(head) == 4:
if head[:3] == b'\x00\x00\x00':
return 'utf-32-be'
if head[::2] == b'\x00\x00':
return 'utf-16-be'
if head[1:] == b'\x00\x00\x00':
return 'utf-32-le'
if head[1::2] == b'\x00\x00':
return 'utf-16-le'
if len(head) == 2:
return 'utf-16-be' if head.startswith(b'\x00') else 'utf-16-le'
return 'utf-8' | 36159863436a8e2342ff329cc8540693abff60a5 | 47,882 |
def _get_faculty_members():
"""Provides a `faculty` property instantiation data"""
return [
{
"type": "member",
"value": {"name": "Test Faculty", "description": "<p>description</p>"},
},
{
"type": "member",
"value": {"name": "Test Faculty", "description": "<p>description</p>"},
},
] | d9ca8b9e02d10767f11b55b22ffabb99e4085d6e | 47,883 |
def indent(text, n=4):
    """
    Indent each line of text with spaces

    :param text: text
    :param n: amount of spaces to indent

    >>> indent("")
    ''
    >>> indent("the quick brown fox\\njumped over an lazy dog\\nend")
    '    the quick brown fox\\n    jumped over an lazy dog\\n    end'
    """
    if not text:
        return ""
    prefix = " " * n
    # Prefix every line, including the first one.
    return prefix + ("\n" + prefix).join(text.split("\n"))
def _nwd(number: int, euler: int):
"""
The function calculates nwd.
"""
if number % euler == 0:
return euler
return _nwd(euler, number % euler) | 1b33b8fe167c50e661e1d3865c8bf6126ee165fc | 47,888 |
def calcCropSensorWidth(sensorWidth, nativeAspectRatio, mediaAspectRatio):
    """
    Calculate effective/utilised width of camera sensor when image/video is
    recorded at non-native aspect ratio.

    Aspect ratios are (width, height) pairs; the crop factor is the ratio
    of the native height/width fraction to the media height/width fraction.
    """
    native_fraction = nativeAspectRatio[1] / nativeAspectRatio[0]
    media_fraction = mediaAspectRatio[1] / mediaAspectRatio[0]
    return sensorWidth * (native_fraction / media_fraction)
def reverse_table(tab):
    """
    Brief: reverse the input list in place.

    Args:
        tab: a list of values.

    Return:
        The same list object with its elements in reversed order.

    Raises:
        ValueError: if ``tab`` is not a list.
    """
    if not isinstance(tab, list):
        raise ValueError('Expected a list as input')
    # list.reverse() does the same in-place element swap as the original
    # manual two-pointer loop, at C speed and without index bookkeeping.
    tab.reverse()
    return tab
def openfile(file):
    """Build a maze structure from a file: a list of rows, each a list of chars."""
    with open(file, mode='r') as handle:
        grid = []
        for raw_line in handle:
            grid.append(list(raw_line.strip()))
    return grid
def response():
    """Hacky fixture to enable passing the client response between BDD steps."""
    # A fresh mutable dict per call; steps share it by mutating the instance.
    return dict()
import pkg_resources
def available_backends():
    """Return the supported backends that are installed in this environment.

    Checks installed distributions for each supported backend name and
    returns the matches in the canonical support order.

    Returns:
        list[str]: a subset of ["tensorflow", "torch", "torchvision", "numpy"].
    """
    # pkg_resources is deprecated (and slow to import); importlib.metadata
    # is the stdlib replacement for querying installed distributions.
    from importlib.metadata import distributions
    supported_backends = ["tensorflow", "torch", "torchvision", "numpy"]
    # Set for O(1) membership tests; also avoids shadowing this function's
    # own name with a local list (as the original did).
    installed = {dist.metadata["Name"] for dist in distributions()}
    return [backend for backend in supported_backends if backend in installed]
def mul(x, y):
    """Unnecessary multiplication function: return the product of x and y."""
    product = x * y
    return product
def calculations(pyramid, i):
    """Calculate the value of the given path.

    Row ``x`` of ``pyramid`` contributes ``pyramid[x][i[x]]``; the result is
    the product of the contributions.
    """
    product = 1
    for row_index, row in enumerate(pyramid):
        product *= row[i[row_index]]
    return product
def is_valid_file(ext, argument):
    """Check whether *ext* is an accepted file format for *argument*.

    Raises KeyError if *argument* is not a known argument name.
    """
    formats = {}
    for arg in ('input_ligand_path', 'input_receptor_path', 'input_box_path'):
        formats[arg] = ['pdb']
    for arg in ('output_ligand_path', 'output_receptor_path',
                'input_ligand_pdbqt_path', 'input_receptor_pdbqt_path',
                'output_pdbqt_path'):
        formats[arg] = ['pdbqt']
    formats['output_log_path'] = ['log']
    return ext in formats[argument]
from dateutil import tz
from datetime import datetime
def utc_convert(utc_time, time_zone):
    """Convert a UTC timestamp string into the selected 'time_zone'.

    Args:
        utc_time: timestamp formatted as '%Y-%m-%d %H:%M:%S', assumed UTC.
        time_zone: target zone name understood by ``dateutil.tz.gettz``.

    Returns:
        A timezone-aware datetime expressed in ``time_zone``.
    """
    parsed = datetime.strptime(utc_time, '%Y-%m-%d %H:%M:%S')
    # strptime yields a naive datetime; tag it as UTC before converting,
    # otherwise astimezone would assume the local zone.
    aware_utc = parsed.replace(tzinfo=tz.gettz('UTC'))
    return aware_utc.astimezone(tz.gettz(time_zone))
def read_query(query_file):
    """Parse a FASTA-style query file into {generated name: sequence}.

    Each header line (starting with '>') gets a generated name
    ("protein_<n>"); the following sequence lines are concatenated with
    any '*' characters removed.

    Side effect: writes ``name_map.txt`` in the working directory, mapping
    each generated name to its original header line.

    Args:
        query_file: path to the FASTA query file.

    Returns:
        dict: generated protein name -> sequence string.
    """
    protein_dict = {}
    counter = 1
    current_name = f"protein_{counter}"
    # 'with' guarantees both files are closed even if parsing raises
    # (the original leaked the handles on any exception).
    with open(query_file, 'r') as queries, open('name_map.txt', 'w+') as name_map:
        for line in queries:
            if line.startswith(">"):
                # NOTE: the counter is bumped before its first use, so
                # numbering starts at protein_2 — preserved from the
                # original for compatibility with name_map.txt consumers.
                counter += 1
                current_name = f"protein_{counter}"
                name_map.write(f"{current_name}\t{line}")
                protein_dict[current_name] = ""
            else:
                protein_dict[current_name] += line.strip().replace("*", "")
    return protein_dict
import uuid
def generate_uuid_string():
    """Generate a random UUID4 and return its canonical string form.

    :returns: generated uuid
    """
    new_id = uuid.uuid4()
    return str(new_id)
def findMedianSortedArrays(nums1, nums2):
    """Return the median of two sorted lists.

    Merges just enough of the two inputs (the first total//2 + 1 elements)
    to reach the median position, then averages the middle pair for an
    even total. Leftover debug ``print`` calls from the original were
    removed — a pure computation should not write to stdout.

    :type nums1: List[int]
    :type nums2: List[int]
    :rtype: float
    """
    total = len(nums1) + len(nums2)
    half = total // 2
    merged = []
    i = j = 0
    # Standard two-pointer merge, stopped as soon as the median is covered.
    while len(merged) <= half:
        take_first = j >= len(nums2) or (i < len(nums1) and nums1[i] <= nums2[j])
        if take_first:
            merged.append(nums1[i])
            i += 1
        else:
            merged.append(nums2[j])
            j += 1
    if total % 2:
        return merged[half]
    return (merged[half - 1] + merged[half]) / 2
import math
def add_aggregate_info(site_and_date_info, percentage, sorted_names):
    """
    Function is used to add an 'aggregate metric' that
    summarizes all of the data quality issues for a
    particular site on a particular date.

    NOTE: This function DOES NOT take the weighted value
    of all of these metrics. This is merely to attach
    the aggregate statistic.

    NOTE: This is for the DICTIONARY with the date as the
    first set of keys.

    :param
    site_and_date_info (dict): dictionary with key:value
        of date:additional dictionaries that contain metrics
        for each HPO's data quality by type

    percentage (boolean): used to determine whether or not the
        number is a simple record count (e.g. duplicates)
        versus the percentage of records (e.g. the success rate
        for each of the tables)

    sorted_names (lst): list of the names that should have an
        aggregate statistic analyzed (e.g. avoiding 'avarage'
        statistics)

    :return:
    site_and_date_info (dict): same as input parameter but
        now each site and date has an added aggregate
        statistic.
    """
    for date in site_and_date_info.keys():
        date_report = site_and_date_info[date]
        date_metric, num_iterated = 0, 0
        for site in sorted_names:
            table_metrics = date_report[site]
            # NOTE(review): this reset inside the site loop means that after
            # the loop finishes, date_metric/num_iterated reflect ONLY the
            # last site in sorted_names, not an aggregate over all sites for
            # this date. If an all-site aggregate was intended, this line
            # should be hoisted above the site loop — confirm with callers
            # before changing (behavior preserved here).
            date_metric, num_iterated = 0, 0
            for table in table_metrics.keys():
                stat = table_metrics[table]
                # NaN entries are skipped so they neither poison the sum
                # nor inflate the divisor used for the mean below.
                if not math.isnan(stat):
                    date_metric += stat
                    num_iterated += 1
        # NOTE: 'AGGREGATE INFO' SHOULD NOT BE USED FOR
        # THE PERCENTAGE METRIC. THIS IS BECAUSE THE
        # FIRST 'AGGREGATE INFO' DOES NOT WEIGHT SITES
        # BY THEIR RELATIVE CONTRIBUTIONS (BY # OF ROWS).
        if percentage and num_iterated > 0:
            # Percentage metrics are averaged (unweighted) across tables.
            date_metric = date_metric / num_iterated
        elif percentage and num_iterated == 0:
            # All entries were NaN; propagate NaN rather than reporting 0.
            date_metric = float('NaN')
        date_report['aggregate_info'] = date_metric
    return site_and_date_info
from typing import Tuple
def format_hint(title: str, description: str, original_hint: str) -> str:
    """Generate complete hint message.

    Arguments:
        title: The title of the draft to edit or commit.
        description: The description of the draft to edit or commit.
        original_hint: The original hint message.

    Returns:
        The complete hint message.
    """
    lines = [title]
    if description:
        # A blank line separates the title from the description body.
        lines.extend(["", description])
    lines.append(original_hint)
    return "\n".join(lines)
import copy
def get_median(my_list):
    """ Gets the median in a list of numeric values.

    Args:
        my_list (list of ordinal): The list to find from.

    Returns:
        (float): The median.

    Notes:
        Throws a ZeroDivisionError on an empty list (kept for backward
        compatibility with the original contract).
    """
    if not my_list:
        # Preserve the documented (if unusual) error type for empty input.
        raise ZeroDivisionError
    # sorted() copies, so the caller's list is untouched; this replaces the
    # original's deepcopy + O(n^2) repeated slicing with one O(n log n) sort.
    ordered = sorted(my_list)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 0:
        return (ordered[mid - 1] + ordered[mid]) / 2
    return ordered[mid]
import argparse
def get_arguments():
    """Parse all command line arguments for the evaluation script.

    Returns:
        argparse.Namespace with the parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description="evaluation for action segment refinement network."
    )
    parser.add_argument("config", type=str, help="path to a config file")
    parser.add_argument(
        "--model",
        type=str,
        default=None,
        help="""
            path to the trained model.
            If you do not specify, the trained model,
            'final_model.prm' in result directory will be used.
        """,
    )
    # First entry is the default refinement method.
    refinement_choices = ["refinement_with_boundary", "relabeling", "smoothing"]
    parser.add_argument(
        "--refinement_method",
        type=str,
        default=refinement_choices[0],
        choices=refinement_choices,
    )
    parser.add_argument(
        "--cpu", action="store_true", help="Add --cpu option if you use cpu."
    )
    return parser.parse_args()
def iterable_response_to_dict(iterator):
    """ Convert Globus paginated/iterable response object to a dict """
    entries = []
    for item in iterator:
        # Response items expose their payload via ``.data`` when available;
        # otherwise the item itself is appended as-is.
        entries.append(getattr(item, "data", item))
    return {"DATA": entries}
import struct
import fcntl
import termios
def GetTerminalSize():
    """Retrieve terminal window size.

    Queries stdin (fd 0) with the TIOCGWINSZ ioctl.

    Returns:
        (lines, columns) of the controlling terminal.
    """
    zeroed = struct.pack('HHHH', 0, 0, 0, 0)
    winsize = fcntl.ioctl(0, termios.TIOCGWINSZ, zeroed)
    rows, cols, _xpixel, _ypixel = struct.unpack('HHHH', winsize)
    return rows, cols
def load_words(path):
    """
    Load a list of words from a text file.

    Args:
        path - path to a file containing line separated words
    Returns
        List of strings
    Raises:
        EOFError - when the file contains no lines at all
    """
    words = []
    with open(path, 'r') as handle:
        for line in handle:
            words.append(line.strip())
    if not words:
        raise EOFError("No text found in file")
    return words
import time
def read_file(path):
    """Read in a text file and report how long the read took.

    Args:
        path: path to the text.

    Returns:
        list[str]: the file's lines, newline characters included.
    """
    started = time.perf_counter()
    with open(path, 'r') as handle:
        lines = handle.readlines()
    elapsed = time.perf_counter() - started
    print(f"Read file in: {elapsed:.4} seconds.")
    return lines
def run_metrics( metricObject, args ):
    """ Run the per-sequence metric computation and return the metric object.

    Params:
    -----
    metricObject: object exposing a ``compute_metrics_per_sequence`` method
    args: dictionary of keyword arguments forwarded to that method
    """
    # Unpack the args dict as keyword arguments for the metric call.
    metricObject.compute_metrics_per_sequence(**args)
    return metricObject
def overlay(sounds, **kwargs):
    """Overlay a list of sounds into a single audio segment.

    Sounds are mixed longest-first (the list is sorted in place); returns
    None for a missing/empty list or when any entry is None.
    """
    if sounds is None or len(sounds) == 0:
        return None
    # Longest first, so the base segment is long enough to hold every overlay.
    sounds.sort(key=lambda s: len(s) if s is not None else 0, reverse=True)
    mixed = sounds[0]
    for extra in sounds[1:]:
        if extra is None:
            return None
        mixed = mixed.overlay(extra)
    return mixed
def condTo25(cond, temp):
    """
    Converts given value of specific conductance to the value at 25 C.

    Uses the relation from Sorensen and Glass (1987), as found in
    Equation 4 of Hayashi (2004).

    Parameters
    ----------
    cond : float or array
        measured value of specific conductance
    temp : float or array
        temperature of water at which specific conductance was measured.

    Returns
    -------
    cond25: float or array
        specific conductance at 25 C.
    """
    temp_coefficient = 0.0187  # from Hayashi, 2004
    return cond / (1 + temp_coefficient * (temp - 25.0))
def get_repository_metadata_by_id( trans, id ):
    """Get repository metadata from the database.

    Decodes the encoded ``id`` via ``trans.security`` and returns whatever
    the session's ``Query.get`` yields for the RepositoryMetadata model.
    """
    decoded_id = trans.security.decode_id( id )
    model_class = trans.model.RepositoryMetadata
    return trans.sa_session.query( model_class ).get( decoded_id )
def _normalize_email(email: str) -> str:
"""Normalize email.
Arguments:
email {str} -- Email to normalize.
Returns:
str -- Normalized email.
"""
return email.upper() | 69f7c5238fe2dd8a0be08139373af91a1609d04f | 47,923 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.