content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_features(*functions):
    """Merge several feature lists into a single flat list.

    Mainly used to combine multiple datasets for training.
    """
    return [feature for feature_list in functions for feature in feature_list]
|
643c077985a69910f7d69d1db3c8b62a94de94c2
| 17,975
|
def num_divisors_from_fac(fac):
    """Return the divisor count of a number given its prime factorization.

    ``fac`` maps each prime to its exponent; the number of divisors is the
    product of (exponent + 1) over all primes.
    """
    result = 1
    for exponent in fac.values():
        result *= exponent + 1
    return result
|
776991357a5f198f156e74657b557cd889de06b3
| 17,976
|
def get_list_mode(request):
    """Return 'true' when the dashboard is in list display mode, else None."""
    mode = request.args.get('list')
    return mode if mode == 'true' else None
|
ceb58b9aba64d96a4675e2ef821deadc26110af2
| 17,977
|
import math
def cost(a, b, c, e, f, p_min, p):
    """Fuel cost for output ``p`` from standard quadratic parameters,
    plus the valve-point loading term ``|e * sin(f * (p_min - p))|``.
    """
    quadratic = a + b * p + c * p ** 2
    valve_point = abs(e * math.sin(f * (p_min - p)))
    return quadratic + valve_point
|
c0acc4e8820e5ce354cee411afc1a121c07757bc
| 17,979
|
import sys
import json
def get_deposit_account(cbpro_client):
    """Return the first ACH bank account among the client's payment methods.

    Params:
        cbpro_client: client exposing ``get_payment_methods()``, which may
            return a single dict or an iterable of dicts / JSON strings.
    Return:
        dict or None: the payment-method dict whose ``type`` is
        ``'ach_bank_account'`` (assumes only one is connected), or None
        when no such account exists.
    """
    bank_accounts = cbpro_client.get_payment_methods()
    sys.stdout.write("Bank account type: "+type(bank_accounts).__name__)
    # A single dict is the candidate account itself. Previously a dict that
    # was NOT an ACH account fell through to the for-loop below and iterated
    # the dict's *keys* — return None explicitly instead.
    if isinstance(bank_accounts, dict):
        sys.stdout.write(json.dumps(bank_accounts))
        if bank_accounts.get("type") == 'ach_bank_account':
            return bank_accounts
        return None
    for account in bank_accounts:
        if not account:
            continue
        # Some client versions return JSON strings instead of dicts.
        if isinstance(account, str):
            sys.stdout.write(account)
            account = json.loads(account)
        if isinstance(account, dict):
            sys.stdout.write(json.dumps(account))
            # This assumes that there is only one ACH bank account connected
            if account.get("type") == 'ach_bank_account':
                return account
    return None
|
124ae6857833e96f67d113a3919aacc9a1c1c442
| 17,980
|
def safer_eval(some_string):
    """Evaluate ``some_string`` with builtins disabled.

    Not fool-proof; based on advice from
    https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html
    Raises Exception when the string contains "__" or "import".
    """
    looks_suspicious = ("__" in some_string) or ("import" in some_string)
    if looks_suspicious:
        raise Exception("string to eval looks suspicious")
    return eval(some_string, {'__builtins__': {}})
|
6a392d48fe66b3c6397e0970a8e854b26326c188
| 17,981
|
def xgcd(x, y):
    """Extended GCD.

    Args:
        x (integer)
        y (integer)
    Returns:
        [g, a, b] where g is the greatest common divisor of x and y and
        the coefficients satisfy g == a*x + b*y.
    """
    old_r, r = x, y
    old_a, a = 1, 0
    old_b, b = 0, 1
    while r != 0:
        q = old_r // r
        # Tuple assignment replaces the manual temp-variable shuffle.
        old_r, r = r, old_r - q * r
        old_a, a = a, old_a - q * a
        old_b, b = b, old_b - q * b
    return [old_r, old_a, old_b]
|
f7d326302042880f72b9e70b0ea66890b00a093d
| 17,982
|
def __remove_emojies(post_tokenized):
"""It deletes emojies from a post.
Args:
tokens(list): the tokenized target post.
Returns:
list: the tokenized target post without any emoji.
"""
new_post_tokenized = []
for word in post_tokenized:
try:
str(word)
new_post_tokenized.append(word)
except UnicodeEncodeError:
pass
return new_post_tokenized
|
b56edbec102ef5a259a744dc9787991986cc9af8
| 17,983
|
import os
def get_template(name):
    """
    Read and return the content of a template file.

    @param name template name (path)
    @return file content (str)
    @raises FileNotFoundError when the resolved path does not exist
    """
    # NOTE(review): joining dirname(name) with name duplicates the directory
    # component (e.g. 'a/b.txt' resolves to 'a/a/b.txt'); presumably
    # os.path.dirname(__file__) was intended — confirm against callers.
    this = os.path.dirname(name)
    tm = os.path.join(this, name)
    if not os.path.exists(tm):
        raise FileNotFoundError("Unable to find template '{0}'.".format(name))
    with open(tm, "r", encoding="utf-8") as f:
        return f.read()
|
07b1f40fbbd6bbf488b12514308f7bc482c13c52
| 17,985
|
def chunk(mylist, chunksize):
    """Split ``mylist`` into consecutive chunks of ``chunksize`` items.

    Args:
        mylist: array
        chunksize: int
    Returns:
        list of chunks; the final chunk holds whatever remains.
    """
    return [mylist[start:start + chunksize]
            for start in range(0, len(mylist), chunksize)]
|
69cc8aa812c989c63dae4c444ee132c54a861fa4
| 17,986
|
def is_rejected_with_high_vaf(vaf, status, vaf_th):
    """Check if an SNV is rejected with VAF over threshold.

    Args:
        vaf: vaf of the SNV.
        status: reject/pass status of SNV.
        vaf_th: vaf threshold.
    Returns:
        bool: True if status is 'REJECT' and vaf > vaf_th, False otherwise.
    """
    # The redundant if/else returning True/False collapses to the condition.
    return vaf > vaf_th and status == 'REJECT'
|
9b048c9a2d562f50cbfc4af56d4c33189d95f00e
| 17,987
|
import os
def fixture_path(*path: str) -> str:
    """Build a path inside this test folder's ``fixtures`` directory."""
    base = os.path.realpath(os.path.dirname(__file__))
    return os.path.join(base, "fixtures", *path)
|
e46438483c4a681ea547a46e4bbe5a3105d7e456
| 17,990
|
def _Backward3a_v_Ph(P, h):
"""Backward equation for region 3a, v=f(P,h)
>>> "%.12f" % _Backward3a_v_Ph(20,1700)
'0.001749903962'
>>> "%.12f" % _Backward3a_v_Ph(100,2100)
'0.001676229776'
"""
I=[-12, -12, -12, -12, -10, -10, -10, -8, -8, -6, -6, -6, -4, -4, -3, -2, -2, -1, -1, -1, -1, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 8]
J=[6, 8, 12, 18, 4, 7, 10, 5, 12, 3, 4, 22, 2, 3, 7, 3, 16, 0, 1, 2, 3, 0, 1, 0, 1, 2, 0, 2, 0, 2, 2, 2]
n=[0.529944062966028e-2, -0.170099690234461, 0.111323814312927e2, -0.217898123145125e4, -0.506061827980875e-3, 0.556495239685324, -0.943672726094016e1, -0.297856807561527, 0.939353943717186e2, 0.192944939465981e-1, 0.421740664704763, -0.368914126282330e7, -0.737566847600639e-2, -0.354753242424366, -0.199768169338727e1, 0.115456297059049e1, 0.568366875815960e4, 0.808169540124668e-2, 0.172416341519307, 0.104270175292927e1, -0.297691372792847, 0.560394465163593, 0.275234661176914, -0.148347894866012, -0.651142513478515e-1, -0.292468715386302e1, 0.664876096952665e-1, 0.352335014263844e1, -0.146340792313332e-1, -0.224503486668184e1, 0.110533464706142e1, -0.408757344495612e-1]
Pr=P/100
nu=h/2100
suma=0
for i in range(32):
suma+=n[i]*(Pr+0.128)**I[i]*(nu-0.727)**J[i]
return 0.0028*suma
|
3417154afacd8d582bce1be94a306ddc3eed11d2
| 17,992
|
def apply(f, *args, **kwargs):
    """Call ``f`` with the given positional and keyword arguments.

    Parameters
    ----------
    f : callable
        The function to call.
    *args, **kwargs
        Arguments to feed to the callable.

    Returns
    -------
    a : any
        The result of ``f(*args, **kwargs)``.

    Useful as a first-class "call it" helper, e.g. ``map(apply, fs, xs)``,
    or as a class decorator that replaces a class with an instance of it.
    """
    return f(*args, **kwargs)
|
55850e03ce6078e17c92197cbece14a0ff99c0fd
| 17,993
|
def first_word_complex(text: str) -> str:
    """Return the first word in ``text``.

    Letters and apostrophes count as word characters; every other character
    is treated as a separator.
    """
    cleaned = "".join(
        ch if ch.isalpha() or ch == "'" else " " for ch in text
    )
    return cleaned.split()[0]
|
57c9558ad7bedfa31fbf1d14b4665b6d803175a7
| 17,995
|
def delete_from_list(head, tail, elem):
    """Delete the first node holding ``elem`` from a doubly linked list.

    Args:
        head: first node of the list (or None).
        tail: last node of the list (or None).
        elem: value to remove.

    Returns:
        Tuple ``(head, tail)`` of the updated list, or ``(-1, -1)`` when
        the list is empty or becomes empty after the deletion.
    """
    if head is None or tail is None:
        return -1, -1
    if head.data == elem:
        if head == tail:
            # Single-node list becomes empty.
            head = tail = None
            return -1, -1
        head = head.next
        head.prev = None
        print(f"{elem} deleted from head")
        return head, tail
    elif tail.data == elem:
        if tail == head:
            # BUG FIX: this line was `tail == None`, a no-op comparison
            # instead of an assignment.
            tail = None
            return -1, -1
        tail = tail.prev
        tail.next = None
        print(f"{elem} deleted from tail")
        return head, tail
    else:
        curr = head
        while curr:
            if curr.data == elem:
                break
            curr = curr.next
        if not curr:
            print("Element to be deleted was not found")
            return head, tail
        # Unlink the interior node.
        curr.prev.next = curr.next
        curr.next.prev = curr.prev
        print(f"{elem} deleted")
        return head, tail
|
f8e4a62be7b196cd9159015d86d8ea19c4bcc909
| 17,998
|
def icingByteFcnPRB(c):
    """Return the probability portion (low three bits) of an icing pixel.

    Args:
        c (str): single-character icing pixel.
    Returns:
        int: value of the probability portion of the pixel.
    """
    PROBABILITY_MASK = 0x07
    return ord(c) & PROBABILITY_MASK
|
a19d9896ed1b932e44629cb0eff7268ffe8c6fee
| 17,999
|
def replace_key_value(lookup, new_value, expected_dict):
    """Replace every value stored under key ``lookup`` in ``expected_dict``.

    When both the old and new values are dicts they are merged via
    ``update``; nested dicts are processed recursively. Mutates and
    returns ``expected_dict``.
    """
    for key in expected_dict:
        current = expected_dict[key]
        if key == lookup:
            if isinstance(current, dict) and isinstance(new_value, dict):
                current.update(new_value)
            else:
                expected_dict[key] = new_value
        elif isinstance(current, dict):
            expected_dict[key] = replace_key_value(lookup, new_value, current)
    return expected_dict
|
923cda5b8d2ba44749345d7d1adf907d4c5f3b23
| 18,002
|
from unittest.mock import Mock
def mock_group(test_state=None, n=0):
    """Mock a Tradfri group.

    Args:
        test_state (dict | None): state overrides merged over the defaults
            ``{"state": False, "dimmer": 0}``. Defaults to None — the
            previous ``{}`` default was a shared mutable default argument.
        n (int): suffix for the generated group name.

    Returns:
        Mock: group mock named ``tradfri_group_<n>``.
    """
    default_state = {"state": False, "dimmer": 0}
    state = {**default_state, **(test_state or {})}
    group = Mock(member_ids=[], observe=Mock(), **state)
    group.name = "tradfri_group_{}".format(n)
    return group
|
76664f2350838dbc8170aa3eeed972a7f7eda304
| 18,003
|
import numpy
def manual_bin(x, y, cuts):
    """
    Discretize the x vector at the given cut points and summarize y per bin.

    Parameters:
        x : a numeric vector to discretize, without missing values
            (e.g. numpy.nan or math.nan).
        y : a numeric vector of binary 0/1 values, same length as x.
        cuts : a list of numeric cut points used to discretize x.
    Returns:
        A list of dicts, one per bin, sorted by bin index, each with keys
        "bin", "freq", "miss", "bads", "minx", "maxx".
    Example:
        for b in manual_bin(scr, bad, [650, 700, 750]):
            print(b)
        # {'bin': 1, 'freq': 1311, 'miss': 0, 'bads': 520.0, 'minx': 443.0, 'maxx': 650.0}
    """
    _x = list(x)
    _y = list(y)
    # numpy.NINF / numpy.PINF were removed in NumPy 2.0 — use +/- numpy.inf.
    _c = sorted(set(cuts) | {-numpy.inf, numpy.inf})
    # Bin index of each x value against the sorted cut points.
    _g = numpy.searchsorted(_c, _x).tolist()
    out = []
    for g in sorted(set(_g)):
        grp = [(xi, yi) for gi, xi, yi in zip(_g, _x, _y) if gi == g]
        out.append({"bin": g,
                    "freq": len(grp),
                    "miss": 0,
                    "bads": sum(r[1] for r in grp),
                    "minx": min(r[0] for r in grp),
                    "maxx": max(r[0] for r in grp)})
    return out
|
3b0eb6dd2a352dcfb8d6b88fe7e15bef9baa6623
| 18,005
|
def bend(msg):
    """Return the 14-bit pitch-bend value of a pitch-bend MIDI message.

    ``msg[1]`` holds the LSB and ``msg[2]`` the MSB, 7 bits each.
    """
    lsb = msg[1] & 0x7F
    msb = msg[2] & 0x7F
    return (msb << 7) | lsb
|
e3e19567a92fccb609b85fed42245727221e72f1
| 18,006
|
def AND_nobias(x1: int, x2: int) -> int:
    """AND gate implemented as a perceptron without a bias term.

    Args:
        x1 (int): 1st input for AND.
        x2 (int): 2nd input for AND.
    Returns:
        int: 1 when the weighted sum exceeds the threshold, else 0.
    """
    w1, w2, theta = 0.5, 0.5, 0.7
    weighted_sum = w1 * x1 + w2 * x2
    return 1 if weighted_sum > theta else 0
|
e58a125a9c21233b8de7a044e5c0acdfdb29d1fa
| 18,009
|
def get_edges(source_label, target_label,
              edge_label, data=False):
    """Generate query for getting all the edges of the graph.

    Parameters
    ----------
    source_label : optional
        Label of the source nodes to match
    target_label : optional
        Label of the target nodes to match
    edge_label : iterable, optional
        Label of the edges to match
    data : bool, optional
        When True, also return the edge properties as ``attrs``.
    """
    # Build the RETURN clause once instead of duplicating the whole query.
    returns = "n.id as source_id, m.id as target_id"
    if data:
        returns += ", properties(r) as attrs"
    query = "MATCH (n:{})-[r:{}]->(m:{})\nRETURN {}\n".format(
        source_label,
        edge_label,
        target_label,
        returns)
    return query
|
39b47267a59c1409b4b32dab36033aec73296cf7
| 18,010
|
def precheck(atoms, i, j, Hs, As, Ds, fsc0):
    """
    Check if two atoms are potential H bond partners, based on element and altloc.

    atoms : sequence of atom objects (presumably cctbx-style, each exposing
        .element and nested .parent() levels for altloc / resseq — TODO confirm).
    i, j : indices of the two atoms to test.
    Hs : elements counted as hydrogen.
    As : elements counted as acceptors.
    Ds : elements allowed as the donor heavy atom bound to H.
    fsc0 : connectivity map; fsc0[k] lists indices of atoms bonded to atom k.
    Returns True when (i, j) may form an H bond.
    """
    ei, ej = atoms[i].element, atoms[j].element
    altloc_i = atoms[i].parent().altloc
    altloc_j = atoms[j].parent().altloc
    resseq_i = atoms[i].parent().parent().resseq
    resseq_j = atoms[j].parent().parent().resseq
    # One atom must be an H and the other an acceptor, in the same altloc
    # but different residues.
    one_is_Hs = ei in Hs or ej in Hs
    other_is_acceptor = ei in As or ej in As
    is_candidate = one_is_Hs and other_is_acceptor and \
    altloc_i == altloc_j and resseq_i != resseq_j
    if(ei in Hs):
        bound_to_h = fsc0[i]
        if(not bound_to_h): # exclude 'lone' H
            is_candidate = False
        elif(atoms[bound_to_h[0]].element not in Ds): # Use only first atom bound to H
            is_candidate = False
    if(ej in Hs):
        bound_to_h = fsc0[j]
        # Same lone-H / donor-element checks for the j side.
        if(not bound_to_h):
            is_candidate = False
        elif(atoms[bound_to_h[0]].element not in Ds):
            is_candidate = False
    return is_candidate
|
1d1a721c85f8f920b097f71891280b7dd0b9cc77
| 18,012
|
def to_date(val):
    """Serialize a ``date`` instance to its ISO-8601 string.

    ``isoformat`` is used instead of ``strftime("%Y-%m-%d")`` because
    strftime's %Y padding is inconsistent across operating systems
    (Windows zero-pads the year to 4 digits, Linux does not).
    """
    return val.isoformat()
|
d67034c336b90de01e44e7beebb78c6cf4837f4b
| 18,014
|
def filterReturnChar(string):
    """
    Strip all carriage-return ("\\r") characters from the input.

    :param string: input text.
    :return: the text without any "\\r" characters.
    """
    return "".join(ch for ch in string if ch != "\r")
|
fff1b39d8c837fc33e627d6ad2395eb05458517a
| 18,016
|
def count_logistic(x):
    """Return one step of the chaotic logistic map with r = 3.9."""
    return 3.9 * x * (1 - x)
|
476c4efb391652e25443685f2660b82721c04e94
| 18,018
|
def check_learner_is_fitted(learner):
    """Return True if fitted and False otherwise.

    Follows scikit-learn's convention that fitting adds instance
    attributes whose names end with an underscore, so checking for any
    such attribute is sufficient.
    See: https://scikit-learn.org/dev/glossary.html#term-fitted
    """
    return any(name.endswith("_") for name in vars(learner))
|
35e17e3d3a85fa9a65d77ed3be990fc060f87156
| 18,019
|
import torch
def load_latent_avg(checkpoint_path):
    """Return the encoder's average latent stored in a checkpoint file."""
    state = torch.load(checkpoint_path)
    return state["latent_avg"]
|
51ed354bd67ae78a6000dd5b60cab2e6a0cec413
| 18,020
|
import statistics
def Standard_Deviation(All_Results, pos):
    """Population standard deviation of column ``pos`` across trial runs.

    When the extracted column itself contains lists, the second element
    of each inner list is used instead.
    """
    values = [row[pos] for row in All_Results]
    if isinstance(values[0], list):
        values = [inner[1] for inner in values]
    return float(statistics.pstdev(values))
|
ddbbfa06c744014ced30ba7aadf9fe82bee984cb
| 18,021
|
def parse_float(txt):
    """Parse ``txt`` as a float, requiring an explicit decimal point.

    Returns None when the text contains no '.' or is not a valid float.
    """
    if '.' not in txt:
        return None
    try:
        return float(txt)
    except ValueError:
        return None
|
2767faa591227ea8664a458c3bc55da51a0cb2f5
| 18,022
|
def makov_payne_correction(defect_cell, geometry, e_r, mk_1_1):
    """Makov-Payne finite-size correction for a charged defect cell.

    :param defect_cell: Cell object of the defect calculation (uses ``.charge``)
    :param geometry: host-cell geometry: 'sc', 'fcc', 'bcc', 'hcp' or 'other'
    :param e_r: relative permittivity
    :param mk_1_1: first Makov-Payne term for the case q = 1 and e_r = 1
    """
    shape_factors = {'sc': -0.369, 'fcc': -0.343, 'bcc': -0.342,
                     'hcp': -0.478, 'other': -1. / 3}
    c_sh = shape_factors[geometry]
    screening = 1. + c_sh * (1. - 1. / e_r)
    return screening * defect_cell.charge ** 2 * mk_1_1 / e_r
|
670755010e0bc2af35eea43563aae807db1d05d5
| 18,023
|
def method(a):
    """Increment the input by one.

    :param a: numeric input value.
    :return: ``a + 1``.
    """
    return a + 1
|
22441f99679a765a11a64ab48897256502fed347
| 18,027
|
from pathlib import Path
from typing import List
import os
def recursive_dir_files(target_dir: Path) -> List[Path]:
    """Return paths of all files under ``target_dir``, relative to it."""
    skip = len(target_dir.parts)
    found = []
    for root, _dirs, names in os.walk(target_dir):
        for name in names:
            full = Path(root) / name
            # Drop the target_dir prefix components.
            found.append(Path(*full.parts[skip:]))
    return found
|
264a3895d00334fad4b3b820b49564755e8bda67
| 18,028
|
import time
def retry(assertion_callable, retry_time=10, wait_between_tries=0.1, exception_to_retry=AssertionError):
    """Call ``assertion_callable`` repeatedly until it succeeds.

    Re-raises the last ``exception_to_retry`` once ``retry_time`` seconds
    have elapsed; sleeps ``wait_between_tries`` between attempts.
    """
    deadline = time.time() + retry_time
    while True:
        try:
            return assertion_callable()
        except exception_to_retry as err:
            if time.time() >= deadline:
                raise err
            time.sleep(wait_between_tries)
|
b8fdb7115d94da96823d6728c87ffce84d6f925c
| 18,029
|
def swap(m, r1, r2):
    """Swap rows ``r1`` and ``r2`` of matrix ``m`` in place (forward elimination).

    Returns ``m`` so calls can be chained.
    """
    # Pythonic tuple assignment replaces the manual temp variable.
    m[r1], m[r2] = m[r2], m[r1]
    return m
|
4fcbde1aff552802a321f5c060a48d3961ba3a23
| 18,030
|
import os
def mkdir(dir_path):
    """ Creates a directory (and parents), like ``mkdir -p``.

    Args:
        dir_path: The path to create.

    Returns:
        int: the command's return code (0 on success).
    """
    import subprocess
    # Pass an argument list instead of interpolating into a shell string:
    # os.system("mkdir -p " + dir_path) was open to shell injection and
    # broke on paths containing spaces.
    return subprocess.run(["mkdir", "-p", dir_path]).returncode
|
91105f6cedbe36185bd53771135570be6eff37cf
| 18,032
|
def deslugify(slug):
    """Convert a hyphenated slug into title-cased human-readable text."""
    words = slug.split('-')
    return ' '.join(words).title()
|
d2a58a68d1759007068ca484146b2ca70fc88ce9
| 18,033
|
def to_bold_on_html(html_table:str) -> str:
    """Highlight perfect scores in an HTML table.

    Replaces every bare ``<td>100</td>`` cell with a centred,
    grey-background, bold ``100.0`` cell. Relies on an exact string
    match, so differently formatted cells are left untouched.
    """
    html_tag = "<td>100</td>"
    html_tag_replace = (
        """
        <td>
            <div style='text-align:center; background: #c0c0c0'>
                <strong>100.0</strong>
            </div>
        </td>
        """
    )
    html_table = html_table.replace(html_tag, html_tag_replace)
    return html_table
|
462e820103f1cfcc3f7519f2978ad0e9435cd098
| 18,034
|
def compressive_failure(panel):
    """Return the critical failure load for compressive failure.

    Computed as the panel area times the skin material's 'Suts' strength.
    """
    strength = panel.skin.material_properties['Suts']
    return panel.area * strength
|
55c199f47e77033a2c67f8514675c6845a9b90bc
| 18,035
|
import requests
import json
def get_access_token(code, env):
    """Exchange the OAuth authorization ``code`` for an access token.

    POSTs a JSON body built from ``env`` to ``<tenant_url>/oauth/token``
    and returns the parsed JSON response.
    """
    token_url = env['tenant_url'] + '/oauth/token'
    payload = {
        'grant_type': env['grant_type'],
        'client_id': env['client_id'],
        'code_verifier': env['verifier'],
        'code': code,
        'audience': env['audience'],
        'redirect_uri': env['callback_url'],
    }
    response = requests.post(token_url,
                             headers={'Content-Type': 'application/json'},
                             data=json.dumps(payload))
    return response.json()
|
9db7798ab63eff78d67d588a0e51e1289e26eb53
| 18,036
|
def construct_consensus_gp_as(has_rna, has_pb):
    """Dynamically generate an autosql (.as) schema for consensus bigCat tracks.

    Args:
        has_rna: when True, append the RNA intron/exon support columns.
        has_pb: when True, append the IsoSeq (PacBio) support column.
    Returns:
        str: the autosql schema text.
    """
    # Base schema shared by every consensus track.
    consensus_gp_as = '''table bigCat
"bigCat gene models"
(
string chrom; "Reference sequence chromosome or scaffold"
uint chromStart; "Start position in chromosome"
uint chromEnd; "End position in chromosome"
string name; "Name"
uint score; "Score (0-1000)"
char[1] strand; "+ or - for strand"
uint thickStart; "Start of where display should be thick (start codon)"
uint thickEnd; "End of where display should be thick (stop codon)"
uint reserved; "RGB value (use R,G,B string in input file)"
int blockCount; "Number of blocks"
int[blockCount] blockSizes; "Comma separated list of block sizes"
int[blockCount] chromStarts; "Start positions relative to chromStart"
string name2; "Gene name"
string cdsStartStat; "Status of CDS start annotation"
string cdsEndStat; "Status of CDS end annotation"
int[blockCount] exonFrames; "Exon frame {0,1,2}, or -1 if no frame for exon"
string txId; "Transcript ID"
string type; "Transcript type"
string geneName; "Gene ID"
string geneType; "Gene type"
string sourceGene; "Source gene ID"
string sourceTranscript; "Source transcript ID"
string alignmentId; "Alignment ID"
lstring alternativeSourceTranscripts; "Alternative source transcripts"
lstring Paralogy; "Paralogous alignment IDs"
lstring UnfilteredParalogy; "Unfiltered paralogous alignment IDs"
lstring collapsedGeneIds; "Collapsed Gene IDs"
lstring collapsedGeneNames; "Collapsed Gene Names"
string frameshift; "Frameshifted relative to source?"
lstring exonAnnotationSupport; "Exon support in reference annotation"
lstring intronAnnotationSupport; "Intron support in reference annotation"
string transcriptClass; "Transcript class"
string transcriptModes; "Transcript mode(s)"
string validStart; "Valid start codon"
string validStop; "Valid stop codon"
string properOrf; "Proper multiple of 3 ORF"
    '''
    # Optional columns are appended before the closing paren.
    if has_rna:
        consensus_gp_as += '    lstring intronRnaSupport; "RNA intron support"\n'
        consensus_gp_as += '    lstring exonRnaSupport; "RNA exon support"\n'
    if has_pb:
        consensus_gp_as += '    string pbIsoformSupported; "Is this transcript supported by IsoSeq?"'
    consensus_gp_as += '\n)\n'
    return consensus_gp_as
|
04ad89609d68873792222d191c4259b4a35d1f33
| 18,038
|
from warnings import filters
def remove_punct(sentences):
    """Remove ASCII punctuation from a list of sentences.

    Args:
        sentences (list): list of sentences.
    Returns:
        list: new list of sentences without punctuation.

    Note: the previous implementation used the Python 2
    ``str.translate(None, table)`` API with ``warnings.filters`` as the
    deletion table (a list, not a string) and discarded each translated
    sentence, so it either raised or returned the input unchanged.
    """
    import string
    table = str.maketrans('', '', string.punctuation)
    return [sentence.translate(table) for sentence in sentences]
|
e53d4c6bec96048e177f81ee0fe48afc17180860
| 18,039
|
def strftimeTranslate(txt: str) -> str:
    """
    Translate English month abbreviations in strftime output to Turkish.
    :param txt: strftime output.
    :type txt: str
    :rtype: str
    """
    # "Apr" previously mapped to "Nisa"; the Turkish month is "Nisan".
    months = {"Jan": "Ocak", "Feb": "Şubat", "Mar": "Mart", "Apr": "Nisan",
              "May": "Mayıs", "Jun": "Haziran", "Jul": "Temmuz",
              "Aug": "Ağustos", "Sep": "Eylül", "Oct": "Ekim",
              "Nov": "Kasım", "Dec": "Aralık"}
    for abbrev, turkish in months.items():
        if abbrev in txt:
            txt = txt.replace(abbrev, turkish)
    return txt
|
666f56d74fd8da39170e8fd9fd32e138f962aa4e
| 18,041
|
import os
def get_user_login() -> str:
    """
    Return the login name of the current user.
    Reports "root" when the process runs with uid 0.
    :return str: user name
    """
    if os.getuid() == 0:
        return "root"
    return os.getlogin()
|
4da9b985d5b52759259da62c27f7fe76a984a635
| 18,042
|
from typing import List
import requests
def search(query: str, api_key: str) -> List:
    """Query the Google Custom Search API and return the result items."""
    # ! to edit head to: https://cse.google.com/cse/all
    params = {
        "key": api_key,
        #"cx": "'000281471148392423350:ymsqkb0dqs8"
        "cx": "000281471148392423350:64fnnp-ny2w", # original + darklyrics
        #"cx": "002017775112634544492:7y5bpl2sn78", # original without darklyrics
        "q": query,
    }
    response = requests.get("https://www.googleapis.com/customsearch/v1",
                            params=params)
    return response.json().get("items", [])
|
b29961bbda063072d6dc8dedaef9cffe85ab6a2c
| 18,044
|
def unpack_question_dimid(dimension_id):
    """Decompose a dimension id into (unit_id, lesson_id, question_id).

    Returns:
        A tuple of strings, except that lesson_id comes back as None when
        it is the literal string 'None'.
    """
    unit_id, lesson_id, question_id = dimension_id.split(':')
    return unit_id, None if lesson_id == 'None' else lesson_id, question_id
|
4133ec1ce5cd986b64c9af096931b9f2bf8cb123
| 18,046
|
import time
def timestamp2time( timestamp ):
    """Convert a 'yyyy.mm.dd.hh.mm.ss' timestamp to epoch seconds for comparisons."""
    parsed = time.strptime(timestamp, "%Y.%m.%d.%H.%M.%S")
    return time.mktime(parsed)
|
7a2e9a6b51de0c9a4ebd69e1e0050bab4af064f9
| 18,048
|
import os
import sys
def load_whitelist(whitelist_file):
    """Read a whitelist file into a list, seeded with localhost.

    Exits with status 1 when the file does not exist; lines starting
    with '#' are skipped.
    """
    if not os.path.isfile(whitelist_file):
        print('Error: Whitelist file not found.')
        sys.exit(1)
    with open(whitelist_file) as handle:
        entries = [line for line in handle.read().splitlines()
                   if not line.startswith('#')]
    return ['127.0.0.1'] + entries
|
8781255f8bc357e5f3149723e12f2d7cbb50ab1e
| 18,049
|
def dot_attention_score(key, query):
    """Dot-product attention scores.

    Shapes: key [B, Tk, D], query [(Bs), B, Tq, D] -> [(Bs), B, Tq, Tk].
    """
    key_t = key.transpose(1, 2)
    return query.matmul(key_t)
|
ec1f855d4d24d30bd3ab38943b25194887c7841c
| 18,050
|
import six
def sorted_files(pdict):
    """
    Return each entry's ``file_summary`` sorted by descending duration.

    :param pdict: profile dict; the 'summary' key is skipped.
    :return: sorted list of file_summary dicts.
    """
    summaries = [entry['file_summary']
                 for key, entry in six.iteritems(pdict)
                 if key != 'summary']
    summaries.sort(key=lambda summary: summary['duration'], reverse=True)
    return summaries
|
3135561bd68cdc0018c1b904f8a6b47dc4a96468
| 18,052
|
def get_axis_labels(df_list, key, kdims, vdims):
    """Generate (xlabel, ylabel) for a plot of df_list[key].

    A label is '<name> (<units>)' when a unit is recorded for the
    measurement (looked up in df_list[key]["units"] via the name's
    position in df_list[key]["measurements"]), otherwise just the name.
    The y label tries kdims[1], then vdims, then falls back to kdims[1]
    unadorned; the x label comes from kdims[0].
    """
    try:
        if df_list[key]["units"][df_list[key]["measurements"].index(kdims[1])]:
            ylabel = "{} ({})".format(
                kdims[1],
                df_list[key]["units"][df_list[key]["measurements"].index(kdims[1])],
            )
        else:
            ylabel = "{}".format(kdims[1])
    # Bare excepts: any lookup failure (missing key, name not in
    # measurements, index out of range) triggers the next fallback.
    except:
        try:
            if df_list[key]["units"][df_list[key]["measurements"].index(vdims)]:
                ylabel = "{} ({})".format(
                    vdims,
                    df_list[key]["units"][df_list[key]["measurements"].index(vdims)],
                )
            else:
                ylabel = "{}".format(vdims)
        except:
            ylabel = "{}".format(kdims[1])
    try:
        if df_list[key]["units"][df_list[key]["measurements"].index(kdims[0])]:
            xlabel = "{} ({})".format(
                kdims[0],
                df_list[key]["units"][df_list[key]["measurements"].index(kdims[0])],
            )
        else:
            xlabel = "{}".format(kdims[0])
    except:
        # No unit found for the x dimension — use the bare name.
        xlabel = "{}".format(kdims[0])
    return xlabel, ylabel
|
5ac498879048ad50192d9106d177420e4574820e
| 18,053
|
def all_subsets(s):
    """Return the list of all subsets of ``s``, each as a Python set."""
    subsets = [set()]
    for element in s:
        # Extend with a copy of each existing subset plus the new element.
        subsets += [subset | {element} for subset in list(subsets)]
    return subsets
|
d182ac9050cdb330fd18c5ce0ec63ceb8ac8bfc2
| 18,056
|
def create_link(n, d, suffix):
    """Build a (name, url) pair for a man7.org manpage link.

    Not tested extensively (there are 10k+ links), but works for the
    handful that were tried.
    """
    url = f"http://man7.org/linux/man-pages/man{d}/{n}.{d}{suffix}.html"
    return (n, url)
|
6c5a98e634395874f09aa00dced37962a1fe5530
| 18,058
|
import json
def dump(key: str):
    """
    Load the guild JSON blob stored under ``key``.
    Parameters:
        key (str): The key to dump
    Returns:
        data (dict): The JSON data
    """
    with open(f'./data/guild_{key}.json', 'r') as handle:
        return json.load(handle)
|
6f243d5c09f4ff4a7e4ba4133b26089dfa37cac2
| 18,059
|
def str_set_of_candidates(candset, cand_names=None):
    """
    Nicely format a set of candidates as "{a, b, c}".

    >>> print(str_set_of_candidates({0, 1, 3, 2}))
    {0, 1, 2, 3}
    >>> print(str_set_of_candidates({0, 3, 1}, cand_names="abcde"))
    {a, b, d}

    Parameters
    ----------
    candset : iterable of int
        An iterable of candidates.
    cand_names : list of str or str, optional
        Symbolic names for every candidate; indices are used when omitted.

    Returns
    -------
    str
    """
    if cand_names is None:
        labels = [str(cand) for cand in candset]
    else:
        labels = [str(cand_names[cand]) for cand in candset]
    return "{" + ", ".join(sorted(labels)) + "}"
|
817592c964f8912585944a05e11606b4c4597721
| 18,063
|
from typing import List
from typing import Dict
def _jupyter_server_extension_paths() -> List[Dict[str, str]]:
"""
Function to declare Jupyter Server Extension Paths.
"""
# This comprehension actually works, but black can't handle it!
# return [ {"module": f"rsp_jupyter_extensions.{ext}"} for ext in exts ]
return [{"module": "rsp_jupyter_extensions"}]
|
dd3b364fdf08a796942223d1f46bc5ce4bb89150
| 18,064
|
def cli_submit_ci_job(apic, args):
    """Implement CLI command `submit-ci-project`.

    Requires --repo-branch and --repo-ref; prints the submitted job id and
    returns 0, or prints an error and returns 1.
    """
    # pylint: disable=unused-argument
    missing = args.repo_branch is None or args.repo_ref is None
    if missing:
        print('error: missing required arguments: --repo-branch or --repo-ref')
        return 1
    job_id = apic.submit_ci_job(args.ID, branch=args.repo_branch, ref=args.repo_ref)
    print(job_id)
    return 0
|
19abc8d9f7427c5f57b53ab341870ea0a4d6c2a5
| 18,065
|
def lorenz(amplitude: float, fwhm: float, x: int, x_0: float):
    """Lorentzian model of the frequency response centred at ``x_0``."""
    half_width_sq = (fwhm / 2.0) ** 2
    return amplitude * half_width_sq / ((x - x_0) ** 2 + half_width_sq)
|
f7764c0103d8912a78e2f022e5a863310e0bf6b9
| 18,067
|
import os
def __is_cached__(file_name):
""" Determine which files should receive a cache-control header
Files that are not cached are:
- *.html (e.g. index.html, mobile.html)
- *.txt files
- the *.appcache file itself (although it's versioned with the
git_commit_short)
- info.json
The behaviour is exactly the same for master and other branches
example:
<bucket_name>/master/1902211564/index.html <= no cache header
<bucket_name>/master/1902211564/as5a56a/lib/build.js <= cache header
activated master or branch `fix_1234`
<bucket_name>/fix_1234/index.html <= no cache header
<bucket_name>/fix_1234/as5a56a/lib/build.js <= cache header
"""
_, extension = os.path.splitext(file_name)
return os.path.basename(file_name) not in ['info.json'] and extension not in [
'.html', '.txt', '.appcache', '']
|
959f2775e40cd33a0ca64d1a57e2aa5e2a7c3fe5
| 18,068
|
def find_anagrams(word, candidates):
    """
    Return word's anagrams from candidates, lazily, as a filter object.

    The word itself (in any letter case) is excluded.
    """
    target = word.lower()
    signature = sorted(target)

    def is_anagram(candidate):
        lowered = candidate.lower()
        return lowered != target and sorted(lowered) == signature

    return filter(is_anagram, candidates)
|
4c016e6febd9c1d4144b4756d43b5f596c33ec4f
| 18,069
|
import subprocess
def get_file_contents(path, revision=None):
    """Return a file's bytes from the staged env (revision None) or a revision."""
    rev = "" if revision is None else revision
    spec = "{r}:{p}".format(r=rev, p=path)
    completed = subprocess.run(
        ["git", "show", spec],
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
        check=True,
    )
    return completed.stdout
|
f23ad9238a8cb222c456cb98cb5d91de35896449
| 18,072
|
import time
def now():
    """
    Returns:
        Current local time formatted as dd.mm.YYYY_HH:MM:SS.
    """
    return time.strftime("%d.%m.%Y_%H:%M:%S")
|
16401e97c10902b9a84a2868454421df83288b98
| 18,074
|
from typing import Dict
from typing import Any
def get_max_field_length(fields: Dict[str, Any]) -> int:
    """Get the length of the longest field name in ``fields``.

    :param dict fields: A dictionary to search for the longest field.
    :return: length of the longest key, or 0 for an empty dict (the
        previous implementation raised ValueError on empty input).
    """
    return max(map(len, fields), default=0)
|
62da7fd4fa8173bb641391100136122fde4c902c
| 18,077
|
import math
def approximate_exp(num_terms=10):
    """Approximate e by summing the first ``num_terms`` reciprocal factorials."""
    total = 0
    for k in range(num_terms):
        total += 1 / math.factorial(k)
    return total
|
d4c2cd7eaf000ca862051e64b22a0795c564ba26
| 18,078
|
import string
import os
def expand_path_vars(path):
    """
    Expand $-style environment substitutions in ``path``.

    :func:`os.path.expandvars` doesn't have any way to escape the
    substitutions, unlike :class:`string.Template`, so the expansion is
    done manually here. Unknown variables are left untouched.
    """
    return string.Template(path).safe_substitute(os.environ)
|
a5969fcf118fba791a46636ec93cae9e09fe6a64
| 18,079
|
from typing import Tuple
from typing import Any
from typing import List
def _convert_record_to_object(class_: type, record: Tuple[Any], field_names: List[str]) -> Any:
"""
Convert a given record fetched from an SQL instance to a Python Object of given class_.
:param class_: Class type to convert the record to.
:param record: Record to get data from.
:param field_names: Field names of the class.
:return: the created object.
"""
kwargs = dict(zip(field_names, record[1:]))
field_types = {key: value.type for key, value in class_.__dataclass_fields__.items()}
for key in kwargs:
if field_types[key] == bytes:
kwargs[key] = bytes(kwargs[key], encoding='utf-8')
obj_id = record[0]
obj = class_(**kwargs)
setattr(obj, "obj_id", obj_id)
return obj
|
3422fb51f959f1ead4f6af05bf750746358263bd
| 18,080
|
def VOLTStoHEX(volts):
    """DAC scaling: 1000.0 volts full scale maps to 'FFFF'.

    (for setting DAC output)
    VOLTStoHEX(1000.0) => 'FFFF'
    VOLTStoHEX( 900.0) => 'E665'
    """
    counts = int(volts * 65.535)
    return format(counts, 'X')
|
70418ab141f8e46e5068b56a3d3c9b2ecaa1d00e
| 18,081
|
import os
def list_files(dirname, extension=None):
    """
    List files directly inside ``dirname`` (non-recursive); optionally keep
    only those whose extension matches ``extension`` (case-insensitive,
    given without the dot).
    """
    walker = os.walk(dirname)
    try:
        _, _, filenames = next(walker)
    except StopIteration:
        # Directory missing or unreadable: behave like the original break-loop.
        filenames = []
    if extension is None:
        return filenames
    wanted = '.' + extension.lower()
    return [name for name in filenames
            if os.path.splitext(name)[1].lower() == wanted]
|
84fd78dad7e624754ba4b39e1cf09f9a09748fca
| 18,082
|
def make_invalid_varname_comment(varname: str):
    """Build a Stata comment flagging ``varname`` as an invalid varname."""
    return '* Invalid STATA varname: {}'.format(varname)
|
e40dc6a8d15d81a9af3e25825f0ff424379c73ca
| 18,083
|
def GetFoldTypeFromAnnotation(line):#{{{
    """Get foldType from the annotation line of the topomsa file.

    Header lines are '|'-separated with the fold type (5B, 7B, 6R — 5/7/6
    helical broken fold-types) in the fourth field, e.g.:
        >SBF_like|A0A077PBM8|tmname:TM1-S1;...|5B
    Returns "" when the line is empty or has no fourth field.
    """
    foldType = ""
    if line:
        fields = line.split('|')
        # BUG FIX: the guard used `len(li) >= 3` while reading li[3] (the
        # fourth element), raising IndexError on three-field lines.
        if len(fields) >= 4:
            foldType = fields[3]
    return foldType
|
1c0b6f90b7d22cb78b8828cbd9c6d74867d430ae
| 18,084
|
def avatar_to_dict(avatar):
    """Serialize an Avatar database entity to a plain dict.

    This is an admin-only function that exposes more database information
    than the method on Avatar.
    """
    return {
        'id': avatar.id,
        'flake': avatar.flake,
        'uri': avatar.uri,
        'user': avatar.user.to_dict(),
        'timestamp': avatar.timestamp,
    }
|
3ccb7da50aee0ccd847a8f38f60c02c7b7f2a820
| 18,085
|
def get_skip_comments(events, skip_users=None):
    """
    Determine comment ids that should be ignored, either because of
    deletion or because the user should be skipped.
    Args:
        events: a list of (event_type str, event_body dict, timestamp).
        skip_users: optional list of user logins whose comments are dropped.
    Returns:
        comment_ids: a set of comment ids that were deleted or made by
        users that should be skipped.
    """
    ignored_users = skip_users or []
    ignored_ids = set()
    comment_events = ('issue_comment', 'pull_request_review_comment')
    for event_type, body, _timestamp in events:
        if event_type not in comment_events:
            continue
        comment_id = body['comment']['id']
        deleted = body.get('action') == 'deleted'
        if deleted or body['sender']['login'] in ignored_users:
            ignored_ids.add(comment_id)
    return ignored_ids
|
30663624714104bc7b9aa0fd4da45f537b06420f
| 18,086
|
def encode_jump(input_string):
    """Encode input string according to algorithm specifications.

    Each decimal digit is swapped with its "jump" partner
    (0<->5, 1<->9, 2<->8, 3<->7, 4<->6); non-digits pass through.
    """
    jump_table = str.maketrans("0123456789", "5987604321")
    return input_string.translate(jump_table)
|
b316ffab520ce2cdbc97e9711b897e85081a827e
| 18,087
|
def numberOfSteps(self, num):
    """
    Return the number of steps to reduce *num* to zero, where a step
    halves an even number or subtracts one from an odd number.
    :type num: int
    :rtype: int

    Counting bits beats simulating: each '1' bit costs a subtract plus a
    halve (2 steps), each '0' bit costs a halve (1 step), minus 1 because
    the most-significant bit needs no final halve.
    """
    if num == 0:
        # BUGFIX: bin(0) == '0b0' made the formula return -1; zero needs 0 steps.
        return 0
    binary = bin(num)[2:]
    return binary.count('1') * 2 + binary.count('0') - 1
|
1eba817694af7d798a8717a5ecae5a80abf942b1
| 18,088
|
def sluggify(text):
    """
    Create a file-system-friendly string from *text* by stripping special
    characters and truncating to 75 characters.
    Use this function to make file names from arbitrary text, like titles.
    :param text: arbitrary string (falsy input yields '').
    :return: sanitized string.
    """
    if not text:
        return ''
    extra_allowed = {' ', '.', ',', '_', '-', '='}
    cleaned = ''.join(
        ch for ch in text
        if ch.isalpha() or ch.isdigit() or ch in extra_allowed
    ).rstrip()
    # Slicing caps at 75 chars and is a no-op for shorter strings.
    return cleaned[:75]
|
a597f1625ee215314a77832b5f0fce2b7884c30b
| 18,089
|
def UnZeroMatrix(matrix):
    """
    Replace all instances of 0.0 in the matrix with the lowest observed
    non-zero value.

    Assumes the first row and first column of the matrix are descriptors.

    BUGFIX: the minimum was previously seeded from min(matrix[1][1:]),
    which could itself be 0.0 (leaving zeros unreplaced) and ignored
    non-zero values in row 1 smaller than later rows' values.
    """
    # Collect every non-zero data value across all data rows/columns.
    nonzero = [
        float(el)
        for line in matrix[1:]
        for el in line[1:]
        if float(el) != 0.0
    ]
    # Degenerate all-zero matrix: nothing meaningful to substitute.
    minNonZero = min(nonzero) if nonzero else 0.0
    newMat = [matrix[0]]
    for line in matrix[1:]:
        curVec = [line[0]]
        for el in line[1:]:
            val = float(el)
            curVec.append(minNonZero if val == 0.0 else val)
        newMat.append(curVec)
    return newMat
|
8ede8d57e009aa8248f73d70fe48d4c4ea80baa8
| 18,090
|
def _get_input(query_str):
    """Prompt with *query_str* until the user enters a valid float; return it.

    Raises EOFError/KeyboardInterrupt normally so the caller (or the user)
    can abort the prompt.
    """
    while True:
        try:
            return float(input(query_str))
        except ValueError:
            # BUGFIX: only retry on bad numeric input. The old bare `except`
            # also swallowed KeyboardInterrupt and EOFError, making the loop
            # inescapable when stdin closed or the user hit Ctrl-C.
            print("Unrecognized input!")
|
22b8ac5e24e484dbf8842f297866569281f32878
| 18,092
|
def test_policy(env, pie, max_steps, verbose=0, render=0):
    """Run policy *pie* in *env* for one episode, capped at *max_steps* steps.

    :param env: environment exposing reset()/step(action)/render().
    :param pie: policy exposing predict(state).
    :param max_steps: hard cap on the number of environment steps.
    :param verbose: 0 silent, 1 episode summary, 2 per-step logging.
    :param render: 0 never, 1 at end, 2 at reset and end, 3 every step.
    :return: (total_reward, steps) — note the reward comes first.
    """
    cs = env.reset()
    act, reward, done = 0, 0, False
    steps, total_reward = 0, 0
    if verbose > 0:
        print('[RESET]')
    if render == 2:
        env.render()
    while not done and steps < max_steps:
        steps += 1
        act = pie.predict(cs)
        cs, reward, done, _ = env.step(act)
        total_reward += reward
        if verbose > 1:
            print('[STEP]:[{}], A:[{}], R:[{}], D:[{}], TR:[{}]'.format(steps, act, reward, done, total_reward))
        if render == 3:
            env.render()
    if verbose > 0:
        print('[TERM]: TS:[{}], TR:[{}]'.format(steps, total_reward))
    if render in (1, 2):
        env.render()
    return total_reward, steps
|
28017b78e0bbfc1c29c30a6dd6e7d6cecdda3478
| 18,093
|
def prepare_df(df):
    """
    Convert the df header into string format for the GUI to be able to plot.
    Args:
        df --> pandas dataframe: df that needs to be cleaned
    Return:
        df --> pandas dataframe: same frame with all column labels as strings
    """
    df.columns = [
        col if isinstance(col, str) else "{}".format(col)
        for col in df.columns
    ]
    return df
|
b1028f0a791dc4e10d3cfdf5f43bf259a8e34a9b
| 18,095
|
def sign(a):
    """Return 1 if the scalar *a* is non-negative, else 0.

    NOTE: this is a step function, not the mathematical sign — negative
    inputs yield 0, not -1.
    """
    return 1 if a >= 0 else 0
|
365d05998ebd5146881a2e3c1a4003d3ed02f681
| 18,096
|
def resize_based_on_aspect_ratio(aspect_ratio, base_width, max_width=400):
    """Compute the (width, height) after resizing to *max_width* while
    preserving *aspect_ratio* (a (w, h) pair).

    Returns None when *base_width* is already below *max_width* (no resize).
    """
    if base_width < max_width:
        return None
    unit = max_width / aspect_ratio[0]
    width = int(unit * aspect_ratio[0])
    height = int(unit * aspect_ratio[1])
    return (width, height)
|
3ae395e5e9bab27b00ed8ed57490dd1d342e61a5
| 18,097
|
import re
def isValidURL(str: str):
    """
    Input: String (or None)
    Output: Boolean
    Returns True when the input looks like an http(s) URL, False otherwise
    (None always yields False).
    """
    if str is None:
        return False
    url_pattern = re.compile(
        r"((http|https)://)(www.)?"
        r"[a-zA-Z0-9@:%._\+~#?&//=]"
        r"{2,256}\.[a-z]"
        r"{2,6}\b([-a-zA-Z0-9@:%"
        r"._\+~#?&//=]*)"
    )
    return re.search(url_pattern, str) is not None
|
9b30a556f5a38fdde8e03a6084369449b2da9ea1
| 18,098
|
def create_expected_output(parameters, actual_data):
    """
    Pair each key in *parameters* with the next value from *actual_data*.

    NOTE: *actual_data* is consumed (mutated) — each value used is removed
    from the list. Keys beyond len(actual_data) are simply omitted.
    :param parameters: iterable of keys.
    :param actual_data: list of values, consumed front-to-back.
    :return: dict mapping keys to values.
    :type: dict
    """
    expected_output = {}
    for key in parameters:
        if actual_data:
            # pop(0) both takes and removes the front value, matching the
            # original take-first-then-remove behavior.
            expected_output[key] = actual_data.pop(0)
    return expected_output
|
356b8e50e9ee48bead784ff3cad3778b9076c3c5
| 18,099
|
def smartScale(gdk, src, resolution):
    """
    **SUMMARY**
    Resizes an image preserving its aspect ratio. Copied from adaptive scale.
    Is used to resize image for the display.
    **PARAMETERS**
    * *gdk* - A gdk module object (used for gdk.INTERP_BILINEAR).
    * *src* - The source pixbuf ( gtk.gdk.pixbuf )
    * *resolution* - The desired (width, height) resolution to scale to.
    **RETURNS**
    A resized pixbuf (or *src* unchanged when it already matches *resolution*).
    """
    srcWidth = src.get_width()
    srcHeight = src.get_height()
    srcSize = srcWidth,srcHeight
    # Aspect ratios of the target window and of the source image.
    wndwAR = float(resolution[0])/float(resolution[1])
    imgAR = float(srcWidth)/float(srcHeight)
    # Default target rectangle: the full window.
    targetx = 0
    targety = 0
    targetw = resolution[0]
    targeth = resolution[1]
    if( srcSize == resolution): # already at the target size -> no work needed
        return src
    elif( imgAR == wndwAR ):
        # Same aspect ratio: a plain scale fills the window exactly.
        # NOTE(review): wScale/hScale are computed but unused here.
        wScale = float(resolution[0])/srcWidth
        hScale = float(resolution[1])/srcHeight
        return src.scale_simple(resolution[0],resolution[1],gdk.INTERP_BILINEAR)
    else:
        #scale factors
        wscale = (float(srcWidth)/float(resolution[0]))
        hscale = (float(srcHeight)/float(resolution[1]))
        # Normalize both factors to "fractional change" form so they can be
        # compared regardless of whether we shrink or grow.
        if(wscale>1): #we're shrinking what is the percent reduction
            wscale=1-(1.0/wscale)
        else: # we need to grow the image by a percentage
            wscale = 1.0-wscale
        if(hscale>1):
            hscale=1-(1.0/hscale)
        else:
            hscale=1.0-hscale
        if( wscale == 0 ): #if we can get away with not scaling do that
            # Width already matches: center vertically, keep source size.
            targetx = 0
            targety = (resolution[1]-srcHeight)/2
            targetw = srcWidth
            targeth = srcHeight
        elif( hscale == 0 ): #if we can get away with not scaling do that
            # Height already matches: center horizontally, keep source size.
            targetx = (resolution[0]-srcWidth)/2
            targety = 0
            targetw = srcWidth
            targeth = srcHeight
        elif(wscale < hscale): # the width has less distortion
            # Fit to width first; fall back to fitting height if the result
            # would overflow the window.
            sfactor = float(resolution[0])/float(srcWidth)
            targetw = int(float(srcWidth)*sfactor)
            targeth = int(float(srcHeight)*sfactor)
            if( targetw > resolution[0] or targeth > resolution[1]):
                #aw shucks that still didn't work do the other way instead
                sfactor = float(resolution[1])/float(srcHeight)
                targetw = int(float(srcWidth)*sfactor)
                targeth = int(float(srcHeight)*sfactor)
                targetx = (resolution[0]-targetw)/2
                targety = 0
            else:
                targetx = 0
                targety = (resolution[1]-targeth)/2
        else: #the height has more distortion
            # Fit to height first; fall back to fitting width on overflow.
            sfactor = float(resolution[1])/float(srcHeight)
            targetw = int(float(srcWidth)*sfactor)
            targeth = int(float(srcHeight)*sfactor)
            if( targetw > resolution[0] or targeth > resolution[1]):
                #aw shucks that still didn't work do the other way instead
                sfactor = float(resolution[0])/float(srcWidth)
                targetw = int(float(srcWidth)*sfactor)
                targeth = int(float(srcHeight)*sfactor)
                targetx = 0
                targety = (resolution[1]-targeth)/2
            else:
                targetx = (resolution[0]-targetw)/2
    # NOTE(review): targetx/targety are computed but not used by
    # scale_simple; presumably a caller positions the pixbuf — confirm.
    return src.scale_simple(targetw,targeth,gdk.INTERP_BILINEAR)
|
e59c943334a8f16a5b1787f97c07b3ec7aaf7d32
| 18,100
|
def data_split(dataset, split_at=0.8, random_state=42):
    """Randomly partition *dataset* into (train, test) DataFrames.

    :param dataset: pandas DataFrame to split.
    :param split_at: fraction of rows that go to the train partition.
    :param random_state: seed for reproducible sampling.
    :return: (train, test) tuple of disjoint DataFrames.
    """
    train_part = dataset.sample(frac=split_at, random_state=random_state)
    test_part = dataset.drop(index=train_part.index)
    return train_part, test_part
|
153de855dcb2fa74b7d330a057d62850d9250e6c
| 18,101
|
def where2(value1, value2, ls, interval):
    """Find where value1 and value2 are located in the sorted list *ls*,
    given the (approximate) interval between neighboring elements.

    Returns (start, end) such that ls[start:end] is the slice of elements
    between value1 and value2 (value1 <= value2). If value1 > value2,
    returns (start, start), i.e. an empty slice. An empty list yields (0, 0).

    BUGFIX: index estimates used Python-3 true division (`/`), producing
    float indices and a TypeError in `ls[start]`; floor division (`//`)
    restores the intended integer arithmetic.
    """
    length = len(ls)
    start = 0
    end = 0
    # if empty array, just return 0, 0
    if length == 0:
        return start, end
    diff1 = int(value1 - ls[0])
    if diff1 >= 0:
        # Initial guess assuming evenly spaced elements.
        start = min(diff1 // interval, length - 1)
        # adjust 'start' to the first element >= value1
        while start > 0 and value1 < ls[start]:
            start -= 1
        while start < length and value1 > ls[start]:
            start += 1
    diff2 = int(value2 - value1)
    if diff2 >= 0:
        try:
            end = min(start + diff2 // interval, length - 1)
        except ZeroDivisionError:
            # Degenerate interval: fall back to step size 1.
            interval = 1
            end = min(start + diff2 // interval, length - 1)
    else:  # if value1 > value2, just return start, start
        return start, start
    # adjust 'end' past the last element <= value2
    while end > 0 and value2 < ls[end]:
        end -= 1
    while end < length and value2 >= ls[end]:
        end += 1
    end = min(length, end)
    return start, end
|
37fd1248f74e0ffc3a6194f0755f1075d745093f
| 18,102
|
import os
def coco_as_image_name(dataset, label):
    """Convert a COCO detection label to the image base name (no extension).

    Args:
        dataset: object exposing a COCO API handle at dataset.coco.
        label (list of dict): an image label in the COCO detection format.
    Returns:
        str: image name without extension, or None for an empty label.
    """
    if not label:
        return None
    image_info = dataset.coco.loadImgs(label[0]['image_id'])[0]
    name, _ext = os.path.splitext(image_info['file_name'])
    return name
|
c56dbb23cec5e7293111e52e6e1dfac3bd6657d2
| 18,103
|
import sqlite3
def create_connection(database_name):
    """Open and return a sqlite3 connection to *database_name*."""
    connection = sqlite3.connect(database_name)
    return connection
|
754d569fef76bc9b498efab176f33ed35fc60f6c
| 18,104
|
def decorate_table(table_text, convert_fun, d_cols=" & ", d_rows="\\\\\n"):
    """Transforms text of the table by applying converter function to each element of this table.
    :param table_text: (str) text of the table.
    :param convert_fun: (str => str) a function to be applied to each element of the table.
    :param d_cols: (str) delimiter between columns.
    :param d_rows: (str) delimiter between rows.
    :return: (str) text of the converted table.
    """
    def process_cell(s):
        # Coerce to str so convert_fun may return any printable type.
        return str(convert_fun(s))

    if d_cols not in table_text:
        return table_text  # delimiter was not present
    splitted = table_text.split(d_cols)
    new_text = ""
    for i, s in enumerate(splitted):
        if d_rows in s:
            two_elems = s.split(d_rows)
            decorated = process_cell(two_elems[0]) + d_rows
            if len(two_elems) > 1 and two_elems[1] != '':
                decorated += process_cell(two_elems[1])
        else:
            # BUGFIX: previously called convert_fun(s) directly, skipping the
            # str() coercion used everywhere else; a converter returning a
            # non-str (e.g. int) raised TypeError on concatenation.
            decorated = process_cell(s)
        new_text += decorated
        if i < len(splitted) - 1:
            new_text += d_cols
    return new_text
|
55788a8ffb853702b81b38dc446ca9951371f9c9
| 18,105
|
def _qualname(cls):
"""
Returns a fully qualified name of the class, to avoid name collisions.
"""
return u'.'.join([cls.__module__, cls.__name__])
|
530fb5a7e9231702850c4bc8be09e9ca7e9dd8f5
| 18,107
|
def normalize_index(header, method):
    """
    Return the index in *header* of the string obtained by joining
    method(header). Equivalent to:
        self.votes_index = self.header.index(''.join(votes(self.header)))
    """
    target = ''.join(method(header))
    return header.index(target)
|
76a099d0ed9913b1909082b26c60e2db25c97905
| 18,109
|
import sys
def type_hints_equal(hint1, hint2):
    """Returns True if the two type hints refer to the same runtime type.
    For example:
        type_hints_equal(typing.List, list) => True
    """
    assert hint1 is not None
    assert hint2 is not None
    if hint1 is hint2:
        return True
    # `or hintN` guards against a hint whose __origin__ is explicitly None.
    origin1 = getattr(hint1, '__origin__', hint1) or hint1
    origin2 = getattr(hint2, '__origin__', hint2) or hint2
    if origin1 is origin2:
        return True
    # On Python 3.6 the runtime class lives in __extra__ instead of __origin__.
    if (sys.version_info.major, sys.version_info.minor) == (3, 6):
        origin1 = getattr(hint1, '__extra__', hint1)
        origin2 = getattr(hint2, '__extra__', hint2)
    return origin1 is origin2
|
60c14b5563a73028ba5e7cb53db4e5b4184e98a5
| 18,110
|
def largeur_image(lst, largeur_bande):
    """
    Compute the width of an image composed of stripes of width
    *largeur_bande*, black or white as defined by *lst*
    (i.e. number of stripes times the stripe width).
    """
    return largeur_bande * len(lst)
|
ef42c5cb0a9d4f17c87e823bd4d6373d21095fae
| 18,111
|
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm (repeated remainder).

    Like the original, assumes b (after ordering) is non-zero; b == 0 raises
    ZeroDivisionError.
    """
    if a < b:
        a, b = b, a
    while a % b != 0:
        a, b = b, a % b
    return b
|
084f4972f1e6490ab0fb60df48641bf622b36288
| 18,114
|
def _check_name_should_break(name):
"""
Checks whether the passed `name` is type `str`.
Used inside of ``check_name`` to check whether the given variable is usable, so we should stop checking
other alternative cases.
Parameters
----------
name : `Any`
Returns
-------
should_break : `bool`
If non empty `str` is received returns `True`, meanwhile if `None` or empty `str` is received `False`.
Raises
------
TypeError
If `name` was not passed as `None` or type `str`.
"""
if (name is None):
return False
if type(name) is not str:
raise TypeError(f'`name` should be `None` or type `str`, got `{name.__class__.__name__}`.')
if name:
return True
return False
|
4e6981fa840b89bf69a1a0e6c6401b1e2387e17d
| 18,115
|
def close_mail_session(server):
    """
    Terminate the SMTP session on *server* and close the connection.
    Always returns True.
    """
    server.quit()
    return True
|
421bf144f2e499aea60dddf3274bd0b8e910d155
| 18,116
|
def my_permutation(nums: list) -> list:
    """Return all permutations of *nums* via depth-first backtracking.

    Args:
        nums (list)
    Returns:
        list of permutations (each a list), in DFS order.
    """
    permutations = []
    used = [False] * len(nums)

    def backtrack(current):
        if len(current) == len(nums):
            permutations.append(current)
            return
        for idx, value in enumerate(nums):
            if used[idx]:
                continue
            used[idx] = True
            backtrack(current + [value])
            used[idx] = False

    backtrack([])
    return permutations
|
cf080febe9474dc53d1937b9ba92c12515fb5be9
| 18,118
|
def normalize_ns(namespaces: str) -> str:
    """
    Normalizes url names by collapsing multiple `:` characters.
    :param namespaces: The namespace string to normalize
    :return: The normalized version of the url path name
    """
    # filter(None, ...) drops the empty segments produced by repeated ':'.
    return ':'.join(filter(None, namespaces.split(':')))
|
3be2e6cf1615d9610ec6695966c9c32f4b531ea3
| 18,119
|
def get_param_dict(df, column_name, cast_as_type):
    """
    Create a dictionary for the parameter to load into Pyomo.
    :param df: the project-params dataframe
    :param column_name: string, column name of the parameter to look for
    :param cast_as_type: the type for the parameter
    :return: dictionary, {project: param_value}
    """
    param_dict = {}
    for prj, param_val in zip(df["project"], df[column_name]):
        # "." marks an unspecified value: omit it so the Pyomo default
        # applies (or Pyomo errors if there is no default).
        if param_val != ".":
            param_dict[prj] = cast_as_type(param_val)
    return param_dict
|
b2b0dd2626e9cceb1dec18e14467178e42f1fd06
| 18,121
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.