content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def register_endpoints(app):
    """Register endpoints defined in openapi.yml
    Additionally register any flask endpoints / blueprints
    Parameters
    ----------
    app : Connexion
        A Connexion application instance.
    """
    app.add_api('openapi.yml', strict_validation=True)

    def health_check():
        """Pingable endpoint used to determine whether the service is running
        """
        return '', 200

    # Attach the health check to the underlying Flask app; equivalent to
    # decorating health_check with @app.app.route('/health/pricing').
    app.app.add_url_rule('/health/pricing', 'health_check', health_check)
|
5c7990be3bde5c7d99cbac1e34a98aa6df5761e3
| 24,991
|
def is_project_admin_context(context):
    """Check if this request has admin context with in the project.

    Parameters
    ----------
    context : object
        Request context carrying an ``is_admin`` flag.

    Returns
    -------
    bool
        True when the context is flagged as admin, False otherwise.
    """
    # bool() collapses the truthy/falsy flag into an explicit boolean,
    # replacing the if/return True/return False boilerplate.
    return bool(context.is_admin)
|
a2365f7a0d830cdbb3ca76347b5d152d42ce178e
| 24,992
|
def one_byte_xor(hex1, letter):
    """ Calculates hex1 XOR letter

    XORs every byte of ``hex1`` with the single-byte key ``letter``.
    """
    return bytes(byte ^ letter for byte in hex1)
|
8ca6c91a436ac64cd221856e29426339a6434888
| 24,995
|
from typing import Union
def _get_year_sample_size(cls, kind: str, year: Union[str, int], sample_size):
    """get sample size for year

    Scales the population size for (kind, year) by ``sample_size``
    (presumably a sampling fraction — TODO confirm), clips the result to a
    minimum of 1, and rounds to whole counts.

    NOTE(review): relies on ``cls._get_year_population_size`` defined
    elsewhere; the ``.clip(1).round()`` chain suggests the helper returns a
    pandas/numpy object — confirm against that definition.
    """
    return (cls._get_year_population_size(kind, year) * sample_size).clip(1).round()
|
1190be301c0b5fc1d8e93aaaeff7b1da47acc1e0
| 24,996
|
def stopword_ratio(text):
    """
    :type text: Text
    :param text: The text to be analysed
    :rtype: float
    :returns: Returns the ratio of stopwords in the text (0.0 for a text
        with no alphabetic tokens)
    """
    total = len(text.tokens_alphabetic)
    if total == 0:
        # Guard against ZeroDivisionError for texts without alphabetic tokens.
        return 0.0
    return (total - len(text.tokens_without_stopwords)) / total
|
c7932bac82636ee031b143661863b7e1ac05be4d
| 24,999
|
def get_last_timestamp(db):
    """Return the last update timestamp from the database"""
    row = db.execute('SELECT update_time FROM meta_slurm_lastupdate').fetchone()
    return row[0]
|
bce062d16b00e46c9ff5943e99329885fd1a0dc9
| 25,000
|
import subprocess
import json
import logging
def info(file):
    """Standard way of calling gdalinfo and returning a python dictionary of metadata"""
    command = ['gdalinfo', '-json', str(file)]
    completed = subprocess.run(command, stdout=subprocess.PIPE)
    try:
        return json.loads(completed.stdout)
    except Exception as exc:
        # Best-effort: log the failing command and fall back to empty metadata.
        logging.error(f'gdalinfo fail: {" ".join(command)}; {exc}')
        return {}
|
199e27ef713920e5c7a0242530201b3ba77ccf78
| 25,001
|
def line_parameter_form_from_points(a):
    """
    Parameters of line in parameter form: x = v + t * u, determined from points in a.
    Only the first and last element of a are used.
    """
    start = a[0]
    # Direction runs from the first point to the last.
    direction = a[-1] - start
    return direction, start
|
d8f6abcbacc9bca3feea8f556630a4499512b8b1
| 25,002
|
import re
def extract_replies(tweet):
    """Extract the ids of replied or cited tweets"""
    # Retweets are not replies; signal that with None.
    if "retweeted_status" in tweet:
        return None
    pattern = re.compile(r"\/[0-9]{4,}")
    joined = ' '.join(entry["expanded_url"] for entry in tweet["entities"]["urls"])
    return pattern.findall(joined)
|
aecd5a962cb864998b50562575594a02fb87287a
| 25,003
|
def get_key(dictionary, keys, i):
"""
Get a value from a dictionary
"""
if (len(keys) - 1) == i:
return dictionary[keys[i]]
else:
return get_key(dictionary[keys[i]], keys, i+1)
|
2d50946947771d0070f22e6db0789b49c547359c
| 25,004
|
def calc_db_cost_v3(db) -> float:
    """Returns a noise cost for given dB: every 10 dB increase doubles the cost (dB >= 45 & dB <= 75).
    """
    if db <= 44:
        return 0.0
    # 10 ** (0.3*dB / 10) doubles for each +10 dB (10**0.3 ≈ 2).
    raw_cost = 10 ** ((0.3 * db) / 10)
    return round(raw_cost / 100, 3)
|
bad2506da5cb53aa552294498122e70dd95f780f
| 25,005
|
def analyse_data(Sigma, loopNum=20):
    """Analyse how many leading singular values of Sigma to keep.
    Args:
        Sigma    the singular values
        loopNum  number of loop iterations
    Returns:
        Index whose cumulative squared energy first reaches 90% of the total.
    """
    # Squared singular values are the per-component "energy".
    energies = Sigma ** 2
    total_energy = sum(energies)
    kept_index = 0
    for idx in range(loopNum):
        partial = sum(energies[:idx])
        # Record the first index retaining at least 90% of the energy.
        if kept_index == 0 and (partial / total_energy * 100) >= 90:
            kept_index = idx
    return kept_index
|
0c822a59ea70d284f1dd4c9afb52e0fa762eab62
| 25,006
|
import inspect
import textwrap
def body(func_obj):
    """If a function A calls body(), the call returns the Python source code of
    the function definition body (not including the signature) of A.
    """
    if func_obj is None:
        return None
    source = inspect.getsource(func_obj)
    # Everything after the first ':' is the body (the signature ends there).
    body_text = source[source.find(":") + 1:]
    # The function may be defined in an indented scope (e.g. nested in
    # another function), so normalise the indentation before returning.
    return textwrap.dedent(body_text)
|
b4b013145f39ff7917761e374f0d0c478fc1e48f
| 25,007
|
def read_chrom_sizes(chrom_sizes):
    """Read chrom.sizes file.

    Parameters
    ----------
    chrom_sizes : str
        Path to a tab-separated file: chromosome name, then length.

    Returns
    -------
    dict
        Mapping of chromosome name -> length (int).
    """
    ret = {}
    # 'with' guarantees the file is closed even if a line fails to parse
    # (the previous explicit close() was skipped on exceptions).
    with open(chrom_sizes, "r") as f:
        for line in f:
            ld = line.rstrip().split("\t")
            ret[ld[0]] = int(ld[1])
    return ret
|
ccba3e56f006746d1e34953364d3f8a40fc70086
| 25,009
|
def set_domain_at_host(domains_host, i):
    """ Choose the right domain out of domains_host. If i < len(domains_host)
    then it is the i-th element otherwise it is the last element in the list.

    Parameters
    ----------
    domains_host : list or str
        Either a list of domains or a single domain.
    i : int
        Index of the desired domain; only advanced when a list is given.

    Returns
    -------
    tuple
        (selected domain, updated index)
    """
    # isinstance is the idiomatic type check (also accepts list subclasses).
    if isinstance(domains_host, list):
        j = i if i < len(domains_host) else len(domains_host) - 1
        domain_host_interactor = domains_host[j]
        i += 1
    else:
        domain_host_interactor = domains_host
    return domain_host_interactor, i
|
7369f9a5a3947532880c749e18e2f16a40f87fac
| 25,010
|
def serializeRegressor(tree):
    """ Convert a sklearn.tree.DecisionTreeRegressor into a JSON-compatible format """
    node_attrs = ['children_left', 'children_right', 'threshold', 'value',
                  'feature', 'impurity', 'weighted_n_node_samples']
    tree_attrs = ['n_classes_', 'n_features_', 'n_outputs_']
    # Per-node arrays come from the low-level tree_ structure; scalar
    # attributes live on the estimator itself.
    return {
        'nodes': {name: getattr(tree.tree_, name).tolist() for name in node_attrs},
        'tree': {name: getattr(tree, name) for name in tree_attrs},
        'n_leaves': len(tree.tree_.threshold),
        'params': tree.get_params(),
    }
|
d94f8cde0144cd842175480332a398def8e19ae8
| 25,012
|
def linear_probe(h, i, m):
    """
    Finds a possible next position using linear probing
    :param h: Computed hash value that has resulted in a collision
    :param i: Offset
    :param m: Size of the table
    :return: The next index to be checked if it is open
    """
    shifted = h + i
    # Wrap around the table size.
    return shifted % m
|
49ecb3ca389255b99bdde3e61bb503d3d517549b
| 25,013
|
def find_min_max(ls):
    """
    Question 12.8: Find the min and max of
    a list simultaneously
    """
    smallest = largest = ls[0]
    for value in ls[1:]:
        if value < smallest:
            smallest = value
        elif value > largest:
            largest = value
    return smallest, largest
|
09c66bc74d8668ab7e85ff3032f8d8d728c5ae5f
| 25,014
|
import os
def getpackagepath():
    """
    *Get the root path for this python package*
    *Used in unit testing code*

    Returns
    -------
    str
        Path of the package root (this module's directory + "/../").
    """
    # The previous implementation computed an unused moduleDirectory
    # variable; the package root is simply one level above this module.
    packagePath = os.path.dirname(__file__) + "/../"
    return packagePath
|
60cc33ab29fdc8667822475c328f6bdef4541be8
| 25,015
|
def categorize(contenttype: str):
    """Return 'cbor', 'json' or 'link-format' if the content type indicates it
    is that format itself or derived from it."""
    # Strip any parameters (e.g. '; charset=utf-8') before inspecting.
    media_type = contenttype.split(';')[0]
    subtype = media_type.partition('/')[2]
    if subtype == 'cbor' or subtype.endswith('+cbor'):
        return 'cbor'
    if subtype == 'json' or subtype.endswith('+json'):
        return 'json'
    return 'link-format' if media_type == 'application/link-format' else None
|
d065c9c3823bda424108d70a8e5b8d7dc70eab9b
| 25,016
|
def checkAstIntegrity(instruction):
    """
    This function check if all ASTs under an Instruction class are still
    available.

    Returns
    -------
    bool
        True when every AST stringifies without raising, False otherwise.
    """
    try:
        for se in instruction.getSymbolicExpressions():
            str(se.getAst())
        # Each accessor yields (key, ast-like) pairs; only the second
        # element needs to be stringified.
        for _, ast in instruction.getLoadAccess():
            str(ast)
        for _, ast in instruction.getStoreAccess():
            str(ast)
        for _, ast in instruction.getReadRegisters():
            str(ast)
        for _, ast in instruction.getWrittenRegisters():
            str(ast)
        for _, ast in instruction.getReadImmediates():
            str(ast)
        return True
    except Exception:
        # Narrowed from a bare 'except' so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return False
|
c51f6b155ce2812bc6f30f292cf89a028c187bbd
| 25,017
|
def transform_single_row_info(row, target_mappings):
    """transform the row info for a single row into a row mapped to target keys"""
    none_markers = ["None", "NA", "N/A", "?", "#VALUE!"]
    mapped_row = {}
    for target_key, mapping_options in target_mappings.items():
        if len(mapping_options) == 1 and mapping_options[0] in row:
            mapped_row[target_key] = row[mapping_options[0]].strip()
        elif len(mapping_options) > 1:
            # Walk the options from last to first so that the FIRST listed
            # option wins: it overwrites values from later-listed options.
            for option in reversed(mapping_options):
                if option in row and row[option].strip() not in none_markers:
                    mapped_row[target_key] = row[option].strip()
    # returns single dictionary with key-value pairs
    return mapped_row
|
98448faf0e634f369476e976f55541f8f6709e05
| 25,018
|
def colrows_to_xy(screen_size, cursor_position):
    """Convert cursor position to x, y pixel position
    Args:
        screen_size (tuple): The screen size (width, height)
        cursor_position (tuple): The cursor position (row, col)
    Returns:
        (tuple): The screen position in pixels (x, y)
    """
    # NOTE(review): element [0] drives x (8px cells) and [1] drives y
    # (10px cells) — confirm the documented (row, col) ordering with callers.
    pixel_x = 8 * (cursor_position[0] - 1)
    pixel_y = (screen_size[1] - 2) - cursor_position[1] * 10
    return (pixel_x, pixel_y)
|
63d3b9f7af789c613d2067fe0ceb671e5ed04465
| 25,019
|
def numCreator(a, b, c, d):
    """convert the random numbers into a 4 digit int

    Parameters are the thousands, hundreds, tens and units digits
    respectively; the result is their positional combination.
    """
    # Single expression replaces the running accumulator; int() keeps the
    # original truncation behaviour for non-integer inputs.
    return int(a * 1000 + b * 100 + c * 10 + d)
|
e7a41d1c4908fc46915fc028bd0646c90f0ee356
| 25,022
|
from typing import Optional
def irepeat(obj: object, cnt: Optional[int] = None) -> object:
    """
    Yield the object cnt times if specified or infinitely.
    Notes
    -----
    The Python itertools repeat class implementation.
    Parameters
    ----------
    obj : object
        to be repeated.
    cnt : Optional[int], optional
        the number of times counter. The default is None.
    Raises
    ------
    TypeError
        if cnt is neither None nor an integer.
    References
    ----------
    https://docs.python.org/3/library/itertools.html#itertools.repeat
    Yields
    ------
    object
    """
    # Validate eagerly so the TypeError fires at call time,
    # not on first iteration of the returned generator.
    if not (cnt is None or isinstance(cnt, int)):
        raise TypeError(f'cnt = {cnt} is not integer')

    def _generate():
        """Yield repeat generator."""
        if cnt is None:
            while True:
                yield obj
        else:
            for _ in range(cnt):
                yield obj

    return _generate()
|
7d0c3cefb763d099c75ebfd070368882004d1cf0
| 25,024
|
def max_pairwise_product(numbers):
    """
    max_pairwise_product gets the two biggest numbers and returns the product of them
    TimeComplexity: O(n)

    Parameters
    ----------
    numbers : iterable of numbers
        Expected to contain at least two elements.

    Returns
    -------
    Product of the two largest elements.
    """
    # -inf sentinels work for arbitrarily negative inputs, unlike the
    # previous hard-coded -9999999999999 sentinel which broke for values
    # below it.
    biggest = float('-inf')
    second_bigest = float('-inf')
    for ele in numbers:
        if ele > biggest:
            biggest, second_bigest = ele, biggest
        elif ele > second_bigest:
            second_bigest = ele
    return biggest * second_bigest
|
470a1400fc235de7c4a6eb459577f674717b6ced
| 25,025
|
def load_anns(coco):
    """
    Loading annotations
    """
    annotations = coco.loadAnns(coco.getAnnIds())
    # Ascending image-ID order keeps downstream processing deterministic.
    return sorted(annotations, key=lambda ann: ann['image_id'])
|
de71c8b871cee43bf45f99caa485ddbc88a95321
| 25,026
|
def task_sort():
    """list sorting (C)"""
    def list_sort(seq):
        # Reverse a copy, then sort in place (result is discarded).
        seq = seq[::-1]
        seq.sort()
    return list_sort, (list(range(1000)), )
|
2f2151e3d902760b5cc7f8c5b881d99937e1561a
| 25,028
|
from typing import List
import glob
def filter_filetype(filetype: str) -> List:
    """
    Filter files by the given file-type glob pattern.

    Args:
        filetype: Glob pattern to match (e.g. ``*.png``), resolved
            relative to the current working directory.
    Returns:
        List of matching paths (possibly empty).
    """
    return glob.glob(filetype)
|
45ea03ac4a148d2817df1c5fdea2969e395dfcaa
| 25,029
|
import json
def deserializeValue(v):
    """ Deserialize single value from JSON string format

    Raises
    ------
    ValueError
        If ``v`` is not valid JSON; the original parse error is chained
        so its position information is preserved.
    """
    try:
        return json.loads(v)
    except ValueError as exc:
        # 'from exc' keeps the original JSONDecodeError in the traceback.
        raise ValueError("No JSON object could be decoded from \"%s\"" % v) from exc
|
5bc26f42f7873030d9bf79a502c2604433df150f
| 25,030
|
def mean(numbers: list):
    """Average value
    """
    # max(..., 1) guards against division by zero for an empty list.
    divisor = max(len(numbers), 1)
    return float(sum(numbers)) / divisor
|
5f995531e1f0fd3ac76e6c337ca66d7f02241a8f
| 25,031
|
def hydrobasins_upstream_ids(fid, df):
    """Return a list of hydrobasins features located upstream.
    Parameters
    ----------
    fid : feature id
        HYBAS_ID of the downstream feature.
    df : pd.DataFrame
        Watershed attributes.
    Returns
    -------
    pd.Series
        Basins ids including `fid` and its upstream contributors.
    """
    def direct_upstream(basins, basin_id):
        return basins[basins['NEXT_DOWN'] == basin_id]['HYBAS_ID']

    # Locate the downstream feature and restrict the search to its main basin.
    downstream = df.set_index("HYBAS_ID").loc[fid]
    candidates = df[df['MAIN_BAS'] == downstream['MAIN_BAS']]
    # Breadth-first walk up the drainage network starting at `fid`.
    found = [fid, ]
    cursor = 0
    while cursor < len(found):
        contributors = direct_upstream(candidates, found[cursor])
        if len(contributors):
            found.extend(contributors)
        cursor += 1
    return candidates[candidates['HYBAS_ID'].isin(found)]
|
3fed2e9f5bad0d58bd21175750b6e733c930b952
| 25,032
|
import functools
def with_header(header=None):
    """Decorator that adds a section header if there's a any output
    The decorated function should yield output lines; if there are any the
    header gets added.
    """
    def wrap(func):
        @functools.wraps(func)
        def wrapped(cls, remaining_attrs):
            lines = list(func(cls, remaining_attrs))
            if not lines:
                return
            # Sphinx/ReST doesn't allow "-----" just anywhere :(
            yield u''
            yield u'.. raw:: html'
            yield u''
            yield u'   <hr>'
            yield u''
            if header:
                yield header + u':'
                yield u''
            yield from lines
        return wrapped
    return wrap
|
653e6a710acdca1b6ac28a6ed5ffb3ac60c84bc0
| 25,034
|
def local_import(name):
    """ Returns the module *name*.
    Attributes:
        name - the name of the module to be imported.
    Exception:
        TypeError - if *name* is not a string.
        ImportError - if there is no module *name* in your namespace.
    """
    # isinstance is the idiomatic type check (also accepts str subclasses).
    if not isinstance(name, str):
        raise TypeError("barbante.api.recommend: name must be a string.")
    try:
        mod = __import__(name)
    except ImportError:
        raise ImportError(
            "barbante.api.recommend: there is no module name {0}".format(name))
    # __import__ returns the top-level package; descend to the leaf module.
    components = name.split('.')
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod
|
99fecc3484d20bb26a3ce3b6003bff908bfcb3d4
| 25,035
|
import time
def timetst(nbsec=0):
    """ Adds test report
    :param nbsec: elapsed time before printing a message
    :return: timetst decorator """
    def decor(fct):
        """
        :param fct: function to decorate with time test
        :return: decorator """
        def decfct():
            """ :return: decorated function """
            report = "" + "\nStarted {}".format(time.ctime())
            started = time.time()
            retval = fct()
            elapsed = time.time() - started
            # Only report timing details once the threshold is reached.
            if elapsed >= nbsec:
                report += "\nelapsed time: {} s for function {}".format(elapsed, fct)
                report += "\naverage time by case: %.03f µs" % ((elapsed / retval) * 1000000)
                report += "\n--- TST END ---\n"
            return report
        return decfct
    return decor
|
c837f31e2a9ee7d89f573e8a2b11c3eb39163374
| 25,036
|
from typing import Optional
from typing import Any
from typing import Dict
def get(
    *,
    created_user_pool_id: Optional[str] = None,
    user_pool_arn: Optional[str] = None,
    **_: Any,
) -> Dict[str, Any]:
    """Retrieve the ID of the Cognito User Pool.
    The User Pool can either be supplied via an ARN or by being generated.
    If the user has supplied an ARN that utilize that, otherwise retrieve
    the generated id. Used in multiple pre_hooks for Auth@Edge.
    Args:
        user_pool_arn: The ARN of the supplied User pool.
        created_user_pool_id: The ID of the created Cognito User Pool.
    """
    # An explicitly supplied ARN wins over a generated pool id;
    # the pool id is the segment after the last '/' of the ARN.
    if user_pool_arn:
        pool_id = user_pool_arn.split("/")[-1]
    elif created_user_pool_id:
        pool_id = created_user_pool_id
    else:
        pool_id = ""
    return {"id": pool_id}
|
99388498208a824218ce8733f066576bd96260f5
| 25,037
|
from datetime import datetime
import random
def get_random_date():
    """Returns a date object between a time interval"""
    lower = datetime(2020, 1, 1)
    upper = datetime(2020, 10, 28)
    # Scale the interval by a uniform [0, 1) factor.
    span = upper - lower
    return lower + span * random.random()
|
18fa9acb3801b2f87915c5df99a74bca843dc09d
| 25,038
|
import os
import json
def get_version_data():
    """
    get_version_data loads previously live data from disk for examples with
    your sept application.
    Plus, not like I'm going to hard code my credentials or anything
    """
    data_path = os.path.join(os.path.dirname(__file__), "usage_data.json")
    with open(data_path, "r") as fh:
        # json.load reads the stream directly (same as loads(fh.read())).
        return json.load(fh)
|
b4fbb59246a2b74041f95adc22a33229039f7059
| 25,040
|
def convert(s, numRows):
    """Zigzag-convert *s* across *numRows* rows and read the rows in order.

    :type s: str
    :type numRows: int
    :rtype: str
    """
    ### find the patterns behind this problem , Time O(n)
    if not s or numRows == 1:
        return s
    # Characters of row i recur with period 2*numRows - 2; middle rows also
    # pick up one extra "diagonal" character per cycle.
    cycle = 2 * numRows - 2
    ss = ''
    for i in range(numRows):
        for j in range(i, len(s), cycle):
            ss += s[j]
            # First and last rows have no diagonal partner; dead `pass`
            # statements from the original were removed.
            if 0 < i < numRows - 1:
                sub_index = (j // cycle + 1) * cycle - j % cycle
                if sub_index < len(s):
                    ss += s[sub_index]
    return ss
|
8b47eb0e43a61514e9a51635cb5446d897f8df69
| 25,041
|
import csv
def readFile(file):
    """
    Read the columns from the input files with the chromosome
    number in it. This is column 1 and 4 in the bed file.
    It then return the right and left breakpoints.

    Returns
    -------
    tuple
        Column 1 values followed by column 4 values.
    """
    # Read the file once instead of twice; collect both columns in one pass.
    with open(file) as inf:
        rows = list(csv.reader(inf, delimiter="\t"))
    chrom_right = tuple(row[0] for row in rows)
    chrom_left = tuple(row[3] for row in rows)
    return chrom_right + chrom_left
|
afca0b83bf92c2fdbf1b2efd9ee866bc526de0ad
| 25,042
|
def cloudfront_referrer_policy(referrer_policy):
    """
    Property: ReferrerPolicy.ReferrerPolicy
    """
    valid_values = [
        "no-referrer",
        "no-referrer-when-downgrade",
        "origin",
        "origin-when-cross-origin",
        "same-origin",
        "strict-origin",
        "strict-origin-when-cross-origin",
        "unsafe-url",
    ]
    # Pass the value through unchanged when it is valid.
    if referrer_policy in valid_values:
        return referrer_policy
    raise ValueError('ReferrerPolicy must be of: "%s"' % (", ".join(valid_values)))
|
a5a82c03f1c3f7bbc1c019bff86c8564ede0d0b7
| 25,044
|
def _break_long_text(text, maximum_length=75):
"""
Breaks into lines of 75 character maximum length that are terminated by a backslash.
"""
def next_line(remaining_text):
# Returns a line and the remaining text
if '\n' in remaining_text and remaining_text.index('\n') < maximum_length:
i = remaining_text.index('\n')
return remaining_text[:i+1], remaining_text[i+2:]
elif len(remaining_text) > maximum_length and ' ' in remaining_text:
i = remaining_text[:maximum_length].rfind(' ')
return remaining_text[:i+1] + '\\\n', remaining_text[i+2:]
else:
return remaining_text, ''
remaining_text = text
lines = []
while remaining_text:
line, remaining_text = next_line(remaining_text)
lines += [line]
return ''.join(lines)
|
f4cc668158ea366488f3f3747052a4793b530201
| 25,045
|
from typing import Union
from pathlib import Path
def is_relative_to(absolute_path: Union[str, Path], *other) -> bool:
    """Return True if the given another path is relative to absolute path.
    Note:
        Adapted from Python 3.9
    """
    # Path.relative_to raises ValueError when the paths are unrelated.
    try:
        Path(absolute_path).relative_to(*other)
    except ValueError:
        return False
    return True
|
72076c54a024de3aa83d5355aaa1e04838f53f1e
| 25,046
|
import os
import random
def new_log_files(name, redirect_output):
    """Generate partially randomized filenames for log files.
    Args:
        name (str): descriptive string for this log file.
        redirect_output (bool): True if files should be generated for logging
            stdout and stderr and false if stdout and stderr should not be
            redirected.
    Returns:
        If redirect_output is true, this will return a tuple of two filehandles.
        The first is for redirecting stdout and the second is for redirecting
        stderr. If redirect_output is false, this will return a tuple of two None
        objects.
    """
    if not redirect_output:
        return None, None
    logs_dir = "/tmp/raylogs"
    if not os.path.exists(logs_dir):
        os.makedirs(logs_dir)
    # A random suffix keeps concurrent processes from clobbering each other.
    log_id = random.randint(0, 100000)
    stdout_path = "{}/{}-{:06d}.out".format(logs_dir, name, log_id)
    stderr_path = "{}/{}-{:06d}.err".format(logs_dir, name, log_id)
    return open(stdout_path, "a"), open(stderr_path, "a")
|
9519d39c201a64290e1ce341561c6753da2f69eb
| 25,047
|
import yaml
def get_config(config_file):
    """ Read CircleCI config YAMLs as dictionaries """
    with open(config_file) as fstream:
        try:
            loaded = yaml.safe_load(fstream)
        except yaml.YAMLError as err:
            # Best-effort: report the parse error and return None implicitly.
            print(err)
            return None
        return loaded
|
13005c11aab352ff96ef422d724ef91ec96074cb
| 25,048
|
from typing import Any
def is_iterable(obj: Any) -> bool:
    """
    Return whether an object is iterable or not.
    :param obj: Any object for check
    """
    # iter() raises TypeError for non-iterables; EAFP check.
    try:
        iter(obj)
        return True
    except TypeError:
        return False
|
4053ede1d7ac825ec0e9723892df9e5fe638e8b6
| 25,049
|
import os
def generate_base_output_dir(model_type, model_config, exp_tag):
    """
    standardise output directory
    :param model_type: model type name; containing 'Mandate' selects 'mandates'
    :param model_config: model configuration (unused; kept for API compatibility)
    :param exp_tag: experiment tag appended to the path
    :return: output directory (created if missing)
    """
    # Renamed from 'type', which shadowed the builtin.
    subdir = 'mandates' if 'Mandate' in model_type else 'wearing'
    out_path = os.path.join(
        "/mnt/sensitivity_analysis", "1_non_reopenings_full", subdir, exp_tag
    )
    # exist_ok avoids a race between the existence check and creation.
    os.makedirs(out_path, exist_ok=True)
    return out_path
|
6e0dcf556f4a23432b1e1c6287fe8f40a7ce00c1
| 25,050
|
def get_x_y(df):
    """
    Split the dataframe into the features and the target
    :param df: The data frame
    :return: X, y - The features and the target respectively
    """
    features = df.drop('isFraud', axis=1)
    target = df['isFraud']
    return features, target
|
902aba32c2ed36b31ac2e59a804a0ad009fb32d2
| 25,051
|
def get_child_schema(self):
    """Return the list of child keys associated with the parent key `docs`
    defined in `self.schema`.

    The upstream API returns an array of JSON objects under `docs`; each
    object may carry any of the keys listed below (e.g. year, title,
    mediatype, ...), so the return type is a list containing one list of
    those possible keys.

    Returns:
        [['year', 'title', 'description', 'mediatype', 'publicdate',
          'downloads', 'week', 'month', 'identifier', 'format',
          'collection', 'creator', 'score']]
    """
    child_keys = ['year', 'title', 'description', 'mediatype', 'publicdate',
                  'downloads', 'week', 'month', 'identifier', 'format',
                  'collection', 'creator', 'score']
    return [child_keys]
|
4d91ff18ab8e3f6cec5610169dc6d52e7a647960
| 25,052
|
def requirements():
    """
    Other modules required for kinship: base.
    """
    required_modules = ['base']
    return required_modules
|
89018df086070008e01ba768cdf6f5e7c3a294eb
| 25,053
|
from typing import Tuple
def calculate_broadcasted_elementwise_result_shape(
    first: Tuple[int, ...],
    second: Tuple[int, ...],
) -> Tuple[int, ...]:
    """Determine the return shape of a broadcasted elementwise operation."""
    # map over two iterables truncates to the shorter one, exactly as the
    # original zip-based generator did.
    return tuple(map(max, first, second))
|
5d565b2b5f38c84ab1f1573f4200fc65d6ae8e6a
| 25,055
|
import threading
def gamethread(func):
    """Decorator for functions that are Timer game threads.
    Thread removes self from registry of threads in module state."""
    def wrapper(*args, **kwargs):
        game_state = args[1]
        # Deregister the current thread before running the wrapped function;
        # pop with a default tolerates an already-removed entry.
        game_state['threads'].pop(threading.current_thread().ident, None)
        func(*args, **kwargs)
    # Manually propagate metadata (equivalent of functools.wraps).
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
|
88a9ef9812a91d0ab448bded4c557c9f20e52718
| 25,056
|
from typing import List
from typing import Dict
from typing import Any
from typing import Tuple
from typing import Optional
def prepare_outputs_for_categories(records: List[Dict[str, Any]]) -> \
        Tuple[List[Dict[str, Optional[Any]]], List[Dict[str, Optional[Any]]]]:
    """
    Prepares human readables and context output for 'bmc-remedy-category-details-get' command.
    :param records: List containing records of categories from rest API.
    :return: Tuple containing human-readable and context-ouputs.
    """
    hr_output = []
    outputs = []
    for record in records:
        record_id = record.get("Id")
        name = record.get("Name")
        children = record.get("BMCServiceDesk__children__c")
        # Same values, two key spellings: spaced for humans, camel for context.
        hr_output.append({"Id": record_id, "Name": name, "Children Count": children})
        outputs.append({"Id": record_id, "Name": name, "ChildrenCount": children})
    return hr_output, outputs
|
8215214d79f46666aec167ed2b99bf73663692eb
| 25,057
|
def findStreams(media, streamtype):
    """Find streams.
    Args:
        media (Show, Movie, Episode): A item where find streams
        streamtype (str): Possible options [movie, show, episode] # is this correct?
    Returns:
        list: of streams
    """
    return [stream
            for mediaitem in media
            for part in mediaitem.parts
            for stream in part.streams
            if stream.TYPE == streamtype]
|
4a959d24b9c5113b05e87aad6b5451d9d007969b
| 25,058
|
def tex(text, env=None):
    """Create TeX code for any object with a string representation

    Parameters
    ----------
    text : any
        Object to be converted. If the object has a ``.get_tex()`` method
        the result of this method is returned, otherwise ``str(text)``.
        (The previous docstring incorrectly referred to ``.get_html()``.)
    env : dict
        Environment for FMTXT compilation; ``env['math']`` defaults to
        False (the caller's dict is mutated, as before).
    """
    if hasattr(text, 'get_tex'):
        if env is None:
            env = {'math': False}
        else:
            # setdefault preserves the original in-place default behaviour.
            env.setdefault('math', False)
        return text.get_tex(env)
    return str(text)
|
34f3ed1365643ef89a1db0cdd71f28ea84ba9329
| 25,059
|
from copy import copy
def wrap_method(cls, method):
    """ Helper function to help wrap _restler* methods when more
    than on decorator is used on a model.

    If ``cls`` already defines an attribute with ``method``'s name, a
    temporary classmethod is installed that, on first call, re-binds the
    attribute to the new method and returns an aggregate of the ORIGINAL
    result and the new one (list -> set-union, dict -> merge, str -> the
    newest value wins). Otherwise ``method`` is attached directly.

    :param cls: class whose attribute is (re)assigned
    :param method: method to wrap (expected to be a classmethod object)
    """
    method_name = method.__func__.__name__
    # Keep a stable reference: the name `method` is rebound inside wrap().
    method_param = method
    if hasattr(cls, method_name):
        orig_cls_method = getattr(cls, method_name)
        @classmethod
        def wrap(cls_):
            # Replace this shim with the real method so later calls skip it.
            setattr(cls, method_name, method_param)
            method = getattr(cls, method_name)
            # Start from a copy of the original result, then fold in the new.
            aggregate = copy(orig_cls_method())
            if isinstance(aggregate, list): # _restler_types()
                aggregate = set(aggregate)
                aggregate.update(method())
                aggregate = list(aggregate)
            elif isinstance(aggregate, dict): # _restler_property_map
                aggregate.update(method())
            elif isinstance(aggregate, str):
                # Developer shouldn't really do this, but we'll try
                # to do the correct thing and use the most recently defined name
                aggregate = method() # _restler_serialization_name
            return aggregate
        setattr(cls, method_name, wrap)
    else:
        setattr(cls, method_name, method)
|
a888d1203ae3c04ce35db051749448eb490d8217
| 25,060
|
def get_raw_title(title):
    """get raw title: strip a trailing "_0" suffix if present"""
    return title[:-2] if title.endswith("_0") else title
|
fe05f90a0ffa42dc18bf71be48f30405aef39dfd
| 25,061
|
def get_relation_field(model, field_name, reverse=False):
    """Get the relation field with the given name from a model.
    Params:
        model       model class
        field_name  field name
        reverse     bool, whether reverse relation fields are included
    Returns:
        field object, or None when the field is missing, is not a
        relation, or is a reverse relation while reverse=False
    """
    try:
        field = model._meta.get_field(field_name)
        if not field.is_relation:
            return None
        if reverse or field.concrete:
            return field
    except Exception:
        # Missing field (or any meta lookup failure) yields None.
        return None
|
e00924180e4465223b436a8fccccb97da6ae2192
| 25,062
|
import math
def divide_into_subsets(list_of_element, subset_size):
    """
    Given a list of elements, divide into subsets. e.g. divide_into_subsets([1,2,3,4,5], subset_size=2) == [[1, 2], [3, 4], [5]]
    :param list_of_element:
    :param subset_size:
    :return:
    """
    # Stepped slicing replaces the generator/zip construction; the final
    # slice is naturally shorter when the length is not a multiple.
    return [list(list_of_element[start:start + subset_size])
            for start in range(0, len(list_of_element), subset_size)]
|
7014135efd2866e42be78def18c578eaef0e9a4e
| 25,063
|
from typing import Optional
from typing import Dict
def env_vars_for_test(
    sim: Optional[str], toplevel_lang: Optional[str], gpi_interface: Optional[str]
) -> Dict[str, str]:
    """Prepare the environment variables controlling the test run."""
    env: Dict[str, str] = {}
    if sim is not None:
        env["SIM"] = sim
    if toplevel_lang is not None:
        env["TOPLEVEL_LANG"] = toplevel_lang
    # Verilog toplevels only support the VPI interface.
    assert not (toplevel_lang == "verilog" and gpi_interface != "vpi")
    if toplevel_lang == "vhdl" and gpi_interface is not None:
        env["VHDL_GPI_INTERFACE"] = gpi_interface
    return env
|
daa2267edc779b21265fafe715e9be2b8c882c36
| 25,064
|
def prepare_target_name(i):
    """
    Input: {
      host_os_dict - host OS dict
      target_os_dict - target OS dict
      cus - custom meta
    }
    Output: {
      return - return code = 0, if successful
                > 0, if error
      (error) - error text if return > 0
      tool - resolved tool file name
    }
    """
    cus = i['cus']
    host_os = i['host_os_dict']
    target_os = i['target_os_dict']
    # Pick which OS supplies the platform-specific file name.
    if cus.get('soft_file_from_host_os', '') == 'yes':
        platform_name = host_os['ck_name']
    else:
        platform_name = target_os['ck_name']
    tool = cus.get('soft_file_universal', '')
    if tool == '':
        tool = cus.get('soft_file', {}).get(platform_name, '')
    # Specialize file extensions from the host OS
    # (for example, .dylib vs .so for MacOS).
    for ext_key, ext_value in host_os.get('file_extensions', {}).items():
        tool = tool.replace('$#file_ext_' + ext_key + '#$', ext_value)
    tool = tool.replace('$#sep#$', host_os.get('dir_sep', ''))
    return {'return': 0, 'tool': tool}
|
1132bcaac85b454324a531169777247e1af98e10
| 25,065
|
def parse_str(s):
    """Try to parse a string to int, then float, then bool, then leave alone"""
    # Try numeric conversions in order of strictness.
    for caster in (int, float):
        try:
            return caster(s)
        except ValueError:
            pass
    lowered = s.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    return s
|
251cd0bdbf47464a6a424e012ef22cbf0f38452f
| 25,066
|
def generate_lambda_key(domain):
    """Generate the S3 key name for the lambda's zip file.
    Args:
        domain (str): Use the domain as part of the key.
    Returns:
        (str)
    """
    return ''.join(['dynamodb_autoscale.', domain, '.zip'])
|
d8dcd6897c2773a1ba31df72c2e8336badc42e68
| 25,067
|
def remove_brace(value):
    """Remove braces (which indicate capital letters in latex).
    Args:
        value (str): string which have ``{``, ``}``.
    Returns:
        str (``{``, ``}`` is removed.)
    Examples:
        >>> val = "The {CNN}-based ..."
        >>> remove_brace(val)
        "The CNN-based ..."
    """
    # One C-level pass removes both brace characters.
    return str(value).translate(str.maketrans("", "", "{}"))
|
b4144fde9890128572a1f88caa26f6515f13b924
| 25,070
|
import subprocess
import re
def getpdfsize(fname):
    """ returns (size_x, size_y, number_of_pages)
    """
    try:
        out = subprocess.check_output(['pdfinfo', fname])
    except OSError:
        raise Exception('pdfinfo not found')
    text = out.decode()  # python 3 returns bytes
    size_match = re.search(r'Page size:\s+(\d+) x (\d+)', text)
    if not (size_match and len(size_match.groups()) == 2):
        raise Exception('cannot read "pdfinfo" output')
    width, height = int(size_match.group(1)), int(size_match.group(2))
    pages_match = re.search(r'Pages:\s+(\d+)', text)
    if not (pages_match and len(pages_match.groups()) == 1):
        raise Exception('cannot read "pdfinfo" output')
    return width, height, int(pages_match.group(1))
|
cf62ab3fb582084f8e677313306c1c16c97a772e
| 25,071
|
def generate_list_articles(bib):
    """Description of generate_list_articles
    From the bib file generates a ReadMe-styled table like:
    | [Name of the article](Link to the .pdf) | Code's link if available |
    """
    rows = []
    for entry in bib:
        # Entries without a title contribute nothing.
        if "title" not in entry:
            continue
        if "link" in entry:
            row = "| [" + entry["title"] + "](" + entry["link"] + ") | "
        else:
            row = "| " + entry["title"] + " | "
        code = entry.get("code")
        if code is None or "No" in code:
            row += "No "
        else:
            label = "[GitHub" if "github" in code else "[Website"
            row += label + "](" + code + ") "
        rows.append(row + "|\n")
    return ''.join(rows)
|
362439030116376687f6d00bce5963a6a6587309
| 25,073
|
from typing import Union
import torch
def cam_init2orig(cam, scale: Union[float, torch.Tensor], start_pt: torch.Tensor, N=224):
    """
    Args:
        cam (bs, 3): (s, tx, ty)
        scale (bs,): scale = resize_h / orig_h
        start_pt (bs, 2): (lt_x, lt_y)
        N (int): hmr_image_size (224) or IMG_SIZE
    Returns:
        cam_orig (bs, 3): (s, tx, ty), camera in original image coordinates.
    """
    s = cam[:, 0:1]
    t = cam[:, 1:]
    # Camera in the cropped-image coordinate frame.
    s_crop = N * s * 0.5
    t_crop = t + (2. / s) * 0.5
    # Map back to the original image coordinate frame.
    s_orig = s_crop / scale
    t_orig = t_crop + (start_pt - N) / s_crop
    return torch.cat([s_orig, t_orig], dim=1)
|
99edd5049b28cb09c479e9e1bef9c4c820bd2e12
| 25,074
|
def find_prefixed_labels(labels, prefix):
    """Util for filtering and cleaning labels that start with a given prefix.
    Given a list of labels, find only the specific labels with the given prefix.
    Args:
        prefix: String expected to be prefix of relevant labels
        labels: List of string labels
    Return:
        Filtered labels (i.e. all labels starting with prefix)
    """
    stripped = (label.strip() for label in labels)
    # len(label) > len(prefix) excludes labels that are exactly the prefix.
    return [label for label in stripped
            if label.startswith(prefix) and len(label) > len(prefix)]
|
49631b8cf257bad0e2c3ec1be4e2c48c5e49e963
| 25,075
|
def getAllNormalSamples():
    """Return sample IDs whose status is 'normal' in the ntStatus file,
    skipping a fixed set of known-bad samples.

    NOTE(review): the original docstring said "tumor and normal samples",
    but only rows with status == 'normal' are returned.
    """
    ignoreSamples = ('TARGET-30-PARKGJ-10A-02D',
                     'TARGET-50-PAKJGM-11A-01D',
                     'TARGET-10-PANYYV-10A-01D')
    normals = []
    with open('/mnt/isilon/diskin_lab/target_pe/target_meta/working/ntStatus') as status_file:
        for row in status_file:
            sample_id, status = row.strip('\n').split('\t')
            if status == 'normal' and sample_id not in ignoreSamples:
                normals.append(sample_id)
    return normals
|
d0760df7fcb67e90f82651c9bffc99e0d97de9e0
| 25,076
|
def rank(x):
    """
    Return the rank of ``x``.
    EXAMPLES:
    We compute the rank of a matrix::
        sage: M = MatrixSpace(QQ,3,3)
        sage: A = M([1,2,3,4,5,6,7,8,9])
        sage: rank(A)
        2
    We compute the rank of an elliptic curve::
        sage: E = EllipticCurve([0,0,1,-1,0])
        sage: rank(E)
        1
    """
    # Generic dispatcher: defers to the object's own ``rank`` method, so it
    # works for any object that defines one (matrices, elliptic curves, ...).
    return x.rank()
|
632ad787722e6039ae587c0711e4ded77f0e9cfc
| 25,078
|
import os
def get_firmware():
    """Detect the system firmware type (UEFI vs legacy BIOS) via sysfs.

    Returns
    -------
    tuple (efi, firmware):
        firmware is 'uefi' or 'bios'; efi is 'x64', 'x86', or None when the
        machine booted in legacy BIOS mode.
    """
    if os.path.isdir('/sys/firmware/efi/efivars'):
        firmware = 'uefi'
        # Use a context manager: the original called open() without ever
        # closing the file handle.
        with open('/sys/firmware/efi/fw_platform_size') as fw:
            efi = 'x64' if '64' in fw.read() else 'x86'
    else:
        efi = None
        firmware = 'bios'
    return efi, firmware
|
08f53c93c9dce0eb793e6129b1a633b06c7ee837
| 25,080
|
import re
def resolve_value(value):
    """
    Convert "1k" to 1 000, "1m" to 1 000 000, etc.

    Returns None for None input or anything that does not parse as a number
    with an optional k/m/b/t suffix (case-insensitive); returns a float when
    there is no suffix and an int otherwise.
    """
    if value is None:
        return None
    # 1e3 == 1000; the original used 10e3 (== 10000) and so inflated every
    # suffixed result by a factor of ten, contradicting the docstring.
    tens = dict(k=1e3, m=1e6, b=1e9, t=1e12)
    value = value.replace(',', '')
    match = re.match(r'(-?\d+\.?\d*)([kmbt]?)$', value, re.I)
    if not match:
        return None
    factor, exp = match.groups()
    if not exp:
        return float(factor)
    return int(float(factor) * tens[exp.lower()])
|
ef3880c532f143b7bc3493d6cc942529329e040a
| 25,082
|
def pkcs7_pad(inp, block_size):
    """
    Using the PKCS#7 padding scheme, pad <inp> to be a multiple of
    <block_size> bytes. Ruby's AES encryption pads with this scheme, but
    pycrypto doesn't support it.

    Note: val = block_size - len(inp) % block_size is always in the range
    1..block_size, so the original's `if val == 0` branch was unreachable;
    a full block of padding is already produced when len(inp) is an exact
    multiple of block_size.
    """
    val = block_size - len(inp) % block_size
    return inp + bytes([val]) * val
|
5a5aae6f588e5e67dc30c85ab6a6afcdb9c728c0
| 25,083
|
def stround(x, force_dec=-99):
    """Stringify round(x), picking the decimal count from x's magnitude
    unless force_dec overrides it (-99 is the "not forced" sentinel)."""
    if force_dec != -99:
        return str(round(x, force_dec))
    # Smaller values get more decimals so they stay informative.
    for limit, decimals in ((0.05, 4), (0.5, 3), (2, 2)):
        if x < limit:
            return str(round(x, decimals))
    return str(round(x, 0))
|
770c33215e0847e6a85b9f690b8a708c99c6a769
| 25,085
|
import re
def data_preprocessing(data):
    """Strip punctuation, newlines and common emoji from a comment string.

    :param data: raw comment text
    :return: cleaned, whitespace-stripped text
    """
    # The original character class began with a literal 'n' (deleting every
    # letter n from the text); the intent was clearly the newline escape \n.
    comment = re.sub('[\n\{\}\[\]\/?,.;:|\)*~`!^\-_+<>@\#$%&\\\=\(\'\"]', '', data).strip()
    emoticon_pattern = re.compile("["
                                  u"\U0001F600-\U0001F64F"
                                  u"\U0001F300-\U0001F5FF"
                                  u"\U0001F680-\U0001F6FF"
                                  u"\U0001F1E0-\U0001F1FF"
                                  "]+", flags=re.UNICODE)
    comment = emoticon_pattern.sub(r'', comment)
    return comment.strip()
|
1dbeb43661d658f20e87195226db2c4bf149df4f
| 25,086
|
import torch
def dr_transformation_cate(
    y: torch.Tensor,
    w: torch.Tensor,
    p: torch.Tensor,
    mu_0: torch.Tensor,
    mu_1: torch.Tensor,
) -> torch.Tensor:
    """Compute the doubly-robust (AIPW/EIF) pseudo-outcome for CATE estimation.

    Parameters
    ----------
    y : torch.Tensor of shape (n_samples,)
        The observed outcome variable.
    w : torch.Tensor of shape (n_samples,)
        The observed binary treatment indicator.
    p : torch.Tensor of shape (n_samples,) or None
        Treatment propensity, estimated or known. When None, p = 0.5 is
        assumed for every sample.
    mu_0 : torch.Tensor of shape (n_samples,)
        Estimated or known potential-outcome mean of the control group.
    mu_1 : torch.Tensor of shape (n_samples,)
        Estimated or known potential-outcome mean of the treatment group.

    Returns
    -------
    torch.Tensor
        The EIF transformation for the CATE.
    """
    EPS = 1e-7  # guards the divisions against a propensity of exactly 0 or 1
    if p is None:
        # No propensity supplied: assume equal assignment probability.
        p = torch.full(y.shape, 0.5)
    inv_p_treated = w / (p + EPS)
    inv_p_control = (1 - w) / (EPS + 1 - p)
    ipw_term = (inv_p_treated - inv_p_control) * y
    regression_term = (1 - inv_p_treated) * mu_1 - (1 - inv_p_control) * mu_0
    return ipw_term + regression_term
|
7bcff5f42f5aa664fd0250dd9faba49889621205
| 25,087
|
import os
def url_to_filename(url, directory=None):
    """Return the last path component of C{url}.

    @param url: The URL to take the filename from.
    @param directory: Optional path to prepend to the returned filename.
    @note: Any trailing slash in C{url} is removed first.
    """
    name = url.rstrip("/").rsplit("/", 1)[-1]
    if directory is None:
        return name
    return os.path.join(directory, name)
|
2e25b521a417b95c9b1a81497f1a478817122e6b
| 25,088
|
from typing import List
import sys
def get_all_names() -> List[str]:
    """Collect participant names interactively from the terminal.

    Reads one name per line until EOF (C-d), then asks for confirmation;
    any other interruption (e.g. C-c) exits the program.
    """
    names: List[str] = []
    print("\nEnter all participants one by one (press C-D to stop)")
    while True:
        try:
            names.append(input(">> "))
        except EOFError:
            print(names)
            print("\nAre all participants here (y/n) ?", end=" : ")
            ans = input("")
            if ans in ('y', "yes"):
                return names
            print("Continue then\n")
        except :
            # bare except on purpose: also catches KeyboardInterrupt
            print("Bye", file=sys.stderr)
            sys.exit(1)
|
c63938451f3abfb83ec87714905a95963ef15be1
| 25,089
|
def build_category_filters():
    """Build SQL WHERE-clause fragments keyed by human-readable category name
    ("All 1v1", "Arabia team", ...)."""
    category_filters = {}
    for name, condition in (("1v1", "= 1"), ("2v2", "= 2"), ("team", "> 1")):
        base = "AND game_type = 0 AND team_size {}".format(condition)
        category_filters["All {}".format(name)] = base
        category_filters["Arabia {}".format(name)] = base + " AND map_type = 9"
        category_filters["Arena {}".format(name)] = base + " AND map_type = 29"
        category_filters["Others {}".format(name)] = base + " AND map_type NOT IN (9,29)"
    return category_filters
|
d5ef2baadefd5e981a9ba1ca8a33a2ce8cecae92
| 25,090
|
import bisect
def sub_interval(start, end, array):
    """Find the slice bounds of *array* values lying in [start, end].

    Args:
        start (int): left edge of the interval
        end (int): right edge of the interval
        array (list): sorted array

    Returns:
        (lo, hi) such that array[lo:hi] are the values in [start, end].

    Raises:
        ValueError: when start is beyond the last element, or end is before
            the first one.

    >>> array = [0, 1, 3, 4, 5, 6, 8]
    >>> lo, hi = sub_interval(2, 5, array)
    >>> array[lo:hi]
    [3, 4, 5]
    """
    lo = bisect.bisect_left(array, start)
    if lo == len(array):
        # every element is < start
        raise ValueError
    hi = bisect.bisect_right(array, end)
    if not hi:
        # every element is > end
        raise ValueError
    return (lo, hi)
|
41cfd1f0e94163ea11859581c565d12e001c1642
| 25,091
|
def create_adjusted_coefficients(targets):
    """
    Create a dictionary of "adjusted-coefficient" elements (as XML text) for
    the given (year, coefficient) targets, where the key is the year and the
    value is the text for all elements starting at that year.
    """
    template = '<adjusted-coefficient year="{year}">{coefficient}</adjusted-coefficient>\n'
    xml_dict = {}
    accumulated = ''
    # Walk the targets from the latest year backwards so each year's entry
    # accumulates the elements of every later year.
    for year, coefficient in sorted(targets, key=lambda tup: tup[0], reverse=True):
        accumulated = template.format(year=year, coefficient=coefficient) + accumulated
        xml_dict[year] = accumulated
    return xml_dict
|
89f6ba9d6a1a1977ac4a175b28f8db652ba9ae37
| 25,093
|
def link_to_url(link):
    """Return the URL for *link*, re-attaching its fragment when present.

    A fragment that is empty or exactly '#' is ignored; e.g. a link with
    url "http://example.com/?foo=bar" and fragment "id1" yields
    "http://example.com/?foo=bar#id1".
    """
    fragment = link.fragment
    if not fragment or fragment == '#':
        return link.url
    return "#".join([link.url, fragment])
|
6a6bf4a1f33748175ac7a95899c1fcffcf9751b0
| 25,095
|
import subprocess
def turn_off(logger) -> bool:
    """Turn off the mumble client by killing its tmux session.

    :param logger: For logging purposes.
    :return: True if the kill command could be launched, else False.
    """
    logger.info("Turning off rpi_in Mumble CLI client...")
    proc = None
    try:
        proc = subprocess.run("tmux kill-sess -t intercom", shell=True)
    except Exception:
        logger.exception("ERROR: unable to turn off Mumble client")
    if proc is not None:
        logger.info("Mumble client OFF.")
    return proc is not None
|
b36a5d1bef8a2737e7f3baa862c651240e68af45
| 25,097
|
import torch
def to_onehot(values, max):
    """One-hot encode a batch of class indices.

    Args:
        values: integer tensor with one index per row; assumed shape
            (batch_size, 1) so it can serve as a scatter_ index — TODO confirm
            caller contract.
        max: number of classes (width of the one-hot dimension).

    Returns:
        Float tensor of shape (batch_size, max) with a single 1 per row.
    """
    onehot = torch.zeros(values.size(0), max).type_as(values)
    onehot.scatter_(1, values, 1)
    return onehot.to(torch.float)
|
49f177d265a4dc512da95e834c3c296dc5cc93e0
| 25,098
|
def results_dd_max_percentage(results_list, collection_dictionary, max_percentage):
    """Find the maxima of the Density Distribution above a percentage cutoff.

    Returns the IDs of the local maxima whose score is at least
    *max_percentage* percent of the global maximum, so insignificant local
    peaks are filtered out.

    :param results_list: list of (id, score) results from the Density
        Distribution algorithm.
    :param collection_dictionary: passage dictionary generated during parsing;
        indexed by integer id, each entry's [1] slot holds the score.
    :param max_percentage: percentage of the global maximum used as cutoff.
    :returns: list of maximum-point IDs above the cutoff (empty if none).
    """
    # Write each result score into the passage dictionary.
    for word_id, score in results_list:
        collection_dictionary[word_id][1] = score

    n = len(collection_dictionary)
    peak_ids = []
    peak_scores = []
    for idx in range(n):
        current = collection_dictionary[idx][1]
        # Neighbours, treating out-of-range positions as score 0.
        if idx == 0:
            past = 0
            following = collection_dictionary[idx + 1][1]
        elif idx == n - 1:
            past = collection_dictionary[idx - 1][1]
            following = 0
        else:
            past = collection_dictionary[idx - 1][1]
            following = collection_dictionary[idx + 1][1]
        # Walk past any plateau of equal scores to find the next distinct
        # value (stops at the end of the collection).
        probe = idx + 1
        while current == following:
            probe += 1
            if probe >= n:
                break
            following = collection_dictionary[probe][1]
        # Local maximum: strictly above the previous value, and not below
        # the next distinct one.
        if past < current and following <= current:
            peak_ids.append(idx)
            peak_scores.append(current)

    if not peak_ids:
        return []
    threshold = max(peak_scores) * max_percentage * 0.01
    return [pid for pid, score in zip(peak_ids, peak_scores) if score >= threshold]
|
969df7dce4229fba36c20a0e8332665fcff37d07
| 25,100
|
def averager():
    """Return a stateful function holding a running average.

    Each call with a numeric argument folds it into the average and returns
    the new average; a call with no argument returns the current average
    without changing it.
    """
    avg = 0
    called = 0
    def avg_counter(num=None):
        nonlocal avg, called
        # Test `is not None` instead of truthiness: the original `if num:`
        # silently ignored a legitimate value of 0.
        if num is not None:
            avg = avg * called + num
            called += 1
            avg /= called
        return avg
    return avg_counter
|
5f5f051db1615d2807d651317a5ee34d471d9274
| 25,101
|
def getDMDir(strPathScene):
    """Return the directory holding the cloud-mask images for a scene.

    Mask bsqs in this directory are named DM_yyyy.mm.dd_X.bsq.
    """
    return '{}{}'.format(strPathScene, 'envi_aux\\Images')
|
f6b5c666d3dbcc15891cf7a131f687f8f56c0897
| 25,103
|
def get_sub_frame(df_in, cond_dict):
    """Return the rows of df_in where every column named in cond_dict equals
    its associated value."""
    result = df_in
    for column, value in cond_dict.items():
        result = result[result[column] == value]
    return result
|
1a8af8ac9f5e19d6f177f2848010ad3144bce386
| 25,104
|
def zeroPrepender(source, length):
    """Left-pad str(source) with zeros up to *length* characters.

    Returns None when source is empty/None (0 itself is allowed) or when
    length is falsy. Note: any sign is padded over, e.g. (-5, 4) -> '00-5',
    which is exactly what str.rjust with '0' produces.
    """
    if (not source and source != 0) or not length:
        return None
    return str(source).rjust(length, '0')
|
ce9fc94e4b745f5782af818dcd7c66789857a342
| 25,105
|
def check_radio_bulk_go(radiourls, radioversion):
    """
    Prompt for a new radio version, substitute it into the URLs, and return
    the updated URLs together with the new version.

    :param radiourls: Radio URLs to check.
    :type radiourls: list(str)

    :param radioversion: Radio version.
    :type radioversion: str
    """
    replacement = input("RADIO VERSION: ")
    updated_urls = [url.replace(radioversion, replacement) for url in radiourls]
    return updated_urls, replacement
|
27b9d9c932908b918d2b06cf5d191dc66f22c057
| 25,106
|
def bounds_elementwise(lst):
    """Given a non-empty list, return (mins, maxes), each of which is the
    same length as the list items.

    >>> bounds_elementwise([[0,6,0], [5,0,7]])
    ([0,0,0], [5,6,7])
    """
    width = len(lst[0])
    mins = [min(row[i] for row in lst) for i in range(width)]
    maxes = [max(row[i] for row in lst) for i in range(width)]
    return (mins, maxes)
|
5fa4fbe75db310d971005c88fc6d04058d3cd998
| 25,108
|
import inspect
def _pred(aclass):
"""
:param aclass
:return: boolean
"""
isaclass = inspect.isclass(aclass)
return isaclass and aclass.__module__ == _pred.__module__
|
98693f3706d86d1f801857848632f7f6f439dc04
| 25,109
|
import os
from pathlib import Path
def get_mpivars_path():
    """Return the path of the first existing mpivars.bat script.

    Looks under every root listed in the I_MPI_ROOT environment variable.

    Raises:
        EnvironmentError: when I_MPI_ROOT is unset, or no candidate script
            exists on disk.
    """
    env = os.environ.copy()
    if 'I_MPI_ROOT' not in env:
        raise EnvironmentError('I_MPI_ROOT not found in the system environment')
    candidates = [
        Path(root) / 'intel64' / 'bin' / 'mpivars.bat'
        for root in env['I_MPI_ROOT'].split(os.pathsep)
    ]
    existing = [candidate for candidate in candidates if candidate.exists()]
    if not existing:
        raise EnvironmentError(f'Could not found neither {", ".join(str(p) for p in candidates)}')
    return existing[0]
|
707a7ab2284f2226f9644b27d4885717438f9228
| 25,110
|
def sort_list(list):
    """Sort the values of each row of a nested list, returning a new list.

    NOTE(review): the parameter shadows the builtin ``list``; it is kept
    as-is so keyword callers are not broken.
    """
    return [sorted(inner) for inner in list]
|
a6301b2aa21d2355c3336ebb86aabff87262d98d
| 25,111
|
def get_remome_centre_sequence_genes(r):
    """Returns the set of ensembl gene ids of r's aligned centre sequences."""
    return {aligned.centre_sequence.gene_id for aligned in r.get_aligned_sequences()}
|
848f52ce641ea48467f862c850b7974268dd91e6
| 25,113
|
from typing import Counter
def get_probs(occurrences):
    """Compute conditional probabilities from co-occurrence counts.

    Parameters
    ----------
    occurrences: occurrences[x][y] = number of times (X=x and Y=y)

    Returns
    -------
    probs : probs[x][y] = Pr(Y=y | X=x)
    reverse_probs : reverse_probs[y][x] = Pr(X=x | Y=y)
    """
    probs = {}
    y_totals = Counter()
    for x, ys in occurrences.items():
        x_total = sum(ys.values())
        probs[x] = {y: count / x_total for y, count in ys.items()}
        for y, count in ys.items():
            y_totals[y] += count
    reverse_probs = {}
    for x, ys in occurrences.items():
        for y, count in ys.items():
            reverse_probs.setdefault(y, {})[x] = count / y_totals[y]
    return probs, reverse_probs
|
2e4dc69646a1800496bd5366bde1be78c3d18061
| 25,115
|
import requests
def check_corp_network():
    """
    check_corp_network()
    Check to see if machine is on the corp net
    """
    # NOTE: update internal.com to a real internal website/ip
    response = requests.get("http://internal.com")
    return response.status_code == requests.codes.ok
|
2f74672a157774506823f0fc97800445b2db57dd
| 25,117
|
import os
def getDirsMkvs(sourceFile):
    """
    Return the sorted list of subdirectories of the current directory
    that contain a file named 'sourceFile'.
    """
    matches = [
        entry for entry in os.listdir('.')
        if os.path.isdir(entry) and os.path.isfile(os.path.join(entry, sourceFile))
    ]
    matches.sort()
    return matches
|
2eee2df04045a590e4c0da62921909a533687fb1
| 25,120
|
def remove_hex(text):
    """Replace each literal backslash-hex escape (e.g. the 4 characters
    "\\xe3") in *text* with a single space.

    Example:
        "\\xe3\\x80\\x90Hello" -> "   Hello"
    """
    res = []
    i = 0
    n = len(text)
    while i < n:
        if text[i] == "\\" and i + 1 < n and text[i + 1] == "x":
            # A literal "\xNN" sequence spans 4 characters; the original
            # advanced only 3, leaving the final hex digit in the output.
            i += 4
            res.append(" ")
        else:
            res.append(text[i])
            i += 1
    return "".join(res)
|
9c4c463100b753862fc4a52d716b23e4365dfed3
| 25,121
|
import os
def get_all_folders_in_directory(directory: str):
    """Return the paths of all folders directly inside *directory*.

    Args:
        directory (str): the directory path

    Returns:
        list: "<directory>/<name>" for every entry of the directory that is
        itself a folder (files are skipped). Order follows os.listdir.
    """
    # The original was littered with commented-out debug prints and stray
    # `...` statements; this is the same behavior without the noise.
    return [
        directory + "/" + entry
        for entry in os.listdir(directory)
        if os.path.isdir(directory + "/" + entry)
    ]
|
4b37dece142543244fd18c28778b533ffd3c43ed
| 25,122
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.