content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
|---|---|---|
from subprocess import call
def has_kerberos_ticket():
    """Checks to see if the user has a valid Kerberos ticket"""
    # `klist -s` exits with status 0 when a valid ticket is present
    return call(["klist", "-s"]) == 0
|
bd52d47967b7088f446dff73f3c748348f268834
| 25,123
|
def get_countN(x,n):
"""Count the number of nucleotide n in the string."""
return x.upper().count(n.upper())
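# Illustrative check -- counting is case-insensitive on both arguments:
assert get_countN("ACGTacgt", "g") == 2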
|
7e5577dcf2a5666f77a915dd943cd5f59e2bd260
| 25,124
|
import uuid
import requests
def BingTranslate(api_key, filtered_sent, language_from, language_to):
"""Bing Microsoft translator
If you encounter any issues with the base_url or path, make sure
that you are using the latest endpoint:
https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate
Arguments:
api_key = Bing Microsoft Translate API key
filtered_sent = dictionary of original sentence to list of filtered sentences
language_from = Source language code
language_to = Target language code
returns translation dictionary from source sentence to target sentence
"""
base_url = 'https://api.cognitive.microsofttranslator.com'
path = '/translate?api-version=3.0'
params = '&language='+ language_from +'&to=' + language_to
constructed_url = base_url + path + params
headers = {
'Ocp-Apim-Subscription-Key': api_key,
'Content-type': 'application/json',
'X-ClientTraceId': str(uuid.uuid4())
}
text = []
    for or_sent, filtered_sents in filtered_sent.items():
text.append(or_sent)
text.extend(filtered_sents)
body = [{'text': x} for x in text]
# You can pass more than one object in body.
request = requests.post(constructed_url, headers=headers, json=body)
response = request.json()
translation_dic = {}
for i in range(len(text)):
        translation_dic[text[i]] = response[i]["translations"][0]["text"].replace('\u2019', "'").replace('"', "'")
return translation_dic
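# Illustrative call (hypothetical key and sentences; performs a real HTTP request):
# translations = BingTranslate('my-api-key', {'Hello world': ['Hello there']}, 'en', 'de')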
|
22585db5ccff0b27037a23984440a4eb868343c9
| 25,125
|
def default_get_id():
"""Default get_id"""
return None
|
4a8b047cd5396286f344f66a5c0c02149b45facd
| 25,126
|
def growth_func(z, Om, Ol, Ok=0):
"""
    The linear growth function, returned normalized as D(z)/D(0).
    .. math::
        P(k,z) = 2\\pi^{2} \\cdot \\mathrm{norm\\_z0} \\cdot \\mathrm{pfunc} \\cdot D(z)^{2}
"""
D0 = 2.5*Om / (Om**(4./7) - Ol + (1 + 0.5*Om)*(1 + Ol/70.))
zp1 = 1 + z
zp1_2 = zp1**2
zp1_3 = zp1**3
temp = Ol + Ok*zp1_2 + Om*zp1_3
omegaz = Om * zp1_3 / temp
omegalz = Ol / temp
temp = omegaz**(4./7) - omegalz + (1 + 0.5*omegaz)*(1 + omegalz/70.)
Dz = 1./zp1 * 2.5 * omegaz / temp
return Dz / D0
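# Sanity check: for a flat cosmology (Om + Ol = 1, Ok = 0) the function is
# normalized so that growth_func(0, Om, Ol) equals 1 by construction.
assert abs(growth_func(0.0, 0.3, 0.7) - 1.0) < 1e-12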
|
35d1c22cbdba53306a770b381c52b4b45a0da203
| 25,127
|
import hashlib
def fileSHA(filepath):
    """ Compute the SHA-256 (Secure Hash Algorithm) digest of a file.
    Input : filepath : full path and name of file (eg. 'c:\\windows\\emm386.exe')
    Output : string : contains the hexadecimal representation of the SHA-256 of the file.
    returns '0' if file could not be read (file not found, no read rights...)
    """
    try:
        digest = hashlib.sha256()
        # read in 64 KiB chunks so large files do not exhaust memory
        with open(filepath, 'rb') as file:
            data = file.read(65536)
            while len(data) != 0:
                digest.update(data)
                data = file.read(65536)
    except OSError:
        return '0'
    return digest.hexdigest()
|
98bddf8ef32c769b77dde704838c188e2b02ad49
| 25,128
|
import pickle
def load_pickle(path):
"""
Load a WordGraph object from a pickle file.
:param path: path to pickled WordGraph object
:return:
"""
    with open(path, 'rb') as input_file:  # avoid shadowing the built-in `input`
        # G = wg.WordGraph()
        return pickle.load(input_file)
|
3108097ee85f9947606c6b74bfe0d5ba12cea517
| 25,129
|
import yaml
def yaml_config_file_provider(handle, cmd_name): # pylint: disable=unused-argument
"""Read yaml config file from file handle."""
return yaml.safe_load(handle)
|
e95295a0413290957d7b319b73876209ba11b5c6
| 25,130
|
def Command(*_args, **_kw):
"""Fake Command"""
return ["fake"]
|
a3d435534a045fe1b08eaffc7327065492b07026
| 25,132
|
def from_internal_instsp(instsp):
"""Convert instantiation pair consisting of assignments to internal
variables to assignments on normal variables.
"""
tyinst, inst = instsp
tyinst2 = {nm[1:]: T for nm, T in tyinst.items()}
inst2 = {nm[1:]: t for nm, t in inst.items()}
return tyinst2, inst2
|
50080464fbe3f4d8e5205e17394bdc5578ae6c8f
| 25,133
|
def argmin(l) -> int:
"""
From a previous AoC.
>>> argmin([9,9,9,1,9])
3
"""
mini = 0
for i, e in enumerate(l):
mini = i if e < l[mini] else mini
return mini
|
4b8228dd94b57fc03a01865f2e67a5f17e997c5a
| 25,134
|
from typing import List
def relationship_headers() -> List[str]:
"""
Produce headers for a relationship file.
"""
return ["from", "to", "relationship"]
|
7fdaa7d14f0fd7b8a2a01fd808d69aac6d41d9ca
| 25,135
|
def common_inventory_layout():
"""Get common inventory layour for testing FreeIPA project."""
return {
"all": {
"children": {
"ipa": {"children": {"ipaserver": {}, "ipaclient": {}}},
"linux": {"children": {"ipa": {}, "sssd": {}}},
"windows": {"children": {"ad": {}, "client": {}}},
}
}
}
|
a4ce0cfe1a369ccceeb9eebe46cc671ccf788600
| 25,136
|
import os
def get_database_version(path_to_db):
"""
Returns the version number of the database
"""
DATABASE_DIR = os.path.join(path_to_db, "database")
if not os.path.exists(DATABASE_DIR):
return "Missing"
    with open(os.path.join(DATABASE_DIR, "version.txt")) as f:
return str(f.read().split()[0])
|
64b8d2f15c9521129819de741632593fe3a47e63
| 25,137
|
def signe(a):
""" return 0 if a<0 , 1 else"""
if a < 0:
return 0
else:
return 1
|
71d0891f5dfb1d6d4ea3b8a9ad802e976de6e742
| 25,140
|
import os
import requests
import logging
import time
def flushTileCache(layer_id):
"""
This function will delete the layer cache built for a GEE tiler layer.
"""
apiUrl = 'http://api.resourcewatch.org/v1/layer/{}/expire-cache'.format(layer_id)
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
try_num=1
tries=4
while try_num<tries:
try:
r = requests.delete(url = apiUrl, headers = headers, timeout=1000)
if r.ok or r.status_code==504:
logging.info('[Cache tiles deleted] for {}: status code {}'.format(layer_id, r.status_code))
return r.status_code
else:
if try_num < (tries-1):
logging.info('Cache failed to flush: status code {}'.format(r.status_code))
time.sleep(60)
logging.info('Trying again.')
else:
logging.error('Cache failed to flush: status code {}'.format(r.status_code))
logging.error('Aborting.')
        except Exception as e:
            logging.error('Failed: {}'.format(e))
        # increment outside the try/except so a raised exception cannot
        # cause an infinite retry loop
        try_num += 1
|
491e131681e936b8d8f4364133ff8af0c87367c6
| 25,141
|
import aiohttp
async def list_packages():
"""
Get a list of package names
"""
# Don't provide endpoint with credentials!
async with aiohttp.ClientSession() as session:
async with session.get(
"https://spack.github.io/packages/data/packages.json"
) as response:
response = await response.json()
return [x.lower() for x in response]
|
bf7cb1df98ae7d1c58023f52611585190d9e3492
| 25,142
|
def _bool_value(element):
"""
Given an xml element, returns the tag text converted to a bool.
:param element: The element to fetch the value from.
:return: A boolean.
"""
return (element.text.lower() == "true")
|
d5eb1c94ec6a09b6a81508124f36bacf8c253fb8
| 25,143
|
import math
def math_round(x):
    """Round to integer
    Round arithmetically to nearest integer, with ties going away from
    zero.
    """
    # symmetric "round half away from zero"; the naive modulo approach
    # breaks for negative inputs and for |x| < 1
    if x >= 0:
        return math.floor(x + 0.5)
    return math.ceil(x - 0.5)
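# A few illustrative checks of the tie-breaking behaviour:
assert math_round(0.5) == 1
assert math_round(-0.5) == -1   # ties move away from zero
assert math_round(2.4) == 2
assert math_round(-1.5) == -2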
|
e2c1ff6893bba061cb68a05224e8ae1a30926151
| 25,144
|
def get_hr_val(choices, db_val):
"""
Get the human readable value for the DB value from a choice tuple.
Args:
choices (tuple): The choice tuple given in the model.
db_val: The respective DB value.
Returns:
The matching human readable value.
"""
for pair in choices:
if pair[0] == db_val:
return pair[1]
# Value not found.
return None
|
26791ccc16e6b1e9399bc6d10b4292c7c7780ebd
| 25,145
|
import os
def normalize_path_sep(path: str) -> str:
"""Normalize the separators of a path
Parameters
----------
path: str
Path to normalize
Returns
-------
path normalized
"""
    p1 = path.replace('/', os.sep).replace('\\', os.sep)
return p1
|
b0600cbf0138eeb96781bfa22c0c9a5e99ec94d6
| 25,146
|
def resolve_templating_engine(args):
""" Figures out what templating engine should be used to render the stack
"""
# Figure out what templating engine to use.
# Only use -t option when stack comes from stdin
if args.stack.name == "<stdin>":
return args.templating_engine
elif ".mako" in args.stack.name[-5:]:
return "mako"
elif ".jinja" in args.stack.name[-6:]:
return "jinja"
elif ".yaml" in args.stack.name[-5:]:
return "yaml"
raise NotImplementedError("Templating engine not supported. Must be set "
"to 'mako', 'jinja', or '' in the command line "
"or by using the equivalent file extension")
|
aef956cd3a5a9cca8451f069a986407af631694e
| 25,147
|
def parse_value(value, base_value=0):
"""Parse a numeric value spec which is one of:
NNN integer
NN.MMM float
NN% proportional to base_value
"""
if not isinstance(value, str):
return value
    if value[-1] == '%':
        # return the scaled value directly so a fractional result is not
        # truncated by the final int() conversion
        return base_value * float(value[:-1]) / 100
elif '.' in value:
return float(value)
return int(value)
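# Illustrative behaviour of the three spec forms:
assert parse_value("7") == 7
assert parse_value("2.5") == 2.5
assert parse_value("25%", 200) == 50.0   # proportional to base_value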
|
186ef21e453af617449294bc44d97765b28e6676
| 25,148
|
from pathlib import Path
import os
def _report_exec_fail(
env,
file_name: str,
traceback: str,
show_traceback: bool,
template: str,
):
"""Save the traceback to a log file, and create log message."""
reports_dir = Path(env.app.outdir).joinpath("reports")
reports_dir.mkdir(exist_ok=True)
full_path = reports_dir.joinpath(os.path.splitext(file_name)[0] + ".log")
full_path.write_text(traceback, encoding="utf8")
message = template.format(full_path)
if show_traceback:
message += "\n" + traceback
return str(full_path), message
|
356f50b1a1f3ee2b073c3b58d32c3b15c4c072b5
| 25,150
|
def extractData(spark):
"""Load data from txt files
:return: Spark DataFrame
"""
dfRaw = spark.read.text("raw_logs/*/*.txt")
return dfRaw
|
acc8b4528763a2abdff04f6e3cfe8f99daf6b06d
| 25,151
|
def _html_tidy_cell_item(item, key):
"""
Returns HTML-tidied item for putting in table cell.
:param item: item to be tidied
:param key: dictionary key (for reference)
:return: tidied HTML item or string
"""
if isinstance(item, dict):
resp = "<br/>\n".join(["{}: {}".format(key, value) for key, value in item.items()])
resp += "<br/>{}: SUCCESS!".format(int(key) + 1)
return resp
return item
|
748ad64ca025a76472d2146cd9e369f525470bee
| 25,152
|
def make_damage_list(find_list, damage):
"""make damage list
from result, make damage date to list
Args
find_list (list): score data, as raw data
Returns
ret (boolean): damage found or not found
"""
ret = False
temp_list = []
list_num = len(find_list)
# from result, delete same position
for i in range(list_num):
if find_list[i][0] != 0:
if not temp_list:
temp_list = find_list[i]
ret = True
if find_list[i][0] > temp_list[0] + 5:
# different position
damage.append(str(temp_list[1]))
temp_list = find_list[i]
else:
# difference 5 pixels to classify as the same position
if find_list[i][2] > temp_list[2]:
# better match damage found, update value
temp_list = find_list[i]
if ret is True:
damage.append(str(temp_list[1]))
return ret
|
05569b52e031d014ce7c69d2687df6ac52b68f35
| 25,154
|
def ilog(n, base):
"""
Find the integer log of n with respect to the base.
>>> import math
>>> for base in range(2, 16 + 1):
... for n in range(1, 1000):
... assert ilog(n, base) == int(math.log(n, base) + 1e-10), '%s %s' % (n, base)
"""
count = 0
while n >= base:
count += 1
n //= base
return count
|
5c6ccfdbbf89ba544f6fa61ab830881d4a663202
| 25,156
|
def unmerge_m_e(pixel):
""" Unmerges the mantissa and exponent channel to perform operations on the ciphered image"""
val = (pixel // 1000, pixel % 1000)
if val[1] > 100:
return (val[0], -1 * (val[1] % 100))
else:
return val
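# Illustrative checks of the exponent sign encoding:
assert unmerge_m_e(12045) == (12, 45)    # exponent part <= 100 stays positive
assert unmerge_m_e(12145) == (12, -45)   # exponent part > 100 decodes as negative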
|
e5f51e096afb1b32547fe37a44e6de17dba7c31b
| 25,158
|
def get_entrypoint(request_path):
""" Get the entrypoint url from a request path, splitting off sub-indexes and query strings """
entrypoint = request_path.replace('/index', '').split('?')[0]
if entrypoint == '':
entrypoint = '/index'
return entrypoint
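# Illustrative behaviour:
assert get_entrypoint('/about/index?page=2') == '/about'
assert get_entrypoint('/index') == '/index'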
|
52fec0fd6933e26bc38e26a52b3124d1a5914258
| 25,160
|
def normalizeName(netName):
"""
Remove the numerals from testnet.
Args:
netName (string): The raw network name.
Returns:
string: The network name with numerals stripped.
"""
return "testnet" if "testnet" in netName else netName
|
3387835882636dd4da6ef54b7b43e873f1a047d5
| 25,161
|
from typing import Dict
from pathlib import Path
def create_paths(run_id: str, run_dir: str = "runs/") -> Dict[str, Path]:
"""
Create the necessary directories and sub-directories conditioned on the `run_id` and run directory.
:param run_id: Unique Run Identifier.
:param run_dir: Path to run directory to save model checkpoints and run metrics.
"""
paths = {
# Top-Level Directory for a Given Run
"runs": Path(run_dir, run_id)
}
    # Programmatically Create Paths for each Directory
for p in paths:
paths[p].mkdir(parents=True, exist_ok=True)
return paths
|
2448d2621647b084161312bf572a2294705ea713
| 25,162
|
def read_config(config_file):
"""
Load configuration parameters from the supplied file path. The file should
contain one parameter on each line with the format `key=value`. Lines
starting with a # are ignored
Parameters
----------
    config_file: str
        Path to the configuration file to parse.
    :return: a dictionary with the necessary parameters to run pylcmodel
    """
config_dict = {
"port": "22",
"persist_remote_files": False
}
with open(config_file) as fin:
for config_line in fin:
config_line = config_line.strip()
# check for commented out lines
if config_line.startswith("#") or len(config_line) == 0:
continue
key, value = config_line.split("=")
config_dict[key.rstrip()] = value.lstrip()
return config_dict
|
316e056971e41a9f470d6fc2bb7af2f635257914
| 25,163
|
import string
def format_channel_name(author, guild, exclude_channel=None):
"""Sanitises a username for use with text channel names"""
name = author.name.lower()
name = new_name = (
"".join(l for l in name if l not in string.punctuation and l.isprintable()) or "null"
) + f"-{author.discriminator}"
counter = 1
existed = set(c.name for c in guild.text_channels if c != exclude_channel)
while new_name in existed:
new_name = f"{name}_{counter}" # multiple channels with same name
counter += 1
return new_name
|
ffe29c321e3c747dbc6a255042e6d093f1e595b2
| 25,164
|
def getDimensions(self):
"""Gets the number of rows and columns of the map"""
return (self.__numrows__, self.__numcols__)
|
fa56d7ec97ba237fb41cc70bbbb7a2348a621f04
| 25,165
|
import re
def convert_fa_spaces(input_value: str) -> str:
"""
Convert space between Persian MI and De-Yii to zero-width non-joiner (halfspace) char
:param input_value: String contains persian chars
:return: New string with converted space to half space char
"""
# u200C is the code for unicode zwnj character https://en.wikipedia.org/wiki/Zero-width_non-joiner
repl = '\\2\u200C\\4'
# replace space between persian MI.
mi_pattern = r'((\s\u0645\u06CC)+([\s])+([\u0600-\u06EF]{1,}){1,})'
result = re.sub(mi_pattern, repl, input_value, 0)
# replace space between persian De-Yii.
de_yii_pattern = r'(([\u0600-\u06EF]{1,})+([\s])+(ای|ایی|اند|ایم|اید|ام){1})'
result = re.sub(de_yii_pattern, repl, result)
return result
|
f295402cdb086b62c12abbf935bb12b24c223ca0
| 25,167
|
from multiprocessing import RLock as rlockp
from threading import RLock as rlockt
def create_rlock(process=False):
"""Creates a reentrant lock object."""
if process:
return rlockp()
else:
return rlockt()
|
eb4cbdd72c5f649e3907cda0054c96913b722381
| 25,168
|
def to_hex_string(string: str) -> str:
"""Converts UTF-8 string into its hex representation
:param string: str
The string to convert to hex
:return:
Hex representation of the given string
"""
return string.encode('utf-8').hex()
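# Illustrative check ('a' is 0x61, 'b' is 0x62, 'c' is 0x63 in UTF-8):
assert to_hex_string("abc") == "616263"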
|
62b9b71af31bccdde136aa6d2dabbb2ee3df2ea7
| 25,169
|
def is_not_valid_url(url: str) -> bool:
"""Returns True if there's no http in the url
Args:
url (str): the url to be used in the qrcode
Returns:
bool: True if there's no url
"""
if('http' not in url):
print(f"[Warn] Couldn't generate qrcode for page {url}")
return True
return False
|
63729925f59a4c2cf3e67d0b37fe4bcd64d13cee
| 25,171
|
import os
def get_config_dir() -> str:
"""
Gets the directory in which to store app configuration. Based on
code from https://stackoverflow.com/a/3250952.
On Windows, %APPDATA%\\
On Linux, $XDG_CONFIG_HOME/
If the relevant environment variable for the platform is not set,
defaults to ~/.config/
:return: The directory string.
"""
# Get the system app configuration standard location
if 'APPDATA' in os.environ:
return os.environ['APPDATA']
elif 'XDG_CONFIG_HOME' in os.environ:
return os.environ['XDG_CONFIG_HOME']
else:
return os.path.join(os.environ['HOME'], '.config')
|
9966305f4a857f854dd39f516f6cc1f859fcbac7
| 25,172
|
import random
def sample(p):
"""Given an array of probabilities, which sum to 1.0, randomly choose a 'bin',
e.g. if p = [0.25, 0.75], sample returns 1 75% of the time, and 0 25%;
NOTE: p in this program represents a row in the pssm, so its length is 4"""
r = random.random()
i = 0
while r > p[i]:
r -= p[i]
i += 1
return i
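# Degenerate but deterministic check: with all mass in bin 0, bin 0 is always chosen.
assert sample([1.0]) == 0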
|
886da94e2c9e35bd07ceba606de92d2126197b99
| 25,173
|
def sort_tag(tag):
"""Sort the list by tag."""
list_length = len(tag)
    # Length 9 is list -l, 7 is df, 6 is list
if list_length == 9:
_tag = tag[4]
elif list_length == 7:
_tag = tag[6]
elif list_length == 6:
_tag = tag[3]
else:
_tag = tag[1]
_sort = _tag.rsplit('_', 1)
# We want to sort tags that have been created with count > 1. But not
# foo_bar
if len(_sort) > 1 and _sort[1].isdigit():
return _sort[0], int(_sort[1])
else:
return _tag, 0
|
441721e0b2eb8832e7edf848934184457482a330
| 25,174
|
from typing import Tuple
from typing import Any
import random
def pick_card_member(config: dict, author: str, team: str, card_assignments: dict) -> Tuple[Any, Any]:
"""Return a member to assign to the created issue.
In practice, it returns one trello user which is not the PR author, for the given team.
For it to work, you need a `trello_users_$team` table in your ddev configuration,
with keys being github users and values being their corresponding trello IDs (not names).
For example::
[trello_users_integrations]
john = "xxxxxxxxxxxxxxxxxxxxx"
alice = "yyyyyyyyyyyyyyyyyyyy"
"""
users = config.get(f'trello_users_{team}')
if not users:
return None, None
if team not in card_assignments:
# initialize map team -> user -> QA cards assigned
team_members = list(users)
random.shuffle(team_members)
card_assignments[team] = dict.fromkeys(team_members, 0)
member = min([member for member in card_assignments[team] if member != author], key=card_assignments[team].get)
card_assignments[team][member] += 1
return member, users[member]
|
43196abe6c707460c8e52f7b5feeea8959b43b0d
| 25,176
|
import random
def cartesian_choice(*iterables):
"""
A list with random choices from each iterable of iterables
is being created in respective order.
The result list can be seen as an element of the
Cartesian product of the iterables
"""
res = []
for population in iterables:
res.append(random.choice(population))
return res
|
ad9ff73909b17b65d98e61c36ceef7b9ace1a1f3
| 25,177
|
def add(number_one, number_two):
"""
两个数字相加
:param number_one:第一个数字
:param number_two:第二个数字
:return:相加后的结果
"""
result = number_one + number_two
return result
|
ac85b372ebf48c4a6b4dc67b61d74dfa6d9b4246
| 25,178
|
def format_filter(filetypes):
    """Returns a filetype filter formatted for the Open File prompt."""
    entries = []
    for t in sorted(filetypes, key=lambda key: filetypes[key]):
        entries.append("%s (%s)" % (t, " ".join(filetypes[t])))
    return ";;".join(entries)
|
0c9ba5eb72d0ac1f215f75fade5d21533d180716
| 25,179
|
import numpy
def principal_axis(alpha_carbons):
"""
Calculate principal inertia axis for the structure along with its geometrical center
---
Parameters:
alpha_carbons: alpha carbons of the structure
---
Return:
center: geometrical center of the structure
axis_direction: direction of the axis
"""
# alpha carbons coordinates as a numpy array
coord = numpy.array(alpha_carbons, float)
# get geometrical center
center = numpy.mean(coord, 0)
coord = coord - center
# create inertia matrix and extract eigenvectors and values
inertia = numpy.dot(coord.transpose(), coord)
e_values, e_vectors = numpy.linalg.eig(inertia)
# sort eigenvalues
order = numpy.argsort(e_values)
# axis1 is the principal axis with the greatest eigenvalue
_, _, axis1 = e_vectors[:, order].transpose()
axis_direction = axis1 / numpy.linalg.norm(axis1)
return center, axis_direction
|
7c09ea18fb84e8051c46602150b3788d567ec047
| 25,181
|
import re
def remove_comments(code):
"""
Removes Lua comments through the string, given to this function.
Then removes empty strings (which also appear after removing comments);
as a side effect, all tabulations are also removed.
@param code: string, containing sourcecode which should be processed.
    @return: string with the source code without any comments and tabulations
"""
    # non-greedy quantifiers so that two separate comments are not merged into one match
    no_comments_code = re.sub(r"--\[\[[\s\S]*?\]\]|"  # multi-line comments like --[[ comment ]]
                              r"--\[=\[[\s\S]*?\]=\]|"  # multi-line comments like --[=[ comment ]=]
"--.*", # single-line comments like -- comment
"", code)
cleaned_code = re.sub(r"^\s*", "", no_comments_code, flags=re.MULTILINE)
return cleaned_code
|
33d6ed045946782cad9599cbb74b46230feed7e5
| 25,182
|
import pathlib
def get_path_name(name: pathlib.Path | str | None) -> pathlib.Path | str:
"""Get the full name from a directory name or path and a file name or path.
Args:
name (pathlib.Path | str | None): Directory name or file name.
Returns:
str: Full file name.
"""
if name is None:
return ""
return pathlib.Path(name)
|
0eb4abd9cf82f022d0a0c3d380fab3e750c183d3
| 25,184
|
def detectPlusType(plusNode):
"""
Analyses the given "plus" node and tries to figure out if a "string" or "number" result is produced.
"""
if plusNode[0].type == "string" or plusNode[1].type == "string":
return "String"
elif plusNode[0].type == "number" and plusNode[1].type == "number":
return "Number"
elif plusNode[0].type == "plus" and detectPlusType(plusNode[0]) == "String":
return "String"
else:
return "var"
|
38eb3753c3dd85ae264341bc57be7a5d9fba7822
| 25,185
|
def fix_floats(data):
"""This function iterates though lists and dictionaries and tries to
convert strings to floats."""
if isinstance(data, list):
iterator = enumerate(data)
elif isinstance(data, dict):
iterator = data.items()
else:
raise TypeError("can only traverse list or dict")
for i,value in iterator:
if isinstance(value, (list, dict)):
fix_floats(value)
elif isinstance(value, str):
try:
data[i] = float(value)
except ValueError:
pass
return data
|
5fa25d5432e9457777e710ddd877809fa6bb49be
| 25,186
|
def customtype_pyxll_function_1(x):
"""returns x.greeting()"""
return x.greeting()
|
5c87905423f45fa28e9e32abea8ba0ce2b5bee55
| 25,187
|
def get_hue(r, g, b):
"""
get the HUE value from RGB values
:param r: red
:param g: green
:param b: blue
:return: the hue value based on the RGB value
"""
minimum = min(r, g, b)
maximum = max(r, g, b)
    # compare the computed extrema, not the min/max builtins
    if minimum == maximum:
        return 0
    hue = 0.0
    if maximum == r:
        hue = (g - b) / (maximum - minimum)
    if maximum == g:
        hue = 2.0 + ((b - r) / (maximum - minimum))
    if maximum == b:
hue = 4.0 + ((r - g) / (maximum - minimum))
hue = hue * 60
if hue < 0.0:
hue = hue + 360
return round(hue)
|
ec572db6609002018644ea7020a0615168199db2
| 25,188
|
def getFirstLineContaining(lines, keyLineStart):
"""
Given a split abfinfo text, return a stripped value for the given key.
"""
for line in lines:
if line.startswith(keyLineStart):
line = line.replace(keyLineStart, "")
line = line.strip()
return line
return None
|
ea9128d51d0d32075c212f3f11306b8cbb9f0fb6
| 25,189
|
def tokenize(name):
"""Turn a string into a list of character tokens."""
#name = name.lower() #ignore cases
characters = [c for c in name] #make a list of characters
characters = ['<START>'] + characters + ['<STOP>'] #add beginning and end token
return characters
|
9ed489baf16b6e91f67ed74c1f77c3c40205147e
| 25,190
|
def hex(n):
"""render the given number using upper case hex, like: 0x123ABC"""
return "0x%X" % n
|
2759c53b50214b138da8c986820b3acc61ed0ab2
| 25,191
|
def Event( state_table ):
"""Decorator for indicating state dependant method
Decorator is applied to methods of state machine class to indicate that
invoking the method will call state dependant method. States are implemented
as subclasses of the state machine class with a metaclass qualification.
"""
stateVarName = state_table.inst_state_name
def wrapper(func):
# no adding of event handler to statetable...
def objCall( self, *args, **kwargs):
state_var = getattr(self, stateVarName )
retn = state_var.getFunc(func)(self, *args, **kwargs)
return retn
return objCall
return wrapper
|
c7e4b8a83779c5fa0eeac7d4207d63bf39c84779
| 25,192
|
import ast
def parse_plotter_csv(filename: str, log_function=print) -> list:
"""Parse a file that was created with the spansh plotter, probably not used in final version"""
log_function(f"Parsing route file {filename}")
def parse_line(line_: str) -> dict:
line_elements = line_.replace('"', "").replace("\n", "").split(",")
return dict(zip(line_keys, line_elements))
with open(filename, "r") as f:
all_lines = f.readlines()
output = []
    line_keys = ast.literal_eval(all_lines[0])  # safer than eval() for a literal header row
del all_lines[0]
for line in all_lines:
output.append(parse_line(line))
return output
|
ae66b06963e8ae637b717d51224482ee2143fe1e
| 25,193
|
import doctest
def doc_tests():
"""Doctests that can be selected by 'manage.py test thismodule.doctests'"""
return doctest.DocTestSuite('salesforce.backend.introspection')
|
7af3160809f15001a29e1b226c466128bde436f0
| 25,194
|
def es_index_successful(response):
"""
Test if an Elasticsearch client ``index`` method response indicates a
successful index operation. The response parameter should be a dictionary
with following keys:
.. sourcecode:: python
{
'_shards': {
'total': 2,
'failed': 0,
'successful': 1
},
'_index': u'billing-2015-12',
'_type': u'billing',
'_id': u'AVOmKFXgF_o3S6_4PkP1',
'_version': 1,
'created': True
}
According to `Elasticsearch Index API <https://www.elastic.co/guide/en/
elasticsearch/reference/current/docs-index_.html>`, an index operation is
successful in the case ``successful`` is at least 1.
:rtype: bool
"""
return response.get('_shards', {}).get('successful', 0) >= 1
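# Illustrative checks against minimal response dictionaries:
assert es_index_successful({'_shards': {'successful': 1}}) is True
assert es_index_successful({}) is False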
|
e76e84d8461078da03f820089297ea9ca28f0911
| 25,195
|
def verify_header(filename):
"""
verify the signature file header (pastadb)
"""
with open(filename, "rb") as f:
        if f.read(7) == b"\x70\x61\x73\x74\x61\x64\x62":  # b"pastadb"; the file is opened in binary mode
return True
return False
|
5958179b0656ac7b4e8681e20d2746176592da3a
| 25,196
|
import requests
def _request_limit_reached(exception):
""" Checks if exception was raised because of too many executed requests. (This is a temporal solution and
will be changed in later package versions.)
:param exception: Exception raised during download
:type exception: Exception
:return: `True` if exception is caused because too many requests were executed at once and `False` otherwise
:rtype: bool
"""
return isinstance(exception, requests.HTTPError) and \
exception.response.status_code == requests.status_codes.codes.TOO_MANY_REQUESTS
|
67280ea48cce3238d0c574ec8ad1b13719df4990
| 25,197
|
import glob
def get_names():
"""Get names of H3 clusters"""
fnames = glob.glob('/home/ana/data/hectochelle/clusters/*MSG*fits')
names = []
for f in fnames:
names += [f.split('_')[0].split('/')[-1]]
return names
|
4947e53b7ceb1a0358cf240bb5239ff01c51fbb6
| 25,198
|
import asyncio
def get_or_create_event_loop():
"""Gets the running event loop or creates a new one and returns it
By default `asyncio` only starts an event loop in the main thread, so when running in another thread we
need to explicitly create a new event loop for that particular thread.
"""
try:
return asyncio.get_event_loop()
except RuntimeError as ex:
if "There is no current event loop in thread" in str(ex):
asyncio.set_event_loop(asyncio.new_event_loop())
return asyncio.get_event_loop()
|
f780d3e809d46a7388ffce6ceb2031343992f511
| 25,201
|
import tempfile
def make_environment(case, method='HEAD', path=u'/', qs=None, host=None,
server_name='localhost', port=80, script_name='',
url_scheme='http', headers={},
content_type=None, content_length=None,
http_protocol='http/1.1'):
"""
Build a WSGI environment for use in testing
"""
if url_scheme == 'https' and port == 80:
port = 443
environment = {
'wsgi.version': (1, 0),
'wsgi.errors': tempfile.NamedTemporaryFile(),
'wsgi.input': tempfile.NamedTemporaryFile(),
'wsgi.multithreaded': False,
'wsgi.multiprocess': False,
'wsgi.run_once': True,
'wsgi.url_scheme': url_scheme,
'SERVER_PROTOCOL': http_protocol,
'REQUEST_METHOD': method,
'PATH_INFO': path,
'SERVER_NAME': server_name,
'SERVER_PORT': str(port),
'SCRIPT_NAME': script_name
}
case.assertIsNotNone(environment['wsgi.input'])
case.assertIsNotNone(environment['wsgi.errors'])
for k, v in headers.items():
environment['HTTP_' + k] = v
if host is not None:
environment['HTTP_HOST'] = host
if qs is not None:
environment['QUERY_STRING'] = qs
if content_type is not None:
environment['CONTENT_TYPE'] = content_type
if content_length is not None:
        environment['CONTENT_LENGTH'] = str(content_length)
return environment
|
4b2fdd959fa538478b9166f5ade0d157847eb7ca
| 25,202
|
def image_boxes_resize_padding_inverse(image_size, input_size, boxes=None):
"""
    Inverse of resize_image_padding: maps boxes from the resized, padded input back to the original image coordinates.
"""
width, height = image_size
scale = min([input_size[0] / width, input_size[1] / height])
new_size = [int(width * scale), int(height * scale)]
pad_w = input_size[0] - new_size[0]
pad_h = input_size[1] - new_size[1]
top, bottom = pad_h // 2, pad_h - (pad_h // 2)
left, right = pad_w // 2, pad_w - (pad_w // 2)
    if boxes is not None and len(boxes) > 0:
boxes[:] = boxes[:] - [left, top, left, top]
boxes[:] = boxes[:] / scale
return boxes
|
a3e4498018afd7b22e4395ffba1b4a33d8fd171c
| 25,204
|
import os
def get_org_dbs(db_dir, target_org):
"""Retrieve references to fasta and BLAST databases for included organisms.
"""
fasta_ref = None
org_names = []
db_refs = []
with open(os.path.join(db_dir, "organism_dbs.txt")) as org_handle:
for line in org_handle:
org, db = line.rstrip("\r\n").split("\t")
if db:
if org == target_org:
assert fasta_ref is None
fasta_ref = os.path.join(db_dir, "%s.fa" % db)
org_names.append(org)
db_refs.append(os.path.join(db_dir, db))
assert fasta_ref is not None, "Did not find base organism database"
return fasta_ref, org_names, db_refs
|
4de81b30a2988b68380cb61aa2135cfc2ed56510
| 25,205
|
from typing import List
def parse_raw_value(raw_value: str, linesep: str) -> List[List[str]]:
"""Parse raw values from VEP"""
parsed = list()
for line in raw_value.strip().split(linesep):
# This is the regular case
if "\t" in line:
parsed.append(line.split("\t", 1))
        # Special cases where VEP was run on an empty input file
elif line == "Lines of input read":
parsed.append(["Lines of input read", "0"])
elif line == "Variants processed":
parsed.append(["Variants processed", "0"])
return parsed
|
4ce5fff141628c553f59e87e9beb4174bd400a19
| 25,208
|
import os
def tomtom_api_key():
"""Returns TomTom API key. Requires setting TOMTOM_API_KEY environment variable."""
return os.environ['TOMTOM_API_KEY']
|
19f63e3b47525bab7d45d7bf752b8de9495c466d
| 25,210
|
def steric_constraints(result, key, array_traceback):
""" Function: steric_constraints()
Purpose: While looking for recursive secondary structure
elements in the loops, check for sterically
infeasible configurations. Add one base at
either side of the loop region.
Input: Set of secondary structure elements, loop start
and end and dynamic programming matrix.
Return: Set of sterically feasible secondary structure elements.
"""
energy, result, effective = array_traceback[key[0]][key[1]]
for item in result:
if (item[0], item[1]) == key:
left_result = array_traceback[key[0]+1][key[1]]
right_result = array_traceback[key[0]][key[1]-1]
if left_result[0] < right_result[0]:
energy, result, effective = left_result
else:
energy, result, effective = right_result
return energy, result, effective
|
a32da6db82854a5b40e31baca8c5e4918c89a582
| 25,211
|
def format_datetime(dt):
"""
Returns ISO 8601 string representation
"""
return dt.isoformat()
|
fbbdec6086618f94826a69230b88bfa9e79ca472
| 25,213
|
def nt2codon_rep(ntseq):
"""Represent nucleotide sequence by sequence of codon symbols.
'Translates' the nucleotide sequence into a symbolic representation of
'amino acids' where each codon gets its own unique character symbol. These
characters should be reserved only for representing the 64 individual
codons --- note that this means it is important that this function matches
the corresponding function in the preprocess script and that any custom
alphabet does not use these symbols. Defining symbols for each individual
codon allows for Pgen computation of inframe nucleotide sequences.
Parameters
----------
ntseq : str
A Nucleotide sequence (normally a CDR3 nucleotide sequence) to be
'translated' into the codon - symbol representation. Can be either
uppercase or lowercase, but only composed of A, C, G, or T.
Returns
-------
codon_rep : str
The codon - symbolic representation of ntseq. Note that if
len(ntseq) == 3L --> len(codon_rep) == L
Example
--------
>>> nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
'\xbb\x96\xab\xb8\x8e\xb6\xa5\x92\xa8\xba\x9a\x93\x94\x9f'
"""
#
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'a': 0, 'c': 1, 'g': 2, 't': 3}
#Use single characters not in use to represent each individual codon --- this function is called in constructing the codon dictionary
codon_rep ='\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf'
return ''.join([codon_rep[nt2num[ntseq[i]] + 4*nt2num[ntseq[i+1]] + 16*nt2num[ntseq[i+2]]] for i in range(0, len(ntseq), 3) if i+2 < len(ntseq)])
|
0dbb4d522229696fc7f62a3327b869f2c966aab9
| 25,214
|
def format_encode_value(value, state, constset):
"""Formats the encoded value based on the type"""
if constset is None or "type" not in constset:
return str(value)
else:
t = constset["type"]
if t == "checkbox":
return str(bool(value))
elif t == "timestamp" or t == "duration":
return str(value // 60) + "m " + str(value % 60) + "s "
elif t == "rating":
if "max" in constset:
return str(value) + " out of " + str(constset["max"])
else:
return str(value)
elif t == "choice":
if "choices" in constset:
if value >= 0 and value < len(constset["choices"]):
return "<" + str(constset["choices"][value]) + ">"
else:
return str(value)
else:
return str(value)
else:
return str(value)
|
4582b678d70e508c0d692036d9e495530930284f
| 25,215
|
def heron(a, b, c):
""" https://en.wikipedia.org/wiki/Heron%27s_formula """
s = (a + b + c) / 2.0
return (s * (s - a) * (s - b) * (s - c)) ** 0.5
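# Illustrative check: the classic 3-4-5 right triangle has area 6.
assert heron(3, 4, 5) == 6.0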
|
6b9baa734f400fd07dbd735cf66d19b61128c0c5
| 25,216
|
def get_labelled_groups(labelled_measures_table, labelname):
"""
Provides a simple way of splitting a labelled measures table into multiple tables each corresponding to a given label.
Args:
labelled_measures_table (Dataframe): A measures table with a column corresponding to a label.
labelname (String): The name of the label to be split on.
Returns:
        List of dataframes corresponding to membership of a given label.
"""
# get the labels IN ASCENDING ORDER (important)
labels = sorted(set(list(labelled_measures_table[labelname].values)))
player_groups = []
for label in labels:
this_group = labelled_measures_table[
labelled_measures_table[labelname] == label
]
player_groups.append(this_group)
return player_groups
|
40bacfaf0044185034fde3a0eb31f7e1ab7a94ad
| 25,217
|
def divides_fully_against_denominators(target, denominators):
"""Determine of a target integer can be divided by any number amongst a list of denominators, with a remainder of zero.
Args:
target (int): A number to test if it is prime against the denominators.
denominators (list of int): A list of numbers to act as denominators for the target number.
Returns:
bool: False if the target number can be fully divided against at least one number in denominators. True if it cannot.
"""
denominators.sort()
for num in denominators:
if target % num == 0:
return False
return True
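# Illustrative checks:
assert divides_fully_against_denominators(7, [2, 3, 5]) is True    # 7 is coprime to all
assert divides_fully_against_denominators(9, [2, 3, 5]) is False   # 9 % 3 == 0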
|
0f0d5c4c367ad112c78dd326bf9857ce18ec6f76
| 25,218
|
def _get_from_dictionary(dictionary, key):
"""
Safely returns the value from a dictionary that has the given key.
if the dictionary is None or does not contain the specified key, None is returned
    :return: the value mapped to the key, or None
"""
if dictionary and key in dictionary:
return dictionary[key]
else:
return None
|
f93609a0ff9c2f01cadb10adcd0aed49a3b06a95
| 25,219
|
def _antnums_to_blpair(antnums):
"""
Convert nested tuple of antenna numbers to baseline-pair integer.
A baseline-pair integer is an i12 integer that is the antenna numbers
+ 100 directly concatenated (i.e. string contatenation).
    Ex: ((1, 2), (3, 4)) --> 101 + 102 + 103 + 104 --> 101102103104.
Parameters
----------
antnums : tuple
nested tuple containing integer antenna numbers for a baseline-pair.
Ex. ((ant1, ant2), (ant3, ant4))
Returns
-------
blpair : <i12 integer
baseline-pair integer
"""
# get antennas
ant1 = antnums[0][0] + 100
ant2 = antnums[0][1] + 100
ant3 = antnums[1][0] + 100
ant4 = antnums[1][1] + 100
# form blpair
blpair = int(ant1*1e9 + ant2*1e6 + ant3*1e3 + ant4)
return blpair
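# Illustrative check matching the docstring example:
assert _antnums_to_blpair(((1, 2), (3, 4))) == 101102103104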
|
47cbdee6212ae4a2857df3578b5755c98d928d55
| 25,221
|
def get_user_html(user):
"""Return standard HTML representation for a User object"""
return (
'<a title="{}" href="mailto:{}" data-toggle="tooltip" '
'data-placement="top">{}'
'</a>'.format(user.get_full_name(), user.email, user.username)
)
|
d988e872266767ea23f23836dcd588d74b0060a8
| 25,222
|
import os
def _create_log_filename(log_dir: str, sub_dir: str, filename: str) -> str:
    """ create log filename """
    if log_dir is not None:
        file_path = log_dir
os.makedirs(file_path, exist_ok=True)
if sub_dir is not None:
file_path = os.path.join(log_dir, sub_dir)
os.makedirs(file_path, exist_ok=True)
filename = os.path.join(file_path, filename)
return filename
|
3422873e22c734fc3926d86b378685465f77bf6c
| 25,223
|
from typing import Dict
from typing import Any
def dict_without(dictionary: Dict[Any, Any], without: Any) -> Dict[Any, Any]:
"""
Remove a key from a dictionary and return the copy without.
"""
new = dictionary.copy()
new.pop(without)
return new
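# Illustrative check -- the original dictionary is left untouched:
d = {"a": 1, "b": 2}
assert dict_without(d, "a") == {"b": 2} and d == {"a": 1, "b": 2}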
|
4008e7e56690363e58da41f272e93ebf88ccd907
| 25,224
|
def map_rows_to_cols(rows, cols):
"""
Returns a list of dictionaries.
Each dictionary is the column name to its corresponding row value.
"""
mapped_rows = []
for row in rows:
mapped_rows.append({cols[i]: row[i] for i in range(len(cols))})
return mapped_rows
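# Illustrative check:
assert map_rows_to_cols([[1, 2]], ["a", "b"]) == [{"a": 1, "b": 2}]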
|
9efb7d48f69d5bab8d910cbedd75f4c4d001ef7b
| 25,227
|
def clean_side_panel(sidepanel):
"""
Cleans the SidePanel data and stores it in a dict.
Parameter:
sidepanel: html-sidepanel extracted using bs4
Returns:
data: dict of extracted data
"""
data = dict()
for x in sidepanel:
x = x.text.strip().replace('\n', '')
index = x.find(':')
if index == -1:
continue
y, x = x[:index], x[index+1:].strip()
data[y] = x
return data
|
c9fab309b64b788283e0848a0777fd3ef80014ad
| 25,228
|
import warnings
def _deprecated_kwargs(kwargs, arg_newarg):
""" arg_newarg is a list of tuples, where each tuple has a pair of strings.
('old_arg', 'new_arg')
A DeprecationWarning is raised for the arguments that need to be
replaced.
"""
warn_for = []
for (arg, new_kw) in arg_newarg:
if arg in kwargs.keys():
val = kwargs.pop(arg)
kwargs[new_kw] = val
warn_for.append((arg, new_kw))
if len(warn_for) > 0:
if len(warn_for) == 1:
warnings.warn("Argument '{}' is deprecated. Use {} instead".
format(warn_for[0][0], warn_for[0][1]),
DeprecationWarning, stacklevel=4)
else:
args = ", ".join([x[0] for x in warn_for])
repl = ", ".join([x[1] for x in warn_for])
warnings.warn(
"Arguments '{}' are deprecated. Use '{}' instead respectively".
format(args, repl),
DeprecationWarning, stacklevel=4)
return kwargs
|
f70d7e9af7a432253b0cfc7d4457443e4ba4f0db
| 25,230
|
from collections import OrderedDict
def object_attributes_to_ordered_dict(obj, attributes):
    """Returns the specified attributes from the object in an OrderedDict."""
    # named `ordered` to avoid shadowing the built-in `dict`
    ordered = OrderedDict()
    object_vars = vars(obj)
    for attribute in attributes:
        ordered[attribute] = object_vars[attribute]
    return ordered
|
2aa1e75669bbe13f8d3fa238dc0c2bb681aa8b72
| 25,232
|
def get_distro():
"""
Name of your Linux distro (in lowercase).
"""
with open("/etc/issue") as f:
return f.read().lower().split()[0]
|
865917340c7add959f86072030b3b6172ecccdf2
| 25,233
|
def _get_object_info_from_revision(revision, known_type):
""" returns type and id of the searched object, if we have one part of
the relationship known.
"""
object_type = revision.destination_type \
if revision.source_type == known_type \
else revision.source_type
object_id = revision.destination_id if \
revision.source_type == known_type \
else revision.source_id
return object_type, object_id
|
e8f73296f0d7080c6290148142dd3d2902646ed1
| 25,234
|
def fast_comp(seq1, seq2, transpositions=False):
"""Compute the distance between the two sequences `seq1` and `seq2` up to a
maximum of 2 included, and return it. If the edit distance between the two
sequences is higher than that, -1 is returned.
If `transpositions` is `True`, transpositions will be taken into account for
the computation of the distance. This can make a difference, e.g.:
>>> fast_comp("abc", "bac", transpositions=False)
2
>>> fast_comp("abc", "bac", transpositions=True)
1
This is faster than `levenshtein` by an order of magnitude, but on the
other hand is of limited use.
The algorithm comes from `http://writingarchives.sakura.ne.jp/fastcomp`.
I've added transpositions support to the original code.
"""
replace, insert, delete = "r", "i", "d"
L1, L2 = len(seq1), len(seq2)
if L1 < L2:
L1, L2 = L2, L1
seq1, seq2 = seq2, seq1
ldiff = L1 - L2
if ldiff == 0:
models = (insert+delete, delete+insert, replace+replace)
elif ldiff == 1:
models = (delete+replace, replace+delete)
elif ldiff == 2:
models = (delete+delete,)
else:
return -1
res = 3
for model in models:
i = j = c = 0
while (i < L1) and (j < L2):
if seq1[i] != seq2[j]:
c = c+1
if 2 < c:
break
if transpositions and ldiff != 2 \
and i < L1 - 1 and j < L2 - 1 \
and seq1[i+1] == seq2[j] and seq1[i] == seq2[j+1]:
i, j = i+2, j+2
else:
cmd = model[c-1]
if cmd == delete:
i = i+1
elif cmd == insert:
j = j+1
else:
assert cmd == replace
i,j = i+1, j+1
else:
i,j = i+1, j+1
if 2 < c:
continue
elif i < L1:
if L1-i <= model[c:].count(delete):
c = c + (L1-i)
else:
continue
elif j < L2:
if L2-j <= model[c:].count(insert):
c = c + (L2-j)
else:
continue
if c < res:
res = c
if res == 3:
res = -1
return res
|
4b9c6e19d4c9940c5ed249c97f8fdb8a04b48708
| 25,236
|
def _retrieve_header(response, header):
""" Little compatibility utility for response.get_header() method.
response.get_header() was introduced in falcon 1.0 but we want to retrieve
response header values in al versions in consitent manner.
Args:
response (falcon.Request): request object instance
header (str): case-insensitive header name
"""
try:
return response.get_header(header)
except AttributeError:
# compat: on falcon<1.0 there is not get_header() method so we must
# access _headers dictionary directly
        # note: _headers dictionary stores headers with lower-case names but
        #       get_header is case-insensitive, so we make the name lowercase to
        #       ensure consistency across versions.
return response._headers[header.lower()]
|
25f69d397992fbee67bf6c210556a3b1c10780cd
| 25,239
|
def get_transitions(sequence):
"""
Extracts a list of transitions from a sequence, returning a list of lists containing each transition.
Example
--------
>>> sequence = [1,2,2,1,2,3,2,3,1]
>>> ps.get_transitions(sequence)
[[1, 2], [2, 1], [1, 2], [2, 3], [3, 2], [2, 3], [3, 1]]
"""
transitions = []
for position in range(len(sequence) - 1):
if sequence[position] != sequence[position + 1]:
transitions.append([sequence[position], sequence[position + 1]])
return transitions
|
25c2e7de0f4701517c1f41f466a3710a7f124c4d
| 25,240
|
import traceback
def exception_to_string(e: Exception) -> str:
""" Convert exception to printable string """
stack = traceback.extract_stack()[:-3] + traceback.extract_tb(e.__traceback__)
    # join the formatted frames into one readable string instead of the list repr
    pretty_out = "".join(traceback.format_list(stack))
    return f"{pretty_out}\n {e.__class__} {e}"
|
b5b0e873dd3ad2d923d0cc16de5ab4016e73565e
| 25,242
|
import os
import json
def get_json(path, filename):
"""
Return data object from a JSON file.
Parameters
----------
path : string
Directory path to file.
filename : string
Name of the file. Name must include extension.
Returns
-------
object
Object contained in the JSON file.
See also
--------
write_json, write_jsons
"""
if not isinstance(path, str):
raise TypeError('`path` must be str')
elif not isinstance(filename, str):
raise TypeError('`filename` must be str')
with open(os.path.join(path, filename)) as file:
return json.load(file)
|
b6cf8857a72f0264a0def2bafc03922f1669927a
| 25,243
|
def get_middle_node(ll):
"""
It is assumed the linked list class is implemented as in class
"""
curr_node = ll.head
middle = ll.size // 2
if ll.size % 2 == 0:
for _ in range(middle - 1): # loop through the linkedlist until we get the index
curr_node = curr_node.next
return (curr_node.data, curr_node.next.data)
else:
for _ in range(middle):
curr_node = curr_node.next
return (curr_node.data)
|
a68d0680d348db0bc6d2a1453a3a9f3ddd0bb1c2
| 25,244
|
def get_dependent_nodes(nodes):
"""Get all dependent nodes connected to the list of nodes.
Looking for connections outside of the nodes in incoming argument.
Arguments:
nodes (list): list of nuke.Node objects
Returns:
connections_in: dictionary of nodes and its dependencies
connections_out: dictionary of nodes and its dependency
"""
connections_in = dict()
connections_out = dict()
node_names = [n.name() for n in nodes]
for node in nodes:
inputs = node.dependencies()
outputs = node.dependent()
# collect all inputs outside
test_in = [(i, n) for i, n in enumerate(inputs)
if n.name() not in node_names]
if test_in:
connections_in.update({
node: test_in
})
# collect all outputs outside
test_out = [i for i in outputs if i.name() not in node_names]
if test_out:
# only one dependent node is allowed
connections_out.update({
node: test_out[-1]
})
return connections_in, connections_out
|
3a436fa704226f7466761a2b87a71ba91ca419b3
| 25,245
|
import os
import click
def get_list_of_iso(mount_path):
"""Returns a list of iso files at the mount location passed as parameter
Parameters:
mount_path (str): The folder where ISOs are mounted
Returns:
list: A list of absolute pathnames of ISOs at the mount_path
"""
list_of_iso = []
for file in os.listdir(mount_path):
if file.endswith(".iso"):
list_of_iso.append(os.path.join(mount_path, file))
else:
click.secho(f"File {file} is not an ISO. Skipping file {file}...", fg='yellow', bold=True)
return list_of_iso
|
6d6cad1a52b73c6f271dbe1de2f5cdcd168ef915
| 25,246
|
import math
import torch
def gaussian_probability(sigma, mu, data):
"""Returns the probability of `data` given MoG parameters `sigma` and `mu`.
Arguments:
sigma (BxGxO): The standard deviation of the Gaussians. B is the batch
size, G is the number of Gaussians, and O is the number of
dimensions per Gaussian.
mu (BxGxO): The means of the Gaussians. B is the batch size, G is the
number of Gaussians, and O is the number of dimensions per Gaussian.
data (BxI): A batch of data. B is the batch size and I is the number of
input dimensions.
Returns:
probabilities (BxG): The probability of each point in the probability
of the distribution in the corresponding sigma/mu index.
"""
data = data.unsqueeze(1).expand_as(sigma)
ret = 1.0 / math.sqrt(2 * math.pi) * torch.exp(-0.5 *
((data - mu) / sigma) ** 2) / sigma
return torch.prod(ret, 2)
|
5757e051af16b692fba9e483990df1d5c4fd3870
| 25,247
|
def length(head) -> int:
"""
The method length, which accepts a linked list
(head), and returns the length of the list.
:param head:
:return:
"""
i = 0
if head is None:
return 0
while head.next is not None:
head = head.next
i += 1
return i + 1
|
f1ad7c64dc15620340f505671281e59267eb4b2b
| 25,248
|
def Mabs2L(Mabs,MUVsun=5.5):
"""
    Converting absolute magnitude(s) to luminosity in units of 1e44 erg/s
Using a default absolute magnitude of the sun (in UV) of 5.5 from http://www.ucolick.org/~cnaw/sun.html
"""
Lsun = 3.839e-11 # 1e44 erg/s
    Lobj = 10**((MUVsun-Mabs)/2.5)*Lsun   # Luminosity in units of 1e44 erg/s
return Lobj
|
85a6f7b1e58dbc086a7dd36659e76b37849a8b04
| 25,249
|
def list_prod(lst):
"""
Calculate the product of all numbers in a python list.
"""
prod = 1
for itm in lst:
prod *= itm
return prod
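# Illustrative checks (the empty product is 1 by convention):
assert list_prod([2, 3, 4]) == 24
assert list_prod([]) == 1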
|
b0f5911e6eeb289aae7efe7f1fe99a2ca0f83cc4
| 25,251
|
def _get_commit_interval(distance: float, commit_count: int) -> int:
"""Get right interval for given commit distance using distance thresholds,
interval indices are in [0,3] for 5 thresholds."""
commit_distance_thresholds = [
0,
round(0.25 * commit_count),
round(0.5 * commit_count),
round(0.75 * commit_count), commit_count
]
k = 0
while commit_distance_thresholds[k] < distance:
k += 1
return k - 1
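# Illustrative check: with commit_count=100 the thresholds are [0, 25, 50, 75, 100],
# so a distance of 30 falls into interval index 1 (between 25 and 50).
assert _get_commit_interval(30, 100) == 1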
|
19f968e5a8bdf90fc3877dca412884bdc26ff579
| 25,254
|