content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import os
def load_sentiments(file_name="data"+os.sep+"data/sentiments.csv"):
    """Read the sentiment file and return a dictionary mapping each word to
    its sentiment score, a float from -1 to +1.

    :param file_name: path to a CSV file of ``word,score`` lines
    :return: dict of word (str) -> score (float)
    """
    sentiments = {}
    # 'with' guarantees the handle is closed even if a line fails to parse;
    # the original left the file open until garbage collection.
    with open(file_name, encoding='utf8') as sentiment_file:
        for line in sentiment_file:
            word, score = line.split(',')
            sentiments[word] = float(score.strip())
    return sentiments
|
7713037d3c524d9a3c1a943840e96654f0552bbb
| 696,189
|
import os
def readConfig():
    """
    Read the ~/.forcecode config file, creating it with defaults when absent.

    :return: tuple ``(editorName, pathToSave)``
    """
    confPath = os.path.expanduser("~")
    confFile = os.path.join(confPath, ".forcecode")
    if not os.path.isfile(confFile):
        # First run: write the default config.  'with' closes the handle
        # even on a failed write (the original relied on explicit close,
        # and leaked the read handle below entirely).
        with open(confFile, "w") as conf_out:
            conf_out.write("editor:gedit:")
            conf_out.write("path:~/.forceCode/:")
        return ('gedit', '~/.forceCode/')
    else:
        with open(confFile, "r") as conf_in:
            data = conf_in.read()
        # Fields are colon-separated key/value pairs, e.g.
        # "editor:gedit:path:~/.forceCode/:" -> values sit at odd indexes.
        data = data.split(':')
        return (data[1], data[3])
|
d0d11749e6a17fa395fc925b55b8fdc32f834d14
| 696,192
|
def switch(*args):
""":yaql:switch
Returns the value of the first argument for which the key evaluates to
true, null if there is no such arg.
:signature: switch([args])
:arg [args]: mappings with keys to check for true and appropriate values
:argType [args]: chain of mapping
:returnType: any (types of values of args)
.. code::
yaql> switch("ab" > "abc" => 1, "ab" >= "abc" => 2, "ab" < "abc" => 3)
3
"""
for mapping in args:
if mapping.source():
return mapping.destination()
|
c0d152b4004866826c4892a1340865b79feefa2c
| 696,194
|
import subprocess
def cmdline(command):
    """Run *command* in a shell and return its captured stdout as bytes.

    Note: stderr is captured but discarded, and the process exit status is
    never checked — callers only ever see standard output.
    Security note: ``shell=True`` hands the string to the shell; do not
    pass untrusted input to this function.
    """
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
    )
    # communicate() waits for termination and avoids pipe-buffer deadlock.
    out, error = process.communicate()
    # print("out", out)
    # print("error", error)
    return out
|
929482b4ca2272ed41005fc54bdaebdffd4ede15
| 696,195
|
def extract_segment_types(urml_document_element, namespace):
"""Return a map from segment node IDs to their segment type
('nucleus', 'satellite' or 'isolated').
"""
segment_types = \
{namespace+':'+seg.attrib['id']: seg.tag
for seg in urml_document_element.iter('nucleus', 'satellite')}
for seg in urml_document_element.iter('segment'):
seg_id = namespace+':'+seg.attrib['id']
if seg_id not in segment_types:
segment_types[seg_id] = 'isolated'
return segment_types
|
30d2050055a9c2e66da66e3663df27a9cc6852e1
| 696,196
|
def convert2voxels(x_um_rw, imExtends, voxelSize):
    """
    Convert real-world um coordinates to 0-origin voxel coordinates.
    :param x_um_rw: coordinates in the real-world frame, dimensions in um
    :param imExtends (list of lists): the first list holds the initial extends
        of the image and the second the final ones; dimensions are um, used to
        localize the image in the real-world frame
    :param voxelSize: voxel size
    :return: coordinates in the 0-origin frame, dimensions in voxels
    """
    # Shift so the image origin sits at zero, then rescale um -> voxels.
    return (x_um_rw - imExtends[0]) / voxelSize
|
123414615e40bb41802b8f5f072bb994f859f3d7
| 696,197
|
import os
def get_blacklist(api):
    """Returns a function to filter unimportant files normally ignored."""
    git_dir = os.path.sep + '.git' + os.path.sep
    svn_dir = os.path.sep + '.svn' + os.path.sep
    def _is_blacklisted(f):
        # Ignored prefix, compiled Python, or anything inside VCS metadata.
        if f.startswith(api.IGNORED) or f.endswith('.pyc'):
            return True
        return git_dir in f or svn_dir in f
    return _is_blacklisted
|
c33dce4f6e6035ad3ec4a6353f3b42f7b0848f1c
| 696,198
|
def moeda(x=0.0, moeda='R$'):
    """
    Format the value *x* as a local-currency (BRL) string.
    :param x: value to be formatted.
    :param moeda: currency symbol of the local currency (BRL).
    :return: formatted string, e.g. ``moeda(10)`` -> ``'R$10,00'``.
    """
    combinado = '{}{:.2f}'.format(moeda, x)
    # Swap the decimal point for the Brazilian decimal comma.
    return combinado.replace('.', ',')
|
7a9002ad9fbc476090d1a82a2fb18222567d1c71
| 696,199
|
def fill_big_gaps(array, gap_size):
    """
    Insert values into the given sorted list if there is a gap of more than ``gap_size``.
    All values in the given array are preserved, even if they are within the ``gap_size`` of one another.
    >>> fill_big_gaps([1, 2, 4], gap_size=0.75)
    [1, 1.75, 2, 2.75, 3.5, 4]
    """
    if len(array) == 0:
        raise ValueError("Input array must be len > 0")
    filled = []
    previous = array[0]
    for current in array:
        # The tiny epsilon keeps float round-off from inserting a spurious
        # point when a gap equals gap_size exactly.
        while current - previous > gap_size + 1e-15:
            previous = previous + gap_size
            filled.append(previous)
        filled.append(current)
        previous = current
    return filled
|
11ecb164b9e54c75db249ca27cbbdd582ed47945
| 696,201
|
from pathlib import Path
def get_package_root() -> Path:
    """Returns package root folder.

    Raises:
        ValueError: if the computed folder does not contain a
            ``mapservices`` directory, i.e. the project layout changed.
    """
    # Two levels up from this module: <root>/<package>/<this file>.
    conf_folder = Path(__file__).parent.parent
    dirs_in_scope = [x.name for x in conf_folder.iterdir() if x.is_dir()]
    # Sanity check: the root is recognized by its "mapservices" subfolder.
    if "mapservices" not in dirs_in_scope:
        msg = (
            f"Not the right root directory. ({conf_folder.absolute()}) "
            "Did you change the project structure?"
        )
        raise ValueError(msg)
    return conf_folder
|
7ff402b510528f7256ee6033ecbe6d5054bf487d
| 696,202
|
def inversion(permList):
    """
    Description - Return the number of inversions in a permutation, i.e.
                  pairs of positions (i, j) with i < j whose values are out
                  of order.
    Preconditions - The parameter permList is a list of unique positive
                  numbers.
    Postconditions - The number of inversions in permList has been returned.
                  Unlike the original implementation, the caller's list is
                  left unmodified, and an empty list yields 0.
    Input - permList : list
    Output - numInversions : int
    """
    # Work on a copy: the previous version consumed the caller's list via
    # remove(), a surprising side effect.
    remaining = list(permList)
    numInversions = 0
    # Repeatedly extract the current maximum; every element to its right
    # forms exactly one inversion with it.
    while len(remaining) > 1:
        maxIndex = remaining.index(max(remaining))
        numInversions += len(remaining) - maxIndex - 1
        del remaining[maxIndex]
    return numInversions
|
45868e69ceaed9f9a901da8f0157cc6fddace306
| 696,203
|
def match_rules(rules, app, action):
    """
    Match comma-separated "app:action" rules (as found in Group) against the
    given app and action.  '*' in a rule matches anything; an *action* of
    '%' matches any rule action.
    """
    return any(
        rule_app in ('*', app)
        and (rule_action == '*' or rule_action == action or action == '%')
        for rule_app, rule_action in (rule.split(':') for rule in rules.split(','))
    )
|
dd0f76819f2211111551a866c46496c36a1f385b
| 696,204
|
def calc_acc(top_action, bot_aspect_action, bot_opinion_action, gold_labels, mode):
    """
    Get accuracy.
    Args:
        top_action (list): List of predicted sentiments with their positions in a sequence.
        bot_aspect_action (list): List of list(s) of sequence length, where each list of
            predicted aspect tags corresponds to a position in top_action, in respective order.
        bot_opinion_action (list): Like bot_aspect_action, but for opinion tags.
        gold_labels (list): List of dictionaries of ground truth labels
            (keys read here: 'type', 'aspect_tags', 'opinion_tags').
        mode (list): List of experiment modes; "NER" enables span validation.
    Returns:
        tuple: (acc, tot, cnt) — matched-gold count, number of gold labels, and an
        integer-divided prediction counter.
        NOTE(review): the exact semantics of cnt after ``cnt //= tot`` should be
        confirmed against the caller.
    """
    acc, cnt, tot = 0, 0, len(gold_labels)
    # used[i] == 1 marks a predicted position already consumed by a gold triplet.
    used = [0 for i in range(len(top_action))]
    for label in gold_labels:
        tp, aspect_tags, opinion_tags = label['type'], label['aspect_tags'], label['opinion_tags']
        # j indexes bot actions (advances on each positive top_action);
        # ok flips to 1 once this gold label has been matched.
        j, ok = 0, 0
        for i in range(len(top_action)):
            # if sentiment matches ground truth, ground truth is a sentiment, position is not filled, and gold triplet has not been correctly matched before
            if top_action[i] == tp and tp > 0 and used[i] == 0 and ok == 0:
                match = 1
                if "NER" in mode:
                    # remove impossible predictions when not calculating rewards
                    # 1) skip if either beginning tag of aspect or opinion span is absent or there is more than 1 of either
                    if bot_aspect_action[j].count(2) !=1 or bot_opinion_action[j].count(2) !=1:
                        j += 1
                        continue
                    # 2) skip if no aspect or opinion span detected or more than 1 of either is detected
                    # A "span" is a maximal run of non-zero tags; a run ends at a
                    # zero tag or at the end of the sequence.
                    aspect_cnt = 0
                    prev_aspect = False
                    for k, aspect_k in enumerate(bot_aspect_action[j]):
                        if aspect_k > 0:
                            prev_aspect = True
                            if k == len(bot_aspect_action[j]) - 1:
                                aspect_cnt += 1
                        elif aspect_k == 0:
                            if prev_aspect:
                                aspect_cnt += 1
                            prev_aspect = False
                        if aspect_cnt > 1:
                            break
                    opinion_cnt = 0
                    prev_opinion = False
                    for k, opinion_k in enumerate(bot_opinion_action[j]):
                        if opinion_k > 0:
                            prev_opinion = True
                            if k == len(bot_opinion_action[j]) - 1:
                                opinion_cnt += 1
                        elif opinion_k == 0:
                            if prev_opinion:
                                opinion_cnt += 1
                            prev_opinion = False
                        if opinion_cnt > 1:
                            break
                    if aspect_cnt != 1 or opinion_cnt != 1:
                        j += 1
                        continue
                    # make sure there is an exact match for both aspect and opinion spans
                    if bot_aspect_action[j] != aspect_tags or bot_opinion_action[j] != opinion_tags:
                        match = 0
                if match == 1:
                    ok = 1
                    used[i] = 1
            if top_action[i] > 0:
                j += 1
                cnt += 1
        acc += ok
    cnt //= tot
    return acc, tot, cnt
|
2c247de46a132f708981c29a41cb2068b5efb91e
| 696,206
|
def thrift_attrs(obj_or_cls):
    """Obtain Thrift data type attribute names for an instance or class.

    Each ``thrift_spec`` value is a tuple whose second element is the
    attribute name.
    """
    names = []
    for entry in obj_or_cls.thrift_spec.values():
        names.append(entry[1])
    return names
|
7c4f75f0e00ca08ca8d889d537ceee355c4c6552
| 696,207
|
def s3so(worker_id):
    """
    Returns s3 storage options to pass to fsspec
    """
    if worker_id == "master":
        endpoint_port = 5000
    else:
        # xdist workers are named gw0, gw1, ...; give each its own port.
        endpoint_port = 5550 + int(worker_id.lstrip("gw"))
    return {"client_kwargs": {"endpoint_url": f"http://127.0.0.1:{endpoint_port}/"}}
|
bee24bca3dfefd1568c5376d2636c4354cda8633
| 696,208
|
def find_one_or_more(element, tag):
    """Return subelements with tag, checking that there is at least one.

    :param element: XML element whose direct children are searched.
    :param tag: tag name passed through to ``element.findall``.
    :raises ValueError: if no matching subelement exists.  (The original
        used ``assert``, which is silently stripped under ``python -O``.)
    """
    s = element.findall(tag)
    if len(s) < 1:
        raise ValueError('expected at least one <%s>, got %d' % (tag, len(s)))
    return s
|
6faee2e8cad1a943da6499cb0ab987cafa79104a
| 696,209
|
def find_max_simultaneous_events(events):
    """
    Question 14.5: Given a list of intervals representing
    start and end times of events, find the maximum number
    of simultaneous events that we can schedule
    """
    # Flatten each interval into (time, is_start) transition pairs.
    transitions = [pair
                   for event in events
                   for pair in ((event[0], True), (event[1], False))]
    transitions.sort(key=lambda t: t[0])  # stable: ties keep input order
    best = current = 0
    # Sweep the timeline, tracking how many events are open at once.
    for _, is_start in transitions:
        current += 1 if is_start else -1
        best = max(best, current)
    return best
|
ffacabf17aa89dc61903a1aab44bace923f224a4
| 696,210
|
def G(x: int, y: int, z: int) -> int:
    """MD5 round-2 auxiliary function: bitwise multiplexer keyed on z —
    selects bits of x where z is 1 and bits of y where z is 0."""
    return (z & x) | (~z & y)
|
802338277d7bba0f0e019fe886489beba37ecf18
| 696,211
|
def get_top(cards, colour):
    """
    Get the top card played of the given colour string (0 when no card of
    that colour has been played).
    """
    wanted = colour.lower()
    ranks = [card.rank for card in cards if card.colour.lower() == wanted]
    return max(ranks) if ranks else 0
|
6c11a55c7214b18713462ffc0891a12a0460023f
| 696,212
|
def transform_objects(objects):
    """Flatten the id->object mapping into a list of dicts, tagging each
    entry with its ``instance_id`` and null-filling destruction coordinates
    for objects that were never destroyed."""
    transformed = []
    for instance_id, obj in objects.items():
        entry = dict(obj, instance_id=instance_id)
        if entry['destroyed'] is None:
            # Object survived: no destruction coordinates exist.
            entry['destroyed_x'] = None
            entry['destroyed_y'] = None
        transformed.append(entry)
    return transformed
|
e53f21ef107ba38a22b86d1747329aa9dfca2617
| 696,213
|
import math
def distance(x1, y1, x2, y2):
    """distance: euclidean distance between (x1,y1) and (x2,y2)"""
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx ** 2 + dy ** 2)
|
b7b4662a88c9afd4b63d6ab04fc51749916749f1
| 696,214
|
def repeated_definitions_of_repo_in_config(config, repo):
    """Check if there are multiple definitions of the same repository in a
    pre-commit configuration object.
    Parameters
    ----------
    config : dict
        Pre-commit configuration dictionary.
    repo : str
        Repository to check for multiple definitions.
    Returns
    -------
    bool : ``True`` if there are more than one definition of the passed
        repository in the configuration dictionary, ``False`` otherwise.
    """
    occurrences = sum(
        1 for defined in config["repos"] if defined["repo"] == repo
    )
    return occurrences > 1
|
10f44cd6d6d1ef2313a2b8b6ab20b81df8294565
| 696,217
|
def get_links_from_wiki(soup, n=5, prefix="https://en.wikipedia.org"):
    """
    Extracts `n` first links from wikipedia articles and adds `prefix` to
    internal links.
    Parameters
    ----------
    soup : BeautifulSoup
        Wikipedia page
    n : int
        Number of links to return
    prefix : str, default="https://en.wikipedia.org""
        Site prefix
    Returns
    -------
    list
        List of links
    """
    links = []
    # The article body lives in the mw-parser-output div.
    content = soup.find("div", class_="mw-parser-output")
    for element in content.find_all("p") + content.find_all("ul"):
        for anchor in element.find_all("a", href=True):
            if len(links) >= n:
                break
            href = anchor["href"]
            # Keep only plain internal article links "/wiki/<name>":
            # no extra path segments, no dots (files), no parentheses
            # (disambiguation-style titles).
            if (href.startswith("/wiki/")
                    and len(href.split("/")) == 3
                    and "." not in href
                    and "(" not in href):
                links.append(prefix + href)
    return links
|
5d0b77bf82cc5e09cc3db3fe9e0bd0b58bc81f55
| 696,218
|
import bisect
def find_ge(array, x):
    """Find leftmost item greater than or equal to x.
    Example::
        >>> find_ge([0, 1, 2, 3], 1.0)
        1
    (Finds the smallest element that is >= x; raises ValueError when every
    element is smaller than x.)
    """
    pos = bisect.bisect_left(array, x)
    if pos == len(array):
        raise ValueError
    return array[pos]
|
6f1aaa40da6d00acd15ee86d1db161f714c6d5d3
| 696,219
|
from typing import List
import subprocess
def _commit_files(files: List[str], message: str) -> bool:
    """
    Stages the given files and creates a commit with the given message.
    Returns True if a new commit was created, False if the files are unchanged.
    """
    subprocess.check_call(["git", "add"] + files)
    # "git diff-index --quiet --cached HEAD" exits 0 when the index matches
    # HEAD, i.e. nothing is staged and there is nothing to commit.
    if subprocess.call(["git", "diff-index", "--quiet", "--cached", "HEAD"]) == 0:
        return False
    subprocess.check_call(["git", "commit", "--no-verify", "--message", message])
    return True
|
ce043cd3317c15c0cea1f4dca84dcdb0484a79eb
| 696,220
|
def reverse_list(list):
    """
    :param: list
    :return: list
    Return a new list whose elements are in reversed order,
    e.g. reverse_list([30,40,50]) returns [50,40,30].
    The input list is left unmodified.  An empty input — which crashed the
    original with an IndexError on ``list[0]`` — yields an empty list.
    """
    # Slicing handles every length including [], and avoids the original's
    # O(n^2) insert(0, ...) loop.  (Parameter name kept for compatibility
    # even though it shadows the builtin.)
    return list[::-1]
|
a3370aa505e19a4e4bca76d765c8f3859ac106d2
| 696,221
|
def inputInt(prompt, min=0, max=100):
    """
    Read an integer from the user, reprompting until the value parses and
    lies within [min, max].  Ctrl+C exits the program cleanly.

    :param prompt: text shown to the user.
    :param min: smallest accepted value (inclusive).
    :param max: largest accepted value (inclusive).
    :return: the validated integer.
    """
    while True:
        try:
            i = int(input(prompt))
        except KeyboardInterrupt:
            print()
            exit(0)
        # Broad on purpose: any bad input (parse error, EOF, ...) reprompts.
        except Exception:
            print("Valeur invalide")
            continue
        if min <= i <= max:
            return i
        # Bug fix: the original printed "${min}"/"${max}" literally (a JS
        # template-string leftover); use real f-string interpolation.
        print(f"La valeur doit être entre {min} et {max}")
|
fa1a9ca1bcbdbf9dd46c37ecb242bb012c14d9e9
| 696,222
|
import torch
def max_log_loss(logits, targets=None, reduction='mean'):
    """
    Loss based on each sample's maximum log-probability.
    :param logits: predicted class scores
    :type logits: torch.autograd.Variable
    :param targets: target classes (unused; kept for interface parity)
    :type targets: torch.autograd.Variable
    :param reduction: reduction type ('mean', 'sum', anything else = none)
    :type reduction: str
    :return: error
    :rtype: torch.autograd.Variable
    """
    log_probs = torch.nn.functional.log_softmax(logits, dim=1)
    per_sample = torch.max(log_probs, dim=1)[0]
    if reduction == 'sum':
        return torch.sum(per_sample)
    if reduction == 'mean':
        return torch.mean(per_sample)
    return per_sample
|
d1478f73e3b18c1926f4c859125e120177799c06
| 696,224
|
import re
def clear_comments(data):
    """Return the bibtex content without comments"""
    # Strip "%"-to-end-of-line comments first, then whole "comment ..." lines.
    without_percent = re.sub(r"(%.*\n)", '', data)
    return re.sub(r"(comment [^\n]*\n)", '', without_percent)
|
16308058dd608a241109455a31405aa01fe46f2d
| 696,225
|
async def list_top_communities(context, limit=25):
    """List top communities. Returns lite community list.

    :param context: request context; ``context['db']`` must expose
        ``query_all`` (async).
    :param limit: maximum number of rows (must be < 100; NOTE the check is
        an ``assert``, so it is stripped under ``python -O``).
    :return: list of (name, title) tuples.
    """
    assert limit < 100
    #sql = """SELECT name, title FROM hive_communities
    #       WHERE rank > 0 ORDER BY rank LIMIT :limit"""
    # Community id 1344247 is pinned to the top of the ranking —
    # presumably a featured community; confirm against the schema.
    sql = """SELECT name, title FROM hive_communities
              WHERE id = 1344247 OR rank > 0
          ORDER BY (CASE WHEN id = 1344247 THEN 0 ELSE rank END)
             LIMIT :limit"""
    out = await context['db'].query_all(sql, limit=limit)
    return [(r[0], r[1]) for r in out]
|
64d73bbb857c2fd6ef3fb680d403869414eab268
| 696,226
|
def get_file_name(path):
    """
    Return the final component of a '/'-separated path.
    :param path: slash-separated path string
    :return: substring after the last '/' (empty if the path ends with '/')
    """
    return path.rsplit("/", 1)[-1]
|
6fb5ff0044931afd09a81df6be48da78b62e5902
| 696,227
|
def get_op_output_unit(unit_op, first_input_units, all_args=None, size=None):
    """Determine resulting unit from given operation.
    Options for `unit_op`:
    - "sum": `first_input_units`, unless non-multiplicative, which raises
      OffsetUnitCalculusError
    - "mul": product of all units in `all_args`
    - "delta": `first_input_units`, unless non-multiplicative, which uses delta version
    - "delta,div": like "delta", but divided by all units in `all_args` except the first
    - "div": unit of first argument in `all_args` (or dimensionless if not a Quantity) divided
      by all following units
    - "variance": square of `first_input_units`, unless non-multiplicative, which raises
      OffsetUnitCalculusError
    - "square": square of `first_input_units`
    - "sqrt": square root of `first_input_units`
    - "cbrt": cube root of `first_input_units`
    - "reciprocal": reciprocal of `first_input_units`
    - "size": `first_input_units` raised to the power of `size`
    Parameters
    ----------
    unit_op : str
        One of the operation names listed above.
    first_input_units : Units
        Units of the first operand; also provides the registry.
    all_args : list, optional
        All operands; only elements with a ``units`` attribute contribute.
        (Default value = None)
    size : int, optional
        Exponent used only by the "size" operation.
        (Default value = None)
    Returns
    -------
    Units
        The resulting unit of the operation.
    Raises
    ------
    ValueError
        If `unit_op` is unknown, or "size" is requested without `size`.
    """
    all_args = all_args or []
    # The 1 * units / 1 * units arithmetic below delegates offset-unit
    # (non-multiplicative) handling to the quantity implementation.
    if unit_op == "sum":
        result_unit = (1 * first_input_units + 1 * first_input_units).units
    elif unit_op == "mul":
        product = first_input_units._REGISTRY.parse_units("")
        for x in all_args:
            if hasattr(x, "units"):
                product *= x.units
        result_unit = product
    elif unit_op == "delta":
        result_unit = (1 * first_input_units - 1 * first_input_units).units
    elif unit_op == "delta,div":
        product = (1 * first_input_units - 1 * first_input_units).units
        for x in all_args[1:]:
            if hasattr(x, "units"):
                product /= x.units
        result_unit = product
    elif unit_op == "div":
        # Start with first arg in numerator, all others in denominator
        product = getattr(
            all_args[0], "units", first_input_units._REGISTRY.parse_units("")
        )
        for x in all_args[1:]:
            if hasattr(x, "units"):
                product /= x.units
        result_unit = product
    elif unit_op == "variance":
        result_unit = ((1 * first_input_units + 1 * first_input_units) ** 2).units
    elif unit_op == "square":
        result_unit = first_input_units ** 2
    elif unit_op == "sqrt":
        result_unit = first_input_units ** 0.5
    elif unit_op == "cbrt":
        result_unit = first_input_units ** (1 / 3)
    elif unit_op == "reciprocal":
        result_unit = first_input_units ** -1
    elif unit_op == "size":
        if size is None:
            raise ValueError('size argument must be given when unit_op=="size"')
        result_unit = first_input_units ** size
    else:
        raise ValueError("Output unit method {} not understood".format(unit_op))
    return result_unit
|
0a873f698886b316c49ed24581bf5786f59be457
| 696,228
|
import getpass
def smtp_config_generator_password(results):
    """
    Fill in the SMTP password interactively when it is missing.
    :param results: Values. Refer to `:func:smtp_config_writer`.
    :type results: dict
    :return: the same dict, with ``results['password']`` populated.
    """
    if results['password'] is not None:
        return results
    # Only prompt when no password was supplied; getpass hides the input.
    results['password'] = getpass.getpass(prompt="PASSWORD: ")
    return results
|
b0ea947af703d7c90e124c1d0ecabbd1e6216e15
| 696,229
|
from pathlib import Path
def is_single_repository(repo_path: str) -> bool:
    """
    Return True when repo_path points to a single repository (regular or
    bare) rather than a folder containing multiple repositories.
    """
    root = Path(repo_path)
    # A regular checkout keeps a .git entry directly under its root.
    if (root / ".git").exists():
        return True
    # A bare repository exposes its hooks/ and refs/ folders at top level.
    return (root / "hooks").exists() and (root / "refs").exists()
|
c9b2c709984b79a36c36d898d0e337d7a9c3f725
| 696,230
|
def requires_to_requires_dist(requirement):
    """Compose the version predicates for requirement in PEP 345 fashion."""
    predicates = sorted(op + ver for op, ver in requirement.specs)
    if predicates:
        return " (%s)" % ','.join(predicates)
    return ''
|
1a394de51d18b0a3cc4cb922364d352a29bccb09
| 696,231
|
def load_cows(filename):
    """
    Read the contents of the given file. Assumes the file contents contain
    data in the form of comma-separated cow name, weight pairs, and return a
    dictionary containing cow names as keys and corresponding weights as values.
    Blank lines (including a trailing newline at end of file, which made the
    original crash on tuple unpacking) are skipped.
    Parameters:
    filename - the name of the data file as a string
    Returns:
    a dictionary of cow name (string), weight (int) pairs
    """
    cows = {}
    # 'with' guarantees the file is closed even if a line fails to parse.
    with open(filename, 'r') as f:
        for line in f:
            # Drop the newline only, so names keep any internal spacing.
            line = line.strip('\n')
            if not line:
                continue
            cow_name, cow_weight = line.split(',')
            cows[cow_name] = int(cow_weight)
    return cows
|
6245d9f20791316e5e7f370a8d53d8d590ff87c7
| 696,232
|
import math
def create_space(lat, lon, s=10):
    """Creates a s km x s km square centered on (lat, lon).

    Returns the (south, west, north, east) corner offsets in degrees.
    """
    # Half-side in degrees on an Earth of radius 6378137 m (500 m per unit
    # times s); roughly 0.045 degrees for s=10.
    half = (180/math.pi)*(500/6378137)*s
    return lat - half, lon - half, lat + half, lon + half
|
7f39942cdb65a274ebf77257941211fa59f7cf89
| 696,233
|
import numpy
def aperture(npix=256, cent_obs=0.0, spider=0):
    """
    Compute the aperture image of a telescope
    Args:
        npix (int, optional): number of pixels of the aperture image
        cent_obs (float, optional): central obscuration fraction
        spider (int, optional): spider size in pixels
    Returns:
        real: returns the aperture of the telescope
    """
    illum = numpy.ones((npix,npix),dtype='d')
    x = numpy.arange(-npix/2,npix/2,dtype='d')
    y = numpy.arange(-npix/2,npix/2,dtype='d')
    xarr = numpy.outer(numpy.ones(npix,dtype='d'),x)
    yarr = numpy.outer(y,numpy.ones(npix,dtype='d'))
    # Radius normalized to 1.0 at the pupil edge.
    rarr = numpy.sqrt(numpy.power(xarr,2) + numpy.power(yarr,2))/(npix/2)
    outside = numpy.where(rarr > 1.0)
    inside = numpy.where(rarr < cent_obs)
    illum[outside] = 0.0
    if numpy.any(inside[0]):
        illum[inside] = 0.0
    if (spider > 0):
        # Bug fix: the original used true division ("/"), which yields a
        # float on Python 3 and makes the slices below raise TypeError
        # whenever spider > 0.  Integer division restores int indices.
        start = npix//2 - int(spider)//2
        illum[start:start+int(spider),:] = 0.0
        illum[:,start:start+int(spider)] = 0.0
    return illum
|
6ca52e89c50d8ca57416740378231c859e70d8de
| 696,234
|
def compose_tweet(media_id=None, data=None):
    """Build the Twitter API params dict describing a tweet activity event.

    :param media_id: optional media attachment id; included as 'media_ids'.
    :param data: activity record with at least 'status', 'timestamp' and
        'tweet_id'; 'user', 'original_user' and 'urls' are read when present.
    :return: params dict with 'status' (and 'media_ids' when media_id given).
    """
    # Mutable-default fix: the original declared data={} (one shared dict
    # across all calls); a None sentinel preserves the same behavior safely.
    if data is None:
        data = {}
    if data['status'] == 'tweeted':
        status = '.@{} tweeted @ {} (#{})'.format(data['user'], data['timestamp'], data['tweet_id'])
    elif data['status'] in ['retweeted', 'replied to']:
        status = '.@{} {} @{} @ {} (#{})'.format(data['user'], data['status'], data['original_user'],
                                                 data['timestamp'], data['tweet_id'])
    else:  # deleted
        if 'user' in data:
            status = '{} {} tweet #{} @ {}'.format(data['user'], data['status'], data['tweet_id'], data['timestamp'])
        else:
            status = 'Tweet ID, #{}, was deleted @ {}'.format(data['tweet_id'], data['timestamp'])
    status += " #MAGA"
    # Append URLs while the 140-char limit leaves room (t.co links count 23).
    if 'urls' in data and data['urls']:
        for url in data['urls']:
            if len(status) + min(23, len(url)) + 2 + (23 if media_id else 0) <= 140:
                status += '\n{}'.format(url)
    if len(status) > 140:
        status = status[0:139]
    params = {'status': status}
    if media_id:
        params['media_ids'] = media_id
    return params
|
3da320190446637a84b15c5e5b9084c0d08d52b2
| 696,235
|
def s_interp(sAvg,xeAvg,deltaP):
    """
    Searches for value to interpolate for s vs 1/H, relationships as described
    by Ambwain and Fort Jr., 1979.
    sAvg = float - average s value over two halves of droplet
    xeAvg = float - average xe value over two halves of droplet
    deltaP = float - density difference used in the surface tension estimate
    Returns a surface-tension guess computed from the piecewise-empirical
    1/H polynomial fits below (one fit per sAvg band).
    """
    # Empirical piecewise fits for 1/H; coefficients are taken from the
    # published tables and are band-specific — do not "simplify" them.
    if sAvg >= .9:
        hInv = (.30715/sAvg**2.84636) + (-.69116*sAvg**3)-(-1.08315*sAvg**2)+ \
            (-.18341*sAvg)-(.20970)
    elif sAvg >= .68:
        hInv = (.31345/sAvg**2.64267) - (.09155*sAvg**2)+(.14701*sAvg)-(.05877)
    elif sAvg >= .59:
        hInv = (.31522/sAvg**2.62435) - (.11714*sAvg**2)+(.15756*sAvg)-(.05285)
    elif sAvg >= .46:
        hInv = (.31968/sAvg**2.59725) - (.46898*sAvg**2)+(.50059*sAvg)-(.13261);
    elif sAvg >= .401:
        hInv = (.32720/sAvg**2.56651) - (.97553*sAvg**2)+(.84059*sAvg)-(.18069);
    else:
        print('Shape is too spherical');
        #Use formula for S > 0.401 even though it is wrong
        hInv = (.32720/sAvg**2.56651) - (.97553*sAvg**2)+(.84059*sAvg)-(.18069);
    # gamma = deltaP * g * d_e^2 * (1/H), with d_e = 2*xeAvg and g = 9.81.
    surfTenGuess = deltaP*9.81*(2*xeAvg)**2*hInv;
    return surfTenGuess
|
4bc8424c026ab03d4e6ed046966c16275450743a
| 696,236
|
import math
def count_taxes(common, precious, mineral, nether):
    """
    Count how many IC must be paid in taxes for the given bee counts.

    Each bee kind is binned into a category of ceil(count / 15); the
    category selects a per-bee rate.  The original if/elif chains silently
    skipped a boundary category (precious/mineral used ``> 8`` after
    ``== 7``, nether used ``> 7`` after ``== 6``), letting those exact
    categories pay nothing; they are now charged at the top rate, which
    the surrounding brackets imply was the intent.

    :param common: number of common bees (flat 2500 above category 1).
    :param precious: number of precious bees.
    :param mineral: number of mineral bees (same brackets as precious).
    :param nether: number of nether bees (category 1 is free).
    :return: total tax in IC.
    """
    def _rate(category, table, top_rate, top_from):
        # Per-bee rate for a category; categories >= top_from pay top_rate,
        # unlisted lower categories pay nothing.
        if category >= top_from:
            return top_rate
        return table.get(category, 0)

    total = 0
    # Common bees: a single flat rate once they exceed one category.
    if math.ceil(common / 15) > 1:
        total += common * 2500
    bracket = {1: 2500, 2: 5000, 3: 10000, 4: 20000,
               5: 35000, 6: 42000, 7: 62000}
    total += precious * _rate(math.ceil(precious / 15), bracket, 85000, 8)
    total += mineral * _rate(math.ceil(mineral / 15), bracket, 85000, 8)
    nether_bracket = {2: 500, 3: 1000, 4: 5000, 5: 35000, 6: 42000}
    total += nether * _rate(math.ceil(nether / 15), nether_bracket, 85000, 7)
    return total
|
b310cee73ae978c2698335a59bd8548012ab0da3
| 696,237
|
import random
def uniqueof20(k, rep=10000):
    """Sample k letters (with replacement) from the 20-residue amino-acid
    alphabet; return the mean number of distinct letters over rep trials."""
    alphabet = 'ACDEFGHIKLMNPQRSTVWY'
    distinct_counts = []
    for _trial in range(rep):
        drawn = set(random.choice(alphabet) for _ in range(k))
        distinct_counts.append(len(drawn))
    return sum(distinct_counts) / len(distinct_counts)
|
349f1bd964419585df46e13d3cde64d8f5a42c86
| 696,238
|
import itertools
import glob
import os
def find_strings_files():
    """Return the paths of the strings source files: strings*.json at the
    top level and one directory level below, as a lazy iterator."""
    top_level = glob.iglob("strings*.json")
    one_level_down = glob.iglob(f"*{os.sep}strings*.json")
    return itertools.chain(top_level, one_level_down)
|
24324d092d773ceafc7b5f5c3e73edb114c00f58
| 696,239
|
def policy_name_as_regex(policy_name):
    """Get the correct policy name as a regex
    (e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml
    So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)
    :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy
    :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*
    """
    prefix, sep, suffix = policy_name.partition('.')
    return '{}{}.*{}.*'.format(prefix, sep, suffix)
|
5b60a6f35a30af5f3514a43c24c7bee25505adfb
| 696,240
|
import struct
def int2hexstr(num, intsize=4):
    """
    Pack *num* into its little-endian byte representation.

    Despite the name, the result is raw ``bytes`` from ``struct.pack``, not
    a hex string; negative numbers use the signed format code.
    """
    if intsize == 8:
        fmt = "<q" if num < 0 else "<Q"
    else:
        fmt = "<l" if num < 0 else "<L"
    return struct.pack(fmt, num)
|
bafdd3960a42a48b6b1ce689fb4b9455643388f9
| 696,241
|
def get_primary_key_params(obj):
    """
    generate a dict from a mapped object suitable for formatting a primary key logline
    """
    # Pull each primary-key column's value straight off the mapped instance.
    pk_names = obj.__table__.primary_key.columns.keys()
    return {name: getattr(obj, name) for name in pk_names}
|
aad247b31925389bca21ef35fc6416c286587eee
| 696,243
|
def mean(numbers):
    """Return the arithmetic mean of a list of numbers"""
    total = float(sum(numbers))
    return total / float(len(numbers))
|
26601c23b8b6af48895a43f6e596e25eb626e7d6
| 696,245
|
def PySelectCoins_MultiInput_SingleValue(unspentTxOutInfo, targetOutVal,
                                         minFee=0):
    """
    This method should usually be called with a small number added to target val
    so that a tx can be constructed that has room for user to add some extra fee
    if necessary.
    However, we must also try calling it with the exact value, in case the user
    is trying to spend exactly their remaining balance.
    """
    needed = targetOutVal + minFee
    selected = []
    accumulated = 0
    # Greedily take UTXOs in the given order until the target is covered.
    for utxo in unspentTxOutInfo:
        accumulated += utxo.getValue()
        selected.append(utxo)
        if accumulated >= needed:
            break
    return selected
|
c882a720b56c988ecd9d595b4778451cab8e298b
| 696,246
|
def annual_Norm(dataframe):
    """Compute the rolling 12-month energy production of the panel.
    Args:
        dataframe (pandas.DataFrame): must contain 'Energy', 'DC Capacity'
            and 'Date' columns; assumes a default 0..N integer index so the
            positional slices below line up — TODO confirm with callers.
    Returns:
        annual_values (list): trailing-12-month energy sums normalized by
            the first 'DC Capacity' value
        month (list): the month corresponding to each annual value
    """
    # create three list
    lenth_list = list(range(12,len(dataframe.index)))
    annual_values = []
    month = []
    for i in range(len(lenth_list)):
        # Trailing 12-row window ending just before this row, normalized by
        # the first DC Capacity entry.
        single_values = dataframe['Energy'][lenth_list[i]-12:lenth_list[i]].sum()/dataframe['DC Capacity'][0]
        #rolling_average.append(each_period)
        single_month = dataframe['Date'][lenth_list[i]]
        # append the result we want to the list
        annual_values.append(single_values)
        month.append(single_month)
    month.pop(0) # Here we drop the first month data
    annual_values.pop(0)
    return(annual_values,month)
|
13c811f49f86c2514052defad39b96ad6689df47
| 696,247
|
def margined(arr, prop):
    """Returns (min(arr) - epsilon, max(arr) + epsilon), where
    epsilon = (max(arr) - min(arr)) * prop. This gives the range of
    values within arr along with some margin on the ends.
    ARR: a NumPy array
    PROP: a float"""
    low, high = arr.min(), arr.max()
    pad = (high - low) * prop
    return (low - pad, high + pad)
|
65ea4e99453ae300b08094f1891a31cca3d302bd
| 696,248
|
import requests
def framework_check(pypi_url: str) -> str:
    """
    7. Are the tests running with the latest Integration version?

    Fetches the package's PyPI JSON metadata and reports the last
    "Framework" trove classifier found.
    """
    response = requests.get(pypi_url).json()
    classifiers = response.get("info").get("classifiers")
    # Normalized list used only as a truthiness check below.
    frameworks = [s.replace("Framework Django", "Framework").replace(" ::", "") for s in classifiers if "Framework" in s]
    if frameworks:
        # NOTE(review): this re-derives the framework from the raw
        # classifiers rather than from `frameworks`, so the normalization
        # above never reaches the message — confirm whether intended.
        framework = [s for s in classifiers if "Framework" in s][-1].replace(" :: ", " ")
        message = f"[green]The project supports the following framework as it's latest[bold] {framework}"
    else:
        message = "[green]This project has no associated frameworks"
    return message
|
dd268cf19eedd885c4723cb635d9a55540524e0b
| 696,249
|
def faceAreaE3(face):
    """Computes the area of a triangular DCEL face with vertex coordinates given
    as PointE3 objects.
    Args:
        face: A triangular face of a DCEL with vertex .data given by PointE3 objects.
    Returns:
        The area of the triangular face.
    """
    a, b, c = [vertex.data for vertex in face.vertices()]
    # Half the magnitude of the cross product of two edge vectors is the
    # triangle's area.
    edge1 = b - a
    edge2 = c - a
    return edge1.cross(edge2).norm() * 0.5
|
412031297213a702f0579f8c86c23c93baaff8c8
| 696,250
|
import asyncio
def get_active_loop() -> asyncio.AbstractEventLoop:
    """Return the currently running asyncio loop, or create a new one.

    Returns:
        asyncio.AbstractEventLoop
    """
    # Use the public API instead of asyncio.events._get_running_loop():
    # the private helper may change between Python versions.
    try:
        return asyncio.get_running_loop()
    except RuntimeError:
        # No loop is running in this thread — hand back a fresh one.
        return asyncio.new_event_loop()
|
42c15961326d2a5a372237a8455fbe4f292dff80
| 696,251
|
def gen_datavalue_list(soup):
    """Create a list of all the datavalues on the BLS EAG webpage."""
    spans = soup.findAll('span', 'datavalue')
    return [span.text for span in spans]
|
8901511f0f65945c200b2ccc77d4cb011da41453
| 696,252
|
def bet_size_sigmoid(w_param, price_div):
    """
    Part of SNIPPET 10.4
    Calculates the bet size from the price divergence and a regulating coefficient.
    Based on a sigmoid function for a bet size algorithm.
    :param w_param: (float) Coefficient regulating the width of the bet size function.
    :param price_div: (float) Price divergence, forecast price - market price.
    :return: (float) The bet size.
    """
    # Sigmoid-shaped sizing: divergence scaled by (w + divergence^2)^(-1/2).
    scale = (w_param + price_div ** 2) ** (-0.5)
    return price_div * scale
|
cbc6c8d70f6f000e701f140ccbae34b55d7a46df
| 696,253
|
import re
def slugify(text):
    """
    Returns a slug of given text, normalizing unicode data for file-safe
    strings. Used for deciding where to write images to disk.
    Parameters
    ----------
    text : string
        The string to slugify
    Returns
    -------
    slug : string
        A normalized slug representation of the text
    .. seealso:: https://bit.ly/2NW7s1j
    """
    # Collapse every non-word run into a space, then hyphen-join the
    # lowercased words.
    words = re.sub(r"[^\w]+", " ", text).lower().strip().split()
    return "-".join(words)
|
8ac550ed32627a6c8a145b9442960e064ebd44e2
| 696,254
|
def flight_time_movies_1_brute_force(movie_lengths, flight_length):
    """
    Solution: Brute force — test every ordered pair of *distinct* movies
    for a sum equal to the flight length.
    Complexity:
        Time: O(n^2)
        Space: O(1)
    """
    if len(movie_lengths) < 2:
        raise ValueError('movie length list must be at least 2 items long')
    for first_idx, first_length in enumerate(movie_lengths):
        for second_idx, second_length in enumerate(movie_lengths):
            if first_idx == second_idx:
                continue  # a movie cannot be paired with itself
            if first_length + second_length == flight_length:
                return True
    return False
|
612825507cf1aea086bcfa37b702e6a778d85b7c
| 696,256
|
from datetime import datetime
def datetime_type(string):
    """ Validates UTC datetime. Examples of accepted forms:
    2017-12-31T01:11:59Z, 2017-12-31T01:11Z, 2017-12-31T01Z or 2017-12-31 """
    # Try each accepted format in order from most to least specific.
    for fmt in ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ', '%Y-%m-%dT%HZ', '%Y-%m-%d'):
        try:
            return datetime.strptime(string, fmt)
        except ValueError:
            pass
    raise ValueError("Input '{}' not valid. Valid example: 2017-02-11T23:59:59Z".format(string))
|
36acb1d83b38310c463b2376fa2284fb6e9ad73e
| 696,257
|
import re
def _remove_line_end_ellipsis_or_pass_keyword(line: str) -> str:
    """
    Remove ellipsis or pass keyword from end of line
    (e.g., `def sample_func(): ...` or `def sample_func(): pass`).

    Parameters
    ----------
    line : str
        Target line string.

    Returns
    -------
    result_line : str
        Line string with a trailing ' ...' or ' pass' removed, or the
        line unchanged when neither suffix is present.
    """
    # The original used re.sub(' ...$') / ' pass$' with unescaped dots —
    # '.' is a regex metacharacter, so the pattern only behaved correctly
    # because of the endswith() guard. Plain slicing is exact and cheaper.
    for suffix in (' ...', ' pass'):
        if line.endswith(suffix):
            return line[:-len(suffix)]
    return line
|
5ff12264670184737b2d9cc69f9fb2d8eca66cd9
| 696,258
|
import re
def parse_p0f_entry(entry):
    """Convert a p0f log entry into a dict of its key/value fields.

    Expected entry shape:
        [YYYY/MM/DD hh:mm:ss] key=val|key=val|...
    The 'cli' and 'srv' fields ("ip/port") are split into separate
    *_ip / *_port keys. Returns None when the line does not match.
    """
    # Validate the timestamp prefix and capture the payload after it.
    match = re.match(r'\[(\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2})\] (.+)', entry)
    if match is None:
        return None
    # NOTE: the original bound the timestamp to a dead local named
    # `datetime` and never used it; the group still validates the format.
    kv = {}
    for vpair in match.group(2).split('|'):
        vpair_s = vpair.split('=')
        kv[vpair_s[0]] = vpair_s[1]
    # Separate client / server IPs and ports.
    kv['cli_ip'], kv['cli_port'] = kv.pop('cli').split('/')
    kv['srv_ip'], kv['srv_port'] = kv.pop('srv').split('/')
    return kv
|
7f16d7bf7a42290ec76aa427fab678cdf02bd759
| 696,259
|
def check_dup(items_, items2_):
    """
    Return the items of items_ whose lowercased form does not occur in
    items2_ (case-insensitive), deduplicated while preserving first-seen
    order.
    """
    # Precompute the lowercase membership set once: O(n + m) instead of
    # the original O(n * m) nested scan. Output is unchanged.
    known_lower = {item2.lower() for item2 in items2_}
    result = {}
    for item in items_:
        if item.lower() not in known_lower:
            result[item] = item
    return list(result.keys())
|
34c8100a6e090404fee8615a9dd0e4c52f6c69a0
| 696,260
|
def get_count(input_str: str) -> int:
    """
    Count the lowercase vowels ('aeiou') in a word.

    :param input_str: word to inspect
    :return: number of lowercase vowels found
    """
    # Generator + sum avoids the original's dead `num_vowels = 0`
    # assignment and the throwaway intermediate list.
    return sum(1 for ch in input_str if ch in 'aeiou')
|
ae175e1dc24f655685d62d9ab955f8252427d14d
| 696,261
|
def i_to_r(i, L, dx):
    """Return coordinates of lattice indices in continuous space.

    Parameters
    ----------
    i: integer array, shape (a1, a2, ..., d)
        Integer indices, with last axis indexing the dimension.
        It's assumed that all components of the vector
        lie within plus or minus (L / dx) / 2.
    L: float
        Length of the lattice, assumed to be centred on the origin.
    dx: float
        Spatial lattice spacing.
        This means the number of lattice points is (L / dx).

    Returns
    -------
    r: float array, shape of i
        Coordinate vectors of the lattice points specified by the indices.
    """
    # Shift cell centres (i + 0.5) * dx so the lattice is centred on 0.
    origin_offset = -0.5 * L
    return origin_offset + (i + 0.5) * dx
|
3c9f6ecc87a5220d487b434873f63c04f9933720
| 696,262
|
def _process_for_token(request):
"""
Checks for tokens in formdata without prior knowledge of request method
For now, returns whether the userid and token formdata variables exist, and
the formdata variables in a hash. Perhaps an object is warranted?
"""
# retrieve the formdata variables
if request.method == 'GET':
formdata_vars = request.GET
else:
formdata_vars = request.form
formdata = {
'vars': formdata_vars,
'has_userid_and_token':
'userid' in formdata_vars and 'token' in formdata_vars}
return formdata
|
14088de395c977ce6a59da3384fa8d49b213e791
| 696,263
|
def evaluate_score(rate, level):
    """
    Fund scoring: score the gap between a fund's performance and its
    benchmark. With d = |rate - level| the magnitude is mapped as:

        d <= 1        -> 0   (on par with the benchmark)
        1 < d <= 5    -> 2
        5 < d <= 10   -> 3
        10 < d < 20   -> 4
        20 <= d < 40  -> 5
        d >= 40       -> 6

    The score is returned negated when the fund does not outperform
    (rate - level <= 0).

    :param rate: fund performance
    :param level: benchmark performance
    :return: signed score
    """
    diff = rate - level
    magnitude = abs(diff)
    if magnitude <= 1:
        result = 0
    elif magnitude <= 5:
        result = 2
    elif magnitude <= 10:
        result = 3
    elif magnitude < 20:
        result = 4
    elif magnitude < 40:
        result = 5
    else:
        result = 6
    return result if diff > 0 else -result
|
e3607f7a51f6fc4b68938257b6e793f281939b02
| 696,264
|
def make_response(error, message=None, image_base64=None):
    """
    Generates the ObjectCut JSON response.
    :param error: True if the response has to be flagged as error, False otherwise.
    :param message: Message to return if it is a error response.
    :param image_base64: Image result encoded in base64 if it is a success response.
    :return: ObjectCut JSON response.
    """
    if error:
        return {'error': error, 'message': message}
    return {'error': error, 'response': {'image_base64': image_base64}}
|
3be80141811fa493441a1ab964b4b6014a183dd1
| 696,265
|
def _cut(match):
    """
    Cut matched characters from the searched string.
    Join the remaining pieces with a space.
    """
    string = match.string
    start, end = match.span()
    # Matches touching either end leave a single remaining piece.
    if start == 0:
        return string[end:]
    if end == len(string):
        return string[:start]
    return string[:start] + ' ' + string[end:]
|
5a9b1ac7a4030b972d14b04c81acb343066b3f2b
| 696,266
|
def parse_metadata_words(language='english', quality='low'):
    """
    Identifies words corresponding to different metadata in the language

    Parameters:
    -----------------------------------
    language : str
        Name of the language whose testing data to fetch
    quality : str
        size of the dataset to consider

    Returns:
    -----------------------------------
    metadata_words : dict
        A dictionary with all the words grouped by metadata
    """
    metadata_words = {}
    filepath = "psynlp/data/{}-train-{}".format(language, quality)
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filepath, 'r') as file:
        for line in file:
            source, dest, metadata = line.split("\t")
            if "*" not in source and "*" not in dest:
                metadata = metadata.strip()
                # BUGFIX: the original initialised a new metadata key with an
                # empty list WITHOUT appending, silently dropping the first
                # (source, dest) pair of every metadata group.
                metadata_words.setdefault(metadata, []).append((source, dest))
    return metadata_words
|
7b8430c3e9c553167e5d710ef5e1d0ddaaeded00
| 696,267
|
def make_set(labels):
    """
    Create a new equivalence class and return its class label.

    labels[0] holds the running count of classes; slot labels[n] stores the
    label of class n, and a freshly created class is its own label.
    Assumes `labels` is an integer-typed numpy-style array — TODO confirm.
    """
    new_label = labels[0] + 1
    labels[0] = new_label
    labels[new_label] = new_label
    return labels[0].astype(int)
|
68ce42228c59259f341ce3b16fd1a9811a486c6b
| 696,268
|
import torch
def bitmap_precision_recall(output, target, threshold=0.5):
    """Compute precision and recall (in percent) of the thresholded output
    bitmap against a binary target.

    Returns (precision, precision_denominator, recall, recall_denominator);
    a zero denominator yields a 0.0 score and is replaced by 1e-6 in the
    returned tuple so downstream divisions stay safe.
    """
    with torch.no_grad():
        actual_pos = target == 1
        predicted_pos = output > threshold
        tp = (predicted_pos & actual_pos).float().sum().item()
        fn = (torch.logical_not(predicted_pos) & actual_pos).float().sum().item()
        fp = (predicted_pos & torch.logical_not(actual_pos)).float().sum().item()
        precision_div = tp + fp
        precision = tp / precision_div * 100. if precision_div != 0 else 0.
        if precision_div == 0:  # prevent div 0 for callers using the denominator
            precision_div = 1e-6
        recall_div = tp + fn
        recall = tp / recall_div * 100. if recall_div != 0 else 0.
        if recall_div == 0:  # prevent div 0 for callers using the denominator
            recall_div = 1e-6
        return precision, precision_div, recall, recall_div
|
cee0d3381bcb45c450832923dba274f7cb39164d
| 696,269
|
def device(request):
    """
    This fixture will be called once to return the existing Device instance
    that was setup in the session start hook function.
    """
    # NOTE(review): the Device is read off pytest's config object via the
    # `_nrfu` attribute — presumably stashed there by a session-start hook
    # in conftest.py; confirm where `_nrfu` is assigned.
    return request.config._nrfu.device
|
6b7da9f5d3b62e21eff3509e539ba60713dc629b
| 696,270
|
from typing import List
def split_list(lst: List[str], wanted_parts: int = 1) -> List[List[str]]:
    """ Splits a list into 'wanted_parts' contiguous sublists of near-equal size.

    Args:
        lst: List to be split into smaller parts
        wanted_parts: Number of sublists to produce

    Returns:
        A list of 'wanted_parts' sublists covering lst in order
    """
    total = len(lst)
    parts = []
    for part_index in range(wanted_parts):
        # Integer arithmetic distributes any remainder across the parts.
        lo = part_index * total // wanted_parts
        hi = (part_index + 1) * total // wanted_parts
        parts.append(lst[lo:hi])
    return parts
|
204711aa6b8c14c54673182e6d89addd8ccdc857
| 696,271
|
def merge_new_data(data, key, value):
    """Merge `value` into `data` under a dotted two-part key.

    Example: key 'cpu.utilization', value {'avg': 1.3} updates
    data['cpu']['utilization'] in place; intermediate dicts are created
    as needed. Returns the (mutated) data dict.
    """
    parts = key.split('.')
    outer_key, inner_key = parts[0], parts[1]  # e.g. 'cpu', 'utilization'
    inner_map = data.setdefault(outer_key, {})
    leaf = inner_map.setdefault(inner_key, {})
    leaf.update(value)
    return data
|
83bf41250d486e7de99ce7d78ddabdf3857850cc
| 696,272
|
import os
def get_markdown_file_list(directory: str) -> list:
    """Recursively collect the paths of all '.md' files under directory
    (subdirectories included)."""
    markdown_paths = []
    for root, _dirs, filenames in os.walk(directory):
        markdown_paths.extend(
            os.path.join(root, filename)
            for filename in filenames
            if os.path.splitext(filename)[1] == '.md'
        )
    return markdown_paths
|
cbb247228286b73cc8dc9eb6dd0d374030a2f063
| 696,273
|
def gamma_to_tau_hard_threshold(gamma):
    """Convert a gamma threshold into the equivalent tau for hard
    thresholding: tau = gamma^2 / 2."""
    return (gamma * gamma) / 2.0
|
4034918244477182cd3d7a14b97710c0731e03e3
| 696,274
|
def supervisor_client(client, supervisor):
    """A client with a supervisor logged in"""
    # Authenticate the supervisor user on the test client (force_login is
    # the Django test-client pattern — presumably these fixtures come from
    # a Django/pytest-django setup; confirm in conftest), then hand the
    # logged-in client to the test.
    client.force_login(supervisor)
    return client
|
2991812b91eb04e7ec58d3ecf0cb8f89bc5f41fe
| 696,275
|
def build_sample_map(flowcell):
    """Build sample map ``dict`` for the given flowcell.

    Each distinct library name is assigned a label "S1", "S2", ... in the
    order of first appearance when (lane, name) pairs are sorted.
    """
    lane_name_pairs = {
        (lane, library["name"])
        for library in flowcell["libraries"]
        for lane in library["lanes"]
    }
    sample_map = {}
    for _lane, name in sorted(lane_name_pairs):
        # Only the first occurrence of a name claims the next sample number.
        sample_map.setdefault(name, "S{}".format(len(sample_map) + 1))
    return sample_map
|
faf43ca65146093462ae26a9c18ebb238e23a7ff
| 696,276
|
def get_reshaped_data(dataloader):
    """Reshape one full batch from a HistoneDataset loader into the input
    format expected by the DeepHistone model.

    :param dataloader: HistoneDataset wrapped by torch.utils.data.DataLoader;
        yields ((histone, seq), y, gene_names)
    :return: (x_histone, x_seq, y, gene_names) with a singleton channel axis
        inserted and the sequence feature/bin axes swapped via reshape
    """
    # Pulls the entire dataset in one batch — slow, as in the original.
    (x_histone, x_seq), y, gene_name = next(iter(dataloader))
    n_genes, n_features_histone, n_bins_histone = x_histone.shape
    _, n_bins_seq, n_features_seq = x_seq.shape
    x_histone = x_histone.reshape(n_genes, 1, n_features_histone, n_bins_histone)
    x_seq = x_seq.reshape(n_genes, 1, n_features_seq, n_bins_seq)
    y = y.reshape(n_genes, 1, 1)
    return x_histone, x_seq, y, list(gene_name)
|
750c302898e423cf62002a1050e05171eafda26c
| 696,277
|
def get_scan_dir_and_rsfwd(voc, ascending, r_diff, ncompliance):
    """Determine the scan direction and forward-bias series resistance of a sweep.

    Scan direction is inferred from the sweep direction and the sign of Voc
    (this may fail for truly dark I-V sweeps without a Voc): the sweep is
    "fwd" exactly when `ascending` disagrees with `voc >= 0`.

    Parameters
    ----------
    voc : float or int
        Open-circuit voltage (a str marks it unavailable -> ("NA", "nan")).
    ascending : bool
        Flag for ascending sweep voltage.
    r_diff : numpy.array
        Differential resistance array.
    ncompliance : list
        Data points not in compliance.
    """
    if type(voc) is str:
        return "NA", "nan"
    # Non-negative differential resistances among in-compliance points.
    r_nonneg = r_diff[ncompliance][r_diff[ncompliance] >= 0]
    # Original boolean reduces to XOR: (asc and voc<0) or (not asc and voc>=0).
    if ascending != (voc >= 0):
        return "fwd", r_nonneg[1]
    return "rev", r_nonneg[-2]
|
9310397abd12ee7f3a176e22faea1c1c8d097994
| 696,278
|
def 더하기(a, b):
    """Returns the sum of a and b

    >>> 더하기(1,2)
    3
    """
    # BUGFIX: the original returned the constant 0, contradicting both the
    # docstring and its own doctest.
    return a + b
|
d528db54471b81050ccefe303ef2847dbc449618
| 696,279
|
def indent(text, char=' ', indent=2):
    """
    Indent single- or multi-lined text.

    text   -- text to indent
    char   -- character to use in indenting
    indent -- repeats of char prepended to every line
    """
    prefix = char * indent
    lines = text.split("\n")
    return "\n".join(prefix + line for line in lines)
|
f170745f99a2bb151e79c2f468cf23880d60b3e5
| 696,281
|
def first_element_or_none(element_list):
    """
    Return the first element of an lxml selector result, or None when the
    result is empty (or otherwise falsy).

    :param element_list: lxml selector result
    :return: first element or None
    """
    if not element_list:
        return None
    return element_list[0]
|
df9c3437f38a50db96f0f4f946ede41916e5e2cf
| 696,282
|
def is_public(data, action):
    """Check if the record is fully public.

    In practice this means the record has no ``_access`` key, or the given
    action is missing from (or empty in) its ``_access`` mapping.
    """
    if "_access" not in data:
        return True
    return not data["_access"].get(action)
|
2c5c80f8e16014f08df2cc34696cea8249e633b1
| 696,283
|
def __slice_scov__(cov, dep, given):
    """
    Slice a covariance matrix down to the covariances among the independent
    variables only (the dependent variable is always excluded).

    :param cov: Covariance matrix.
    :param dep: Index of dependent variable.
    :param given: Array of indices of independent variables.
    :return: A |given| x |given| covariance sub-matrix.
    """
    keep_rows = [idx for idx in range(cov.shape[0]) if idx in given and idx != dep]
    keep_cols = [idx for idx in range(cov.shape[1]) if idx in given and idx != dep]
    return cov[keep_rows, :][:, keep_cols]
|
a5742c8dc4db245521477b0bf8c6ad8f3b463a2b
| 696,286
|
def forward_pass(output_node, sorted_nodes):
    """
    Perform a forward pass: call forward() on every node in the given
    (topologically sorted) list, then return the output node's value.

    :param output_node: node whose .value is returned
    :param sorted_nodes: nodes in evaluation order
    """
    for node in sorted_nodes:
        node.forward()
    return output_node.value
|
7783878c99db24165141c61106ec4435f02e87a9
| 696,287
|
def _val(var, is_percent=False):
    """
    Coerce a watermark parameter to its numeric value.

    With is_percent, a string like '50%' becomes the float 0.5 (an integer
    percentage mapped into [0, 1]); otherwise the value is cast to int.
    Raises ValueError with the offending value when parsing fails.
    """
    try:
        if is_percent:
            return float(int(var.strip('%')) / 100.0)
        return int(var)
    except ValueError:
        raise ValueError('invalid watermark parameter: ' + var)
|
b1525cd11d1fd385720f223f34cc5b5580410247
| 696,288
|
import re
def check_project_name(name: str):
    """Validate a project name (example: flask_demo): a letter followed by
    one or more word characters, nothing else.

    :param name: candidate project name
    :return: True if valid, False otherwise
    """
    # BUGFIX: the original class [a-zA-z] is a character RANGE that also
    # matches '[', '\\', ']', '^', '_' and '`' (e.g. '_abc' passed).
    # fullmatch replaces the match-then-compare-group idiom.
    return bool(re.fullmatch(r'[a-zA-Z]\w+', name))
|
43cc39d4338c55c45dff27f120535ba855c14785
| 696,289
|
def blend_html_colour_to_white(html_colour, alpha):
    """
    Alpha-blend an HTML colour onto a white background.

    :param html_colour: Colour string like FF552B or #334455
    :param alpha: Alpha value
    :return: Html colour alpha blended onto white (leading '#' preserved)
    """
    html_colour = html_colour.upper()
    has_hash = html_colour.startswith('#')
    hex_part = html_colour[1:] if has_hash else html_colour
    blended = []
    for offset in (0, 2, 4):  # R, G, B channel positions
        channel = int(hex_part[offset:offset + 2], 16)
        blended.append(int(alpha * channel + (1 - alpha) * 255))
    result = '{:02X}{:02X}{:02X}'.format(*blended)
    return '#' + result if has_hash else result
|
a65efec4ca165614b93d1e0654907ad14d087211
| 696,290
|
import random
import string
def rnd_string(n=10):
    """Generate a random string of n characters drawn from uppercase
    ASCII letters and digits, using the OS CSPRNG."""
    rng = random.SystemRandom()
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(n))
|
1869afa5e950c24d75a446c54b9f53abd942c007
| 696,291
|
def generate_data_for_plot_distribution(x_data, y_data):
    """
    Group numeric values by their category label.

    :param x_data: list of category labels
    :param y_data: list of numeric values, parallel to x_data
    :return: dict mapping each category to the list of its values
    """
    grouped = {}
    for category, value in zip(x_data, y_data):
        grouped.setdefault(category, []).append(value)
    return grouped
|
b9e6851be47378ddcc2f5009fe20274080ef18f1
| 696,292
|
import subprocess
import os
import random
import string
def make_in_filename(cesar_in):
    """Make filename for the CESAR input.

    Returns (path, is_temp). If the user supplied cesar_in, that path is
    returned with is_temp=False. Otherwise a random 6-character .fa path
    inside a per-user directory under /dev/shm (falling back to /tmp when
    /dev/shm is absent, e.g. on macOS) is returned with is_temp=True.
    """
    if cesar_in:  # defined by the user -> not a temp file
        return cesar_in, False
    whoami = subprocess.check_output("whoami", shell=True).decode("utf-8")[:-1]
    try:  # on MacOS one cannot access /dev/shm
        temp_dir = f"/dev/shm/{whoami}"
        # makedirs(exist_ok=True) avoids the original's isdir/mkdir race
        os.makedirs(temp_dir, exist_ok=True)
    except FileNotFoundError:  # /dev/shm not found
        temp_dir = f"/tmp/{whoami}"
        os.makedirs(temp_dir, exist_ok=True)
    filename = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + ".fa"
    cesar_in_path = os.path.join(temp_dir, filename)
    # mark that it is a temp file with True
    return cesar_in_path, True
|
cbe138787fbfb9f488c96c19361c3bcec7231bf1
| 696,293
|
from dill import pickles, copy
import sys
import sys
def errors(obj, depth=0, exact=False, safe=False):
    """get errors for objects that fail to pickle

    With depth == 0 (the default): attempt a pickle round-trip of obj via
    dill's copy() and return None on success, or the raised exception on
    failure (including the assertion failures below when `exact` checks
    are requested).
    With non-zero depth: return a dict mapping each attribute name of obj
    to its pickling error — attributes that cannot even be read map to the
    getattr exception; attributes that fail dill.pickles map to a
    recursive errors() result.

    :param obj: object to inspect
    :param depth: recursion depth for per-attribute inspection
    :param exact: also require the unpickled copy to compare equal to obj
    :param safe: passed through to dill.pickles
    """
    if not depth:
        try:
            pik = copy(obj)
            if exact:
                assert pik == obj, \
                    "Unpickling produces %s instead of %s" % (pik,obj)
            assert type(pik) == type(obj), \
                "Unpickling produces %s instead of %s" % (type(pik),type(obj))
            return None
        except Exception:
            # Surface whichever failure occurred (pickling, equality, type).
            return sys.exc_info()[1]
    _dict = {}
    for attr in dir(obj):
        try:
            _attr = getattr(obj,attr)
        except Exception:
            # Reading the attribute itself failed; record that error.
            _dict[attr] = sys.exc_info()[1]
            continue
        if not pickles(_attr,exact,safe):
            # NOTE(review): depth-1 can go negative, which is truthy, so
            # recursion below depth 1 keeps taking this branch instead of
            # the round-trip check — confirm this is intended.
            _dict[attr] = errors(_attr,depth-1,exact,safe)
    return _dict
|
f57ced20947d9f9ed446e2a139a4d07fd2b54c1a
| 696,294
|
def replace_digraphs(word_):
"""Return the given word processed for orthographic changes.
Parameters
----------
word_ : str
Returns
-------
str
"""
word_ = word_.lower()
word_ = word_.replace('ch', 'S')
word_ = word_.replace('lh', 'L')
word_ = word_.replace('nh', 'N')
word_ = word_.replace('ss', 's')
word_ = word_.replace('rr', 'R')
return word_
|
991278e31d9beecd8d249af7d12f4e01ec87911a
| 696,295
|
import itertools
def chain_generators(*sprite_generators):
    """Chain generators by concatenating output sprite sequences.

    Essentially an 'AND' operation over sprite generators: the returned
    callable invokes every generator with the same arguments and returns
    the concatenation of their sprite lists. Useful when each mode of a
    multimodal sprite distribution must contribute a non-zero number of
    sprites (factor_distributions.Mixture covers the weighted case).

    Args:
        *sprite_generators: Callable sprite generators.

    Returns:
        _generate: Callable returning a list of sprites.
    """
    def _generate(*args, **kwargs):
        combined = []
        for generator in sprite_generators:
            combined.extend(generator(*args, **kwargs))
        return combined
    return _generate
|
a6f57b44957807429a1cb8f28e62a571a2cdeb0d
| 696,296
|
def checkSlashes(item='', sl_char='/'):
    """\
    This function will make sure that a URI begins with a slash and does not end
    with a slash.

    item - the uri to be checked
    sl_char - the character to be considered a 'slash' for the purposes of this
    function
    """
    if not item.startswith(sl_char):
        item = sl_char + item
    # BUGFIX: the trailing strip was hardcoded to '/', ignoring sl_char
    # despite the docstring saying sl_char defines the slash character.
    return item.rstrip(sl_char)
|
1ebca2ea2e43d1b795350ee3df893e42218c475f
| 696,297
|
from typing import Any
from typing import Iterable
from typing import Optional
def find_in_list(element: Any, container: Iterable[Any]) -> Optional[Any]:
    """
    If the element is in the container, return it.
    Otherwise, return None

    :param element: to find
    :param container: container with element
    :return: element or None
    """
    # A generator stops at the first hit; the original materialized the
    # full list of matches before taking the first element.
    return next((item for item in container if item == element), None)
|
4fd775e93472f466b90eb0c2ee4fda6aa6ead69e
| 696,298
|
import collections
def _get_tasks_by_domain(tasks):
    """Return a dict mapping each domain name to a tuple of its task names."""
    grouped = collections.defaultdict(list)
    for domain_name, task_name in tasks:
        grouped[domain_name].append(task_name)
    return {domain: tuple(names) for domain, names in grouped.items()}
|
53483e8ddf28490b6c00a4d456b77b48de1aeb7d
| 696,299
|
def splitFrom(df, attr, val):
    """
    Split a DataFrame into two subsets at a threshold value of one column.

    :param df: DataFrame to split
    :param attr: column on which to split
    :param val: threshold value of attr
    :return: (rows with attr < val, rows with attr >= val)
    :raises ValueError: if attr is not a column of df
    """
    if attr not in df.columns:
        raise ValueError("******* "+attr+" not in DataFrame *******")
    # Two independent masks (not a negation) so NaN rows appear in neither
    # subset, matching pandas comparison semantics.
    below = df[attr] < val
    at_or_above = df[attr] >= val
    return df.loc[below], df.loc[at_or_above]
|
e2c4f0c03d4b15ec2915e22bb905d4202cdf66cb
| 696,300
|
import sys
def check_env(env):
    """
    Ensures all key and values are strings on windows.
    """
    # Windows environment mappings require str keys and values; other
    # platforms pass through untouched.
    if sys.platform == "win32":
        env = {str(key): str(value) for key, value in env.items()}
    return env
|
37a311fad6f84a1fbb239911d10661794db00041
| 696,301
|
import sys
import os
def argparse(argv):
    """Parse commandline arguments.

    Expects the input file path as argv[1]. Prints a usage line and exits
    with status 2 when the argument is missing; exits with status 2 when
    the file does not exist; otherwise returns the path.
    """
    try:
        inputfile = str(argv[1])
    except IndexError:
        # Was a bare `except:` — only a missing argument is expected here;
        # anything else should propagate rather than be silenced.
        error = argv[0] + " <inputfile>"
        print(error)
        sys.exit(2)
    if os.path.isfile(inputfile):
        print('Input file is', inputfile)
        return inputfile
    else:
        print("Inputfile does not exist")
        sys.exit(2)
|
bb27082b32df1abcba938bc031a721a4f9a11916
| 696,302
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.