content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import numpy as np
def level_correction(img):
    """Remove a best-fit plane (linear trend) from a 2-D image.

    Fits z = theta0 + theta1*row + theta2*col by least squares and
    subtracts the fitted plane.

    Parameters
    ----------
    img : numpy.ndarray
        2-D input image.

    Returns
    -------
    numpy.ndarray
        Level-corrected image, same shape as ``img``.
    """
    m, n = img.shape
    assert m >= 2 and n >= 2
    rows, cols = np.mgrid[:m, :n]
    # Design matrix: [1, row, col] for every pixel.
    X = np.column_stack((np.ones(m * n), rows.ravel(), cols.ravel()))
    y = img.reshape(m * n)
    # lstsq is numerically better-conditioned than the original
    # pinv(X'X) X' y normal-equations formulation.
    theta, *_ = np.linalg.lstsq(X, y, rcond=None)
    plane = (X @ theta).reshape(m, n)
    return img - plane
import inspect
def steal_signature_from(original_func, *, steal_docstring=True):
    # noinspection PyUnresolvedReferences
    """
    Build a decorator that makes the decorated function advertise the
    signature of ``original_func`` — useful when writing decorators that
    should look like the function they wrap.

    Example usage::
        >>> def foo(a, b, c, d):
        ...     ...
        >>> @steal_signature_from(foo)
        ... def bar(*args, **kwargs):
        ...     return foo(*args, **kwargs)
        >>> inspect.signature(bar)
        (a, b, c, d)

    Parameters:
        original_func:
            The function whose signature (and optionally docstring) is copied.
        steal_docstring:
            Defaults to True. When set, original_func's docstring is appended
            to the decorated function's own docstring.
    See:
        :func:`steal_docstring_from`
    """
    def decorator(new_func):
        """Apply original_func's signature (and optionally docstring) to new_func."""
        new_func.__signature__ = inspect.signature(original_func)
        if steal_docstring:
            own_doc = new_func.__doc__ or ""
            stolen_doc = inspect.getdoc(original_func) or ""
            new_func.__doc__ = "\n\n".join([own_doc, stolen_doc]).lstrip()
        return new_func
    return decorator
def rename_header(df):
    """Drop the "Type" column (in place) and rename the header.

    Chr -> chr, Pos -> pos, Chr_Allele -> ref, Alternative_Allele -> alt

    Args:
        df (pandas.DataFrame): input frame; its "Type" column is removed
            in place as a side effect.

    Returns:
        pandas.DataFrame: new frame with renamed columns.
    """
    df.drop("Type", axis=1, inplace=True)
    mapping = {
        "Chr": "chr",
        "Pos": "pos",
        "Chr_Allele": "ref",
        "Alternative_Allele": "alt",
    }
    return df.rename(columns=mapping)
def iterate_pagerank(corpus, damping_factor):
    """
    Return PageRank values for each page by iteratively updating
    PageRank values until convergence (change below 0.001 per page).

    Args:
        corpus: dict mapping each page name to the set of pages it links to.
        damping_factor: probability of following a link rather than
            jumping to a random page.

    Returns:
        dict mapping page name to its PageRank value.
    """
    n = len(corpus)
    # Start with a uniform distribution over all pages.
    page_rank = {page: 1 / n for page in corpus}
    random_jump = (1 - damping_factor) / n
    while True:
        converged = True
        # In-place (Gauss-Seidel style) sweep, as in the original.
        for page in corpus:
            link_total = 0.0
            for other, links in corpus.items():
                if other == page:
                    continue  # the page's own links to itself are ignored
                if page in links:
                    link_total += page_rank[other] / len(links)
                elif not links:
                    # A page with no links is treated as linking to every page.
                    link_total += page_rank[other] / n
            new_rank = random_jump + damping_factor * link_total
            # Bug fix: the original compared the *signed* change, so a large
            # negative step wrongly counted as converged.
            if abs(new_rank - page_rank[page]) >= 0.001:
                converged = False
            page_rank[page] = new_rank
        if converged:
            return page_rank
def _get_centre(coordinates):
    """
    Return the map centre as ``{"lat": ..., "lon": ...}``, computed as the
    midpoint of the bounding box of the given lat/lon coordinates.

    NOTE(review): assumes an xarray-like object with a "locs" dimension and
    a "coordinates" axis holding "lat"/"lon" labels — confirm with callers.
    """
    lo = coordinates.min(dim="locs")
    hi = coordinates.max(dim="locs")
    midpoint = (hi + lo) / 2
    return {
        "lat": midpoint.loc[dict(coordinates="lat")].item(),
        "lon": midpoint.loc[dict(coordinates="lon")].item(),
    }
from typing import Dict
from typing import Any
from typing import Optional
def __get(data: Dict[str, Any], key: str, src: Optional[str] = None) -> Any:
    """
    Get a value from a dictionary; if the key does not exist, raise a
    ValueError identifying the missing key and the configuration section
    in which it was expected.

    :param data: The dictionary from which to get the value.
    :param key: The key name.
    :param src: The configuration section associated with the dictionary.
        ``None`` is reported as "Configuration"; an empty string omits the
        section from the message.
    :raises ValueError: If ``key`` is not present in ``data``.
    """
    try:
        return data[key]
    except KeyError:
        if src is None:
            src = 'Configuration'
        # "from None" suppresses the internal KeyError so callers see a
        # clean, descriptive ValueError instead of a chained traceback.
        if src:
            raise ValueError('{}: "{}" is missing'.format(src, key)) from None
        raise ValueError('"{}" is missing'.format(key)) from None
import re
import yaml
def load_recipe_yaml_str_no_classes(recipe_yaml_str: str) -> str:
    """
    :param recipe_yaml_str: YAML string of a SparseML recipe
    :return: recipe loaded into YAML with all objects replaced
        as a dictionary of their parameters
    """
    # Rewrite "!ClassName" object tags (without a dotted path) into plain
    # "OBJECT.ClassName:" mapping keys so yaml.safe_load can parse them.
    class_tag = re.compile(r"!(?P<class_name>(?!.*\.)[a-zA-Z_][a-zA-Z^._0-9]+)")
    sanitized = class_tag.sub(r"OBJECT.\g<class_name>:", recipe_yaml_str)
    return yaml.safe_load(sanitized)
def sublist(lst, stopper):
    """Collect strings from ``lst`` until one starts with ``stopper``.

    The stopper item itself is not included in the result.
    """
    gathered = []
    for entry in lst:
        if entry.startswith(stopper):
            return gathered
        gathered.append(entry)
    return gathered
def month_diff(d1, d2):
    """Number of months from d1 to d2, spanning year boundaries.

    Works with any objects exposing ``year`` and ``month`` attributes
    (e.g. ``datetime.date``). Examples::

        month_diff(Date('2018-01-02'), Date('2018-12-01'))  -> 11
        month_diff(Date('2018-12-01'), Date('2019-01-01'))  -> 1
        month_diff(Date('2018-12-12'), Date('2019-12-11'))  -> 12
    """
    return (d2.year - d1.year) * 12 + (d2.month - d1.month)
def user_is_resource_reviewer(user):
    """
    Single test for whether a user is in the Resource Reviewer group
    """
    reviewer_groups = user.groups.filter(name='Resource Reviewer')
    return reviewer_groups.exists()
import csv
import xxhash
def csv_index(key, csvf, delimiter=';'):
    """Index csv records by ``key``, mapping key value -> xxhash64 of the row.

    NOTE(review): hashing ``str(row)`` ties the digest to the dict repr,
    which is stable within a Python version but not a portable format.
    """
    index = {}
    for row in csv.DictReader(csvf, delimiter=delimiter):
        index[row[key]] = xxhash.xxh64(str(row)).hexdigest()
    return index
def collatz_sequence(initial_word, deletion_number=2, production_rules=None):
    """Compute a tag-system (Collatz-like) word sequence.

    While the word has at least ``deletion_number`` symbols, the next word
    is formed by:

    1) deleting the first ``deletion_number`` symbols, and
    2) appending the production word P(x) for the original first symbol x.

    Args:
        initial_word (str): The starting word.
        deletion_number (int): Positive integer controlling deletion and halting.
        production_rules (dict): Maps symbols to production words.
            Defaults to {'a': 'bc', 'b': 'a', 'c': 'aaa'}.

    Returns:
        list[str]: All words produced, starting with ``initial_word``.
    """
    # Avoid the original's mutable default argument; None means the
    # classic rule set.
    if production_rules is None:
        production_rules = {'a': 'bc', 'b': 'a', 'c': 'aaa'}
    word = initial_word
    sequence = [word]
    while len(word) >= deletion_number:
        word = word[deletion_number:] + production_rules[word[0]]
        sequence.append(word)
    return sequence
import shelve
import os
def shelve_load(file_name: str, *args):
    """Load every object stored in a shelve into a plain dict.

    Parameters
    ----------
    file_name: Name of one of the files belonging to the shelve; its
        extension is stripped before opening.
    *args: Deprecated; ignored.
    """
    base_name = os.path.splitext(file_name)[0]
    with shelve.open(base_name) as db:
        return {key: value for key, value in db.items()}
import difflib
def diff(before, after):
    """Return the unified diff between two texts as one string."""
    delta = difflib.unified_diff(
        before.splitlines(True),
        after.splitlines(True),
        lineterm='\n',
    )
    return ''.join(delta)
def known_letters_by_pos(words: list, letters: list) -> list:
    """Keep only the words matching every fixed letter position.

    ``letters`` gives a per-position constraint where "@" means "any
    letter". If every position is "@", ``words`` is returned unchanged.
    """
    # No constraints at all: nothing to filter.
    if all(slot == "@" for slot in letters):
        return words
    matches = []
    for word in words:
        keep = True
        for i, ch in enumerate(word):
            if letters[i] != "@" and ch != letters[i]:
                keep = False
                break
        if keep:
            matches.append(word)
    return matches
def check_data(data_converter, data):
    """Convert reaktor response data, handling void results and collections.

    ``None`` (a ``void`` reaktor result) maps to ``None``. A list is
    treated as a collection and each item is converted with
    ``data_converter``; anything else is converted as a single item.
    The converter still runs on "empty" results because the backend omits
    empty/null attributes for bandwidth efficiency.
    """
    if data is None:
        return None
    if isinstance(data, list):
        return [data_converter(item) for item in data]
    return data_converter(data)
from datetime import datetime
def genereate_model_name():
    """
    Generate a filename for a trained model, e.g. "model_12-24-2019_17-00.h5".

    Uses "-" instead of ":" in the time part because ":" is not a legal
    filename character on Windows.

    Returns
    -------
    str
        Model filename.
    """
    timestamp = datetime.now().strftime("%m-%d-%Y_%H-%M")
    return "model_" + timestamp + ".h5"
def cols_to_drop(df, columns):
    """Drop the given columns from ``df`` (in place) and return it.

    Args:
        df: Pandas DataFrame; mutated in place.
        columns: List of column labels to drop.

    Returns:
        The same DataFrame without the dropped columns.
    """
    # One vectorised drop instead of the original per-column loop.
    df.drop(columns=columns, inplace=True)
    return df
import hashlib
def hash(s):
    """
    Return the SHA-1 hex digest of a value (wrapped here so the hashing
    scheme can be swapped in one place).

    :param s: bytes, or str (encoded as UTF-8 before hashing; the
        original required bytes and crashed on str input).
    :return: hex digest string.
    """
    if isinstance(s, str):
        s = s.encode('utf-8')
    digest = hashlib.sha1()
    digest.update(s)
    return digest.hexdigest()
def listMean(a):
    """
    Arithmetic mean of a list of numbers.
    """
    total = sum(a)
    count = len(a)
    return total / float(count)
def flatten(list_of_lists):
    """Flatten one level of nesting.

    ``[[1, 2, 3], [4, 5]]`` becomes ``[1, 2, 3, 4, 5]``.

    :param list_of_lists: List to be flattened
    :type list_of_lists: Python List of Lists
    :return: The list flattened
    :rtype: Python List
    """
    flat = []
    for sub in list_of_lists:
        flat.extend(sub)
    return flat
import base64
def base64_encode(message: str) -> str:
    """Encode a string as base64.

    The message is encoded as UTF-8 first (the original used ASCII and
    raised on non-ASCII input); the result is unchanged for ASCII text.
    """
    message_bytes = message.encode('utf-8')
    base64_bytes = base64.b64encode(message_bytes)
    return base64_bytes.decode('ascii')
def split_into_page_and_sent_id(x, separator="_"):
    """
    Convert an evidence id "pagename_linenum" into the official fever
    scorer format ``[pagename, linenum]``.

    :param x: evidence id in format pagename_linenum
    :return: evidence id in format [pagename, linenum]
    """
    page, sent_id = x.rsplit(separator, 1)
    sent_id = sent_id.strip("\"")
    return [page, int(sent_id)]
import tempfile
def new_der_tempfile(der_data):
    """Create a NamedTemporaryFile holding ``der_data``, rewound to offset 0."""
    der_file = tempfile.NamedTemporaryFile()
    der_file.write(der_data)
    # seek(0) flushes the write buffer and leaves the file ready to read.
    der_file.seek(0)
    return der_file
import os
def convertFilePath(path):
    """Convert a Windows path to its `cygpath`_ equivalent.

    On non-Windows systems the path is returned unchanged.

    :param path: (String) path to be changed
    :returns: (String) cygpath-style path

    .. _cygpath: https://cygwin.com/cygwin-ug-net/cygpath.html
    """
    if os.name != 'nt':
        return path
    # Bug fix: check_output was used without ever being imported.
    from subprocess import check_output
    if os.path.isabs(path):
        converted = check_output(['cygpath', path])
    else:
        # NOTE(review): ``base_dir`` is expected to be a module-level
        # variable defined elsewhere in this file — confirm.
        converted = check_output(['cygpath', os.path.join(base_dir, path)])
    return converted.decode().strip('\n')
def extract_data(results, party):
    """Extract the data we want from the results.

    For each municipality ("kommune") and polling district ("stemmekrets"),
    find the row with the highest support percentage; if that winning row
    belongs to ``party``, record its details under area[kommune][krets].

    NOTE(review): ``results`` is assumed to be a pandas DataFrame with
    Norwegian election columns (Kommunenummer, Stemmekretsnummer,
    'Oppslutning prosentvis', Partinavn, Stemmekretsnavn, Kommunenavn)
    — confirm against callers.
    """
    area = {}
    # All distinct municipality numbers present in the results.
    alle_kommuner = [
        i for i in results.groupby(['Kommunenummer']).groups.keys()
    ]
    for kommune in alle_kommuner:
        # Rows for this municipality, grouped by polling district.
        kommune_data = results[results['Kommunenummer'] == kommune].groupby(
            ['Stemmekretsnummer']
        )
        for name, group in kommune_data:
            # Index of the row with the highest support in this district.
            idx = group['Oppslutning prosentvis'].idxmax()
            # NOTE(review): idxmax returns an index *label* while iloc is
            # positional — correct only if results has a default
            # RangeIndex. Confirm before reusing on re-indexed frames.
            maxi = results.iloc[idx, :]
            if maxi['Partinavn'] == party:
                if kommune not in area:
                    area[kommune] = {}
                area[kommune][name] = {
                    'partinavn': maxi['Partinavn'],
                    'oppslutning': maxi['Oppslutning prosentvis'],
                    'krets': maxi['Stemmekretsnavn'],
                    'kommune_navn': maxi['Kommunenavn']
                }
    return area
def dec_datestamp(datestamp):
    """Decode a 5-character datestamp made by ``makestamp``.

    Returns the tuple ``(daynumber, timestamp)``. Either part may be
    ``None``; otherwise daynumber is an integer between 1 and 16777215 and
    timestamp is ``(HOUR, MINUTES)``. The function 'counttodate' in
    dateutils will turn a daynumber back into a date.
    """
    day_chars = datestamp[:3]
    time_chars = datestamp[3:]
    # Three base-256 digits, most significant first; 0 encodes "no day".
    daynumber = (ord(day_chars[0]) << 16) + (ord(day_chars[1]) << 8) + ord(day_chars[2])
    if daynumber == 0:
        daynumber = None
    # An hour byte of 255 marks "no timestamp".
    if ord(time_chars[0]) == 255:
        timestamp = None
    else:
        timestamp = (ord(time_chars[0]), ord(time_chars[1]))
    return daynumber, timestamp
def calc_gas_vol_factor(z_value=1.0, temp=193, pressure=500):
    """Gas formation volume factor Bg in rcf/scf.

    Computed from the gas compressibility factor z (0.25 up to 1.1), the
    reservoir temperature (F) and the reservoir pressure (psia).
    """
    # Convert Fahrenheit to Rankine.
    temp_rankin = temp + 459.67
    return 0.0282793 * ((z_value * temp_rankin) / pressure)
def get_autocomplete_location_query(qs, q):
    """Prefer an ``istartswith`` name match; fall back to ``icontains``."""
    prefix_matches = qs.filter(name__istartswith=q)
    if prefix_matches.exists():
        return prefix_matches
    return qs.filter(name__icontains=q)
import math
def number_of_nodes(numberofsimulations, numberofexperiments):
    """(int, int) --> int

    Number of nodes to use for a given experiment, assuming a 12-core
    node handles roughly 5000 simulation*experiment units in ~4 hours.
    """
    per_node = 5000.0  # educated guess of capacity per node
    total = numberofsimulations * numberofexperiments
    return int(math.ceil(total / per_node))
import random
import math
def rational_sol(*args):
    """
    Return rational solutions of the linear Diophantine equation ax + by = c.

    Parameters
    ----------
    *args : tuple
        Three required values a, b, c and optionally a fourth value n,
        the number of solutions wanted.

    Returns
    -------
    list
        Without n: one solution ``[x, y]``.
        With n: a list of n solutions ``[[x, y], ...]``.
        ``[]`` when both a and b are zero (no x/y dependence).

    Raises
    ------
    NotImplementedError
        If fewer than 3 or more than 4 arguments are given.
    """
    def random_value(upper=7):
        # Random "rational-looking" value: an int in [1, upper] plus a
        # small power of 0.2.
        return random.randint(1, upper) + math.pow(0.2, random.randint(2, 4))

    # Kept as NotImplementedError for backward compatibility with callers.
    if len(args) < 3 or len(args) > 4:
        raise NotImplementedError(
            "Invalid Number Of Arguments"
        )
    a, b, c = args[0], args[1], args[2]
    if len(args) == 3:
        if a == 0 and b == 0:
            return []
        if a == 0:
            return [round(random_value(), 2), c / b]
        if b == 0:
            return [c / a, round(random_value(), 2)]
        x = random_value()
        return [x, (c - a * x) / b]
    # Four arguments: produce n solutions.
    n = args[3]
    if a == 0 and b == 0:
        return []
    if a == 0:
        return [[round(random_value(1000), 2), c / b] for _ in range(n)]
    if b == 0:
        return [[c / a, round(random_value(), 2)] for _ in range(n)]
    solutions = []
    x = round(random_value(), 2)
    for _ in range(n):
        solutions.append([x, (c - a * x) / b])
        x = round(x + 0.1, 2)
    return solutions
import random
def rand_sign() -> int:
    """Random sign.

    Returns:
        -1 or 1, each with probability 0.5.
    """
    return -1 if random.random() >= 0.5 else 1
def wrap(headr, data):
    """
    Wrap ``data`` in an HTML element named ``headr``.

    Input:
        headr -- text of html field (tag name)
        data -- text to be wrapped.
    Returns the corresponding portion of an html file, e.g.
    wrap('b', 'x') -> '<b>x</b>'.
    """
    return f'<{headr}>{data}</{headr}>'
def get_unique_values_in_column(df, col_name):
    """Get unique values in a column.

    Args:
        df (spark.DataFrame): Dataframe.
        col_name (str): Column name.

    Returns:
        spark.DataFrame: collected rows holding the unique values, e.g.
        ``[Row(letters='c'), Row(letters='a')]``.
    """
    distinct_rows = df.select(col_name).distinct()
    return distinct_rows.collect()
import requests
def get_citeas_apa_citation(resource):
    """
    Return ``{resource: citation}`` with the CiteAs-generated citation
    (APA format) for the given resource.
    """
    # A timeout prevents a stalled CiteAs API from hanging the caller
    # forever (requests has no default timeout).
    r = requests.get("https://api.citeas.org/product/" + resource, timeout=30)
    citation = r.json()["citations"][0]["citation"]
    return {resource: citation}
def _get_num_components(num_components, num_samples, num_dimensions):
    """Resolve the number of components (clusters).

    ``None`` defaults to ``min(num_samples, num_dimensions)``.
    """
    if num_components is not None:
        return num_components
    return min(num_samples, num_dimensions)
def _trace_dense(op):  # pragma: no cover
    """Trace of a dense operator: the sum of its diagonal entries."""
    return sum((op[i, i] for i in range(op.shape[0])), 0.0)
from functools import reduce
def text2Int(text):
    """Convert a text string into an integer, big-endian, 8 bits per char.

    Returns 0 for the empty string (the original ``reduce`` version
    raised TypeError on empty input).
    """
    value = 0
    for ch in text:
        value = (value << 8) + ord(ch)
    return value
def get_environments(nodes):
    """Summarise the chef_environment values found across nodes.

    Nodes without a chef_environment count under 'none'. Returns a list
    of ``{'name': env, 'counts': n}`` dicts sorted by environment name.
    """
    counts = {}
    for node in nodes:
        env = node.get('chef_environment', 'none')
        counts[env] = counts.get(env, 0) + 1
    return [{'name': env, 'counts': counts[env]} for env in sorted(counts)]
def isStructure(s):
    """
    Check that a structure constraint contains only "(", ")", ".",
    bracket characters and legal fuzzy constraint letters (A-Z).

    Returns 1 when every character is legal, else 0 (ints kept for
    backward compatibility with callers).
    """
    legal = set(".()[]{}<>ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    # all() short-circuits at the first illegal character; the original
    # kept scanning the whole string after finding one.
    return 1 if all(ch in legal for ch in s) else 0
def Model(inits, simstep, totpop, theta=0, npass=0, bi={}, bp={}, values=(), model=None):
    """
    This function implements the SEQIAHR model
    - inits = (E, I, A, H, S) from the previous step (used after step 1)
    - theta = infectious individuals from neighbor sites

    NOTE(review): ``bi={}`` and ``bp={}`` are mutable default arguments;
    harmless only because they are never mutated here — confirm callers
    always supply them. ``theta``, ``npass``, ``values`` and ``model``
    are unused in this body.

    :param inits: Initial conditions
    :param simstep: Simulation step
    :param totpop: Total population on node
    :param theta: number of infectious arriving
    :param npass: total number of individual arriving
    :param bi: state variables (dict with bytes keys)
    :param bp: parameters (dict with bytes keys)
    :param values: Extra variables passed on nodes.csv
    :param model: reference to model instance
    :return: ([Epos, Ipos, Apos, Hpos, Spos], new cases Lpos, migrating infectious)
    """
    # print(bi)
    ##### Get state variables' current values
    if simstep == 1: # get initial values
        E, I, A, H, S = (bi[b'e'], bi[b'i'], bi[b'a'], bi[b'h'], bi[b's'])
    else: # get last step value, in the order returned by this function
        E, I, A, H, S = inits
    ##### Defining N, the total population
    N = totpop
    ##### Getting values for the model parameters
    beta, alpha, chi, phi, delta, rho, q, p = (
        bp[b'beta'], bp[b'alpha'], bp[b'chi'], bp[b'phi'], bp[b'delta'], bp[b'rho'], bp[b'q'], bp[b'p'])
    ##### Defining a Vacination event (optional)
    if bp[b'vaccineNow']:
        S -= bp[b'vaccov'] * S
    ##### Modeling the number of new cases (incidence function)
    Lpos = beta * S * (I + A + (1 - rho) * H) # Number of new cases
    ##### Epidemiological model (SIR)
    Epos = E + Lpos - alpha * E
    Ipos = I + (1 - p) * alpha * E - (phi + delta) * I
    Apos = A + p * alpha * E - delta * A
    Hpos = H + phi * I - delta * H
    Spos = S - Lpos
    # NOTE(review): Rpos is computed but never returned or used; the
    # expression reads N - S + E + I + A + H, which looks like it was
    # meant to be N - (S + E + I + A + H). Confirm before relying on it.
    Rpos = N - S + E + I + A + H
    # Number of infectious individuals commuting.
    migInf = Ipos + Apos
    return [Epos, Ipos, Apos, Hpos, Spos], Lpos, migInf
def append_host(host, pool):
    """Encode pool into host info as "host#pool".

    Returns ``host`` unchanged when either value is falsy.
    """
    if host and pool:
        return "#".join([host, pool])
    return host
import subprocess
def get_realsense():
    """
    Return the devfs path of an attached Realsense camera by scraping
    the output of the ``lsusb`` command.

    ``lsusb`` lists USB devices in lines like:
        Bus 002 Device 009: ID 16c0:0483 Intel Corp.
    whose devfs path is /dev/bus/usb/<busnum>/<devnum>, here:
        /dev/bus/usb/002/009

    Returns None when no Intel device is listed.
    """
    proc = subprocess.Popen(['lsusb'], stdout=subprocess.PIPE)
    listing = proc.communicate()[0].decode()
    for line in listing.split('\n'):
        if 'Intel Corp' not in line:
            continue
        fields = line.split()
        bus_number = fields[1]
        device_number = fields[3][:3]  # strip the trailing ':'
        return '/dev/bus/usb/%s/%s' % (bus_number, device_number)
def tt_atleast_1d(x):
    """
    Theano counterpart of ``numpy.atleast_1d``: promote a 0-d tensor to
    1-d via dimshuffle; higher-rank tensors pass through unchanged.
    """
    if x.broadcastable != ():
        return x
    return x.dimshuffle("x")
def _critical_nemenyi_value(p_value, num_models):
    """Critical values for the Nemenyi post-hoc test.

    Table obtained from: https://gist.github.com/garydoranjr/5016455

    :param p_value: significance level; must be exactly 0.01, 0.05 or 0.10.
    :param num_models: number of models compared; must be in [2, 50].
    :return: the critical value for the given p-value and model count.
    :raises ValueError: on a p_value or num_models outside the table.
    """
    # Rows are indexed by (num_models - 2); columns by p in (0.01, 0.05, 0.10).
    values = [# p 0.01 0.05 0.10 Models
        [2.576, 1.960, 1.645], # 2
        [2.913, 2.344, 2.052], # 3
        [3.113, 2.569, 2.291], # 4
        [3.255, 2.728, 2.460], # 5
        [3.364, 2.850, 2.589], # 6
        [3.452, 2.948, 2.693], # 7
        [3.526, 3.031, 2.780], # 8
        [3.590, 3.102, 2.855], # 9
        [3.646, 3.164, 2.920], # 10
        [3.696, 3.219, 2.978], # 11
        [3.741, 3.268, 3.030], # 12
        [3.781, 3.313, 3.077], # 13
        [3.818, 3.354, 3.120], # 14
        [3.853, 3.391, 3.159], # 15
        [3.884, 3.426, 3.196], # 16
        [3.914, 3.458, 3.230], # 17
        [3.941, 3.489, 3.261], # 18
        [3.967, 3.517, 3.291], # 19
        [3.992, 3.544, 3.319], # 20
        [4.015, 3.569, 3.346], # 21
        [4.037, 3.593, 3.371], # 22
        [4.057, 3.616, 3.394], # 23
        [4.077, 3.637, 3.417], # 24
        [4.096, 3.658, 3.439], # 25
        [4.114, 3.678, 3.459], # 26
        [4.132, 3.696, 3.479], # 27
        [4.148, 3.714, 3.498], # 28
        [4.164, 3.732, 3.516], # 29
        [4.179, 3.749, 3.533], # 30
        [4.194, 3.765, 3.550], # 31
        [4.208, 3.780, 3.567], # 32
        [4.222, 3.795, 3.582], # 33
        [4.236, 3.810, 3.597], # 34
        [4.249, 3.824, 3.612], # 35
        [4.261, 3.837, 3.626], # 36
        [4.273, 3.850, 3.640], # 37
        [4.285, 3.863, 3.653], # 38
        [4.296, 3.876, 3.666], # 39
        [4.307, 3.888, 3.679], # 40
        [4.318, 3.899, 3.691], # 41
        [4.329, 3.911, 3.703], # 42
        [4.339, 3.922, 3.714], # 43
        [4.349, 3.933, 3.726], # 44
        [4.359, 3.943, 3.737], # 45
        [4.368, 3.954, 3.747], # 46
        [4.378, 3.964, 3.758], # 47
        [4.387, 3.973, 3.768], # 48
        [4.395, 3.983, 3.778], # 49
        [4.404, 3.992, 3.788], # 50
        ]
    if num_models < 2 or num_models > 50:
        raise ValueError("num_models must be in [2, 50].")
    # NOTE: exact float comparison; works for literal arguments like 0.05
    # but not for computed p-values that differ in the last ulp.
    if p_value == 0.01:
        return values[num_models - 2][0]
    elif p_value == 0.05:
        return values[num_models - 2][1]
    elif p_value == 0.10:
        return values[num_models - 2][2]
    else:
        raise ValueError("p_value must be in {0.01, 0.05, 0.10}")
def choose_token_getter(getters):
    """Ask the user to choose a token getter amongst those available.

    ``getters`` is a list of (label, getter) pairs; with a single entry
    that getter is returned immediately without prompting.
    """
    if len(getters) == 1:
        return getters[0][1]
    print("cogite needs a personal access token. There are several ways to get one:")
    for number, (label, _getter) in enumerate(getters, start=1):
        print(f"{number}. {label}")
    valid = range(1, len(getters) + 1)
    while True:
        options = ', '.join(str(i) for i in range(1, len(getters)))
        options += f' or {len(getters)}'
        answer = input(
            f"Please choose one of the methods above by typing {options}: "
        )
        try:
            selected = int(answer)
        except ValueError:
            pass
        else:
            if selected in valid:
                return getters[selected - 1][1]
        print("Wrong choice. Try again.")
from typing import Dict
def normalize(dict_: Dict) -> Dict:
    """
    Normalize the values of a dict so they sum to 1.

    Parameters
    ----------
    dict_ : Dict
        Maps keys to numeric weights; they must not all be zero.

    Returns
    -------
    Dict
        Same keys, each value divided by the total.

    Example
    -------
    >>> sorted(normalize({'a': 10, 'b': 70, 'c': 20}).items())
    [('a', 0.1), ('b', 0.7), ('c', 0.2)]
    """
    # Sum the values directly instead of iterating items() for the keys too.
    total = float(sum(dict_.values()))
    return {key: value / total for key, value in dict_.items()}
from datetime import datetime
def query_dependency_release_dates_from_bigquery(bigquery_client, dep_name, curr_ver_in_beam, latest_ver):
    """
    Query release dates of the current and latest versions from BQ tables,
    and sync the table so it reflects which version Beam currently uses.

    Args:
        bigquery_client: a bq client object that bundles configurations for API requests
        dep_name: dependency name
        curr_ver_in_beam: the version currently used in Beam
        latest_ver: the latest available version
    Return:
        A tuple ``(curr_release_date, latest_release_date)``.
    """
    # The original wrapped this body in ``try: ... except Exception: raise``,
    # which is a no-op — exceptions propagate unchanged either way.
    curr_release_date, is_currently_used_bool = bigquery_client.query_dep_info_by_version(dep_name, curr_ver_in_beam)
    latest_release_date, _ = bigquery_client.query_dep_info_by_version(dep_name, latest_ver)
    date_today = datetime.today().date()
    # Sync the table's record of which version is currently used.
    if not is_currently_used_bool:
        currently_used_version_in_db, currently_used_release_date_in_db = bigquery_client.query_currently_used_dep_info_in_db(dep_name)
        if currently_used_version_in_db is not None:
            bigquery_client.delete_dep_from_table(dep_name, currently_used_version_in_db)
            bigquery_client.insert_dep_to_table(dep_name, currently_used_version_in_db, currently_used_release_date_in_db, is_currently_used=False)
        if curr_release_date is None:
            bigquery_client.insert_dep_to_table(dep_name, curr_ver_in_beam, date_today, is_currently_used=True)
        else:
            bigquery_client.delete_dep_from_table(dep_name, curr_ver_in_beam)
            bigquery_client.insert_dep_to_table(dep_name, curr_ver_in_beam, curr_release_date, is_currently_used=True)
    # Record the latest version if it is not in the table yet.
    if latest_release_date is None:
        bigquery_client.insert_dep_to_table(dep_name, latest_ver, date_today, is_currently_used=False)
        latest_release_date = date_today
    return curr_release_date, latest_release_date
def _pretty_frame_relation_type(freltyp):
    """
    Pretty-print a frame relation type.

    :param freltyp: The frame relation type to be printed.
    :type freltyp: AttrDict
    :return: A nicely formatted string representation of the frame relation type.
    :rtype: str
    """
    template = "<frame relation type ({0.ID}): {0.superFrameName} -- {0.name} -> {0.subFrameName}>"
    return template.format(freltyp)
def l2s(inputs):
    """Transform a list of objects into a list of their str() forms.

    Arguments:
        inputs: objects @type list
    Returns:
        list of str(object)
    """
    return list(map(str, inputs))
def get_rtl_seq(seq):
    """Return the rtl variant of an emoji sequence, or () if it has none.

    Sequences containing ZWJ are mirrored; Fitzpatrick skin-tone
    modifiers do not reflect, so a second pass swaps each one back after
    its base to restore their logical order.

    (TAG_END 0xe007f used to be checked as well, but Android
    fontchain_lint dislikes the resulting mangling of flags for England,
    Scotland, Wales.)
    """
    ZWJ = 0x200d

    def is_fitzpatrick(cp):
        return 0x1f3fb <= cp <= 0x1f3ff

    if ZWJ not in seq:
        return ()
    mirrored = list(reversed(seq))
    for i in range(1, len(mirrored)):
        if is_fitzpatrick(mirrored[i - 1]):
            mirrored[i - 1], mirrored[i] = mirrored[i], mirrored[i - 1]
    return tuple(mirrored)
def moyenne(donnees):
    """
    Compute the arithmetic mean of a series of values.

    :param donnees: iterable of numbers; must be non-empty.
    :return: the mean as a float.
    :raises ZeroDivisionError: if ``donnees`` is empty.
    """
    # The original accumulated with a manual loop and emitted a stray
    # blank line via a leftover debug print(); both removed.
    valeurs = list(donnees)
    return sum(valeurs) / len(valeurs)
def create_unique_name(prefix, names, separator="_"):
    """Create a name starting with 'prefix' that is not in 'names'.

    Tries ``prefix`` itself first, then ``prefix<separator>1``,
    ``prefix<separator>2``, ... until an unused name is found.
    """
    candidate = prefix
    suffix = 1
    while candidate in names:
        candidate = prefix + separator + str(suffix)
        suffix += 1
    return candidate
def get_sdf_from_index_list(orig_sdf, index_list):
    """Select the sdf entries at the given indices (after clustering)."""
    return list(map(orig_sdf.__getitem__, index_list))
def _pose_equal(pose1, pose2):
    """True when pose1 matches pose2 on the compared fields.

    Only position x/y and orientation z/w are compared (a planar pose);
    position z and orientation x/y are ignored.

    (The original docstring claimed the opposite — "True if ... different
    position" — while the code has always returned True for equal poses.)

    :param pose1: message exposing ``pose.pose.position`` / ``orientation``
    :param pose2: message of the same shape
    :return: bool
    """
    p1_pos = pose1.pose.pose.position
    p1_orient = pose1.pose.pose.orientation
    p2_pos = pose2.pose.pose.position
    p2_orient = pose2.pose.pose.orientation
    return (p1_pos.x == p2_pos.x and p1_pos.y == p2_pos.y
            and p1_orient.z == p2_orient.z and p1_orient.w == p2_orient.w)
def get_parameter_value(fhir_operation, parameter_name):
    """
    Find the value of the named parameter on a FHIR operation definition.

    :param fhir_operation: the fhir operation definition
    :param parameter_name: the name of the parameter to get the value of
    :return: the (last) matching parameter's bound value-set identifier
        value, or '' when no parameter matches
    """
    found_value = ''
    for parameter in fhir_operation.parameter:
        if parameter.name != parameter_name:
            continue
        found_value = parameter.binding.valueSetReference.identifier.value
    return found_value
import math
def is_prime(value: int) -> bool:
    """Detect whether a value is prime.

    Args:
        value (int): value.

    Returns:
        bool: True when the value is prime.
    """
    if value < 2:
        return False
    if value == 2:
        return True
    if value % 2 == 0:
        return False
    # math.isqrt avoids the float-rounding risk of int(math.sqrt(value))
    # on very large integers.
    for divisor in range(3, math.isqrt(value) + 1, 2):
        if value % divisor == 0:
            return False
    return True
import os
def get_root() -> str:
    """Returns the root of the nomulus build tree.

    Raises when the current directory does not look like the repo root,
    i.e. it lacks buildSrc/, core/ or gradle.properties.
    """
    cur_dir = os.getcwd()
    markers = ('buildSrc', 'core', 'gradle.properties')
    if not all(os.path.exists(os.path.join(cur_dir, m)) for m in markers):
        raise Exception('You must run this script from the root directory')
    return cur_dir
from typing import Any
from typing import List
def _dict_rec_get(d: dict[Any, Any], path: List[Any], default: Any) -> Any:  # type: ignore # reason: dict
    """
    Walk ``path`` through nested dicts, returning ``default`` when any
    step is missing.

    >>> d = {'a': 'a', 'b': {'c': 'bc', 'd': {'e': 'bde'}}}

    Simple get:
    >>> _dict_rec_get(d, ['a'], None)
    'a'

    Returns default if key does not exist:
    >>> _dict_rec_get(d, ['c'], 0)
    0

    Get recursive:
    >>> _dict_rec_get(d, ['b', 'd', 'e'], None)
    'bde'
    >>> _dict_rec_get(d, ['b', 'nopath'], None) is None
    True
    """
    assert isinstance(path, list)
    current = d
    for step in path:
        if not (isinstance(current, dict) and step in current):  # type: ignore
            return default
        current = current[step]
    return current
def id(obj):  # pylint: disable=redefined-builtin,invalid-name
    """Return the ``__id`` key of a dict."""
    key = '__id'
    return obj[key]
def bin2dec(x):
    """
    Convert a binary string to its decimal number, e.g. '11' -> 3.
    """
    base = 2
    return int(x, base)
def stripped_string_concat(str1, str2):
    """Concatenates passed strings and truncates spaces in the result.

    :param str1: First string
    :type str1: str
    :param str2: Second string
    :type str2: str
    :return: A string with truncated spaces
    :rtype: str
    """
    joined = ' '.join((str1, str2))
    return joined.strip()
from typing import Optional
def _is_version_at_least(version: str, major: int, minor: Optional[int] = None) -> bool:
    """
    Check that a given version meets the minimum requirements.

    :param version:
        Version string in the form "<major>.<minor>[.<more>]"
    :param major:
        Major version requirement.
    :param minor:
        Minor version requirement, if any.
    :return:
        Whether the given version is sufficient.
    """
    parts = version.split(".", maxsplit=3)
    ver_major = int(parts[0])
    if ver_major != major:
        # Bug fix: a strictly higher major always satisfies the requirement
        # (the original rejected e.g. "3.1" against minimum 2.5), and a
        # strictly lower major always fails.
        return ver_major > major
    # Same major: compare minors.  "is not None" so that minor=0 is honored.
    if minor is not None and int(parts[1]) < minor:
        return False
    return True
def create_new_tarball_name(platform, program, version):
    """ Converts the name of a platform as specified to the prepare_release
    framework to an archive name according to BLAST release naming conventions.
    Note: the platform names come from the prepare_release script conventions,
    more information can be found in http://mini.ncbi.nih.gov/3oo
    """
    base = "ncbi-" + program + "-" + version
    if program == "blast":
        base += "+"
    # Platforms matched by name prefix, checked in order.
    prefix_suffixes = (
        ("Win", "-x64-win64"),
        ("Linux32", "-ia32-linux"),
        ("Linux64", "-x64-linux"),
        ("IntelMAC", "-x64-macosx"),
    )
    for prefix, suffix in prefix_suffixes:
        if platform.startswith(prefix):
            return base + suffix
    # Platforms matched by exact name.
    exact_suffixes = {
        "SunOSSparc": "-sparc64-solaris",
        "SunOSx86": "-x64-solaris",
    }
    if platform in exact_suffixes:
        return base + exact_suffixes[platform]
    raise RuntimeError("Unknown platform: " + platform)
import os
def native_path(path):
    """Convert a build path (forward slashes) to a native path."""
    separator = os.sep
    return path.replace('/', separator)
def get_all_options(args_string):
    """
    Read all of the hyperparameter options from the arguments string.

    Each option group is delimited by "{...}" and its entries are separated
    by ", ", e.g. "{a, b} {c}" -> [["a", "b"], ["c"]].  Nested braces are
    not supported (an inner "{" simply restarts the current group).

    :param args_string: string containing zero or more "{...}" groups
    :return: list of option lists, one per group
    """
    # Collect the raw "{...}" substrings first.
    list_start = -1
    list_strings = []
    for char_ix, curr_char in enumerate(args_string):
        if curr_char == "{":
            list_start = char_ix
        elif curr_char == "}":
            list_strings.append(args_string[list_start : char_ix + 1])
            list_start = -1
    # Strip the braces and split each group on ", " (comprehension replaces
    # the original append loop; unused loop index removed).
    return [ls.split("{")[1].split("}")[0].split(", ") for ls in list_strings]
import functools
def logger(fn):
    """Decorator to log method calls from classes"""
    @functools.wraps(fn)
    def func(class_obj, *args, **kwargs):
        """Logs method calls from classes"""
        cls_name = class_obj.__class__.__name__
        print(f'{cls_name}.{fn.__name__} was run with "{args}" args and {kwargs} kwargs')
        result = fn(class_obj, *args, **kwargs)
        return result
    return func
import glob
def get_test_files(test_dir):
    """
    Function used to get all test .c scripts
    :param test_dir: glob pattern (may contain ``**``) for .c files
    :return: list of matching file paths
    """
    return list(glob.iglob(test_dir, recursive=True))
import argparse
def set_args():
    """
    Parse and validate the prediction command-line arguments from sys.argv.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--img_file', required=True, type=str,
                            help=' <Fulle path image file name >')
    arg_parser.add_argument('--cpt', required=True, type=str,
                            help='checkpoint full path ')
    arg_parser.add_argument('--top_k', default=5, type=int,
                            help='Top # of classes probeblity to display ')
    arg_parser.add_argument('--gpu', type=bool, default=False,
                            help='Use this argument to start using GPU else don\'t use it.')
    arg_parser.add_argument('--class_dict', type=str, default="cat_to_name.json",
                            help=' Json Classes names file full path ')
    return arg_parser.parse_args()
def average_change(profit_loss):
    """
    Return the average per-step change of the values in ``profit_loss``,
    computed as ``(last - first) / (number of elements - 1)`` and rounded
    to two decimal places.

    :param profit_loss: sequence of at least two numbers
    :return: float average change, rounded to 2 decimals
    """
    # No defensive copy needed: the input is only read, never mutated.
    return round((profit_loss[-1] - profit_loss[0]) / (len(profit_loss) - 1), 2)
def cli(ctx, datasets):
    """Send a delete task on a list of datasets

    Output:

        List of the datasets
    """
    dataset_service = ctx.gi.dataset
    return dataset_service.delete(datasets)
def movesurvived(df):
    """
    Return *df* with its second column moved to the front.
    Used for the training data set so 'Survived' comes first.

    :param df: Dataframe of training data
    :return: dataframe with reordered columns
    """
    cols = df.columns.tolist()
    reordered = [cols[1], cols[0]] + cols[2:]
    return df[reordered]
def get_lr(opt):
    """
    Return the learning rate of the optimizer's first param group
    (None when there are no param groups).
    """
    for group in opt.param_groups:
        return group['lr']
    return None
def century(year):
    """
    The first century spans from the year 1 up to and including the year 100,
    the second from the year 101 up to and including the year 200, etc.

    :param year: an integer value.
    :return: the current century (ceiling of year / 100).
    """
    # Ceiling division via negated floor division: ceil(y/100) == -(-y//100),
    # which equals (y - 1)//100 + 1 for all integers.
    return -(-year // 100)
import copy
def __rotate__(piece, repeat=0):
    """
    Internal helper: return a deep copy of *piece* rotated 90 degrees
    clockwise *repeat* times.
    """
    rotated = copy.deepcopy(piece)
    remaining = repeat
    while remaining > 0:
        # One clockwise quarter turn: reverse rows, then transpose.
        rotated = list(zip(*rotated[::-1]))
        remaining -= 1
    return rotated
from datetime import datetime
def squash_dates(obj):
    """Recursively squash datetime objects into ISO8601 strings."""
    if isinstance(obj, datetime):
        return obj.isoformat()
    if isinstance(obj, dict):
        # Build a new dict so the caller's object is not clobbered.
        return {key: squash_dates(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [squash_dates(item) for item in obj]
    return obj
def get_capillary_diameter(line1, line2):
    """
    Return the capillary diameter in pixel length.

    line1 = (x, y) point on the left edge of the capillary
    line2 = (x, y) point on the right edge of the capillary
    """
    x1, y1 = line1
    x2, y2 = line2
    dx = x2 - x1
    dy = y2 - y1
    # Assumption: image rotation is very small, so the straight-line
    # distance is very close to the true diameter of the capillary.
    return (dx * dx + dy * dy) ** 0.5
def gcd(a: int, b: int) -> int:
    """
    Return the Greatest Common Divisor of two integers.

    The result is always non-negative, and gcd(0, 0) == 0.
    """
    a, b = abs(a), abs(b)
    # Iterative Euclidean algorithm.  This replaces the original recursion
    # (which could hit the recursion limit) and subsumes its special cases
    # (b == 0, a == 0, and the sorted divisibility check).
    while b:
        a, b = b, a % b
    return a
def find_packages_in_file(path):
    """
    Parse a text file containing a list of packages (one per line) and
    return them as a list of strings.
    """
    with open(path, "r") as handle:
        contents = handle.read()
    return contents.splitlines()
def validate_consumer(consumer):
    """Validate a consumer against an oauth2.Consumer object (must have a ``key``)."""
    if hasattr(consumer, "key"):
        return consumer
    raise ValueError("Invalid consumer.")
def get_npm_license_from_licenses_array(licenses_array):
    """
    Extract license names from a licenses array and join them with AND,
    e.g. "licenses": [{"type": "MIT"}, {"type": "Apache"}] -> "MIT AND Apache".

    Arguments:
        licenses_array {list} -- array of license dicts (or plain values)

    Returns:
        str -- joined license names, or None for an empty/missing array
    """
    if not licenses_array:
        return None
    license_name = None
    for license_item in licenses_array:
        # Entries are usually {"type": "..."} dicts, but plain values occur
        # too.  isinstance() replaces the original ``type(x) is dict`` check.
        license_item_type = (
            license_item.get("type", None)
            if isinstance(license_item, dict)
            else f"{license_item}"
        )
        if license_name is not None:
            license_name = f"{license_name} AND {license_item_type}"
        else:
            license_name = license_item_type
    return license_name
import os
import re
def add_imt(fname, imt):
    """
    Insert *imt* into the basename of *fname*, just before the trailing
    "_<digits>." suffix.

    >>> add_imt('/path/to/hcurve_23.csv', 'SA(0.1)')
    '/path/to/hcurve-SA(0.1)_23.csv'
    """
    name = os.path.basename(fname)
    # Raw string literals: the original '(_\d+\.)' relied on Python
    # tolerating the invalid '\d' escape (a SyntaxWarning since 3.12).
    newname = re.sub(r'(_\d+\.)', r'-%s\1' % imt, name)
    return os.path.join(os.path.dirname(fname), newname)
def error(estimated, fitted):
    """
    Calculates mean percentage error for fitted values to estimated values.

    :param estimated: estimated values
    :param fitted: fitted values
    :return: percent error
    """
    total = 0.0
    for est, fit in zip(estimated, fitted):
        total += abs(est / fit - 1)
    return total / len(estimated)
import string
import re
def remove_duplicate_punctuation(text: str) -> str:
    """
    Collapse any run of punctuation characters into a single character
    (the last character of the run), which may have been a feature of
    gazette design.  E.g. "hi!!!" -> "hi!".
    """
    # re.escape makes the character class safe: string.punctuation contains
    # ']', '\\' and '-', which the original embedded unescaped into the
    # class and which only parsed correctly by coincidence.
    pattern = re.compile(f"([{re.escape(string.punctuation)}])" + "{1,}")
    return re.sub(pattern, r"\1", text)
def get_image_name(url, char_limit=60):
    """Get the file name of an image url.

    Args:
        url (str): Image url.
        char_limit (int): Maximum number of characters for the name
            (extension excluded).

    Returns:
        str: Image name; ".jpg" is appended when no known extension is found.

    Examples:
        >>> get_image_name("https://miguelgfierro.com/static/blog/img/hoaphumanoid.png")
        'hoaphumanoid.png'
        >>> get_image_name("https://miguelgfierro.com/static/blog/img/hoaphumanoid.png?itok=o-EKrRkB")
        'hoaphumanoid.png'
        >>> get_image_name("https://miguelgfierro.com/static/blog/img/hoaphumanoid")
        'hoaphumanoid.jpg'
        >>> get_image_name("https://miguelgfierro.com/012345678901234567890123456789.jpg", 20)
        '01234567890123456789.jpg'
    """
    # Take everything after the last "/" and drop any query string.
    name = str(url[url.rfind("/") + 1 :])
    name = name.partition("?")[0]
    extensions = (".jpg", ".jpeg", ".gif", ".png", ".bmp", ".svg", ".webp", ".ico")
    if any(ext in name for ext in extensions):
        dot = name.rfind(".")
        name, ext = name[:dot], name[dot:]
    else:
        ext = ".jpg"
    # Truncate the stem (slicing a shorter name is a no-op).
    return name[:char_limit] + ext
def modify_reference_model_sparsity(cli_sparsity):
    """
    Build a callback that modifies a reference model's sparsity in-place
    to match the value given in the sparsity argument.
    """
    def _modify_model(model):
        # Any value other than "sparse"/"dense" leaves the model untouched.
        if cli_sparsity == "sparse":
            model.set_sparse(True)
        elif cli_sparsity == "dense":
            model.set_sparse(False)

    return _modify_model
from typing import List
from typing import Set
import ast
def get_local_imports(files: List[str]) -> List[str]:
    """ Parses a list of files to determine local imports; external dependencies are discarded """
    imports: Set[str] = set()
    for path in files:
        with open(path) as handle:
            tree: ast.Module = ast.parse(handle.read(), path)
        for node in ast.walk(tree):
            # Plain ``import x`` statements are only used for external deps.
            if not isinstance(node, ast.ImportFrom):
                continue
            module = node.module
            # Keep only dotted submodules of great_expectations itself
            # (note that "import great_expectations as ge" is discarded).
            if isinstance(module, str) and "great_expectations" in module and "." in module:
                imports.add(module)
    return list(imports)
import hashlib
def es2_activity_hash(activity, flow):
    """Generate unique ID for ecoinvent3 dataset.

    Despite using a million UUIDs, there is actually no unique ID in an
    ecospold2 dataset. Datasets are uniquely identified by the combination
    of activity and flow UUIDs.

    :param activity: activity UUID string
    :param flow: flow UUID string
    :return: 32-character hex MD5 digest of the concatenation
    """
    # hexdigest() already returns str; the original's str() wrapper was redundant.
    return hashlib.md5((activity + flow).encode('utf-8')).hexdigest()
import string
def _get_placeholders(template):
    """Get all placeholders from a template string.

    Parameters
    ----------
    template : str
        The template string to get the placeholders for.

    Returns
    -------
    placeholders : list of str
        The list of placeholder names that were found in the template string.

    Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
    """
    placeholders = []
    for _, field_name, _, _ in string.Formatter().parse(template):
        # Skip literal-only chunks (None) and anonymous "{}" fields ('').
        if field_name:
            placeholders.append(field_name)
    return placeholders
def get_attn_padding_mask(seq_q, seq_k, pad_token=1):
    """Indicate the padding-related part to mask"""
    assert seq_q.dim() == 2 and seq_k.dim() == 2
    batch, len_q = seq_q.size()
    _, len_k = seq_k.size()
    # bx1xsk: True wherever the key token equals the padding token.
    mask = seq_k.data.eq(pad_token).unsqueeze(1)
    # bxsqxsk: broadcast the key-side mask over every query position.
    return mask.expand(batch, len_q, len_k)
def convert_to_generations(g, sample_times):
    """
    Takes a deme graph that is not in time units of generations and converts
    times to generations, rescaling the sample times in-place as well.
    """
    if g.time_units != "generations":
        for idx in range(len(sample_times)):
            sample_times[idx] = sample_times[idx] / g.generation_time
        g = g.in_generations()
    return g, sample_times
def parse_inet_login(raw_login):
    """
    Convert an Internet login to its normalized form: drop the 3-character
    prefix and prepend '77'.

    :param raw_login: СПД893623347
    :return: 77893623347
    """
    return '' if raw_login == '' else '77' + raw_login[3:]
from typing import List
from typing import Any
def get_internal_arg_copier(total_size: int, memory_dest: int) -> List[Any]:
    """
    Copy arguments.
    For internal functions, MSTORE arguments and callback pointer from the stack.

    :param total_size: total size to copy
    :param memory_dest: base memory position to copy to
    :return: LLL list that copies total_size of memory
    """
    # One 32-byte MSTORE per word, starting at memory_dest.
    stores = [
        ["mstore", memory_dest + offset, "pass"]
        for offset in range(0, total_size, 32)
    ]
    return ["seq"] + stores
import random
def fill_matrix(weightlist, static_w, sparsity):
    """Create a weight matrix to use in syn dict.

    For each row of *weightlist* (non-sequence rows are skipped), build a
    row of the same length containing ``int(len * sparsity)`` copies of
    ``static_w`` and zeros elsewhere, randomly shuffled.
    """
    weights = []
    for template_row in weightlist:
        if not isinstance(template_row, (list, tuple)):
            continue
        row_len = len(template_row)
        n_active = int(row_len * sparsity)
        new_row = [float(static_w)] * n_active + [0.] * (row_len - n_active)
        random.shuffle(new_row)
        weights.append(new_row)
    return weights
def parser(data):
    """Parse data into a list of sets: one set of characters per line,
    grouped into sublists separated by empty lines."""
    groups, current_group = [], []
    for line in data:
        if line:
            current_group.append({ch for ch in line})
        else:
            groups.append(current_group)
            current_group = []
    # The final (possibly empty) group is always appended.
    groups.append(current_group)
    return groups
def assign_pml_elems(sorted_elems, pml_elems, pml_partID='2'):
    """assign PML elements in the sorted element matrix

    Overwrites the part ID ('pid') of the outermost element layers on all
    six faces of the 3D element grid, marking them as PML elements.

    Args:
        sorted_elems: sorted element matrix — presumably a NumPy structured
            array (or dict of 3D arrays) with a 'pid' field indexed
            [x, y, z]; confirm against the caller
        pml_elems: list of tuples of # PML elems on each axis edge
            ([[xmin, max], [ymin, ymax], ...)
        pml_partID: default = 2

    Returns:
        sorted_elems (to be written to new file)
    """
    # x faces: first pml_elems[0][0] layers, then the last pml_elems[0][1]
    # layers.  The slice -1:-n-1:-1 walks backwards from the last layer and
    # selects the n outermost layers at the high end of the axis.
    sorted_elems['pid'][0:pml_elems[0][0], :, :] = pml_partID
    sorted_elems['pid'][-1:-pml_elems[0][1] - 1:-1, :, :] = pml_partID
    # y faces: same pattern on the second axis.
    sorted_elems['pid'][:, 0:pml_elems[1][0], :] = pml_partID
    sorted_elems['pid'][:, -1:-pml_elems[1][1] - 1:-1, :] = pml_partID
    # z faces: same pattern on the third axis.
    sorted_elems['pid'][:, :, 0:pml_elems[2][0]] = pml_partID
    sorted_elems['pid'][:, :, -1:-pml_elems[2][1] - 1:-1] = pml_partID
    return sorted_elems
from typing import Callable
import inspect
import functools
def wrap_in_coroutine(func: Callable) -> Callable:
"""Decorator to wrap a function into a coroutine function.
If `func` is already a coroutine function it is returned as-is.
Args:
func: A callable object (function or coroutine function)
Returns:
A coroutine function which executes `func`.
"""
if inspect.iscoroutinefunction(func):
return func
@functools.wraps(func)
async def _wrapper(*args, **kwargs):
return func(*args, **kwargs)
return _wrapper | 3134241771749d63ce5213180a34d7c26f8f0c76 | 40,677 |
def compute_rare_event_rate(y):
    """
    Fraction of positive labels in *y*.
    In CERENKOV3 we init XGBClassifier(base_score=rare_event_rate)
    """
    positives = sum(y)
    total = len(y)
    return positives / total
def convert_emoji_to_text(x, emoji_wrapper="[EMOJI]"):
    """Turn a regex emoji match into wrapper-delimited, space-separated words."""
    words = " ".join(x.groups()[0].split("_"))
    return f" {emoji_wrapper} {words} {emoji_wrapper} "
def make_powspec_params():
    """Default power-spectrum parameters for the EEG and EMG spectrograms."""
    eeg_params = {
        "lowpass": 20,
        "highpass": 2,
        "logscale": False,
        "normalize": True,
        "medianfilter": 9,
        "stride": 5,
    }
    emg_params = {
        "lowpass": 100,
        "highpass": 130,
        "logscale": False,
        "normalize": True,
        "medianfilter": 9,
        "stride": 10,
    }
    return {"spectrogram": {"EEG": eeg_params, "EMG": emg_params}}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.