content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def pretty_dict(dic):
    """Return a deterministic, printable representation of a dictionary.

    Entries are emitted in sorted key order so the output is stable
    (useful for unit tests); nested dictionaries are rendered recursively.

    Examples:
    >>> pretty_dict({1: 2, 3: 4})
    '{1: 2, 3: 4}'
    >>> pretty_dict({'one': 'two', 'three': 'four'})
    '{one: two, three: four}'
    >>> pretty_dict({1: 2, 3: {4: 5, 6: 7}})
    '{1: 2, 3: {4: 5, 6: 7}}'
    """
    parts = []
    for key in sorted(dic):
        value = dic[key]
        # Recurse into nested dicts so they get the same deterministic form.
        if isinstance(value, dict):
            value = pretty_dict(value)
        parts.append(f'{key}: {value}')
    return '{' + ', '.join(parts) + '}'
def _standardize_county_name(zipcounty):
"""
Standardize county name to match with our 'San Francisco' like formatting.
Takes a zipcounty dict and updates 'countyName' key if exists.
"""
if 'countyName' in zipcounty.keys():
countyname = zipcounty['countyName'].lower()
county_list = [word[0].upper() + word[1:] for word in countyname.split()]
zipcounty['countyName'] = ' '.join(county_list)
return zipcounty | 0cebac1253da22fc12c4b42c0601933af1c1fe51 | 106,599 |
import math
def normpdf(x, mean, sd):
    """Evaluate the normal (Gaussian) probability density function.

    Fix: uses ``math.pi`` instead of the hand-typed constant 3.1415926,
    which limited the result's accuracy.

    :param x: point at which to evaluate the density
    :param mean: mean of the distribution
    :param sd: standard deviation of the distribution
    :return: density value at x (float)
    """
    var = float(sd) ** 2
    denom = (2 * math.pi * var) ** 0.5
    num = math.exp(-(float(x) - float(mean)) ** 2 / (2 * var))
    return num / denom
def get_key(dict, key):
    """Return ``dict[key]``, or None when the key is missing.

    NOTE(review): the parameter name shadows the builtin ``dict``;
    kept as-is to preserve the keyword-call interface.
    """
    # dict.get already defaults to None.
    return dict.get(key)
def get_cut_limbs(life):
    """Return the names of all limbs in ``life['body']`` whose 'cut' flag is truthy."""
    body = life['body']
    return [limb for limb in body if body[limb]['cut']]
from typing import Optional
from pathlib import Path
import logging
def get_specific_ids(specific_ids_path: Optional[Path] = None):
    """Read a list of ids (one per line) from an optional text file.

    :param specific_ids_path: Path of a text file with one id per line.
    :return: List of stripped id strings, or None when the path is not
        given or does not exist.
    """
    # No file configured / file missing: nothing to restrict processing to.
    if specific_ids_path is None or not specific_ids_path.exists():
        return None
    with open(specific_ids_path) as handle:
        specific_ids = [line.strip() for line in handle]
    logging.info(f"We found specific ids. They are: {specific_ids}")
    return specific_ids
def test_is_dict(input):
""" Simple test that returns true if item is a dictionary """
return isinstance(input, dict) | 30419557b5a49642fbf592573cbea7a822dea349 | 106,616 |
def col_to_excel(col):
    """Convert a 1-based column number to its Excel column name (A, B, ... AA, ...).

    Parameters
    ----------
    col : int
        Column number; 1 maps to "A". Values <= 0 yield "".

    Returns
    -------
    str
        The Excel-style column name.
    """
    letters = []
    remaining = col
    # Bijective base-26: subtract 1 each step so 26 maps to "Z", 27 to "AA".
    while remaining > 0:
        remaining, digit = divmod(remaining - 1, 26)
        letters.append(chr(digit + 65))
    return ''.join(reversed(letters))
from typing import Set
def chain_id_set_from_matchmaking_claim(claim: dict, level: int) -> Set[str]:
    """Return the set of chain ids recorded at a given level of a matchmaking claim.

    Args:
        claim: dictionary of claim from matchmaking; expected to contain
            claim["validations"][f"l{level}"] as a mapping keyed by chain id.
        level: level to get the set of chains for.

    Returns:
        Set of chain ids at that level.
    """
    level_validations = claim["validations"][f"l{level}"]
    # Iterating a mapping yields its keys.
    return set(level_validations)
import re
def parse_github_url(url):
    """Parse a github url (https or ssh form) into (organization, repo) names."""
    # Splitting on both '/' and ':' handles https://.../org/repo and
    # git@github.com:org/repo alike; '.git' suffixes are stripped first.
    parts = re.split('/|:', url.replace('.git', ''))
    return parts[-2], parts[-1]
def voigt_reuss(mod1: float, mod2: float, vfrac: float) -> tuple:
    """
    Compute the Voigt and Reuss bounds and the Voigt-Reuss-Hill average
    for a 2-component mixture.

    The Voigt (upper) bound assumes uniform strain; the Reuss (lower)
    bound assumes uniform stress; the Hill average is their mean.

    :param mod1: elastic modulus of first mineral
    :param mod2: elastic modulus of second mineral
    :param vfrac: volumetric fraction of first mineral (net-to-gross)
    :return: (voigt upper bound, reuss lower bound, Voigt-Reuss-Hill average)

    Reference
    Mavko, G, T. Mukerji and J. Dvorkin (2009), The Rock Physics Handbook: Cambridge University Press.
    """
    frac2 = 1 - vfrac
    voigt_bound = vfrac * mod1 + frac2 * mod2
    reuss_bound = 1 / (vfrac / mod1 + frac2 / mod2)
    return voigt_bound, reuss_bound, (voigt_bound + reuss_bound) / 2
def bit(number, index):
    """Return the index-th bit (0-based, LSB first) of *number* as 0 or 1."""
    return 1 if number & (1 << index) else 0
from typing import Tuple
from typing import Optional
import re
def deprivatize(name: str) -> Tuple[str, Optional[str]]:
    """
    De-privatize a member name if possible.

    CPython name-mangles class-private attributes: ``__attr`` used inside
    ``class Cls`` becomes ``_Cls__attr``. This reverses that mangling.

    :param name: Name (possibly mangled, e.g. ``_Cls__attr``).
    :return: Deprivatized name (e.g. ``__attr``) and owner class name
        (e.g. ``Cls``); or the original name and ``None`` when the name
        does not look mangled.
    """
    # Shape: "_<Cls>__<attr>..." where <Cls> contains no underscore in this
    # regex's first segment; the trailing (?<!__) excludes dunder-style
    # endings, which CPython does not mangle.
    matches = re.match(r"^_([^_]+)__[^_]+.*?(?<!__)$", name)
    if matches:
        cls_name = matches.groups()[0]
        # Drop the "_<Cls>" prefix, keeping the attribute's leading "__".
        return name[len(cls_name) + 1 :], cls_name
    return name, None
def remove_offset(data, start_idx, end_idx, set_between=False):
    """
    Remove an offset (step/jump) from the data.

    Note: ``data`` is modified in place via sliced assignment and is also
    returned for convenience.

    Parameters
    ----------
    data : `pint.Quantity`
        Data to be operated upon.
    start_idx : int
        Index that marks the start of the offset.
    end_idx : int
        Index that marks the end of the offset.
    set_between : bool
        Set the data after the start point up to the end point to have the
        value of the start point. Default is `False`.
    Returns
    -------
    data : `pint.Quantity`
        Data with the offset removed.
    """
    # The offset is the jump between the end and start markers; subtracting
    # it from everything after end_idx re-levels the tail of the signal.
    offset = data[end_idx] - data[start_idx]
    # Optionally flatten the intermediate samples (during the offset) to the
    # value at the start point.
    if set_between:
        data[start_idx: end_idx] = data[start_idx]
    data[end_idx:] = data[end_idx:] - offset
    return data
def get_OP_matrix(self):
    """Get the Operating Point Matrix of a LUT object.

    Parameters
    ----------
    self : LUT
        a LUT object

    Returns
    -------
    OP_matrix : ndarray or None
        Operating Point Matrix; None when no simulation or variable load
        is attached.
    """
    simu = self.simu
    if simu is None:
        return None
    var_load = simu.get_var_load()
    return None if var_load is None else var_load.OP_matrix
def gap_count(ts,state="gap",dtype=int):
    """ Count missing data
    Identifies gaps (runs of missing or non-missing data) and quantifies the
    length of the gap in terms of number of samples, which works better for
    regular series. Each time point receives the length of the run it
    belongs to.
    Parameters
    ----------
    ts : :class:`DataFrame <pandas:pandas.DataFrame>`
        Time series to analyze
    state : `str` one of 'gap'|'good'|'both'
        State to count. If state is gap, block size of missing data are counted
        and reported for time points in the gap (every point in a given gap will
        receive the same value). Non missing data will have a size of zero.
        Setting state to 'good' inverts this -- missing blocks are reported as
        zero and good data are counted. Any other value ('both') reports the
        run length for every point regardless of state.
    dtype : `str` or `type`
        Data type of output, should be acceptable to
        pandas :meth:`astype <pandas:pandas.DataFrame.astype>`
    Returns
    -------
    :class:`DataFrame <pandas:pandas.DataFrame>`
        Same shape/index as ``ts``; every cell of each run is overwritten
        below, so the fillna(0) values here only fix the dtype conversion.
    """
    ts_out = ts.fillna(0).astype(dtype)
    s = ts.index.to_series()
    for c in ts.columns:
        #test missing values
        miss = ts[c].isna()
        #create consecutive groups that increment each time the "is missing state" (na or not na) changes
        g = miss.ne(miss.shift()).cumsum()
        # count = run length (number of samples) of each consecutive group
        count = s.groupby(g).count()
        # g holds a group id for every row; mapping g through count assigns
        # each row the length of the run it belongs to
        ts_out[c] = g.map(count)
        # zero out whichever state the caller is not interested in
        if state == "gap":
            ts_out.loc[~miss,c] = 0
        elif state == "good":
            ts_out.loc[miss,c] = 0
    return ts_out
def parse_module(string: str) -> str:
    """Identity "type" function used by behave to accept a module name
    (a dot-separated module path) on the command line.

    :param string: a string representing a module to be imported
    :return: the input string, unchanged
    """
    return string
import re
def split_verilog_ports(code):
    """
    Splits assignments of individual nets to wide cell ports into assignments
    of those nets to 1-bit wide cell ports. Effectively splits cell ports as
    well.

    :param code: Verilog source text.
    :return: The rewritten Verilog text.
    """
    def sub_func(match):
        # "port" is the port name, "conn" the contents of the {...}
        # concatenation it was connected to.
        port = match.group("port")
        conn = match.group("conn").strip().replace("\n", "")
        # Get individual signals
        signals = [s.strip() for s in conn.split(",")]
        # Format new port connections. Signals in a Verilog concatenation
        # are listed MSB first, so the first signal gets the highest index.
        conn = []
        for i, signal in enumerate(signals):
            j = len(signals) - 1 - i
            # ".\port[j] (sig )" uses an escaped identifier for the 1-bit port.
            conn.append(".\\{}[{}] ({} )".format(port, j, signal))
        conn = ", ".join(conn)
        return conn
    # Matches ".port( { sig, sig, ... } )" style connections.
    code = re.sub(
        r"\.(?P<port>\S+)\s*\(\s*{(?P<conn>[^}]+)}\s*\)",
        sub_func,
        code,
        flags=re.DOTALL
    )
    return code
def time_into_milliseconds(time_string: str) -> int:
    """Convert an "H:M:S.f"-style time string to integer milliseconds.

    Fix/generalization: the original used fixed-width slicing (``[:2]``,
    ``[3:5]``, ``[6:]``) which silently mis-parsed single-digit hour or
    minute fields; splitting on ":" handles both "01:02:03.5" and "1:2:3.5".

    :param time_string: time as hours:minutes:seconds[.fraction]
    :return: total milliseconds, truncated to int
    """
    hours, mins, seconds = time_string.split(":")
    return int(int(hours) * 3600000 + int(mins) * 60000 + float(seconds) * 1000)
def uri_to_pk(uri):
    """
    Convert a resource URI to the primary key of the resource
    (the last path segment, ignoring any trailing slash).
    """
    trimmed = uri.rstrip('/')
    return trimmed.rsplit('/', 1)[-1]
def has_no_empty_params(rule):
    """Check whether a route rule has a default for every URL argument.

    Arguments:
        rule: the route rule to check (exposes ``defaults`` and
        ``arguments`` attributes, either of which may be None)

    Returns:
        bool: True when every argument has a default (i.e. the route can
        be built without supplying parameters), otherwise False.
    """
    defaults = () if rule.defaults is None else rule.defaults
    arguments = () if rule.arguments is None else rule.arguments
    return len(defaults) >= len(arguments)
import multiprocessing
def set_threads(threads: int, set_global: bool = True) -> int:
    """Parse and optionally set the global number of threads.

    Parameters
    ----------
    threads : int
        Requested thread count. Values above the available core count are
        clamped to the maximum; 0 means "all cores"; negative values mean
        "all cores minus that many".
    set_global : bool
        When True (default), store the parsed value in the module-global
        ``MAX_THREADS``; when False, only parse and return it.

    Returns
    -------
    : int
        The parsed number of threads.
    """
    available = multiprocessing.cpu_count()
    # Clamp to the hardware maximum; 0/negative wraps around the core count.
    parsed = min(threads, available)
    while parsed <= 0:
        parsed += available
    if set_global:
        global MAX_THREADS
        MAX_THREADS = parsed
    return parsed
def _get_beacon(to_uuid: str) -> dict:
"""
Get a weaviate-style beacon.
Parameters
----------
to_uuid : str
The UUID to create beacon for.
Returns
-------
dict
Weaviate-style beacon as a dict.
"""
return {
"beacon": f"weaviate://localhost/{to_uuid}"
} | c0af50bc3531745ebd4c322902d1ce089d8a975f | 106,659 |
def passthrough(x):
    """Identity function; used when testing simple raster calculator calls."""
    return x
def pascal_triangle(n):
    """Compute the first *n* rows of Pascal's Triangle as a list of lists.

    Returns [] for n <= 0.
    """
    if n <= 0:
        return []
    rows = [[1]]
    while len(rows) < n:
        prev = rows[-1]
        # Each interior entry is the sum of the two entries above it.
        rows.append([1] + [prev[i] + prev[i + 1] for i in range(len(prev) - 1)] + [1])
    return rows
def Prefix(string, pad, length):
    """
    Left-pad *string* with the single character *pad* until it is
    *length* characters long.
    """
    assert(len(string) <= length)
    assert(len(pad) == 1)
    # str.rjust performs exactly this left-padding.
    return string.rjust(length, pad)
def ensure_three_decimal_points_for_milliseconds_and_replace_z(
    datetimestring: str,
) -> str:
    """
    Normalize a SciHub datetime string for ``datetime.fromisoformat``.

    SciHub datetimes can carry fewer than 3 fractional digits, so the
    fraction is right-padded with zeros to 3 digits (added as ".000" when
    absent); the trailing "Z" is replaced with "+00:00".

    :param datetimestring: Str representing a SciHub Datetime
    :returns: Str representing a correctly padded SciHub Datetime
    """
    stripped = datetimestring.replace("Z", "")
    base, dot, fraction = stripped.partition(".")
    # No fraction at all -> ".000"; short fraction -> pad right with zeros.
    fraction = fraction.ljust(3, "0") if dot else "000"
    return f"{base}.{fraction}+00:00"
import re
def phone_number_to_int(phone):
    """
    Convert a string phone number to an integer, as needed by track_by_phone().
    Dots and dashes are stripped before conversion.

    >>> phone_number_to_int('503-555-1234')
    5035551234
    >>> phone_number_to_int('503.555.1234')
    5035551234
    """
    # '.' needs no escaping inside a character class.
    digits = re.sub(r"[.-]", "", phone)
    return int(digits)
def show_percentage(value, show_units):
    """Format a fractional value as a percentage.

    :param value: fraction (e.g. 0.5 for 50%) or None.
    :param show_units: when True, return a string with a " %" suffix;
        otherwise return the rounded float.
    :return: "unknown" for None; otherwise a string or float percentage.
    """
    if value is None:
        return "unknown"
    percent = round(float(value) * 100.0, 2)
    return f"{percent} %" if show_units else percent
def apply_opacity(im, opacity):
    """ Apply opacity to an image.

    :param im: PIL image in mode 'RGB', 'RGBA' or 'LA'. Modified in place
        (an alpha band is written via ``putalpha``).
    :param opacity: Alpha value 0-255; 255 is a no-op.
    :return: The same image object.
    :raises NotImplementedError: for any other image mode.
    """
    if opacity == 255:
        return im
    if im.mode == 'RGB':
        # No alpha band yet: attach a constant one.
        im.putalpha(opacity)
        return im
    elif im.mode in ('RGBA', 'LA'):
        # Scale the existing alpha band by opacity/255.
        alpha_index = len(im.mode) - 1
        a = im.split()[alpha_index]
        opacity_scale = opacity / 255
        a = a.point(lambda i: i * opacity_scale)
        im.putalpha(a)
        return im
    else:
        raise NotImplementedError()
import aiohttp
async def download_file(url, file_name) -> bool:
    """
    Download a file to a specified path.

    :url -> url of the file
    :file_name -> file name (with extension) to write under the
        ``contents/`` directory, which must already exist

    Returns True when the server answered with HTTP 200.

    NOTE(review): the response body is written to disk *before* the status
    is checked, so a non-200 error body is still saved to the file.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            with open(f'contents/{file_name}', mode='wb') as byte_file:
                byte_file.write(await resp.read())
            return resp.status == 200
def aascore(aa1, aa2, match=1, penaltyA=-10000, penaltyX=0, gap_penalty=-1):
    """Return the alignment score for a pair of amino-acid characters.

    Fix: a matching pair previously returned the hard-coded constant 1,
    ignoring the *match* parameter; it now returns *match* (default 1,
    so existing callers see identical results).

    :param aa1: first amino-acid character (case-insensitive)
    :param aa2: second amino-acid character (case-insensitive)
    :param match: score for identical residues
    :param penaltyA: score for a mismatch between two ordinary residues
    :param penaltyX: score when either residue is the wildcard 'X'
    :param gap_penalty: score when either character is non-alphabetic (a gap)
    :return: the matching score (int by default)
    """
    aa1 = aa1.upper()
    aa2 = aa2.upper()
    # Non-letters (e.g. '-') denote gaps.
    if not aa1.isalpha() or not aa2.isalpha():
        return gap_penalty
    if aa1 == aa2:
        return match
    if aa1 == 'X' or aa2 == 'X':
        return penaltyX
    return penaltyA
def insert(template, data):
    """Replace each ``§key§`` placeholder in *template* with its value from *data*."""
    result = template
    for key, val in data.items():
        result = result.replace(f'§{key}§', val)
    return result
def support(itemset):
    """
    Returns the support value for the given itemset.

    itemset is a pandas dataframe with one row per basket, and one column per
    item; cell values are expected to be boolean/0-1 flags (the columns are
    combined with elementwise ``&``) — TODO confirm dtype with callers.

    Returns the fraction of baskets that contain every item (float in [0, 1]).
    """
    # Get the count of baskets where all the items are 1
    baskets = itemset.iloc[:,0].copy()
    for col in itemset.columns[1:]:
        baskets = baskets & itemset[col]
    return baskets.sum() / float(len(baskets))
import re
def get_url(id, hashtag):
    """Return URL to a hashtag or a user.

    :param id: Either a full instagram URL or a bare username/hashtag name.
        (NOTE(review): the parameter name shadows the builtin ``id``.)
    :param hashtag: True to treat ``id`` as a hashtag, False as a username.
    :return: The full hashtag/profile URL.
    :raises SystemExit: when ``id`` matches neither a URL nor a bare name.
    """
    if hashtag:
        # Pattern for hashtag URLs and bare hashtag names.
        url = re.match(
            "^http[s]?://www.instagram.com/explore/tags/[a-zA-Z0-9_]+", id
        )
        name = re.match("^[a-zA-Z0-9_]+$", id)
    else:
        # Patterns for usernames: 2-30 chars of letters, digits, "_", ".".
        url = re.match(
            r"^http[s]?://www.instagram.com/[a-zA-Z0-9_\.]{2,30}[/]?$", id
        )
        name = re.match(r"^[a-zA-Z0-9_\.]{2,30}$", id)
    # Return full username or hashtag url.
    if url:
        return url.group()
    elif name and hashtag:
        return "https://www.instagram.com/explore/tags/" + name.group()
    elif name and not hashtag:
        return "https://www.instagram.com/" + name.group()
    # Shut down program if some unexpected error occurres.
    else:
        raise SystemExit(
            "Some error occurred, couldn't match username or hashtag."
        )
def format_timedelta(timedelta):
    """
    Format a timedelta into a human-friendly string such as "2d 4h 5s".

    Zero-valued components are omitted; a zero timedelta yields "".

    Parameters
    ----------
    timedelta : timedelta

    Returns
    -------
    str
        Formatted timedelta.

    Examples
    --------
    >>> from datetime import timedelta
    >>> from tomputils import mattermost as mm
    >>> td = timedelta(days=2, hours=4, seconds=5)
    >>> print(mm.format_timedelta(td))
    2d 4h 5s
    """
    remainder = timedelta.total_seconds()
    days, remainder = divmod(remainder, 60 * 60 * 24)
    hours, remainder = divmod(remainder, 60 * 60)
    minutes, seconds = divmod(remainder, 60)
    pieces = []
    if days > 0:
        pieces.append('%dd' % days)
    if hours > 0:
        pieces.append('%dh' % hours)
    if minutes > 0:
        pieces.append('%dm' % minutes)
    if seconds > 0:
        pieces.append('%ds' % seconds)
    return ' '.join(pieces)
import functools
import itertools
def allow_empty_iterable(func):
    """
    Wrap *func* (e.g. max, min with no default) so that an empty iterable
    yields None instead of raising ValueError/StopIteration.

    The first element is pulled eagerly to detect emptiness, then chained
    back in front of the remaining iterator before calling *func*.
    """
    @functools.wraps(func)
    def wrap(iterable):
        iterator = iter(iterable)
        try:
            first = next(iterator)
            return func(itertools.chain([first], iterator))
        except StopIteration:
            return None
    return wrap
import json
def decode_metric(encoded_metric_name):
    """
    Decode the metric name as encoded by encode_metric_name.

    Params:
        encoded_metric_name: a string encoded in a format as returned by encode_metric_name()
            example: 'metricName {"metricTagKey1":"metricValue1","metricTagKey2":"metricValue2"}'
    Returns:
        The metric name and the dictionary of tags.
    Raises:
        ValueError: when encoded_metric_name is None/empty, or when the tag
            portion is not valid JSON (the original parse error is chained
            via ``from``; the old code stuffed it into the ValueError args
            and caught overly-broad ``Exception``).
    """
    if encoded_metric_name is None or encoded_metric_name == "":
        raise ValueError("Invalid value for encoded_metric_name")
    metric_tags = {}
    metric_name = encoded_metric_name.strip()
    brace_index = encoded_metric_name.find('{')
    if brace_index > -1:
        try:
            # Everything from the first '{' onward is the JSON tag dict.
            metric_tags = json.loads(encoded_metric_name[brace_index:])
            metric_name = encoded_metric_name[:brace_index].strip()
        except ValueError as err:  # json.JSONDecodeError subclasses ValueError
            raise ValueError("Failed to parse the encoded_metric_name %s, invalid format"
                             % encoded_metric_name) from err
    return metric_name, metric_tags
def find_indices(predicate, List):
    """
    Return the indices of all elements of *List* that satisfy *predicate*,
    or an empty list when none do.

    find-indices even, [1 2 3 4] #=> [1, 3]

    >>> find_indices(lambda x: x > 2, [1, 2, 30, 404, 0, -1, 90])
    [2, 3, 6]
    """
    return [index for index, value in enumerate(List) if predicate(value)]
from typing import Iterable
import toml
def construct_path(path: Iterable[str]) -> str:
    """
    Construct a dotted TOML path to a key.

    Each element is serialized via ``toml.dumps({elem: 0})``, which yields
    the key (quoted/escaped per TOML rules) followed by " = 0" and a
    newline; slicing off the trailing 5 characters leaves only the
    properly-escaped key. The escaped keys are then joined with dots.

    :param path: The path elements.
    """
    return '.'.join([toml.dumps({elem: 0})[:-5] for elem in path])
def source2ids(source_words, vocab):
    """Map the source words to their ids and return a list of OOVs in the source.

    Args:
        source_words: list of words (strings)
        vocab: Vocabulary object (supports ``vocab[word]``, ``vocab.UNK``
            and ``vocab.size()``)
    Returns:
        ids:
            A list of word ids (integers); OOVs are represented by their
            temporary source OOV number. If the vocabulary size is 50k and
            the source has 3 OOV tokens, these temporary ids are 50000,
            50001, 50002.
        oovs:
            The OOV words (strings), ordered by their temporary OOV numbers.
    """
    ids = []
    oovs = []
    unk_id = vocab.UNK
    for word in source_words:
        word_id = vocab[word]
        if word_id != unk_id:
            ids.append(word_id)
            continue
        # Out-of-vocabulary: register it once, then reference its position.
        if word not in oovs:
            oovs.append(word)
        ids.append(vocab.size() + oovs.index(word))
    return ids, oovs
def arith_mean(samples):
    """Compute the arithmetic mean of a set of samples.

    Fix: sums first and divides once, instead of dividing every sample by
    the count (which did N needless divisions and accumulated more
    floating-point error). An empty input returns 0.0, matching the
    original behavior.

    :param samples: sized iterable of numbers (or numeric strings)
    :return: the mean as a float; 0.0 for an empty input
    """
    if not samples:
        return 0.0
    return sum(float(sample) for sample in samples) / len(samples)
def countGenders(genders):
    """Count occurrences of each gender label in a list.

    Parameters
    ----------
    genders : list
        Gender list to count from (entries 'female', 'male', 'non-binary').

    Returns
    -------
    (female, male, nb) : tuple of int
        Counts for 'female', 'male' and 'non-binary' respectively.
    """
    return (
        genders.count('female'),
        genders.count('male'),
        genders.count('non-binary'),
    )
def point_respect_line(point, line):
    """
    Classify a point's side relative to a line through two points.

    :param point: point (x, y)
    :param line: line as two points (point1, point2)
    :return: a number that is >0 on one side, <0 on the other, and ==0
        when the point lies exactly on the line.
    """
    # Algebraic form of the 2D cross product (p2 - p1) x (point - p1),
    # expanded so no vector library is needed.
    (x1, y1), (x2, y2) = line
    px, py = point[0], point[1]
    return (y1 - y2) * px + (x2 - x1) * py + x1 * y2 - x2 * y1
def sitk_copy_metadata(img_source, img_target):
    """
    Copy metadata (=DICOM Tags) from one image to another.

    Parameters
    ----------
    img_source : SimpleITK.Image
        Source image (metadata is read from here)
    img_target : SimpleITK.Image
        Target image (metadata is written here, in place)

    Returns
    -------
    SimpleITK.Image
        Target image with copied metadata
    """
    # Copy every key/value pair; matching keys on the target are overwritten.
    for k in img_source.GetMetaDataKeys():
        img_target.SetMetaData(k, img_source.GetMetaData(k))
    return img_target
def manhattan_distance(position, other):
    """Return the Manhattan (taxicab) distance between two points.

    Sum of absolute coordinate differences; diagonal movement costs twice
    as much as axial movement. Both arguments expose ``.x`` and ``.y``.
    """
    dx = abs(position.x - other.x)
    dy = abs(position.y - other.y)
    return dx + dy
def test_bit_at_pos(A: int, pos: int) -> bool:
"""0-indexed, little endian
Args:
A:
pos:
Returns:
Examples:
>>> test_bit_at_pos(int("1001", 2), pos=0)
True
>>> test_bit_at_pos(int("1001", 2), pos=1)
False
>>> test_bit_at_pos(int("1001", 2), pos=2)
False
>>> test_bit_at_pos(int("1001", 2), pos=3)
True
>>> test_bit_at_pos(int("1001", 2), pos=4)
False
"""
return (A & 1 << pos) != 0 | 7f535fe743b205404e1ecc14e24b806ad3dd285a | 106,740 |
def int_to_binary(integer: int, num_bits: int) -> str:
    """Render an integer as a zero-padded bit string of the given width.

    Args:
        integer (int): Integer to render.
        num_bits (int): Minimum number of bits in the output string.
    Returns:
        str: The integer as a binary string, left-padded with zeros.
    """
    return format(integer, f"0{num_bits}b")
def temp(card, minutes=None):
    """Retrieve the current temperature from the Notecard.

    Args:
        card (Notecard): The current Notecard object.
        minutes (int): If specified (and non-zero), creates a templated
            _temp.qo file that gathers the Notecard temperature value at
            the specified interval.
    Returns:
        string: The result of the Notecard request.
    """
    request = {"req": "card.temp"}
    if minutes:
        request["minutes"] = minutes
    return card.Transaction(request)
def _mock_gethostbyname_ex(aliaslist=None, ipaddrlist=None):
"""Returns a mocked socket.gethostbyname_ex function for testing use
"""
if aliaslist is None:
aliaslist = []
if ipaddrlist is None:
ipaddrlist = ['127.0.0.1']
return lambda hostname: (hostname, aliaslist, ipaddrlist) | 72448ad93e53a89f23ff668d91342d244e68d069 | 106,748 |
def validate_window_size(candidate_window_size: int) -> int:
    """Return validated window size candidate, raising a meaningful error otherwise.

    Parameters
    -----------------
    candidate_window_size: int,
        The candidate window size value to validate.

    Raises
    -----------------
    ValueError,
        If the provided window size parameter is not an int or is not
        strictly positive.

    Returns
    -----------------
    The validated value.
    """
    if not isinstance(candidate_window_size, int):
        raise ValueError(
            "The window size parameter must be an integer.\n"
            f"You have provided `{candidate_window_size}`, "
            f"which is of type `{type(candidate_window_size)}`."
        )
    if candidate_window_size < 1:
        raise ValueError(
            "The window size parameter must a strictly positive "
            f"integer value. You have provided `{candidate_window_size}`."
        )
    return candidate_window_size
def get_namespace(label):
    """
    Return the 'namespace' attribute of an etree element, or None when
    the element has no such attribute.
    """
    attribs = label.attrib
    if 'namespace' not in attribs:
        return None
    return attribs['namespace']
from typing import Sequence
from typing import Any
from typing import Optional
def rindex(items: Sequence[Any], needle: Any) -> Optional[int]:
    """
    Find the rightmost occurrence of *needle*.

    Args:
        items (sequence): The items to search
        needle (object): The object to search for
    Returns:
        The rightmost index of `needle`, or `None` when absent.
    """
    # Walk indices from the end; the first hit is the rightmost occurrence.
    for position in range(len(items) - 1, -1, -1):
        if items[position] == needle:
            return position
    return None
from typing import List
def RotateMatrix(image: List[List[int]]) -> List[List[int]]:
    """Rotates the specified square image by 90 degrees, in place
    (counterclockwise, per the doctests); the same list is also returned.
    >>> RotateMatrix([[0]])
    [[0]]
    >>> RotateMatrix([[0, 1], [2, 3]])
    [[1, 3], [0, 2]]
    >>> RotateMatrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
    [[2, 5, 8], [1, 4, 7], [0, 3, 6]]
    >>> RotateMatrix([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
    [[3, 7, 11, 15], [2, 6, 10, 14], [1, 5, 9, 13], [0, 4, 8, 12]]
    >>> RotateMatrix([[0, 1, 2, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25]])
    [[5, 10, 15, 20, 25], [4, 9, 14, 19, 24], [2, 8, 13, 18, 23], [1, 7, 12, 17, 22], [0, 6, 11, 16, 21]]
    """
    n = len(image)
    # Rotate ring by ring: for each cell of the top-left quadrant, cycle the
    # four cells that map onto each other under a 90-degree rotation, using
    # a single temporary (image_ij) per 4-cycle.
    for i in range(n // 2):
        for j in range(i, n - i - 1):
            image_ij = image[i][j]
            image[i][j] = image[j][n - i - 1]
            image[j][n - i - 1] = image[n - i - 1][n - j - 1]
            image[n - i - 1][n - j - 1] = image[n - j - 1][i]
            image[n - j - 1][i] = image_ij
    return image
def find_sps_between(net, source_np, target_np):
    """Return list of all direct sps (synapse pools) whose target is
    *target_np* and whose source lists contain *source_np*.

    Each matching pool name appears at most once, in dict iteration order.
    """
    matches = []
    for sp_name, sp in net["synapse_pools"].items():
        if sp["target"] != target_np:
            continue
        # "source" is a list of lists of source np names.
        if any(source_np in group for group in sp["source"]):
            if sp_name not in matches:
                matches.append(sp_name)
    return matches
def try_get_value_case_insensitive(d, key_name):
    """
    Look up *key_name* in dict *d* ignoring case and return the value of
    the first matching key (in dict iteration order), or None.
    """
    target = key_name.lower()
    for name, value in d.items():
        if name.lower() == target:
            return value
    return None
import json
def analyse_line_dataturk_format(line):
    """
    Scan one json line from a Dataturks export and return the file name
    and the identified label.

    Fix: the "label"-key branch indexed ``labels[0]`` unconditionally, so an
    empty label list raised IndexError; it now returns None (with the same
    warning print), matching the "labels"-key branch's handling of empties.

    Args:
        line: string read from json file
    Returns:
        file_name: string with the name of the file in the json
            (everything after the first "___" in the "content" field)
        label: string with the identified label value, or None
    """
    line_dict = json.loads(line)
    file_name = line_dict["content"]
    file_name = "___".join(file_name.split("___")[1:])
    label = None
    annotation = line_dict.get("annotation")
    if annotation is not None:
        if "labels" in annotation:
            labels = annotation["labels"]
            if len(labels) > 1:
                # Original behavior: warn and leave label as None.
                print("more than one label contained in file {}".format(file_name))
            elif len(labels) == 0:
                print("No label contained in file {}".format(file_name))
            else:
                label = labels[0]
        elif "label" in annotation:
            labels = annotation["label"]
            if len(labels) > 1:
                print("more than one label contained in file {}".format(file_name))
                # Original behavior for this branch: keep the first label.
                label = labels[0]
            elif len(labels) == 0:
                print("No label contained in file {}".format(file_name))
            else:
                label = labels[0]
    return (file_name, label)
from typing import List
from typing import Tuple
def read_pair_file(filename: str) -> List[Tuple[int, List[int]]]:
    """Read image pairs from text file and output a list of tuples each containing the reference image ID and a list of
    source image IDs.

    Expected layout (inferred from the parsing below — confirm against the
    producer): first line is the number of viewpoints; then per viewpoint a
    line with the reference id, followed by a line of the form
    "<count> <src_id> <score> <src_id> <score> ...".

    Args:
        filename: pair text file path string

    Returns:
        List of tuples with reference ID and list of source IDs; viewpoints
        with no source views are skipped.
    """
    data = []
    with open(filename) as f:
        num_viewpoint = int(f.readline())
        for _ in range(num_viewpoint):
            ref_view = int(f.readline().rstrip())
            # [1::2] takes every other token from index 1: the ids, skipping
            # the leading count and the interleaved score values.
            src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]
            if len(src_views) != 0:
                data.append((ref_view, src_views))
    return data
def records_needed(pulse_length, samples_per_record):
    """Return how many records are needed to store pulse_length samples."""
    # Ceiling division: -(-a // b) == ceil(a / b) for positive b, and is
    # identical to 1 + (a - 1) // b.
    return -(-pulse_length // samples_per_record)
def MathChallenge_cases(rpn_expression: str) -> int:
    """Returns the value of an arithmetic expression written in Reverse Polish notation.

    Fixes over the previous version:
      * a trailing number no longer needs a trailing space to be counted
        (previously "3" evaluated to 0 because the last number was dropped);
      * a number directly abutting an operator (e.g. "1 2+") is now flushed
        onto the stack before the operator fires instead of corrupting it.

    Args:
        rpn_expression:
            The input string containing the Reverse Polish notation expression which to
            evaluate. rpn_expression is composed of only non-negative integers and the
            operators: +,-,* and /. Division truncates toward zero.

    Raises:
        ValueError: when an operator is seen with fewer than two operands
            available on the stack.

    Examples:
        >>> MathChallenge_cases("1 1 + 1 + 1 +")
        4
        >>> MathChallenge_cases("4 5 + 2 1 + *")
        27
        >>> MathChallenge_cases("2 12 + 7 /")
        2
        >>> MathChallenge_cases("1 1 - 1 + 1 +")
        2
        >>> MathChallenge_cases("1 1 - +")
        Traceback (most recent call last):
        ...
        ValueError: Malformed RPN arithmetic expression: 1 1 - +; Stack had < 2 elements (≥2 expected)
    """
    arithmetic_operators = {"+", "-", "*", "/"}
    curr_num = None
    operand_stack = []

    def _flush_number():
        # Push the number currently being accumulated (if any) onto the stack.
        nonlocal curr_num
        if curr_num is not None:
            operand_stack.append(curr_num)
            curr_num = None

    for char in rpn_expression:
        if char.isdigit():
            # Accumulate multi-digit numbers one character at a time.
            curr_num = (0 if curr_num is None else curr_num) * 10 + int(char)
        elif char == " ":  # delimiter between operands & operators
            _flush_number()
        elif char in arithmetic_operators:
            _flush_number()
            try:
                right_operand, left_operand = operand_stack.pop(), operand_stack.pop()
            except IndexError as e:
                raise ValueError(
                    f"Malformed RPN arithmetic expression: {rpn_expression}; "
                    "Stack had < 2 elements (≥2 expected)"
                ) from e
            if char == "+":
                result = left_operand + right_operand
            elif char == "-":
                result = left_operand - right_operand
            elif char == "*":
                result = left_operand * right_operand
            else:  # char == "/"
                result = int(left_operand / right_operand)  # truncate fractions
            operand_stack.append(result)
    # Flush a trailing number that had no trailing delimiter.
    _flush_number()
    return operand_stack.pop() if operand_stack else 0
def pltime(self, tmin="", tmax="", **kwargs):
    """Define the time range for which data are to be displayed.

    APDL Command: PLTIME

    Parameters
    ----------
    tmin
        Minimum time (defaults to the first point stored).
    tmax
        Maximum time (defaults to the last point stored).

    Notes
    -----
    Defines the time (or frequency) range (within the range stored) for
    which data are to be displayed. Time is always displayed in the Z-axis
    direction for 3-D graph displays. If XVAR = 1, time is also displayed
    in the X-axis direction and this control also sets the abscissa scale
    range.
    """
    return self.run("PLTIME,{},{}".format(tmin, tmax), **kwargs)
def deltas(xs):
    """Computes the differences between the elements of a sequence of integers.

    >>> deltas([-1, 0, 1])
    [1, 1]
    >>> deltas([1, 1, 2, 3, 5, 8, 13])
    [0, 1, 1, 2, 3, 5]

    @param xs: A sequence of integers.
    @type xs: C{list}
    @return: A list of differences between consecutive elements of L{xs}.
    @rtype: C{list}
    """
    # Iterative pairwise subtraction: the original recursive version copied
    # the list on every step (O(n^2)) and hit the recursion limit on long
    # inputs. Sequences of length < 2 naturally yield [].
    return [b - a for a, b in zip(xs, xs[1:])]
def build_readable_attribute_key(key: str, attribute_name: str):
    """
    Format a nested attribute name to be more readable and convenient to
    display in the fetch-incident command. For the input "srcIpAddr" with
    attribute "incidentSrc", the formatted key is "source_ipAddr".

    Args:
        key (str): The key extracted from the original incident attribute value.
        attribute_name (str): The original incident attribute name.

    Returns:
        str: Formatted key.
    """
    # Prefix gives a readable source/target grouping in the incident layout.
    if 'Src' in attribute_name:
        prefix = 'source_'
    elif 'Target' in attribute_name:
        prefix = 'target_'
    else:
        return key  # no grouping applies -> key is returned unchanged
    lowered = key.lower()
    if 'ipaddr' in lowered:
        return prefix + 'ipAddr'
    if 'hostname' in lowered:
        return prefix + 'hostName'
    return prefix + key
def _get_offer_id(page):
"""Gets the relevant Character Id for the BeautifulSoup page"""
return int(page.find(class_="text-character")['data-character-id']) | b19b03157e8d3c0867812283102059cb5257b715 | 106,796 |
import torch
def sample_images_at_mc_locs(
target_images: torch.Tensor,
sampled_rays_xy: torch.Tensor,
):
"""
Given a set of pixel locations `sampled_rays_xy` this method samples the tensor
`target_images` at the respective 2D locations.
This function is used in order to extract the colors from ground truth images
that correspond to the colors rendered using a Monte Carlo rendering.
Args:
target_images: A tensor of shape `(batch_size, ..., 3)`.
sampled_rays_xy: A tensor of shape `(batch_size, S_1, ..., S_N, 2)`.
Returns:
images_sampled: A tensor of shape `(batch_size, S_1, ..., S_N, 3)`
containing `target_images` sampled at `sampled_rays_xy`.
"""
ba = target_images.shape[0]
dim = target_images.shape[-1]
spatial_size = sampled_rays_xy.shape[1:-1]
# The coordinate grid convention for grid_sample has both x and y
# directions inverted.
xy_sample = -sampled_rays_xy.view(ba, -1, 1, 2).clone()
images_sampled = torch.nn.functional.grid_sample(
target_images.permute(0, 3, 1, 2),
xy_sample,
align_corners=True,
mode="bilinear",
)
return images_sampled.permute(0, 2, 3, 1).view(ba, *spatial_size, dim) | ebcd600f7d51a556c8b6cabdb24ee6512d0b1bf4 | 106,798 |
def validate_marriage_before_death(gedcom):
    """
    Marriage should occur before death of either spouse (US05).
    Returns a list of error strings, one per violation found.
    """
    errors = []
    for family in gedcom.families:
        wedding = family.married
        if not wedding:
            continue
        # Check both spouses with the same rule; only the role word differs.
        for role, spouse_id in (('husband', family.husband_id),
                                ('wife', family.wife_id)):
            person = gedcom.individual_with_id(spouse_id)
            if person.death is not None and person.death < wedding:
                errors.append(
                    f'Error: US05: Family ({family.id}) has the {role} {person.name} ({spouse_id})'
                    f' death date ({person.death.strftime("%x")}) before the marriage date ({wedding.strftime("%x")})'
                )
    return errors
def get_game_obj(game, **kwargs):
    """
    Return a JSON-compatible read-only representation of the game state.
    Additional properties can be added using keyword arguments.
    """
    state = {
        'tries': game.get_tries(),
        # Set isn't JSON serializable, so convert the guesses to a list.
        'guesses': list(game.get_guesses()),
        'phrase': game.get_display_phrase(),
        'game_over': game.is_game_over(),
        'game_won': game.is_game_won(),
    }
    # Keyword arguments override the base keys, matching {**base, **kwargs}.
    state.update(kwargs)
    return state
def AIC_chisq(t_dof, t_chisq):
    """Compute the Akaike information criterion from a chi-squared value.

    AIC = chi^2 + 2k, where k is the number of model parameters.
    Assumes normally distributed errors.
    Following Sz. Borsanyi et. al., doi.org/10.1126/science.1257050

    Parameters
    ----------
    t_dof : int
        Number of degrees of freedom, i.e. number of parameters of the model.
    t_chisq : float
        Chi-squared of the model.

    Returns
    -------
    float
        The AIC value.
    """
    # The original docstring embedded raw LaTeX (e.g. "\\chi") in a non-raw
    # string, producing invalid escape sequences (SyntaxWarning on 3.12+).
    return t_chisq + 2 * t_dof
def type_str(ty: type) -> str:
    """
    Convert type ty to string.
    :param ty: str, typing.List[int], typing.List[typing.List[bool]], etc.
    :return: string form of type, "str", "List[int]", "List[List[bool]]", etc.
    """
    # Renamed the local variable: the original shadowed this function's own
    # name, which would break any recursive or re-entrant use.
    rendered = str(ty).replace("typing.", "")
    # Plain classes render as "<class 'name'>"; strip that wrapper.
    return rendered[8:-2] if rendered.startswith("<class '") else rendered
import pickle
def load_pkl(load_from):
    """
    Load a pickled object from disk.

    Inputs:
        load_from: (str) Filepath to read the pickled object from.
    Returns:
        (object) The unpickled object.
    """
    with open(load_from, 'rb') as handle:
        return pickle.load(handle)
import glob
def globFiles(dirName, ext='png'):
    """Return files matching the given extension in the given directory."""
    pattern = '{}/*.{}'.format(dirName, ext)
    return glob.glob(pattern)
def get_model_lr(model):
    """Get the current learning rate of a Keras model (must have been compiled)"""
    opt = model.optimizer
    # Read the raw optimizer variables (theano-era .get_value() accessors).
    base_lr = opt.lr.get_value()
    decay_rate = opt.decay.get_value()
    steps = opt.iterations.get_value()
    # Same time-based decay schedule Keras applies internally.
    return base_lr * (1.0 / (1.0 + decay_rate * steps))
import re
def string_to_class_name(string):
    """
    Single function to handle turning object names into class names.

    GRAPH_REFERENCE has names like `error_y`, which we'll turn into `ErrorY`.

    :param (str) string: A string that we'll turn into a class name string.
    :return: (str)
    """
    def _title(match):
        return match.group(0).title()
    # Capitalize the first letter: `error_y` --> `Error_y`.
    head_capped = re.sub(r'[A-Za-z]', _title, string, count=1)
    # Collapse `_<c...>` into `<C...>`: `Error_y` --> `ErrorY`.
    collapsed = re.sub(r'_[A-Za-z0-9]+',
                       lambda m: m.group(0)[1:].title(),
                       head_capped)
    return str(collapsed)
def group_in_sublists(list_of_objs, subslist_size):
    """group_in_sublists([1, 2, 3, 4], 2) --> [[1, 2], [3, 4]]"""
    chunks = []
    for start in range(0, len(list_of_objs), subslist_size):
        chunks.append(list_of_objs[start:start + subslist_size])
    return chunks
def _to_Hex(color):
"""
Converts (r,g,b) tuple into #RRGGBB hex code
"""
return "".join(f"{component:02x}" for component in color) | 686e9cb40e8981b00808cf5d31fb7b350e932164 | 106,833 |
def validate_command_line_parameter_keyword(keyword):
    """
    Validates ``CommandLineParameter``'s `keyword` parameter.

    Parameters
    ----------
    keyword : `None` or `str`
        Keyword parameter to validate.

    Returns
    -------
    keyword : `None` or `str`
        The validated keyword parameter.

    Raises
    ------
    TypeError
        If `keyword` is neither `None` nor `str` instance.
    """
    if keyword is None:
        return None
    if isinstance(keyword, str):
        # Normalize `str` subclasses to a plain `str`; exact `str` passes through.
        return keyword if type(keyword) is str else str(keyword)
    raise TypeError(f'`keyword` can be given as `None` or `str` instance, got {keyword.__class__.__name__}.')
def str_maker(arr):
    """Make a string by concatenating the decimal form of each integer."""
    return ''.join(str(num) for num in arr)
def slug_to_cp(cp_slug):
    """
    Convert a slug to a codepoint.

    :param cp_slug: Slug from URL (dash-separated decimal integers).
    :return: Codepoint tuple to be used.
    """
    return tuple(map(int, cp_slug.split('-')))
def transform(vector, rot, tran):
    """Apply the rotation matrix, then the translation, to a vector position."""
    rotated = vector @ rot
    return rotated + tran
def is_git_sha(xs: str) -> bool:
    """Returns whether the given string looks like a valid git commit SHA.

    A valid SHA is 7 to 40 ASCII hexadecimal characters (either case).
    """
    # Explicit ASCII hex set: str.isdigit() also accepts non-ASCII digits
    # (e.g. Arabic-Indic numerals), which are never valid in a git SHA.
    hex_chars = set("0123456789abcdefABCDEF")
    return 6 < len(xs) <= 40 and all(x in hex_chars for x in xs)
import logging
def get_formatter(minimal=False):
    """Returns a log formatter for one of two predefined formats."""
    fmt = (
        "%(levelname)s - %(message)s"
        if minimal
        else "[%(asctime)s]::%(pathname)s:%(lineno)d::%(levelname)s - %(message)s"
    )
    return logging.Formatter(fmt)
def most_frequent(List):
    """Return the element that occurs most often in the list."""
    distinct = set(List)
    return max(distinct, key=List.count)
def parameter_projection(parameters):
    """Project motion-model parameters from pyramid level l to level l+1.

    Only the two translation parameters (indices 0 and 3) are scaled; they
    are doubled in place. (The cited paper also mentions halving a6/a7;
    this implementation does not do that — presumably those indices are
    absent from this model; verify against callers.)

    Args:
        parameters (list): current parameters for the motion model at level l.

    Returns:
        parameters (list): the same list object, updated for level l+1.
    """
    parameters[0] *= 2  # a0
    parameters[3] *= 2  # b0
    return parameters
def load_reports_file(path):
    """
    Load radiology reports from a file. Reports must be separated by a line
    containing the string '----------------------------------------------'.

    Inputs:
        path        str     path to the file containing the reports
    Output:
        reportlist  list    a list with a report for each element
    """
    separator = "----------------------------------------------"
    reports = [""]
    with open(path, "r") as handle:
        for line in handle:
            if separator in line:
                # Start accumulating the next report.
                reports.append("")
            else:
                reports[-1] += line
    return reports
def get_neighbours(n, edge_list):
    """
    Get the neighbours of a vertex n.

    Inputs
    ------
    n : int
        Index of the vertex
    edge_list : array
        Array of the edges of the graph

    Returns
    -------
    neigh : list
        List of the neighbours of n
    """
    neighbours = []
    for edge in edge_list:
        if n not in edge:
            continue
        # Take the endpoint opposite n (for a self-loop this is n itself).
        neighbours.append(edge[1] if edge[0] == n else edge[0])
    return neighbours
def get_appliance_extra_info(
    self,
    ne_pk: str,
) -> dict:
    """Get appliance location, contact, and general overlay settings.

    Calls ``GET /appliance/extraInfo/{nePk}``.

    .. note::
        This API Call is not in current Swagger as of Orch 9.0.3

    :param ne_pk: Network Primary Key (nePk) of existing appliance,
        e.g. ``3.NE``
    :type ne_pk: str
    :return: Dictionary of appliance information with a **location**
        object (``address``, ``address2``, ``city``, ``state``,
        ``zipCode``, ``country``), a **contact** object (``name``,
        ``email``, ``phoneNumber``), and an **overlaySettings** object
        (``ipsecUdpPort`` e.g. ``12000``, and
        ``isUserDefinedIPSecUDPPort`` indicating whether the user changed
        the IPSEC UDP port).
    :rtype: dict
    :raises ValueError: Checks format of provided NePK value containing
        ".NE"
    """
    if ".NE" not in ne_pk:
        raise ValueError(
            "nePk must be in format '0.NE', but %r was provided" % ne_pk
        )
    return self._get("/appliance/extraInfo/{}".format(ne_pk))
def exists(c, runner, path):
    """
    Return True if given path exists on the current remote host.

    :param c:
        `~invoke.context.Context` within to execute commands.
        (Unused here; kept for interface compatibility.)
    :param runner:
        Callable used to execute the remote command.
    :param str path:
        Path to check for existence.
    """
    probe = 'test -e "$(echo {})"'.format(path)
    result = runner(probe, hide=True, warn=True)
    return result.ok
def parse_layer_yml(arch_gcn, dim_input):
    """
    Parse the *.yml config file to retrieve the GNN structure.
    """
    order_layer = [int(token) for token in arch_gcn['arch'].split('-')]
    depth = len(order_layer)
    # Broadcast each single configured value across every layer.
    bias_layer = [arch_gcn['bias']] * depth
    act_layer = [arch_gcn['act']] * depth
    aggr_layer = [arch_gcn['aggr']] * depth
    dims_layer = [arch_gcn['dim']] * depth
    return [dim_input] + dims_layer, order_layer, act_layer, bias_layer, aggr_layer
def lowercase_dictionary(dirty_dictionary):
    """
    Standardize the keys in a header to lower case to reduce the number of
    lookup cases that need to be supported. Assumes that there are no
    duplicates; if there are, the last one is saved.
    """
    return {key.lower(): value for key, value in dirty_dictionary.items()}
def make_review_page_url(productId, page=1, reviews_per_page=5):
    """Make review pagination URL from page and productID"""
    # viewIndex is the 1-based index of the first review on the page.
    view_index = (page - 1) * reviews_per_page + 1
    return (
        "https://www.decathlon.it/it/ProductAvis_loadPaginationPage?"
        f"product_id={productId}&viewSize={reviews_per_page}&viewIndex={view_index}&"
        "currentEnvironment=PROD&componentId=ComponentProductAvis&"
        f"sortReview=1&ratingFilter=0&pageIndex={page}&collaborator=0"
    )
def str2bool(string):
    """
    True only if string is one of "yes", "true", "t", "1"
    (case-insensitive); False otherwise.
    """
    return string.lower() in {"yes", "true", "t", "1"}
def number_of_components(params):
    """Compute the number of Gaussian components (three parameters each)."""
    return len(params) // 3
import torch
def tile(input, multiples):
    """
    Construct a tensor by tiling a given tensor.

    Parameters
    ----------
    input : tensor
        A Tensor. 1-D or higher.
    multiples : tensor
        Must be one of the following types: int32, int64. 1-D.
        Length must be the same as the number of dimensions in input.

    Returns
    -------
    A Tensor. Has the same type as input.
    """
    # Thin compatibility wrapper over the torch builtin.
    return torch.tile(input, multiples)
import copy
def overwrite_notnull(v1, v2, **kwargs):  # pylint: disable=invalid-name,unused-argument
    """
    Completely overwrites one value with another, if not None.
    Always returns a deep copy, never either argument itself.
    """
    chosen = v1 if v2 is None else v2
    return copy.deepcopy(chosen)
def ISO_6391_to_6392(code: str) -> str:
    """
    Converts ISO 639-1 (2 letters) language codes to ISO 639-2 (3 letters).

    Only the handful of languages in the table below are supported.

    :param code: ISO 639-1 two-letter language code.
    :return: The corresponding three-letter code.
    :raises ValueError: If the code is not one of the known languages.
    """
    # Table lookup replaces the original long if/elif chain (same behavior,
    # including the error message for unknown codes).
    mapping = {
        "ca": "cat",
        "da": "dan",
        "en": "eng",
        "es": "spa",
        "it": "ita",
        "mn": "mon",
        "zh": "cmn",  # Mandarin specifically, not the macrolanguage "zho"
    }
    try:
        return mapping[code]
    except KeyError:
        raise ValueError("ISO 639-1 code not known: " + str(code)) from None
from pathlib import Path
def tmpdir_for_subworkflow(tmpdir_factory, wdl_main, wdl_sub, wdl_subsub, wdl_sub2):
    """Temporary directory with main WDL and all imported sub WDLs
    in a correct directory structure.

    Four files are written under the root:
        - main WDL: main.wdl
        - sub WDLs: sub/sub.wdl and sub/subsub.wdl
        - sub2 WDL: sub2/sub2.wdl

    Returns the root directory object.
    """
    root = tmpdir_factory.mktemp('main')
    for child in ('sub', 'sub2'):
        root.mkdir(child)
    contents = {
        ('main.wdl',): wdl_main,
        ('sub', 'sub.wdl'): wdl_sub,
        ('sub', 'subsub.wdl'): wdl_subsub,
        ('sub2', 'sub2.wdl'): wdl_sub2,
    }
    for parts, text in contents.items():
        target = root
        for part in parts:
            target = target / part
        Path(target).write_text(text)
    return root
def _sheet_list(spreadsheet):
"""sheets 파일 리스트
Args:
spreadsheet (obj): spreadsheet
Returns:
[dict]: {<title>: <id>}
"""
return {
worksheet.title: worksheet.id
for worksheet in spreadsheet.worksheets()
} | 9a2a169ade0cbf714f53eea83a0534508275a4be | 106,890 |
def load_file_lines(path: str) -> list:
    """Read the UTF-8 file at *path* and return its lines (newlines kept)."""
    with open(path, "r", encoding="utf8") as handle:
        return handle.readlines()
def format_error_message(code=None, reason=None, recommendation=None):
    """Format an error message for logging.

    Builds, from whichever pieces are provided:
        "code=<code!r>, <reason>\\nRecommendation:\\n\\t<recommendation>\\n"
    Separators are inserted only between pieces that are both present.
    """
    message = ''
    if code is not None:
        message += 'code=%r' % code
        # Separator after the code depends on what follows: ", " before a
        # reason, a bare newline when only a recommendation follows.
        if reason is not None:
            message += ', '
        elif recommendation is not None:
            message += '\n'
    if reason is not None:
        message += reason
        # Newline between the reason and a following recommendation block.
        if recommendation is not None:
            message += '\n'
    if recommendation is not None:
        message += 'Recommendation:\n\t%s\n' % recommendation
    return message
def callback(_):
    """Stub callback; ignores its argument and reports water as present."""
    return {"Water": True}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.