content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def is_skipped_node(node_entry):
    """Return True when a node is excluded from graph-tuner counting.

    Parameters
    ----------
    node_entry : dict
        Node entry with an "op" key naming the node's operator.

    Returns
    -------
    bool
        Whether the node's operator is one the graph tuner ignores.
    """
    # "Tuple" nodes carry no real computation, so the tuner skips them.
    return node_entry["op"] in ("Tuple",)
|
22c4e49a2bc65711d105c3adfd8bd8c22205ed19
| 165,426
|
def sanitize_kvstore_list(kvstore_list):
    """Create a new list of dictionaries containing only allowed keys.

    Parameters
    ----------
    kvstore_list : iterable of dict
        Items to sanitize.

    Returns
    -------
    list of dict
        One dict per input item, restricted to the allowed keys.  Keys
        absent from an item are omitted, never filled with None.
    """
    allowed_keys = (
        'id',
        'key',
        'value',
        'kv_type',
        'kv_format',
        'kv_inherited',
    )
    # Membership is checked first, so item[key] can never raise; the
    # previous item.get(key, None) fallback was therefore redundant.
    return [
        {key: item[key] for key in allowed_keys if key in item}
        for item in kvstore_list
    ]
|
6466560e417ab483dcfbac28fc495dab92f0a3cd
| 146,918
|
def slow_roll_diffusion(potential, potential_dif, planck_mass=1):
    """Returns the slow-roll diffusion as a function.

    Parameters
    ----------
    potential : function
        The potential of the slow-roll inflation simulated.
    potential_dif : function
        The first derivative of the potential.  Unused here; kept for
        interface symmetry with the matching drift factory.
    planck_mass : scalar, optional
        The Planck mass used in the calculations. The standard procedure
        is to set it to 1. The default is 1.

    Returns
    -------
    diffusion_func : function
        A function dependent on ``(phi, N)`` which returns the slow-roll
        diffusion ``H / (2*pi)``.
    """
    import math

    def diffusion_func(phi, N):
        # Slow-roll Hubble rate: H = sqrt(V(phi) / 3) in reduced Planck units.
        hubble = (potential(phi) / 3) ** 0.5
        # Use math.pi instead of a hand-typed truncated literal.
        return hubble / (2 * math.pi)
    return diffusion_func
|
8f203c5d788da733cd2371819843e742d0aded91
| 219,657
|
def disproportionation(oh, ai, aii, aiii):
    """Disproportionation rate law from Fratzke, 1986.

    Evaluates ``(ai*oh + aii*oh**2) / (1 + aiii*oh)``.
    """
    numerator = ai * oh + aii * oh * oh
    denominator = 1 + aiii * oh
    return numerator / denominator
|
7d3e1bda1700e4b154d88d70f7c069f9a0a7cd11
| 189,748
|
def create_response(bot_reply, end_of_session=False):
    """Build the base Alexa reply envelope around *bot_reply*."""
    output_speech = {
        "type": "PlainText",
        "text": bot_reply,
    }
    reprompt_speech = {
        "type": "PlainText",
        "text": "Plain text string to speak",
        "playBehavior": "REPLACE_ENQUEUED",
    }
    return {
        "version": "1.0",
        "response": {
            "outputSpeech": output_speech,
            "reprompt": {"outputSpeech": reprompt_speech},
            "shouldEndSession": end_of_session,
        },
    }
|
8401929422884db01868f3fd8d69342778d0e9c0
| 269,492
|
def count_lines(path) -> int:
    """Return the number of lines in the file at *path*."""
    total = 0
    with open(path) as handle:
        for _ in handle:
            total += 1
    return total
|
354bb98147d29dbab7d2c2b28cf40968feb8fb53
| 91,404
|
def _sort_esystem(evals, evec_collection):
"""Utility function that sorts a collection of eigenvetors in desceding
order, and then sorts the eigenvectors accordingly.
Parameters
----------
evals : numpy array
The unsorted eigenvalues.
evec_collection : list of arrays
List where each element in the list is a collection of eigenvectors.
Each collection is sorted according to the eigenvalues.
Returns
-------
sorted_evals : numpy array
The sorted eigenvalues
sorted_evecs : of arrays
list where each element is a collection of sorted eigenvectors.
"""
idx = evals.argsort()[::-1]
sorted_evals = evals[idx]
sorted_evecs = [evecs[:, idx] for evecs in evec_collection]
return sorted_evals, sorted_evecs
|
b4f2380cf337296e57460e07c3c074fda6c63f64
| 572,009
|
from typing import Optional
from typing import Union
def convert_string_to_bool(param: str) -> Optional[Union[bool, str]]:
    """Converts a request param of type string into expected bool type.

    Args:
        param: str. The param which needs normalization.

    Returns:
        bool. The boolean for 'true'/'false' (case-insensitive); any
        other string is returned unchanged.
    """
    lowered = param.lower()
    if lowered in ('true', 'false'):
        return lowered == 'true'
    # Non-boolean strings pass through untouched so downstream schema
    # validation can raise the appropriate exception.
    return param
|
701310ee196edf6b7937a60513da011c80d18da1
| 391,507
|
import pathlib
def get_file(file: pathlib.Path) -> str:
    """Return the full text content of *file*."""
    with open(file, "r") as handle:
        content = handle.read()
    return content
|
86f7a57b5b5394e82082a11f69a37514f39e0e71
| 600,925
|
def pick_files(profile_dir, **kwargs):
    """
    Return paths to the files from the profile that should be backed up.
    Seventeen profile files are eligible, grouped into the 11 categories
    below.  Every category is included by default; pass a category name
    as a keyword argument set to False (e.g. ``cookies=False``) to prune
    it:
    - autocomplete
    - bookmarks
    - certificates
    - cookies
    - dictionary
    - download_actions
    - passwords
    - preferences
    - search_engines
    - site_settings
    - styles
    Only files that actually exist on disk are returned.
    """
    categories = { # (no fold)
        'autocomplete': [
            'formhistory.sqlite',
        ],
        'bookmarks': [
            'places.sqlite',
            'bookmarkbackups',
        ],
        'certificates': [
            'cert8.db',
        ],
        'cookies': [
            'cookies.sqlite',
        ],
        'dictionary': [
            'persdict.dat',
        ],
        'download_actions': [
            'mimeTypes.rdf',
        ],
        'passwords': [
            'key3.db',
            'logins.json',
        ],
        'preferences': [
            'prefs.js',
            'user.js',
        ],
        'search_engines': [
            'search.json',
            'searchplugins',
        ],
        'site_settings': [
            'permissions.sqlite',
            'content-prefs.sqlite',
        ],
        'styles': [
            'chrome/userChrome.css',
            'chrome/userContent.css',
        ],
    }
    chosen = []
    for category, filenames in categories.items():
        # Categories default to enabled; a falsy kwarg disables one.
        if not kwargs.get(category, True):
            continue
        chosen.extend(profile_dir / name for name in filenames)
    return [path for path in chosen if path.exists()]
|
623e97335747a23f9aee8433d1e3934e29edfd4d
| 691,948
|
def SuiteLikelihood(suite, data):
    """Computes the weighted average of likelihoods for sub-hypotheses.

    Implements the law of total probability: sums
    P(hypo) * P(data | hypo) over every sub-hypothesis in the suite.

    suite: Suite that maps sub-hypotheses to probability
    data: some representation of the data
    returns: float likelihood
    """
    total = 0
    for hypo, prob in suite.Items():
        # Likelihood of the data under this particular sub-hypothesis.
        like = suite.Likelihood(data, hypo)
        total += prob * like
    return total
|
a04fd8a2946371092438845813cb051acf2b2304
| 610,960
|
def split_nth(seq, separator, n):
    """
    Split a sequence at the n-th occurrence of a separator.

    Args:
        seq(str)      : sequence to split
        separator(str): separator to split on
        n(int)        : split at the n-th occurrence (1-based)

    Returns:
        tuple: the parts before and after the n-th separator; the
        separator itself is dropped.

    Raises:
        ValueError: if fewer than ``n`` occurrences exist.
    """
    # Start at -1 so the first search begins at index 0; the previous
    # version started searching at index 1 and therefore missed a
    # separator sitting at position 0.
    pos = -1
    for _ in range(n):
        pos = seq.index(separator, pos + 1)
    return seq[:pos], seq[pos + 1:]
|
7eac612c842639c8bc8b85d16142e4137360da84
| 123,905
|
from typing import Any
from typing import Iterable
def gen_repr(obj: Any, attrs: Iterable, **kwargs) -> str:
    """Generate a consistent ``__repr__`` string.

    Used to create uniform `__repr__` methods throughout ZeroBot's
    codebase and modules.

    Parameters
    ----------
    obj : Any object
        The object whose class name anchors the repr.
    attrs : Any iterable
        Attribute names to read from *obj* and include.
    kwargs
        Extra keyword arguments appended as additional attributes.

    Returns
    -------
    str
        A ``<ClassName attr=value ...>`` style string.
    """
    pieces = [f'{attr}={getattr(obj, attr)!r}' for attr in attrs]
    body = ' '.join(pieces)
    if kwargs:
        extra = ' '.join(f'{key}={value!r}' for key, value in kwargs.items())
        body = body + ' ' + extra
    return f'<{obj.__class__.__name__} {body}>'
|
1e70351500554e9dfca145f7bdff0af0be0e18ee
| 342,958
|
def iso7064mod37_hybrid_36(source: str) -> str:
    """Compute the ISO 7064 MOD 37,36 (hybrid) check character.

    :param source: string to protect (case-insensitive alphanumerics)
    :return: the single check character
    """
    alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ*"
    modulus = 36
    # Fold each character into the running remainder, left to right.
    p = modulus
    for ch in source:
        s = p + alphabet.index(ch.upper())
        c = s % modulus or modulus  # a zero remainder counts as the modulus
        p = (c * 2) % (modulus + 1)
    return alphabet[(modulus + 1 - p % modulus) % modulus]
|
bdd80186848d75aa35b4f3bcd187a6cabce477b1
| 330,775
|
def subsample(df, freq=2):
    """
    Reduce a dataframe by keeping every ``freq``-th row.
    :param df: df, input data
    :param freq: int, stride between kept rows
    :return: df, subsampled df
    """
    # Row-slice with a step; the column set is untouched.
    return df.iloc[::freq, :]
|
5ffde37406d73ed02a99bcedf098fe3781dcdb37
| 576,277
|
def make_conf() -> dict:
    """Create the standard ladim configuration used for testing."""
    # Particle variable written once per particle.
    release_time_var = {
        "encoding": {"datatype": "f8"},
        "attributes": {
            "long_name": "particle release time",
            "units": "seconds since reference_time",
        },
    }
    # Instance variables written every output period.
    instance_vars = {
        "pid": {
            "encoding": {"datatype": "i4"},
            "attributes": {"long_name": "particle identifier"},
        },
        "X": {
            "encoding": {"datatype": "f4"},
            "attributes": {"long_name": "particle X-coordinate"},
        },
        "Y": {
            "encoding": {"datatype": "f4"},
            "attributes": {"long_name": "particle Y-coordinate"},
        },
        "Z": {
            "encoding": {"datatype": "f4"},
            "attributes": {
                "long_name": "particle depth",
                "standard_name": "depth_below_surface",
                "units": "m",
                "positive": "down",
            },
        },
    }
    return {
        "version": 2,
        "time": {
            "dt": [30, "s"],
            "start": "2000-01-02T03",
            "stop": "2000-01-02T03:02",
        },
        "grid": {"module": "ladim2.ROMS"},
        "forcing": {"module": "ladim2.ROMS"},
        "release": {},
        "state": {
            "particle_variables": {"release_time": "time", "weight": "float"},
            "instance_variables": {"age": "float"},
            "default_values": {"weight": 0, "age": 0},
        },
        "tracker": {"advection": "EF", "diffusion": 0.0, "vertdiff": 0.0},
        "output": {
            "output_period": [60, "s"],
            "particle_variables": {"release_time": release_time_var},
            "instance_variables": instance_vars,
        },
    }
|
94c5450d680a4e3da1894859ba6c98fd884f4619
| 188,867
|
import re
def clean_extra_newlines(textbody):
    """Collapse runs of blank lines down to one blank line."""
    # Two or more consecutive newlines become exactly two.
    return re.sub(r"\n\n+", r"\n\n", textbody)
|
3d21bb84a97cced98679784bb3d256547c9c6961
| 305,548
|
def simulate(dynamics, x0, ufunc, T=1, dt=1e-3):
    """Euler-integrate *dynamics* for duration ``T`` with time step ``dt``.

    Args:
        dynamics (Dynamics): the system.
        x0 (np.ndarray): the initial state.
        ufunc (callable): a policy ``u(t, x)`` returning a control vector.
        T (float): integration duration.
        dt (float): time step.

    Returns:
        dict: maps 't', 'x', 'u', 'dx' to lists tracing the time, state,
        control, and state derivative, respectively.
    """
    assert len(x0) == dynamics.stateDimension()
    trace = {'t': [], 'x': [], 'u': [], 'dx': []}
    t = 0
    state = x0
    while t < T:
        control = ufunc(t, state)
        assert len(control) == dynamics.controlDimension()
        derivative = dynamics.derivative(state, control)
        trace['t'].append(t)
        trace['x'].append(state)
        trace['dx'].append(derivative)
        trace['u'].append(control)
        # Explicit Euler step, delegated to the system's own integrator.
        state = dynamics.integrate(state, dt * derivative)
        t += dt
    return trace
|
89931fa50fcee831e90b71c601573bb4758e6cd3
| 618,460
|
import csv
def parse_fastqc_result(path_to_qc_summary):
    """Parse a fastqc summary file into a status dict.

    Args:
        path_to_qc_summary (str): Path to the fastqc report summary file.

    Returns:
        dict: module name (lower-cased, spaces replaced by underscores)
        mapped to its status string, normally "PASS", "WARN", or "FAIL".
        For example::

            {"basic_statistics": "PASS",
             "per_sequence_gc_content": "FAIL",
             "adapter_content": "PASS"}
    """
    statuses = {}
    with open(path_to_qc_summary) as handle:
        # Summary rows are tab-separated: <status>\t<module name>\t<file>.
        for row in csv.reader(handle, delimiter='\t'):
            key = row[1].lower().replace(" ", "_")
            statuses[key] = row[0]
    return statuses
|
133de8f205a601d39d361502d3fb64edd7745433
| 621,329
|
import json
def load_json(filename):
    """
    Parse the JSON document stored at *filename* and return it.
    """
    with open(filename) as handle:
        return json.load(handle)
|
5be1d70c2cb53d8c9148344e6f26fed2df5f6027
| 470,064
|
def unstack_m(state,b1):
    """Generate a pickup subtask.

    :param state: planning state exposing ``is_true`` and ``atoms``
        (presumably an HTN/PDDL-style state object -- confirm).
    :param b1: name of the block to unstack.
    :return: ``[('unstack', b1, base)]`` when ``b1`` is clear and an
        ``on(b1, base)`` atom exists; ``False`` otherwise (including
        when ``b1`` is clear but no supporting atom is found).
    """
    if state.is_true("clear", [b1]):
        # Find the block b1 currently rests on via an on(b1, X) atom.
        for atom in state.atoms:
            if atom.predicate.name == "on" and atom.args[0].name == b1:
                return [('unstack',b1,atom.args[1].name)]
    return False
|
0d524c5180c45846e04395807a4c8fb60290d417
| 556,633
|
def is_callID_active(callID):
    """Check if reactor.callLater() from callID is active.

    A delayed call is active when it exists and has neither fired nor
    been cancelled.
    """
    if callID is None:
        return False
    # NOTE(review): assumes `called`/`cancelled` are 0/1 flags, as on
    # Twisted's DelayedCall -- confirm.
    return callID.called == 0 and callID.cancelled == 0
|
f54a05fde58db53319bc34cca79629011e2f2ffd
| 656,094
|
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=None):
    """Read a follower file and map screen_name to a set of follower ids.

    Each useful line looks like ``<id> <screen_name> <follower_id>...``;
    lines with fewer than four whitespace-separated fields are ignored.

    Args:
        fname: path to the follower file.
        min_followers: exclusive lower bound on follower-set size.
        max_followers: inclusive upper bound on follower-set size.
        blacklist: screen names (lower-cased) to skip; None means none.

    Returns:
        dict mapping lower-cased screen_name to a set of int follower ids.
    """
    # Avoid the shared-mutable-default pitfall of ``blacklist=set()``.
    if blacklist is None:
        blacklist = set()
    result = {}
    with open(fname, 'rt') as f:
        for line in f:
            parts = line.split()
            if len(parts) <= 3:
                continue
            name = parts[1].lower()
            if name in blacklist:
                continue
            followers = set(int(x) for x in parts[2:])
            # Lower bound is exclusive, upper bound inclusive (as before).
            if min_followers < len(followers) <= max_followers:
                result[name] = followers
            else:
                print('skipping exemplar', name)
    return result
|
f9ed294a9783b562b17885f0c696e25f288d05dd
| 92,231
|
def extractFlags(item, report=1):
    """Return *item*'s flags as a tuple.

    Tuple order: (negative, optional, repeating, errorOnFail, lookahead,
    report); the last entry is the item's own report flag masked by the
    *report* argument.
    """
    flags = (
        item.negative,
        item.optional,
        item.repeating,
        item.errorOnFail,
        item.lookahead,
    )
    return flags + (item.report and report,)
|
e34207daca168051e77f725c41ddc17942559ba2
| 302,973
|
from typing import Mapping
from typing import List
def _mapping_to_mlflow_hyper_params(mp: Mapping) -> List[str]:
"""
Transform mapping to param-list arguments for `mlflow run ...` command
Used to pass hyper-parameters to mlflow entry point (See MLFlow reference for more information)
All mapping values will be converted to str(`value`)
>>> _mapping_to_mlflow_hyper_params({"alpha": 1.0, 'epochs': 10})
["-P", "alpha=1.0", "-P", "epochs=10"]
>>> result = _mapping_to_mlflow_hyper_params({"alpha": 1.0, 'epochs': 10})
>>> assert isinstance(result, List)
:param mp:
:return:
"""
param_list = []
for k, v in mp.items():
param_list.append('-P')
param_list.append(f'{k}={v}')
return param_list
|
c0d1fe9083ddf6fce29b98bd88de209a06a47f02
| 667,264
|
def font_parse_string(font):
    """
    Normalize a font spec given as a string or a sequence.

    :param font: "Arial 10 Bold" or ('Arial', 10, 'Bold')
    :return: '' when *font* is None, otherwise a
        ``(family, point_size, style)`` tuple where ``style`` is the
        (possibly empty) sequence of trailing style tokens.
    """
    if font is None:
        return ''
    parts = font.split(' ') if isinstance(font, str) else font
    family = parts[0]
    point_size = int(parts[1])
    # parts[2:] is simply empty when no style tokens follow.  The old
    # ``if len(parts) > 1 else None`` branch was unreachable: parts[1]
    # must exist for point_size above, so len(parts) > 1 always held.
    style = parts[2:]
    # underline = 'underline' in style
    # bold = 'bold' in style
    return family, point_size, style
|
81d987728c89f9b652a8cb0255bd27cfa59a4074
| 444,694
|
def word_contains(word: str, necessary: str) -> bool:
    """Check whether *word* contains every character in *necessary*.

    :param word: the word to check
    :param necessary: all required characters ("" requires nothing)
    :return: True if every required character appears in the word
    """
    # Deduplicate first so each distinct character is tested once.
    return all(ch in word for ch in set(necessary))
|
067f103dc20506bdc194eece4aa72b5bf9c84113
| 333,714
|
def add_line_numbers(lines, start=0, fn=None):
    """Prefix each line with its (optionally file-qualified) number.

    Parameters
    ----------
    lines : list or list-like of str
        Lines to be prefixed with a number.
    start : int, optional
        The index at which line numbers start (default is 0).
    fn : str, optional
        File name to prepend to the line number (default is None).

    Returns
    -------
    list
        Numbered lines, e.g. ``[3] text`` or ``[file-3] text``.
    """
    if fn:
        template = '[{fn}-{{ln}}] {{line}}'.format(fn=fn)
    else:
        template = '[{ln}] {line}'
    return [
        template.format(ln=number, line=text)
        for number, text in enumerate(lines, start)
    ]
|
13b9b1fb93e9c0a501d2e6e6ced0c34a662b94b1
| 504,460
|
from typing import Callable
def callable_name(func: Callable) -> str:
    """Return the qualified name (e.g. package.module.func) for the given callable."""
    module = func.__module__
    # Builtins read better without the 'builtins.' prefix.
    if module == 'builtins':
        return func.__name__
    return f'{module}.{func.__qualname__}'
|
6eeeb86d333ee1090957d656c2545a4a609ff918
| 56,081
|
def shuffle(x, order=None):
    """Reorganize the characters of *x* according to *order*.

    Parameters:
        x: the string to reorganize.
        order: list of character indices; None returns *x* unchanged.

    Returns:
        The string rebuilt as ``x[order[0]] + x[order[1]] + ...``.
    """
    # `is None` instead of `== None`, and str.join instead of the
    # quadratic `+=` concatenation loop.
    if order is None:
        return x
    return "".join(x[i] for i in order)
|
daa1c33160f82917be40b082e6a0df70fdfd49c8
| 79,243
|
def axial_to_cubic(col, slant):
    """
    Convert an axial hex coordinate to its cubic equivalent.

    Cubic x and z map directly from column and slant; y is chosen so the
    three coordinates sum to zero.
    """
    cube_x, cube_z = col, slant
    cube_y = -cube_x - cube_z
    return cube_x, cube_y, cube_z
|
f1f5a4e5794d08897abc7f41c5a0e5d69a659d19
| 450,394
|
def isPalindrome(n):
    """Return True when the decimal digits of *n* read the same reversed."""
    digits = str(n)
    return digits == digits[::-1]
|
2261cdae02a2c3b8665bfc09c991be524110fbb2
| 220,166
|
import configparser
def read_config(config_file):
    """Load *config_file* with configparser and return the parser.

    Args:
        config_file (file) : Plugin config file

    Returns:
        object: populated ``ConfigParser`` instance
    """
    parser = configparser.ConfigParser()
    # read() silently skips files that do not exist (configparser semantics).
    parser.read(config_file)
    return parser
|
20d67952dfd6c619f5693996162e2cca984af483
| 72,834
|
def get_identifier_name(cursor):
    """
    Retrieve the identifier name from the given clang cursor.

    :param cursor: A clang cursor from the AST.
    :return: The identifier as a string.
    """
    return cursor.displayname
|
771b5a2f40530f0f5729f4d5c42b6d2a6666b2bc
| 398,005
|
def _GetHost(cpu, target_os):
"""Returns the host triple for the given OS and CPU."""
if cpu == 'x64':
cpu = 'x86_64'
elif cpu == 'arm64':
cpu = 'aarch64'
if target_os == 'linux':
return cpu + '-unknown-linux'
elif target_os == 'mac':
return cpu + '-apple-darwin'
elif target_os == 'ios':
return cpu + '-ios-darwin'
else:
raise RuntimeError('Unsupported host')
|
1431239a7352a3cee23bcc0aec8d1961dcb7296f
| 110,191
|
def eof(f, strict=False):
    """
    Standard EOF function.
    Return ``True`` if the marker is at the end of the file.
    With ``strict=True`` only an exactly-at-end marker counts; otherwise
    a marker at or past the end counts.
    """
    here = f.tell()
    f.seek(0, 2)        # jump to the end to learn the file length
    end = f.tell()
    f.seek(here)        # restore the caller's position
    if strict:
        return end == here
    return end <= here
|
e75436b149af4eb48aa3da4d7f6f2f0d76487ac2
| 426,218
|
def is_label_in(node, label):
    """
    Search the immediate children of a node for a certain label.

    :param node: iterable of children, each exposing ``label()``.
    :param label: label value to look for.
    :return: True when some immediate child carries the label.
    """
    # Generator instead of the original list comprehension: any() can now
    # short-circuit on the first match instead of calling label() on
    # every child first.
    return any(child.label() == label for child in node)
|
b91cb1c3a5823b741cb74d9ff5cd94945cb39bf7
| 391,435
|
import pkg_resources
def check_package_installed(package_name):
    """
    Uses pkg_resources.require to certify that a package is installed
    :param package_name: name of package
    :type package_name: str
    :return: boolean value whether or not package is installed in current python env
    :rtype: bool
    """
    # NOTE(review): require() can also raise VersionConflict when the
    # package exists at an incompatible version; that exception is not
    # caught here and will propagate -- confirm that is intended.
    # pkg_resources is deprecated upstream in favor of importlib.metadata.
    try:
        pkg_resources.require(package_name)
    except pkg_resources.DistributionNotFound:
        return False
    return True
|
521124512d8cb7177876ce8b96a756108df33866
| 288,129
|
import itertools
def flatten(its, n):
    """Take the n-dimensional nested iterable *its* and flatten it.

    Parameters
    ----------
    its : nested iterable
    n : number of dimensions

    Returns
    -------
    flattened iterable of all items
    """
    # Base case: a 1-dimensional iterable is already flat.
    if n <= 1:
        return its
    # Flatten one level recursively, then chain the pieces together.
    return itertools.chain(*(flatten(inner, n - 1) for inner in its))
|
9702327c36ad004ed3108a81747f5b7bcb7bd3f8
| 254,998
|
def get_padding(dimension_size, sectors):
    """
    Compute the padding at each side of one image dimension so the padded
    size divides evenly into the given number of sectors.

    Parameters
    ----------
    dimension_size : int
        Actual dimension size.
    sectors : int
        Number of sectors over which the image will be divided.

    Returns
    -------
    pad_before, pad_after : int, int
        Padding at each side of the image for this dimension; when the
        total padding is odd, the extra unit goes on the trailing side.
    """
    remainder = dimension_size % sectors
    if remainder == 0:
        return 0, 0
    total = sectors - remainder
    before = total // 2
    after = before if total % 2 == 0 else before + 1
    return before, after
|
4bf0e3f4483586f1a41d5b65a3a6cb5e1dbbd6dc
| 106,533
|
def digitize(n):
    """Convert an integer to the list of its decimal digits, least significant first."""
    return [int(ch) for ch in reversed(str(n))]
|
8e3e763cffa1718b9d494b868d1dd551a8f5f59e
| 392,126
|
def obv(df, price, volume, obv):
    """
    Compute On Balance Volume (OBV): a cumulative total of up and down
    volume.  When the close is higher than the previous close the volume
    is added to the running total; when it is lower, the volume is
    subtracted.

    Parameters:
        df (pd.DataFrame): DataFrame which contains the asset price.
        price (string): the column name of the price of the asset.
        volume (string): the column name of the volume of the asset.
        obv (string): the column name for the on balance volume values.

    Returns:
        df (pd.DataFrame): copy of the input with the OBV column added.
        Unlike the previous version, the caller's frame is not mutated
        (no leaked "diff" column) and NaNs in unrelated columns are no
        longer silently filled with 1 by a frame-wide fillna.
    """
    result = df.copy()
    # The first diff is NaN; treat it as a rise (+1), matching the old
    # fillna(1) behavior for this column.
    diff = result[price].diff().fillna(1)
    # Sign of the move: +1 up, -1 down, 0 flat (float dtype as before).
    sign = (diff > 0).astype(float) - (diff < 0).astype(float)
    result[obv] = (result[volume] * sign).cumsum()
    return result
|
19f4c456ed501523d2b349e2766d482bd1fef13b
| 6,727
|
from pathlib import Path
def resolve_paths_to_new_working_dir(paths: list, new_work_dir: str) -> list[Path]:
    """Re-express each file path relative to the new working directory.

    Args:
        paths: User-provided paths (expected to live under *new_work_dir*;
            ``relative_to`` raises ValueError otherwise).
        new_work_dir: The new working directory.

    Returns:
        List of paths relative to the new working directory.
    """
    relative_paths = []
    for raw in paths:
        # resolve() absolutizes (and follows symlinks) before re-anchoring.
        relative_paths.append(Path(raw).resolve().relative_to(new_work_dir))
    return relative_paths
|
da4553d93b0013191eb18399c2ac16749a006980
| 432,540
|
from typing import List
from typing import Any
def get_max_column_lengths(rows: List[List[Any]]) -> dict:
    """
    :param rows: 2D list containing objects that have a single-line
        representation (via `str`).
    :return: A dict mapping column index to the maximum string length
        found in that column.
    """
    # The previous implementation iterated row-wise and returned per-ROW
    # maxima keyed by row index, contradicting its own docstring (and
    # leaving number_of_columns unused -- the commented-out code was the
    # correct column-wise version).  Restore the column semantics.
    number_of_columns = len(rows[0])
    max_lengths = {column: 0 for column in range(number_of_columns)}
    for row in rows:
        for column in range(number_of_columns):
            length = len(str(row[column]))
            if length > max_lengths[column]:
                max_lengths[column] = length
    return max_lengths
|
26e8e177d47022b43b37f986d8e3e18bbdbbf679
| 490,135
|
def get_billing_data(order):
    """Extracts order's billing address into payment-friendly billing data.

    Returns an empty dict when the order has no (truthy) billing address;
    otherwise a flat mapping of ``billing_*`` keys drawn from the address.
    Note the email comes from ``order.user_email``, not from the address.
    """
    data = {}
    if order.billing_address:
        data = {
            'billing_first_name': order.billing_address.first_name,
            'billing_last_name': order.billing_address.last_name,
            'billing_company_name': order.billing_address.company_name,
            'billing_address_1': order.billing_address.street_address_1,
            'billing_address_2': order.billing_address.street_address_2,
            'billing_city': order.billing_address.city,
            'billing_postal_code': order.billing_address.postal_code,
            'billing_country_code': order.billing_address.country.code,
            'billing_email': order.user_email,
            'billing_country_area': order.billing_address.country_area}
    return data
|
3f020b13905f33c215d32ec1a3b2e4f4af369e15
| 73,363
|
def is_string(s):
    """Check if the argument is a string (an instance of ``str``)."""
    # isinstance (rather than a type() comparison) also accepts subclasses.
    return isinstance(s, str)
|
050be89ec88be7a6067464d4ad846acbf560d119
| 660,923
|
def suggest_patience(epochs: int) -> int:
    """Suggest an early-stopping patience: 10% of *epochs*, floored at 5."""
    assert isinstance(epochs, int)
    tenth = round(0.1 * epochs)
    return tenth if tenth > 5 else 5
|
e1631576d63dc3b62df636fb555739158035a25a
| 28,882
|
def make_inverted(m, n):
    """
    Given two integers m and n, return a reversed list of integers
    between m and n.
    If the difference between m and n is even, include m and n,
    otherwise don't include them.
    If m == n, return an empty list.
    If m < n, swap m and n and continue.
    Example 1:
    Input: m = 2, n = 7
    Output: [6, 5, 4, 3]
    Example 2:
    Input: m = 6, n = 10
    Output: [10, 9, 8, 7, 6]
    """
    # Accept numeric strings too, as before (int(float(...)) handles both).
    m, n = int(float(m)), int(float(n))
    if m == n:
        return []
    lo, hi = sorted((m, n))
    if (hi - lo) % 2 == 0:
        # Even gap: endpoints included, counting down from hi to lo.
        return list(range(hi, lo - 1, -1))
    # Odd gap: endpoints excluded.
    return list(range(hi - 1, lo, -1))
|
83692c924162bb595d01c9b7e1cb8c818497faf1
| 651,998
|
def renders(col_name):
    """
    Decorator mapping a custom Model property to an actual Model db
    column. As an example::
        class MyModel(Model):
            id = Column(Integer, primary_key=True)
            name = Column(String(50), unique = True, nullable=False)
            custom = Column(Integer(20))
            @renders('custom')
            def my_custom(self):
                # will render this columns as bold on ListWidget
                return Markup('<b>' + custom + '</b>')
        class MyModelView(ModelView):
            datamodel = SQLAInterface(MyTable)
            list_columns = ['name', 'my_custom']
    """
    def wrap(func):
        # Tag the function once; an already-present tag is left untouched.
        if not hasattr(func, '_col_name'):
            func._col_name = col_name
        return func
    return wrap
|
7128e0dac6d2c0b1519729e708fe7cb1b14c3596
| 620,247
|
def run_sklearn_metric_fn(metrics_fn, labels, probs):
    """Run an sklearn-style metric, tolerating a missing class.

    Args:
        metrics_fn: a function that takes labels and probs
        labels: 1D label vector
        probs: 1D probabilities vector

    Returns:
        results: the metric value, or None when the metric raises
        ValueError (e.g. a class absent from the evaluation split).
    """
    try:
        return metrics_fn(labels, probs)
    except ValueError:
        return None
|
5398bfa38db405e212c9aa6e4688aa6d47c26f2d
| 189,053
|
def color_component_int_to_float(rgb_component: int) -> float:
    """
    Convert a color component from integer to float.
    :param rgb_component: a color component given in int range [0, 255]
    :return: a color component in float range of [0, 1]
    """
    full_scale = 255.0
    return rgb_component / full_scale
|
edd1390e2c3a2462d906091b3ac58c1d74a2891f
| 348,016
|
def _first(iterable, what, test='equality'):
"""return the index of the first occurance of ``what`` in ``iterable``
"""
if test=='equality':
for index, item in enumerate(iterable):
if item == what:
break
else:
index = None
else:
raise NotImplementedError
return index
|
0421ac9aa5c6c2f0e2f55a913fa49d9bf79edb63
| 643,397
|
def get_tlp(results):
    """Fetch the target log prob from a results structure.

    Nested lists are walked by taking the last element until a non-list
    results object is reached.
    """
    while isinstance(results, list):
        results = results[-1]
    return results.accepted_results.target_log_prob
|
98c4046b1691533b311b87aa8d52c6cbe71da221
| 399,171
|
def split_df(df, columns_split):
    """Split a dataframe into two by column.

    Args:
        df (pandas dataframe): input dataframe to split.
        columns_split (int): column index at which to split.

    Returns:
        df1, df2 (pandas dataframes): the first ``columns_split`` columns
        and the remaining columns, respectively.
    """
    left = df.iloc[:, :columns_split]
    right = df.iloc[:, columns_split:]
    return left, right
|
37aee5783680f6827891c737c9a4ffb9bba89b27
| 653,196
|
def classpath_entry_xml(kind, path):
    """Generate an eclipse xml classpath entry.

    Args:
        kind: Kind of classpath entry ('lib', 'src', 'con', ...).
        path: Absolute or project-root-relative path of the resource.

    Returns:
        The ``<classpathentry .../>`` xml element as a string.
    """
    return f'<classpathentry kind="{kind}" path="{path}"/>'
|
5b350d2e2dc75f96348ba83fed739a83ac144053
| 572,302
|
def get_active_sessions_orchestrator(
    self,
) -> list:
    """Get all current active sessions on Orchestrator
    .. list-table::
        :header-rows: 1
        * - Swagger Section
          - Method
          - Endpoint
        * - activeSessions
          - GET
          - /session/activeSessions
    :return: Returns list of dictionaries of active sessions \n
        [`dict`]: Active session object \n
            * keyword **username** (`str`): Username
            * keyword **type** (`str`): Protocol/method used for
              establishing the session e.g. ``web`` or ``ssh``
            * keyword **idle_time** (`int`): Measure of current interval
              during which user is idle (seconds)
            * keyword **login_time** (`int`): Time of user login (EPOCH
              time in seconds)
            * keyword **remote_host** (`str`): The IP address of the
              user originating the session
    :rtype: list
    """
    # Thin wrapper: delegate to the client's generic GET helper.
    return self._get("/session/activeSessions")
|
3dd38d66816e2bdace27ab24b6609ec44ee02057
| 634,734
|
import textwrap
def wrap_line(line: str, width=29):
    """Wrap a long string into newline-separated rows.

    :param width: maximum length of one row (default 29)
    :param line: the string to wrap
    :return: the wrapped string
    """
    rows = textwrap.wrap(line, width=width)
    return '\n'.join(rows)
|
92dfeb830fdf27d7fa1f97c9501808a83073e465
| 215,068
|
import re
def _do_subst(node, subs):
    """
    Fetch the node contents and replace all instances of the keys with
    their values. For example, if subs is
    {'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},
    then all instances of %VERSION% in the file will be replaced with
    1.2345 and so forth.

    NOTE(review): ``subs`` is iterated as key/value *pairs* (a dict must
    be supplied in ``.items()`` form), and each key is handed to
    ``re.sub`` as a regular expression -- keys containing regex
    metacharacters, or values containing backslashes, are not treated
    literally.  Confirm callers only pass plain tokens like %VERSION%.
    """
    contents = node.get_text_contents()
    # Empty/None subs short-circuits to the raw contents.
    if not subs: return contents
    for (k,v) in subs:
        contents = re.sub(k, v, contents)
    return contents
|
c62e542f15ffd38cc2cb38bf088552340d132d59
| 135,850
|
def extract_image_class(filename: str) -> str:
    """
    Extract the class label for a given filename.

    Args:
        filename: the filename to extract a class label for

    Returns:
        a class label based on the image's filename
    """
    basename = filename.split('/')[-1]     # drop the directory part
    label = basename.split('-')[0]         # drop descriptive text after a dash
    label = label.replace(' ', '')         # remove spaces for simplicity
    # strip known image extensions
    label = label.replace('.gif', '').replace('.png', '')
    return label
|
b500f1ae215ab60db6f3f98d5d58467f4f821e72
| 311,592
|
def lower_rep(text):
    """Strip all underscores from *text* and lower-case the result.

    Args:
        text (str): text to treat

    Returns:
        updated text (with removed underscores and lower-cased)
    """
    stripped = text.replace("_", "")
    return stripped.lower()
|
a9e6c507146ba1b0c9bcd924867b330edc8e6d0f
| 81,805
|
def normalize_to_midnight(dt_obj):
    """Return *dt_obj* with its time-of-day zeroed out (midnight)."""
    return dt_obj.replace(
        hour=0,
        minute=0,
        second=0,
        microsecond=0,
    )
|
f00fe4425972840a5a6d81a0ac8cf8315f2e53be
| 430,002
|
import re
def is_sha(text):
    """
    To be considered a SHA, it must have at least one digit.
    >>> is_sha('f75a283')
    True
    >>> is_sha('decade')
    False
    >>> is_sha('facade')
    False
    >>> is_sha('deedeed')
    False
    >>> is_sha('d670460b4b4aece5915caf5c68d12f560a9fe3e4')
    True
    >>> is_sha('ad670460b4b4aece5915caf5c68d12f560a9fe3e4')
    False
    >>> is_sha('670460b4b4aece5915caf5c68d12f560a9fe3e4')
    True
    >>> is_sha('670460b4b4aece5915caf5c68d12f560a9fe3g4')
    False
    """
    # 5-40 lowercase hex characters...
    looks_hexish = re.match(r'''^[a-f0-9]{5,40}$''', text) is not None
    # ...and at least one of them must be a digit (rules out plain words).
    contains_digit = re.search(r'\d', text) is not None
    return looks_hexish and contains_digit
|
cb2f4dca633a81921d9489bc5d20b895448675b5
| 552,542
|
def get_surrounding_text(text, sub_start, sub_end=None, distance=25):
    """
    Looks for the substrings, 'sub_start' and 'sub_end' variables in 'text' and return a new substring
    with a number equal to 'distance' characters in the left of 'sub_start' position and in the right of
    'sub_end' position if they exist, if one of them are not found the partition of the string will be until
    the start or until the end of the text variable.
    :param text: a string. The string to look for.
    :param sub_start: a string. The start to get the left and the right parts of the new subtring.
    :param sub_end: an optional string. The start to get the right part of the new substring.
    :param distance: an optional integer. The length of the left and right parts of the new string from sub_start
    and sub_end if they are found. If the substrings are not found, the distance will be to the start of the text
    and the end of the text.
    :return: a string. A new substring with the characters around the 'sub_start' and 'sub-end' substrings.
    """
    surrounding_text = ""  # NOTE(review): dead initializer; unconditionally reassigned below
    separators = ['.', '\n', ',']
    # Case-insensitive position of the opening marker (-1 when absent;
    # NOTE(review): an absent marker makes i_left clamp to 0 and the
    # window degenerate -- confirm callers guarantee presence).
    i_start = text.lower().find(sub_start.lower())
    # Tentative left edge: `distance` chars before the match, clamped to 0.
    i_left = max(0, i_start - distance)
    # Prefer to cut the left side at a separator found inside the window;
    # +1 skips past the separator itself.
    i_separators = [text[i_left:i_start].find(separator) for separator in separators]
    i_separators = [i + i_left + 1 for i in i_separators if i >= 0]
    if i_separators:
        i_left = max(i_left, *i_separators)
    if sub_end:
        i_end = text.lower().find(sub_end.lower())
    else:
        # No explicit end marker: the start marker itself delimits the window.
        i_end = -1
        sub_end = sub_start
    if i_end < 0:
        # End marker missing: window runs from after sub_start to the text end.
        i_end = i_start + len(sub_start)
        i_right = len(text)
    else:
        # End marker found: extend past it, then at most `distance` further.
        i_end = i_end + len(sub_end)
        i_right= min(len(text), i_end + distance)
    # Prefer to cut the right side at a separator inside the right window.
    i_separators = [text[i_end:i_right].find(separator) for separator in separators]
    i_separators = [i + i_end for i in i_separators if i >= 0]
    if i_separators:
        i_right = min(i_right, *i_separators)
    surrounding_text = text[i_left:i_right].strip()
    return surrounding_text
|
c5ad4a80940260ca591b7974ef9a37441140be78
| 628,018
|
import random
def get_best_move(board, scores):
    """
    Pick a random move among the empty squares sharing the highest score.

    Returns the chosen square (as yielded by board.get_empty_squares(),
    typically a (row, col) tuple).  Raises IndexError when the board has no
    empty squares, via random.choice on an empty list.
    """
    empty_squares = board.get_empty_squares()
    # First pass: find the highest score among the empty squares.
    top_score = float('-inf')
    for square in empty_squares:
        score = scores[square[0]][square[1]]
        if score > top_score:
            top_score = score
    # Second pass: collect every square that reaches that score.
    top_moves = [square for square in empty_squares
                 if scores[square[0]][square[1]] == top_score]
    return random.choice(top_moves)
|
0cf0f1ccb45abbdb2032df329271b65c9ee6bc26
| 364,228
|
import struct
def readInt(byteArray, start):
    """Interpret the four bytes of *byteArray* beginning at *start*
    as a big-endian ("network order") 32-bit unsigned integer."""
    (value,) = struct.unpack_from("!L", byteArray, start)
    return value
|
9a4c2dc107fd395e9bdb7f127c152aa95c071ac7
| 507,774
|
def is_uniq(values):
    """ Report whether the input items are unique.

    Parameters
    ----------
    values : set
        set of all values

    Returns
    -------
    tuple
        (True, '') for an empty set, (True, the_value) for a singleton,
        (False, 'MULTI') otherwise.
    """
    if not values:
        return True, ''
    if len(values) == 1:
        # Unpack the single element without building an intermediate list.
        (only_value,) = values
        return True, only_value
    return False, 'MULTI'
|
00fd70ab7b6cc20f5c32948b0bfffbb924e0da9e
| 414,381
|
def grid_edge_is_closed_from_dict(boundary_conditions):
    """Get a list of closed-boundary status at grid edges.

    Get a list that indicates grid edges that are closed boundaries. The
    returned list provides a boolean that gives the boundary condition status
    for edges ordered as [*right*, *top*, *left*, *bottom*] (matching the
    doctest below).

    *boundary_conditions* is a dict whose keys indicate edge location (as
    "bottom", "left", "top", "right") and values must be one of "open", or
    "closed". If an edge location key is missing, that edge is assumed to be
    *open*.

    Parameters
    ----------
    boundary_conditions : dict
        Boundary condition for grid edges.

    Returns
    -------
    list
        List of booleans indicating if an edge is a closed boundary.

    Raises
    ------
    ValueError
        If a boundary condition value is neither "open" nor "closed".

    Examples
    --------
    >>> from landlab.grid.raster import grid_edge_is_closed_from_dict
    >>> grid_edge_is_closed_from_dict(dict(bottom='closed', top='open'))
    [False, False, False, True]
    >>> grid_edge_is_closed_from_dict({})
    [False, False, False, False]
    """
    for condition in boundary_conditions.values():
        if condition not in ['open', 'closed']:
            # Interpolate explicitly: passing the value as a second argument
            # to ValueError left the '%s' placeholder unfilled.
            raise ValueError('%s: boundary condition type not understood'
                             % condition)

    return [boundary_conditions.get(loc, 'open') == 'closed'
            for loc in ['right', 'top', 'left', 'bottom']]
|
2f285ef9c5065741759c83ae83021277b42aae9e
| 291,483
|
from typing import Tuple
def _normalize_line(raw_line: str) -> Tuple[str, str]:
    """Normalizes import related statements in the provided line.

    Returns (normalized_line: str, raw_line: str)
    """
    # Ordered (needle, replacement) pairs, applied sequentially exactly as
    # the equivalent chain of str.replace calls would be.
    substitutions = (
        ("from.import ", "from . import "),
        ("from.cimport ", "from . cimport "),
        ("import*", "import *"),
        (" .import ", " . import "),
        (" .cimport ", " . cimport "),
        ("\t", " "),
    )
    line = raw_line
    for needle, replacement in substitutions:
        line = line.replace(needle, replacement)
    return (line, raw_line)
|
c35b7f7e79ff824427c055b66a1858d2483cf8ba
| 549,641
|
def clean(df, GPS = False, elevation = False, TEC = False, VTEC = False, locktime = False):
    """Clean
    Removes erroneous values of VTEC

    Args:
        df (dataframe): Master dataframe containing TEC measurements
        GPS (bool): (default: False) If True, only GPS satellite data (SVID 1-37) is kept
        elevation (bool): (default: False) If True, only rows with elevation above 30 degrees are kept
        TEC (bool): (default: False) If True, only rows with positive TEC values are kept
        VTEC (bool): (default: False) If True, only rows with positive VTEC values are kept
        locktime (bool): (default: False) If True, only rows with locktime above 180 s (3 minutes) are kept

    Returns:
        dataframe: The filtered dataframe (the input is not modified in place).
    """
    # Idiomatic truthiness tests instead of '== True' comparisons.
    if elevation:
        df = df[df['elevation'] > 30]
    if TEC:
        df = df[df['TEC'] > 0]
    if VTEC:
        df = df[df['VTEC'] > 0]
    if locktime:
        df = df[df['locktime'] > 180]
    if GPS:
        # GPS satellites use SVIDs 1 through 37.
        df = df[(df['SVID'] >= 1) & (df['SVID'] <= 37)]
    return df
|
d00e3b7857c06e84d360e94c87f40e29a0ee7b71
| 74,516
|
def comp_radius(self):
    """Compute the radii of the smallest and largest circles containing the slot.

    Parameters
    ----------
    self : HoleM50
        A HoleM50 object

    Returns
    -------
    (Rmin,Rmax): tuple
        Radius of the circle that contains the slot [m]
    """
    point_dict = self._comp_point_coordinate()
    # Inner radius: whichever of the two reference points lies closer to the origin.
    Rmin = min(abs(point_dict["Z5"]), abs(point_dict["Z7"]))
    # Outer radius: external radius minus the H1 depth.
    Rmax = self.get_Rext() - self.H1
    return (Rmin, Rmax)
|
d498ca0a82c868caa2f78828a1a6777f36be7d05
| 372,446
|
import math
def angular_momentum(m, v, r, theta):
    """
    Return the angular momentum L = m * v * r * sin(theta) of an object of
    mass ``m`` whose linear velocity is ``v``, where ``r`` is the radius of
    the path traced by the object and ``theta`` the angle between the
    velocity and the radius (in radians).

    Parameters
    ----------
    m : float
        Mass of the object.
    v : float
        Linear velocity magnitude.
    r : float
        Radius of the traced path.
    theta : float
        Angle between velocity and radius, in radians.

    Returns
    -------
    float
    """
    sin_theta = math.sin(theta)
    return m * v * r * sin_theta
|
a37ec10b1375e4b14c8a27e938c7e63839518509
| 111,160
|
def join_strings(strings, join_char="_"):
    """Join the given strings with *join_char* (an underscore by default).

    Every string must consist of printable characters only, otherwise an
    exception is raised.  Occurrences of *join_char* inside an input string
    are replaced with a null character so the join character stays unambiguous.

    Args:
        strings: iterable of strings.
        join_char: str, the character to join with.

    Returns:
        The joined string.

    Examples:
        ```python
        >>> join_strings(['asd', '', '_xcv__'])
        'asd__\x00xcv\x00\x00'
        ```

    Raises:
        ValueError if a string contains an unprintable character.
    """
    def _sanitize(name):
        # Validate before masking embedded join characters.
        if not name.isprintable():
            raise ValueError("Encountered unexpected name containing non-printable characters.")
        return name.replace(join_char, "\0")

    return join_char.join(_sanitize(name) for name in strings)
|
291055e33f73762e62345eadf2b63f2af529ab40
| 386,369
|
def no_hidden(files):
    """Return the given file names with hidden entries (those starting
    with a period) removed."""
    visible = [name for name in files if not name.startswith('.')]
    return visible
|
e03a88ed3387b707484d767f746edc18da012f46
| 219,871
|
def preload(pytac_lat):
    """Expose the lattice elements grouped by family as attributes, so that
    e.g. 'elems.bpm' returns the list of all BPMs in the lattice.  As a
    special case 'elems.all' returns all the elements in the lattice.

    Args:
        pytac_lat (pytac.lattice.Lattice): The Pytac lattice object from which
            to get the elements.

    returns:
        obj: The elems object with the elements loaded onto it by family.
    """
    attributes = {"all": pytac_lat.get_elements()}
    for family in pytac_lat.get_all_families():
        attributes[family.lower()] = pytac_lat.get_elements(family)
    # Build the holder class in a single step instead of repeated setattr calls.
    return type("elems", (object,), attributes)
|
b65c6950442bf3afd55f1b3cc5c67529170c3b69
| 628,260
|
def parse_file(filename):
    """
    Parses a file and counts the occurrences of the word "pride"

    Args:
        filename: The file to be parsed

    Returns:
        The number of occurrences
    """
    # The context manager guarantees the file is closed when we are done.
    with open(filename, encoding="utf8") as f:
        # Whitespace-split each line and count exact "pride" tokens; the
        # match is case-sensitive and excludes punctuation-attached forms,
        # just like comparing each split word for equality.
        return sum(line.split().count("pride") for line in f)
|
5bc4336e06ac4cf0d90be8e97462f11de2bebacd
| 574,023
|
def cannot_add(left, right):
    """Build the error message shown when *left* and *right* cannot be added."""
    return f"Cannot add {left} and {right}."
|
624d6e40801ec0af563705a6eb2ef1a7dc26c49f
| 352,525
|
def _get_interpretation_normality(test_name: str,
                                  pvalue: float,
                                  sig_level: float):
    """Build the interpretation string for a normality test result,
    displaying the p-value alongside the verdict.

    Args:
        test_name (str): Name of statistical test
        pvalue (float): Value of the p-value
        sig_level (float): Significance level of statistical test

    Returns:
        str: Interpretation of normality statistical test
    """
    rounded_p = round(pvalue, 3)
    if pvalue < sig_level:
        return f'p-value of {test_name} ({rounded_p}) is <{sig_level}, suggesting the assumption of residual normality is VIOLATED'
    return f'p-value of {test_name} ({rounded_p}) is ≥{sig_level}, suggesting the assumption of residual normality is satisfied'
|
c6abfd63319f515ad4467bb261a40eacbe0a3e09
| 397,979
|
def deserialize_tuple(d):
    """
    Deserializes a JSONified tuple.

    Args:
        d (:obj:`dict`): A dictionary representation of the tuple, holding
            the elements under the 'items' key.

    Returns:
        A tuple.
    """
    items = d['items']
    return tuple(items)
|
aa502d4e16e824b354b00e0055d782cd10fe6b48
| 674,556
|
def bubble_sort(t_input):
    """ Bubble Sort Algorithm

    Simple and slow algorithm
    http://en.wikipedia.org/wiki/Bubble_sort

    Best case performance: O(n) — a single pass over already-sorted input,
    thanks to the `swapped` early-exit flag (the previous docstring's O(n^2)
    best-case claim was wrong for this implementation).
    Worst case performance: O(n^2)
    Worst Case Auxiliary Space Complexity: O(1)

    :param t_input: [list] of numbers (left unmodified)
    :return: [list] - sorted list of numbers
    """
    array = t_input[:]
    unsorted_length = len(array)
    while True:
        swapped = False
        for i in range(unsorted_length - 1):
            if array[i + 1] < array[i]:
                # swap elements
                array[i], array[i + 1] = array[i + 1], array[i]
                swapped = True
        # After each pass the largest remaining element has bubbled to the
        # end, so the next pass can stop one position earlier.
        unsorted_length -= 1
        if swapped is False:
            break
    return array
|
3e0d19bb66c1e3569d8a2cc9aefb742867f6ae53
| 232,834
|
import re
def cleanFilename(fname):
    """Collapse runs of filename-hostile characters (space, underscore,
    newline, tab, and the punctuation ``/()*,&:;@.``) into a single
    underscore, then drop a trailing underscore if one remains."""
    # Raw strings are the conventional form for regex patterns; '\n' and
    # '\t' inside the character class are interpreted by the regex engine
    # and still match newline/tab, so behaviour is unchanged.
    collapsed = re.sub(r"[ _\n\t/()*,&:;@.]+", "_", fname)
    return re.sub(r"_$", "", collapsed)
|
9dce26172d9b4cc6db3c9cdd13e4855c224a1a0c
| 68,914
|
def rect2pathd(rect):
    """Converts an SVG-rect element to a Path d-string.

    The path starts at the rect's (x, y) coordinate and visits the corners
    in the order (x, y) -> (x+w, y) -> (x+w, y+h) -> (x, y+h) before
    closing."""
    left = float(rect.get('x', 0))
    top = float(rect.get('y', 0))
    right = left + float(rect.get('width', 0))
    bottom = top + float(rect.get('height', 0))
    return "M{} {} L {} {} L {} {} L {} {} z".format(
        left, top, right, top, right, bottom, left, bottom)
|
910e0c45783d47f9533fa096e464ebce30705428
| 251,475
|
from typing import Pattern
import re
def r(regex: str) -> Pattern:
    """Compile *regex* with the MULTILINE flag, so ``^`` and ``$`` match
    at every line boundary."""
    return re.compile(regex, re.MULTILINE)
|
53af59d7a5c5149ee31c2ab42df2cf3a68327d39
| 147,386
|
def check_rule_name_ending(rule_name, starlark_rule_types=('binary', 'library')):
    """
    Return True if `rule_name` ends with a rule type from `starlark_rule_types`
    Return False otherwise
    """
    # str.endswith accepts a tuple of suffixes, replacing the manual loop.
    return rule_name.endswith(tuple(starlark_rule_types))
|
f31bbeb71f8cdefecab5c86f183a1972dc836a85
| 152,491
|
def find_missing_number(nums):
    """Returns the missing number from a sequence of unique integers
    in range [0..n] in O(n) time and space. The difference between
    consecutive integers cannot be more than 1. If the sequence is
    already complete, the next integer in the sequence will be returned.

    >>> find_missing_number(i for i in range(0, 10000) if i != 1234)
    1234
    >>> find_missing_number([4, 1, 3, 0, 6, 5, 2])
    7
    """
    # XOR every value together with its 1-based position; every present
    # number cancels against a position, leaving exactly the missing one.
    missing = 0
    for position, value in enumerate(nums, start=1):
        missing ^= position ^ value
    return missing
|
4e5074db26924971c1d2ff65a28367256ded10b0
| 141,389
|
def dummy_import(monkeypatch):  # (MonkeyPatch) -> Callable
    """
    Return a stand-in import function that always fails.

    The returned callable raises `ImportError`; on the way out it undoes the
    monkeypatch, so only the first (failing) import is affected.
    """
    def dummy_import(*args, **kwargs):
        error = ImportError("this is a monkeypatch")
        try:
            raise error
        finally:
            # Remove the patch immediately, even though we are raising.
            monkeypatch.undo()

    return dummy_import
|
341bd038135e1d689af50677d1fc1fbdc96c730a
| 642,559
|
def _is_typing_type(field_type: type) -> bool:
    """Determine whether a type is a typing class.

    NOTE(review): detection relies on the `_subs_tree` attribute, which looks
    like an internal of older `typing` implementations — confirm it still
    exists on the Python versions this project targets.
    """
    # EAFP equivalent of hasattr(field_type, '_subs_tree').
    try:
        field_type._subs_tree
    except AttributeError:
        return False
    return True
|
f0259babf9c5c4025306197e93193d6ab2d66de7
| 178,333
|
def smart_bool(value):
    """Convert given value to bool, using a bit more interpretation for strings.

    The strings "0", "no", "off" and "false" (any case) map to False; every
    other value falls back to plain bool() semantics.
    """
    FALSY_STRINGS = {"0", "no", "off", "false"}
    is_falsy_string = isinstance(value, str) and value.lower() in FALSY_STRINGS
    return False if is_falsy_string else bool(value)
|
90b5c33d38f8ed00a5bf97ddc314ea4901c172af
| 132,069
|
def get_walkID(filename):
    """Create walkID from filename.

    Args:
        filename (str): contains 3 substrings
            'trial_[trial_num]'
            'walklr_[walk_side]'
            'visit_[visit_num]'

    Returns:
        walkID (str): formatted as [trial_num]-[walk_side]-[visit_num]
    """
    def _field(marker, terminator):
        # Text between the last occurrence of *marker* and the next *terminator*.
        return filename.split(marker)[-1].split(terminator)[0]

    return '-'.join((_field('trial_', '.'),
                     _field('walklr_', '_'),
                     _field('visit_', '_')))
|
c70bdf7202546f98d3f9ccea13c677c313d45536
| 363,440
|
def fmt_quil_str(raw_str):
    """Format a raw Quil program string

    Args:
        raw_str (str): Quil program typed in by user.

    Returns:
        str: The Quil program with leading/trailing whitespace trimmed
            from every line.
    """
    lines = str(raw_str).split('\n')
    return '\n'.join(line.strip() for line in lines)
|
e95c26f3de32702d6e44dc09ebbd707da702d964
| 707,167
|
import re
def convert_to_padded_path(path, padding):
    """
    Return correct padding in sequence string

    Args:
        path (str): path url or simple file name
        padding (int): number of padding

    Returns:
        type: string with reformated path

    Example:
        convert_to_padded_path("plate.%d.exr") > plate.%04d.exr
    """
    token = "%d"
    if token in path:
        # Plain substring replacement is sufficient here: neither the token
        # nor the padded replacement involves regex metacharacters.
        padded_token = "%0{padding}d".format(padding=padding)
        path = path.replace(token, padded_token)
    return path
|
67a7d029dd7139302f60a275512e5ff30700c48a
| 360,102
|
def index() -> str:
    """Rest endpoint to test whether the server is correctly working

    Returns:
        str: The default message string
    """
    greeting = 'DeChainy server greets you :D'
    return greeting
|
ce0caeb9994924f8d6ea10462db2be48bbc126d0
| 706,565
|
import torch
def angle_axis_to_rotation_matrix(angle_axis):
    """Convert batch of 3D angle-axis vectors into a batch of 3D rotation matrices

    Arguments:
        angle_axis: (b, 3) Torch tensor,
            batch of 3D angle-axis vectors

    Return Values:
        rotation_matrix: (b, 3, 3) Torch tensor,
            batch of 3D rotation matrices
    """
    assert angle_axis.shape[-1] == 3, "Angle-axis vector must be a (*, 3) tensor, received {}".format(
        angle_axis.shape)

    def angle_axis_to_rotation_matrix_rodrigues(angle_axis, theta2):
        # Rodrigues' formula: R = I + sin(theta)*K + (1 - cos(theta))*K^2,
        # with K the skew-symmetric matrix of the unit rotation axis.
        # NOTE(review): divides by sqrt(theta2), so rows with a near-zero
        # angle produce NaN/inf; the caller only keeps rows with theta2 > eps.
        theta = torch.sqrt(theta2).unsqueeze(-1)  # bx1
        r = angle_axis / theta  # bx3
        rx = r[..., 0]  # b
        ry = r[..., 1]  # b
        rz = r[..., 2]  # b
        # Assemble the skew-symmetric cross-product matrix K per batch element.
        r_skew = torch.zeros_like(r).unsqueeze(-1).repeat_interleave(3, dim=-1)  # bx3x3
        r_skew[..., 2, 1] = rx
        r_skew[..., 1, 2] = -rx
        r_skew[..., 0, 2] = ry
        r_skew[..., 2, 0] = -ry
        r_skew[..., 1, 0] = rz
        r_skew[..., 0, 1] = -rz
        R = torch.eye(3, dtype=r.dtype, device=r.device).unsqueeze(0) \
            + theta.sin().unsqueeze(-1) * r_skew \
            + (1.0 - theta.cos().unsqueeze(-1)) * torch.matmul(r_skew, r_skew)  # bx3x3
        return R

    def angle_axis_to_rotation_matrix_taylor(angle_axis):
        # First-order approximation R ~ I + K; numerically stable when the
        # rotation angle is (near) zero.
        rx, ry, rz = torch.chunk(angle_axis, 3, dim=-1)
        ones = torch.ones_like(rx)
        R = torch.cat([ones, -rz, ry, rz, ones, -rx, -ry, rx, ones], dim=1).view(-1, 3, 3)
        return R

    # Squared rotation angle for each batch element.
    theta2 = torch.einsum('bi,bi->b', (angle_axis, angle_axis))

    eps = 1e-6
    if (theta2 > eps).all():
        # Fast path: every rotation is large enough for the exact formula.
        rotation_matrix = angle_axis_to_rotation_matrix_rodrigues(angle_axis, theta2)
    else:
        # Mixed batch: start from the Taylor result everywhere, then
        # overwrite the rows whose angle is large enough for Rodrigues.
        rotation_matrix = angle_axis_to_rotation_matrix_taylor(angle_axis)
        rotation_matrix_rodrigues = angle_axis_to_rotation_matrix_rodrigues(angle_axis, theta2)

        # Iterate over batch dimension
        # Note: cannot use masking or torch.where because any NaNs in the gradient
        # of the unused branch get propagated
        # See: https://github.com/pytorch/pytorch/issues/9688
        for b in range(angle_axis.shape[0]):
            if theta2[b, ...] > eps:
                rotation_matrix[b, ...] = rotation_matrix_rodrigues[b:(b+1), ...]
    return rotation_matrix
|
bd3300f2b341ec42c40af14763731a14e224cef5
| 216,782
|
from pathlib import Path
import yaml
def loadConfigDict(configNames: tuple):
    # pathLib syntax for windows, max, linux compatibility, see https://realpython.com/python-pathlib/ for an intro
    """
    Generic function to load and open yaml config files

    :param configNames: Tuple containing names of config files to be loaded
    :return: Dictionary with opened yaml config files
    """
    configRoot = Path(__file__).parent.parent / 'config'
    loadedConfigs = {}
    for configName in configNames:
        yamlPath = (configRoot / configName).with_suffix('.yaml')
        with open(yamlPath) as stream:
            loadedConfigs[configName] = yaml.load(stream, Loader=yaml.SafeLoader)
    return loadedConfigs
|
1bbee88e3ff5df7f26f1502eb34d72a33d01b348
| 655,434
|
def get_location_by_offset(filename, offset):
    """
    Return the (row, column) position in the given file that corresponds to
    the given character offset (i.e. number of characters, including new
    line characters).  Rows are 1-based; the column is the remaining offset
    within the row plus one.

    Implicitly returns None when the offset lies beyond the end of the file.
    """
    remaining = offset
    with open(filename, encoding='utf-8', errors='ignore') as source:
        for row, line in enumerate(source, start=1):
            if len(line) < remaining:
                remaining -= len(line)
            else:
                return row, remaining + 1
|
434b60a80fffd8068ea6d90ead92d914127f3b3e
| 21,236
|
def __ensure_suffix(t, suffix):
    """ Ensure that the target t has the given suffix.

    Note: returns the original object unchanged when its string form already
    ends with *suffix*; otherwise returns the string form with the suffix
    appended (so the return type may differ from the input type).
    """
    tpath = str(t)
    return t if tpath.endswith(suffix) else tpath + suffix
|
392bf04404e2d739c676ede578410db34d232789
| 670,424
|
from typing import Dict
from typing import Set
def lettres_freq_inf(freqs : Dict[str,int], fseuil : int) -> Set[str]:
    """Precondition: fseuil > 0
    Return the set of letters in freqs whose frequency is at most fseuil.
    """
    # The comparison is <=, so the threshold itself is included.
    return {lettre for lettre, freq in freqs.items() if freq <= fseuil}
|
2f67e809c355a590fb99ed7639cd90bebbadfe33
| 553,599
|
def mean(X):
    """
    Compute the mean for a dataset of size (D, N), where D is the dimension
    and N is the number of data points, returned as a (D, 1) array.
    """
    D, _ = X.shape  # unpacking also checks that X is two-dimensional
    # Average over the sample axis, then restore the column-vector shape.
    return X.mean(axis=1).reshape(D, 1)
|
ee887eed8f6ac70d09f40762df2ece52dea74a2c
| 484,222
|
import pkg_resources
def bank_validation_str(iban: str, bank_code: str) -> list:
    """
    Retrieves the bank information for the given IBAN prefix and bank code.

    :param iban: An iban string
    :type iban: str
    :param bank_code: the bank code
    :type bank_code: str
    :return: A list with the bank information fields (empty when the data
        file is missing or the bank code is not found)
    :rtype: list
    """
    filename = f'{iban[:6]}.txt'
    # Bug fix: the computed filename was never interpolated into the resource
    # path (it contained literal placeholder text), so every lookup targeted
    # a non-existent file and returned [].
    location = f'data/banks/{filename}'
    try:
        stream = pkg_resources.resource_stream(__name__, location)
    except FileNotFoundError:
        return []
    for line in stream:
        line = line.decode('utf-8')
        # Records are pipe-delimited; the bank code is the first field.
        if line.startswith(bank_code + '|'):
            return line.strip().split("|")
    return []
|
6c3ab1ee8ac79eeb79f59cadd349936a4809070d
| 379,468
|
def update_toestand(A, x, resistent=None):
    """
    For a given connection matrix A and a binary state vector x
    (0: susceptible/resistant, 1: infected) at time t, compute the state
    vector at time t+1.  Optionally, a list of indices of resistant
    individuals can be given.

    Updates x in place and returns the same array.
    """
    # Use None instead of a mutable default argument.
    if resistent is None:
        resistent = []
    # A node is infected at t+1 if it was infected or has an infected neighbour.
    x[:] = (x + A @ x) > 0
    # Resistant individuals never become infected.
    if resistent:
        x[resistent] = 0
    return x
|
20ce9e50f6ebf0ca5d26c9e0ca3c789e81a94dee
| 582,003
|
def safe_unicode(obj):
    """Safe conversion to the Unicode string version of the object."""
    # On Python 3, str(b"...") returns the repr ("b'...'") instead of
    # decoding, so decode byte strings explicitly; the UnicodeDecodeError
    # fallback below only ever fired on Python 2 and is kept for safety with
    # objects whose __str__ raises it.
    if isinstance(obj, bytes):
        return obj.decode("utf-8")
    try:
        return str(obj)
    except UnicodeDecodeError:
        return obj.decode("utf-8")
|
f6a592f7ea0de5179f1b8a91e0bd75c9a5d522df
| 73,547
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.