| content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k) |
|---|---|---|
def lineset_assign(lineset1, lineset2):
"""
Assign the attributes of lineset2 to lineset1.
Parameters
----------
lineset1 : open3d.LineSet
lineset2 : open3d.LineSet
Returns
-------
The lineset1 object with 2's attributes.
"""
lineset1.points = lineset2.points
lineset1.lines = lineset2.lines
lineset1.colors = lineset2.colors
return lineset1
|
7ffdec7582dc3624a1bde4579191835ffef567b0
| 542,856
|
def weighing_function(orig_length, cur_length):
"""
Function to generate the weight value given the predicted text-length and the expected text-length
The intuition is that if the predicted text-length is far from the expected text-length then weight should be
small to that word-bbox.
:param orig_length: Length of the expected word bounding box
:param cur_length: Length of the predicted word bounding box
:return:
"""
if orig_length == 0:
if cur_length == 0:
return 1
return 0
return (orig_length - min(orig_length, abs(orig_length - cur_length)))/orig_length
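# --- Added usage sketch (illustrative, not part of the original snippet) ---
assert weighing_function(10, 8) == 0.8   # |10 - 8| = 2, so (10 - 2) / 10
assert weighing_function(5, 5) == 1.0    # perfect length match
assert weighing_function(0, 3) == 0      # expected empty, predicted non-empty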
|
09afb649952039e0d7f7b776d43cc0b0307329ee
| 34,054
|
import re
def is_ipython_notebook(file_name):
"""
Return True if file_name matches a regexp for an ipython notebook. False otherwise.
:param file_name: file to test
"""
    if (not re.match(r"^.*checkpoint\.ipynb$", file_name)) and re.match(r"^.*\.ipynb$", file_name): return True
return False
|
228df2ba027c814d748139dba20da9e4c46b7db1
| 196,036
|
import json
def serialize_json(obj, **kwargs):
"""Encode python object into a JSON string.
Args:
obj (:obj:`object`):
Python object to encode into a string.
**kwargs:
indent (:obj:`int`):
Indent spacing for prettifying.
Defaults to: 2.
rest of kwargs:
Passed to :func:`json.dumps`.
Returns:
:obj:`str`
"""
kwargs.setdefault("indent", 2)
return json.dumps(obj, **kwargs)
|
e418827f871f34743d3ed7b030ce35f370b5e2d6
| 378,573
|
def find_beat(v):
"""
    find the beat of a vector-format MIDI sequence using the autocorrelation function
"""
# binary vector for testing autocorrelation
v2 = [0 if x[0] == -1 else 1 for x in v]
result = []
# no need to check more than 24*4 = 96
# i.e. 4 quarter notes of standard midi
for lag in range(96):
s = 0
for i in range(len(v2)):
if v2[i] > 0 and v2[(i + lag) % len(v2)] > 0:
s += 1
result.append((lag, s))
k = 1
srt = sorted(result, key=lambda x: x[1])
while srt[-k][0] == 0:
k += 1
return srt[-k][0]
|
b408afced09779eb69b40ae54d1fd2c2cfcf1906
| 700,830
|
import struct
def int_2_bytes(int_value, is_little_endian=False):
"""
将int转换成4字节的bytes串。
:param int_value:
:param is_little_endian:
:return:
"""
# 小端数据返回
if is_little_endian:
return struct.pack('<i', int_value)
# 大端数据返回
return struct.pack('>i', int_value)
|
dd4dceb7f500d68db3a5621845475397c75817f6
| 64,099
|
def discovery_status_to_text(status):
"""
Convert a Discovery Status code into meaningful text.
Args:
        status: Status code from Orion.
Returns:
String: Human text for status code.
"""
discovery_statuses = {"0": 'Unknown',
"1": 'InProgress',
"2": 'Finished',
"3": 'Error',
"4": "NotScheduled",
"5": "Scheduled",
"6": "NotCompleted",
"7": "Canceling",
"8": "ReadyForImport"}
return discovery_statuses[status]
|
acddb0b1d6472523950eaa6b9cc00f8e54508b19
| 91,480
|
import re
def filter_lines(output, filter_string):
"""Output filter from build_utils.check_output.
Args:
output: Executable output as from build_utils.check_output.
filter_string: An RE string that will filter (remove) matching
lines from |output|.
Returns:
The filtered output, as a single string.
"""
re_filter = re.compile(filter_string)
return '\n'.join(
line for line in output.splitlines() if not re_filter.search(line))
|
037281b7f1b1c9b87abbcc8572e198a1f36b6165
| 654,263
|
def non_qualified_code(code):
"""
Some codes, e.g. ISO 3166-2 subdivision codes, are compound and are formatted as
"{country_code}-{subdivision_code}". For validation cases we often care about
extracting the non-qualified subdivision code in such cases.
"""
    # maxsplit=1 keeps the full subdivision part even if it contains a dash
    return code.split("-", 1)[1]
|
aa5f6363349e11e1a3307be7bb6ed724d636883a
| 572,108
|
def choose_x(x, x_new):
"""Choice the x array with the smallest number of elements."""
if x.shape[0] == 0:
# for initial empty array
return x_new
else:
return x if x.shape[0] < x_new.shape[0] else x_new
|
4c4a4e37fe619c1a48997dcb13ae43f0a52c1d84
| 167,198
|
import uuid
def is_uuid_like(input_id: str) -> bool:
"""
    Check whether the input string has a valid UUID string representation.
**Example:**
"886313e1-3b8a-5372-9b90-0c9aee199e5d"
:param input_id: An input string
:return: A boolean output
"""
try:
uuid.UUID(str(input_id))
return True
except ValueError:
return False
|
af2a840135945908b3903a2b54d1e81a87d6691a
| 168,933
|
import re
def fixFlagsQuoting(text):
"""Replaces e.g. /DFOO with /D "FOO" and /DFOO=X with /D FOO=X."""
return re.sub(r'\/([DIid]) ([^ \"=]+)([ $])', r'/\1 "\2"\3',
re.sub(r'\/([DIid]) ([^ \"=]+)=([^ \"]*)([ $])', r'/\1 \2=\3\4', text))
|
ab60d5cf539d7a3753e457838989ab3be520d6b3
| 669,422
|
def gcd(x, y):
"""
Calculate greatest common divisor
"""
while y != 0:
t = x % y
x, y = y, t
return x
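# --- Added usage sketch (illustrative, not part of the original snippet) ---
assert gcd(12, 18) == 6
assert gcd(7, 0) == 7   # gcd(x, 0) is x by definition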
|
b7a57497047ca3df429eb3ed4e3941edbd0890a4
| 203,118
|
def get_valid_post_response(data):
"""
Returns success message correct processing of post/get request
:param str data: message
:return: response
:rtype: object
"""
response = {"status": 201, "message": "Created", "data": data}
return response
|
0cb2c7e7011e2baf45a1cc5202d3476fd7a49e01
| 514,483
|
import math
def safeArgs(args):
"""Iterate over valid, finite values in an iterable.
Skip any items that are None, NaN, or infinite.
"""
return (arg for arg in args
if arg is not None and not math.isnan(arg) and not math.isinf(arg))
|
d455bcd0cef7e6a47d1e967f17ba0e2dd08c25f4
| 46,793
|
async def websocket_lovelace_config(hass, connection, msg, config):
"""Send Lovelace UI config over WebSocket configuration."""
return await config.async_load(msg["force"])
|
a09b92334c93d7c4603a566939a643dcffcaba57
| 580,327
|
import random
import string
def get_tmp_suffix(length=None):
"""
    Returns a random filename extension as a string of the specified length
"""
length = 10 if length is None else length
return "." + "".join(random.choices(string.ascii_uppercase + string.digits, k=length))
|
cd3c67ccadc375b34dbc97ed299182dfadc98328
| 80,854
|
def _calc_splits(df, num_buckets=100):
"""Calculate the right edge of num_session buckets
Utilizes approxQuantile to bucketize the source. Due to the available
implementation this has to be done per-wiki, so is perhaps slower than
necessary. For current dataset sizes that isn't an issue.
We need to bucketize the number of sessions so we can split the
per-wiki input into strata and sample from each bucket. This helps
ensure we keep a consistent distribution of popular and unpopular
queries from the input to the output.
Parameters
----------
df : pyspark.sql.DataFrame
Input dataframe to have bucketing fractions calculated for.
num_buckets : int
Number of buckets to create per-wiki
Returns
-------
list of ints
List of right-edges of buckets that will have an approximately equal number
of queries per bucket.
"""
percentiles = [x/float(num_buckets) for x in range(1, num_buckets)]
    # With 100 buckets, there will be buckets at .01, .02, etc. This specifies
    # that percentile .01 must be the value between .009 and .011.
relative_error = 1. / (num_buckets * 10)
splits = df.approxQuantile('num_sessions', percentiles, relative_error)
# range(1, num_buckets) returned num_buckets-1 values. This final inf captures
# everything from the last bucket to the end.
return splits + [float('inf')]
|
c7e80335ce625fd5ee6b75be4e9a6b771863f46f
| 420,675
|
def _dict_all_nones(bbox_dict):
"""Returns True if all dict values are None"""
    return bool(bbox_dict) and all(v is None for v in bbox_dict.values())
|
79ad1b3405ca0680fc15a5427a1cbb409abe4245
| 386,746
|
import torch
def stack_subsample_frames_no_sync(x, x_lens, stacking=1, subsampling=1):
""" Stacks frames together across feature dim, and then subsamples
input is batch_size, feature_dim, num_frames
output is batch_size, feature_dim * stacking, num_frames / subsampling
"""
assert stacking == subsampling
# x is [B, H, T]
x = x.transpose(1, 2)
T = x.size(1)
padded = torch.nn.functional.pad(x, (0, 0, 0, (stacking - (T % stacking)) % stacking))
B, T, H = padded.size()
x = padded.reshape(B, T // stacking, -1)
x = x.transpose(1, 2)
x_lens = (x_lens.int() + stacking - 1) // stacking
return x, x_lens
|
a6d77a9e1710f3ec156f35660b2d5b76f1ec07b5
| 104,142
|
from pathlib import Path
def existing_git_submodule(path: Path) -> bool:
"""Check if a git submodule exists
:param Path path: Submodule path
:return: True, if .git file exists inside path
"""
    return (path / '.git').is_file()
|
a0fb29aad65373f49d5306daa3ac13475f328a72
| 633,725
|
import functools
import operator
def product(nums):
    """
    Get the product of the numbers in an iterable.
    """
return functools.reduce(operator.mul, nums, 1)
|
801b3e9c3fe9229eaf8ad58fe73a9a1e63506b41
| 701,234
|
import contextlib
import types
import typing
@contextlib.contextmanager
def temp_fn_argument_extractor(
    mod: types.ModuleType,
    mod_attr: str,
) -> typing.Iterator[typing.List[typing.Tuple[list, dict]]]:
    """
    Temporarily intercept a function, so its arguments can be extracted.
The context manager gives us a list where each item is a tuple of
arguments & keywords, stored each time the function was called.
"""
args_collected = []
real_fn = getattr(mod, mod_attr)
def wrap_fn(*args, **kw):
args_collected.append((args, kw))
return real_fn(*args, **kw)
setattr(mod, mod_attr, wrap_fn)
try:
yield args_collected
finally:
setattr(mod, mod_attr, real_fn)
|
75864f5147ee8c7adfda6ac6286ae5b1ac91ac27
| 176,328
|
def register_event(event_type, include_subclasses=False):
"""
Register a method to handle a specific `opsdroid.events.Event` object.
Args:
event_type (Event): The event class this method can handle.
include_subclasses (bool): Allow the function to trigger on subclasses of the registered
event. Defaults to False.
"""
def decorator(func):
if hasattr(func, "__opsdroid_events__"):
func.__opsdroid_events__.append(event_type)
else:
func.__opsdroid_events__ = [event_type]
func.__opsdroid_match_subclasses__ = include_subclasses
return func
return decorator
|
ab47a895471ae1911e6da30264677a596231de49
| 653,792
|
def parse_message(data):
"""Return a tuple containing the command, the key, and (optionally) the
value cast to the appropriate type."""
command, key, value, value_type = data.strip().split(';')
if value_type:
if value_type == 'LIST':
value = value.split(',')
elif value_type == 'INT':
value = int(value)
else:
value = str(value)
else:
value = None
return command, key, value
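# --- Added usage sketch (illustrative; the wire format is command;key;value;type) ---
assert parse_message("SET;color;red,green;LIST") == ("SET", "color", ["red", "green"])
assert parse_message("DEL;color;;") == ("DEL", "color", None)   # empty type -> no value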
|
029705f2100b050cd1261f96079e9cf332a0e8e1
| 125,132
|
def is_written_by(author_first_name, author_last_name, paper):
"""
Judge whether the scraped paper belongs to the given author or not.
:param author_first_name: string. First name of the author.
:param author_last_name: string. Last name of the author.
:param paper: string. arXiv papers scraped online.
"""
    return len(paper) > 0 and author_first_name in paper and author_last_name in paper
|
2bd17a88ecc6f435f5e9922ca6803f328258f690
| 272,969
|
def convert_to_base(num, base):
"""
Converts integer num into base b format
ie number to configuration
Args:
num - intger to be converted
base - base to be converted into
Returns:
base b representation of num
"""
convStr = "0123"
if num < base:
return str(num)
else:
return convert_to_base(num // base, base) + convStr[num % base]
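# --- Added usage sketch (illustrative, not part of the original snippet) ---
assert convert_to_base(10, 2) == "1010"
assert convert_to_base(255, 16) == "FF"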
|
a4026955d1f644e49ccfd43b6ab0e3c8e81d8d14
| 603,092
|
def _get_help_command_preface(command_name='scenedetect'):
"""Preface/intro help message shown at the beginning of the help command."""
return """
The PySceneDetect command-line interface is grouped into commands which
can be combined together, each containing its own set of arguments:
> {command_name} ([options]) [command] ([options]) ([...other command(s)...])
Where [command] is the name of the command, and ([options]) are the
arguments/options associated with the command, if any. Options
associated with the {command_name} command below (e.g. --input,
--framerate) must be specified before any commands. The order of
commands is not strict, but each command should only be specified once.
Commands can also be combined, for example, running the 'detect-content'
and 'list-scenes' (specifying options for the latter):
> {command_name} -i vid0001.mp4 detect-content list-scenes -n
A list of all commands is printed below. Help for a particular command
can be printed by specifying 'help [command]', or 'help all' to print
the help information for every command.
Lastly, there are several commands used for displaying application
version and copyright information (e.g. {command_name} about):
help: Display help information (e.g. `help [command]`).
version: Display version of PySceneDetect being used.
about: Display license and copyright information.
""".format(command_name=command_name)
|
bfd836f9046615f4f92364d96f32c82dd6125127
| 468,278
|
import math
def calculate_crop_for_angle(image, angle):
"""
Calculates the pixels in vertical and horizontal direction which become invalid, i.e.,
not completely filled with image.
:param image: input image
:param angle: rotation angle
:type image: numpy.ndarray
:type angle: float
:return: (vertical_crop, horizontal_crop)
:rtype: tuple(int, int)
>>> calculate_crop_for_angle(np.zeros((32, 32,)), 45.0)
(15, 15)
"""
    wd = (image.shape[0] * 0.5) * math.tan(math.radians(angle))
    hd = (image.shape[1] * 0.5) * math.tan(math.radians(angle))
hd, wd = int(abs(hd)), int(abs(wd))
return hd, wd
|
39bbc1200daddfda0a77c5b0d989f5fff9157435
| 605,812
|
def lib1_cons2_neutral3(x):
"""Rearrange questions where 3 is neutral."""
return -3 + x if x != 1 else x
|
32894ff4b22e86d3788f8456e8f743ab12f84cab
| 436,565
|
def get_ratio(numerator, denominator):
"""Get ratio from numerator and denominator."""
return (
0 if not denominator else round(float(numerator or 0) / float(denominator), 2)
)
|
e51a860292d54d2e44909ad878d0b1d8e66c37c2
| 709,228
|
def local_solar_time(local_time, tc):
"""
Corrects the local time to give the local solar time.
Parameters
----------
local_time : float
The local time in hours
    tc : float
        The time correction factor, in minutes
Returns
-------
lst : float
The local solar time in hours.
"""
lst = local_time + tc / 60
return lst
|
a3282a5ba728ee6a40e4b627fc464dc99e29ae36
| 505,489
|
import random
import string
def device_name_list(min_devices=0, max_devices=10, unique=False):
"""
Return a function that returns a random list of device names based on
parameters.
:param int min_devices: the minimum number of device names to generate
:param int max_devices: the maximum number of device names to generate
:param bool unique: ensure that all device names are unique
"""
def the_func():
return [
"/dev/%s"
% "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(4)
)
for _ in range(random.randrange(min_devices, max_devices + 1))
]
if unique:
def the_unique_func():
devices = set()
while len(devices) < min_devices:
for device in the_func():
devices.add(device)
return list(devices)
return the_unique_func
return the_func
|
b23b7a38e9af95ebfa813f5e8bad51b29e2c44e3
| 579,054
|
def default_shear_conf() -> dict:
"""
Default shear configuration.
"""
shear_conf = {
'twinmode': '10-12',
'grids': 5,
'expansion_ratios': [1., 1., 1.],
}
return shear_conf
|
ad1523df07e335cd12411144dcad243ddfd143a8
| 566,956
|
import requests
import logging
def test_upload(url):
"""
Check if a presigned URL is valid
"""
    try:
        response = requests.put(url, data='aa', timeout=120)
    except requests.RequestException:
        # a bare except would also swallow KeyboardInterrupt and SystemExit
        return None
if response.status_code == 403:
return False
logging.info('Successfully tested upload url')
return True
|
42e0d1411871a397ded567b4f9f28a4861e7e1c1
| 543,925
|
def common_prefix_len(a: str, b: str) -> int:
""" Find the common prefix of two strings. """
min_len = min(len(a), len(b))
for i in range(min(len(a), len(b))):
if a[i] != b[i]:
return i
return min_len
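# --- Added usage sketch (illustrative, not part of the original snippet) ---
assert common_prefix_len("flower", "flow") == 4
assert common_prefix_len("abc", "xyz") == 0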
|
a6bcdc15305e6dbb59ac9682ed5b79f711a5f454
| 275,757
|
def common_replacements(setting, item):
"""Maps keys to values from setting and item for replacing string
templates.
"""
return {"$name": setting.lower(),
"$setting": setting,
"$prettyname": item.py_name,
"$doc_str": item.doc_str}
|
618e0f9f90822455cebdf48c5cfafd913f193214
| 677,483
|
def edentity(im):
"""
Identity function for Image objects.
"""
return im
|
885500e9ed8e8fc5fbadb55182d35f132350d16b
| 375,386
|
def surprisetopN(algo, trainSet, raw_uid, N):
"""Derive the topN recommendations for user uid
algo: scikit-surprise trained algorithm
trainSet (surprise.dataset.Trainset)
raw_uid (int or float): raw uid
e.g. surprisetopN(algo, trainSet, 196, 3)
Returns:
list: (raw_iid, prediction) for the N recommended item_ids
"""
inner_uid = trainSet.to_inner_uid(raw_uid)
recom = []
profile = set(map(lambda x: x[0], trainSet.ur[inner_uid]))
for iid in trainSet.all_items():
if iid not in profile: # item is unseen
raw_iid = trainSet.to_raw_iid(iid)
pred = algo.predict(raw_uid, raw_iid, r_ui=-1, verbose=False)
recom.append((raw_iid, pred.est))
recom = sorted(recom, key=lambda x: x[1], reverse=True)
return recom[:N]
|
26cb346192a5591f39f2b5de738d072dd934b2fc
| 91,252
|
def _favorite_authors(soup):
"""
.. versionadded:: 0.3.0
Find the favorite authors for a user and return them as a list.
:param soup: Soup containing a page from FanFiction.Net
:type soup: bs4.BeautifulSoup class
:returns: A list of user-ids corresponding to the authors liked by a user.
:rtype: list
Example:
.. code-block:: python
import ffscraper as ffs
# Get an example user (details are changed here)
soup = ffs.utils.soupify('https://www.fanfiction.net/u/12')
# Get their favorite authors via this function.
fav_authors = ffs.author.profile._favorite_authors(soup)
# We'll print to see the results
print(fav_authors)
.. code-block:: bash
['124', '125', '126']
"""
# Favorite Authors for a user is stored under a div with id='fa'
authors_table = soup.find('div', {'id': 'fa'})
if authors_table:
author_links = authors_table.find_all('a')
return [a['href'].split('/')[2] for a in author_links]
else:
return []
|
35ec899d02d6a4ca12db1509eab864b606e8bce7
| 370,632
|
from typing import List
def find_first_in_list(txt: str, str_list: List[str]) -> int:
"""Returns the index of the earliest occurrence of an item from a list in a string
Ex: find_first_in_list('foobar', ['bar', 'fin']) -> 3
"""
start = len(txt) + 1
    for item in str_list:
        idx = txt.find(item)
        if -1 < idx < start:
            start = idx
return start if len(txt) + 1 > start > -1 else -1
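# --- Added usage sketch (matches the docstring example) ---
assert find_first_in_list("foobar", ["bar", "fin"]) == 3   # 'bar' starts at index 3
assert find_first_in_list("foobar", ["xyz"]) == -1         # no item occurs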
|
c979db94ae1e81e3e7cbfc6118b371ab2338192f
| 690,137
|
from typing import Tuple
def tokenize_version(version_string: str) -> Tuple[int, int, int]:
"""Tokenize a version string to a tuple.
Truncates qualifiers like ``-dev``.
:param version_string: A version string
:return: A tuple representing the version string
>>> tokenize_version('0.1.2-dev')
(0, 1, 2)
"""
before_dash = version_string.split('-')[0]
major, minor, patch = before_dash.split('.')[:3] # take only the first 3 in case there's an extension like -dev.0
return int(major), int(minor), int(patch)
|
2dbca80d7fbd0e504adbdd6901f42a87452116ac
| 692,447
|
from functools import reduce
import operator
def getFromDict(dict_, keys):
""" Get value from nested dict """
return reduce(operator.getitem, keys, dict_)
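# --- Added usage sketch (illustrative, not part of the original snippet) ---
assert getFromDict({"a": {"b": {"c": 3}}}, ["a", "b", "c"]) == 3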
|
a7cb53601cec1cf4f2459f5eb6829a0413410d03
| 501,911
|
def intersect(range_1, range_2):
"""Return intersection size."""
return min(range_1[2], range_2[2]) - max(range_1[1], range_2[1])
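# --- Added usage sketch (illustrative; assumes ranges are (label, start, end) triples) ---
assert intersect(("r1", 0, 10), ("r2", 5, 20)) == 5    # overlap is [5, 10)
assert intersect(("r1", 0, 3), ("r2", 7, 9)) < 0       # negative means no overlap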
|
7121d175e46d0686d2eb816bb4943f101dd5d101
| 242,516
|
def deg2day(year_length, degrees):
"""Convert degrees that the planet has orbited into a calendar day."""
return degrees * (year_length/360.0)
|
31eee82b7940722592ef80266e1def856bb49b20
| 207,869
|
from typing import List
def _unique(collection: List[str]) -> bool:
"""
Determine if all elements of a collection are unique.
:param collection: The collection
    :type collection: List[str]
:return: `True` if all elements are unique, `False` otherwise
:rtype: bool
"""
return len(set(collection)) == len(collection)
|
fe727e13852ea9baf7696a3eda7e5f2ab57d4b5d
| 72,575
|
import re
def slugify(u):
"""Convert Unicode string into blog slug."""
# From https://leancrew.com/all-this/2014/10/asciifying/
u = re.sub(u'[–—/:;,.]', '-', u) # replace separating punctuation
    a = u.lower()                     # lowercase (non-ASCII characters are dropped below)
a = re.sub(r'[^a-z0-9 -]', '', a) # delete any other characters
a = a.replace(' ', '-') # spaces to hyphens
a = re.sub(r'-+', '-', a) # condense repeated hyphens
return a
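# --- Added usage sketch (illustrative, not part of the original snippet) ---
assert slugify("Hello, World") == "hello-world"
assert slugify("2014–2015 recap") == "2014-2015-recap"   # en-dash becomes a hyphen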
|
7e4c18280459021a574456f1ac7f88f45c450a48
| 238,799
|
def create_dict_of_adjacent_tile_indices(width: int, height: int) -> dict:
"""
there is a grid of size width*height.
Each location in the grid has an integer index, 1 to width*height
the "transition dict" contains
key: integer index of each tile in the grid
value: list of adjacent tile indices.
"""
    transition_dic = {}
    num_tiles = width * height
    for n in range(1, num_tiles + 1):
        adjacent_edges_list = []
        # left neighbour exists unless n is in the first column
        if (n - 1) % width != 0:
            adjacent_edges_list.append(n - 1)
        # right neighbour exists unless n is in the last column
        if n % width != 0:
            adjacent_edges_list.append(n + 1)
        # top neighbour exists unless n is in the first row
        if n > width:
            adjacent_edges_list.append(n - width)
        # bottom neighbour exists unless n is in the last row
        if n <= num_tiles - width:
            adjacent_edges_list.append(n + width)
        transition_dic[n] = adjacent_edges_list
return transition_dic
|
28db079753515512cb785ada4c5927c96cbaa6de
| 397,732
|
def cc(key):
"""
    Converts a snake_case key into its camelCase equivalent. For example,
    'compute_environment_name' becomes 'computeEnvironmentName'.
:param key:
:return:
"""
components = key.split('_')
return components[0] + "".join([token.capitalize() for token in components[1:]])
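# --- Added usage sketch (taken from the docstring example) ---
assert cc("compute_environment_name") == "computeEnvironmentName"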
|
311d253842638f581dd1dd051270f3d6098e2955
| 629,247
|
from datetime import datetime
def is_valid_article(date : datetime, state : str, date_start : datetime, date_end : datetime) -> bool:
"""
    Determines if the metadata retrieved from the article is valid.
Params
------
date: datetime.datetime
Published datetime of the article
state: str
detected state of the incident in the article
date_start: datetime.datetime
article search beginning timeframe
date_end: datetime.datetime
article search ending timeframe
Returns
-------
bool:
boolean value determining whether the article is valid or not
"""
return isinstance(state, str) and date >= date_start and date <= date_end
|
db6ba5a40ca453ecda0e5872dd9b7c8318db452d
| 99,646
|
def ek_R56Q(cell):
"""
Returns the R56Q reversal potential (in mV) for the given integer index
``cell``.
"""
reversal_potentials = {
1: -96.0,
2: -95.0,
3: -90.5,
4: -94.5,
5: -94.5,
6: -101.0
}
return reversal_potentials[cell]
|
a61d33426e4c14147677c29b8e37381981f0d1db
| 701,803
|
def _flatten_dict(input_dict: dict) -> dict:
"""
Flattens a nested dictionary, keeping only keys with non-dict values. Note
that keys will be the underscore-delimited concatenation of the nested path
of keys from the original dict.
"""
res = {}
for k, v in input_dict.items():
if type(v) is not dict:
res[k] = v
else:
nest = _flatten_dict(v)
for key, value in nest.items():
new_key = "{}_{}".format(k, key)
res[new_key] = value
return res
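# --- Added usage sketch (illustrative, not part of the original snippet) ---
assert _flatten_dict({"a": {"b": 1}, "c": 2}) == {"a_b": 1, "c": 2}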
|
9273b1b7b08d54b3b7e69a2710285abbac8d22e4
| 334,372
|
from typing import Tuple
def extended_euclidean(x: int, y: int) -> Tuple[int, int, int]:
"""
See:
shainer.github.io/crypto/math/2017/10/22/chinese-remainder-theorem.html
    Given two integers, returns their greatest common divisor and
    the two coefficients in the Bézout identity.
"""
x0, x1, y0, y1 = 1, 0, 0, 1
while y > 0:
q, x, y = x // y, y, x % y
x0, x1 = x1, x0 - (q * x1)
y0, y1 = y1, y0 - (q * y1)
return x, x0, y0
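# --- Added usage sketch (illustrative, not part of the original snippet) ---
g, s, t = extended_euclidean(240, 46)
assert g == 2 and 240 * s + 46 * t == g   # the Bézout identity holds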
|
918835a7ed559af16cb67ef37d0d8abededc67cd
| 443,868
|
def linux_notify(title: str, message: str) -> str:
"""Display notification for Linux systems"""
command = f'''notify-send "{title}" "{message}"'''
return command
|
ae702eed884e35fccaf974898de9cc0c12b686c2
| 50,453
|
import pickle
def unpickle(filename: str) -> object:
"""
Unpickles a file and returns the object
"""
    with open(filename, "rb") as pickleIn:
        pickledObject = pickle.load(pickleIn)
    return pickledObject
|
891347cfc1f491a40d797332c2967f7b293630af
| 43,240
|
def maximum_severity(*alarms):
"""
Get the alarm with maximum severity (or first if items have equal severity)
Args:
*alarms (Tuple[AlarmSeverity, AlarmStatus]): alarms to choose from
Returns:
(Optional[Tuple[AlarmSeverity, AlarmStatus]]) alarm with maximum severity; none for no arguments
"""
maximum_severity_alarm = None
for alarm in alarms:
if maximum_severity_alarm is None or alarm[0] > maximum_severity_alarm[0]:
maximum_severity_alarm = alarm
return maximum_severity_alarm
|
d78a221b56e891103e8387d077d8f14dd2d1ce93
| 682,455
|
def _cmp_bystrlen_reverse(a, b):
"""A private "cmp" function to be used by the "sort" function of a
list when ordering the titles found in a knowledge base by string-
length - LONGEST -> SHORTEST.
@param a: (string)
@param b: (string)
@return: (integer) - 0 if len(a) == len(b); 1 if len(a) < len(b);
-1 if len(a) > len(b);
"""
if len(a) > len(b):
return -1
elif len(a) < len(b):
return 1
else:
return 0
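# --- Added usage sketch (illustrative; a cmp function needs functools.cmp_to_key in Python 3) ---
from functools import cmp_to_key
assert sorted(["a", "ccc", "bb"], key=cmp_to_key(_cmp_bystrlen_reverse)) == ["ccc", "bb", "a"]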
|
f1306bef50367c3762133868682f7879fa9d311f
| 565,072
|
def scale_vector(vector, scale):
"""Scale a 3D vector's components by scale."""
return vector[0] * scale, vector[1] * scale, vector[2] * scale
|
292269d2e54db362b823c547b0415f53d93e3e4c
| 22,661
|
def kalkulasi_kecepatan_akhir(
kecepatan_awal: float, percepatan: float, waktu: float
) -> float:
"""
Menghitung kecepatan akhir dari suatu pergerakan
dengan percepatan yang berbeda
>>> kalkulasi_kecepatan_akhir(10, 2.4, 5)
22.0
>>> kalkulasi_kecepatan_akhir(10, 7.2, 1)
17.2
"""
# jika waktu 0 diisi dengan 1 detik
return kecepatan_awal + percepatan * waktu
|
1db3150cab6991de8063bbf546f475ce982db2bb
| 80,329
|
import torch
from typing import Callable
def encode_and_aggregate(input_tensor: torch.Tensor,
encoder: torch.nn.Module,
num_encoder_input_channels: int,
num_image_channels: int,
encode_channels_jointly: bool,
aggregation_layer: Callable) -> torch.Tensor:
"""
Function that encodes a given input tensor either jointly using the encoder or separately for each channel
in a sequential manner. Features obtained at the output encoder are then aggregated with the pooling function
defined by `aggregation layer`.
"""
if encode_channels_jointly:
input_tensor = encoder(input_tensor)
input_tensor = aggregation_layer(input_tensor)
else:
shape = input_tensor.shape
channel_shape = (shape[0], num_encoder_input_channels, shape[2], shape[3], shape[4])
encode_and_aggregate = []
# When using multiple encoders, it is more memory efficient to aggregate the individual
# encoder outputs and then stack those smaller results, rather than stack huge outputs and aggregate.
for i in range(num_image_channels):
start_index = i * num_encoder_input_channels
end_index = start_index + num_encoder_input_channels
encoder_output = encoder(input_tensor[:, start_index:end_index].view(channel_shape))
aggregated = aggregation_layer(encoder_output)
encode_and_aggregate.append(aggregated)
input_tensor = torch.cat(encode_and_aggregate, dim=1)
return input_tensor
|
f2d65e1c2c214cfddae40dd235fba86a61866277
| 697,661
|
def proc_fill_nan(df):
""" replaces NaN with zeros """
return df.fillna(0)
|
20ef3d5b260805319e0a5c6d195d05adc2988e84
| 190,149
|
def create_channel(slack_client, name):
"""
Create a channel with a given name.
"""
response = slack_client.api_call("channels.create",
name=name, validate=False)
return response
|
62b9d02ae833a491ae7e0c8d4c764b72635af6de
| 274,916
|
def invalid_iso(entity):
"""Generate invalid iso template."""
base = "is an invalid %s iso code" % entity
return "%(value)s " + base
|
846396b62f15bb4fc315a48e5575340bf0ddfcc2
| 544,136
|
def get_operation_size(ops):
""" Returns the number of characters added by an operation (can be negative) """
count = 0
for op in ops:
if isinstance(op, str):
count += len(op)
elif isinstance(op, int) and op < 0:
count += op
return count
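# --- Added usage sketch (illustrative; assumes strings insert text and negative ints delete) ---
assert get_operation_size(["abc", -1, 5]) == 2   # +3 inserted, -1 deleted, positive ints ignored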
|
1670ee281642722a705d329d82a8ada42cc4bb22
| 322,557
|
from typing import Container
def is_container_type(typ):
"""
Check if the given type is a container.
"""
return isinstance(typ, type) and issubclass(typ, Container)
|
0a2a4c564c451fdfce817af9170fb5d21931611b
| 505,569
|
def parse_Bytes(bytes, msg_class):
"""Parses the String of bytes into msg_class.
Returns the input bytes if msg_class is None."""
if msg_class is None:
return bytes
msg = msg_class()
msg.ParseFromString(bytes)
return msg
|
8cc586dd8d8c9c7575a2101b773224274c91ff4a
| 249,645
|
import re
def extract_youtube_id(youtube_url):
""" Extract the youtube ID from the url"""
youtube_id_match = re.search(r'(?<=v=)[^&#]+', youtube_url)
youtube_id_match = youtube_id_match or re.search(
r'(?<=be/)[^&#]+', youtube_url)
return (youtube_id_match.group(0) if youtube_id_match else None)
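# --- Added usage sketch (illustrative, not part of the original snippet) ---
assert extract_youtube_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ") == "dQw4w9WgXcQ"
assert extract_youtube_id("https://youtu.be/dQw4w9WgXcQ") == "dQw4w9WgXcQ"
assert extract_youtube_id("https://example.com/") is None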
|
b2cdb5f9040b2423d4b1669ae51319c678c8a712
| 370,834
|
import json
def read_json(fp: str) -> dict:
"""Helper. Reads a JSON file and returns content.
:param fp: The filepath to the JSON file.
:dtype fp: str
"""
with open(fp) as f:
f_data = json.load(f)
return f_data
|
12a5587dfdae0f36a923e68a020f504e7b955a5c
| 510,215
|
def read_nonclust_frap_data(df_list, file_names, exclude=()):
"""Extract singe traces from Manual_FRAP_ROI.ijm data.
Args:
df_list (list of pandas dataframes): List of parsed data from
the output file of the "Manual_FRAP_ROI.ijm"
ImageJ macro.
file_names (list of str): List of file names that the traces
are coming from
exclude (list of tuples): Traces to exclude. Each item in the list
is a tuple containing file name and track ID of the trace to
be excluded. Defaults to an empty tuple.
Returns:
result_df (list of pandas dataframes): One dataframe per trace.
Contains two columns, "ROI_intensity" and "Bkgnd_intensity".
corr_int (list of numpy arrays): list of one array per trace,
containing just the background-corrected intensity values.
trace_IDs (list of tuples): list of filenames corresponding
to each trace and trace ID. Same length as result_df.
"""
result_df = []
result_np = []
trace_IDs = []
for df, file_name in zip(df_list, file_names):
# Find the number of FRAP/background trace pairs in file
num_roi_pairs = int((len(df.columns) - 1) / 4)
# break up data into smaller data frames, one per trace
for i in range(num_roi_pairs):
# there are four columns per trace
curr_df = df.iloc[:, i*4+1:i*4+5]
bkgnd_corr_int = curr_df.iloc[:,1] - curr_df.iloc[:,3]
curr_np = bkgnd_corr_int.values - min(bkgnd_corr_int.values)
# save the trace if it's not excluded
trace_ID = (file_name, i)
if trace_ID not in exclude:
result_df.append(curr_df)
result_np.append(curr_np)
trace_IDs.append(trace_ID)
return result_df, result_np, trace_IDs
|
ca6e472e19fe8f9209c34ccd995bfd8984693261
| 470,853
|
def get_event_description(e):
"""Return the description field for the event."""
return e.get('description')
|
96fde5fe77964e364907e6321cbd8352ee2c6bc1
| 39,474
|
def _compose2(b, a):
"""Compose 2 functions"""
def composed(x):
"""Composed function"""
return b(a(x))
return composed
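# --- Added usage sketch (illustrative, not part of the original snippet) ---
double_after_inc = _compose2(lambda x: 2 * x, lambda x: x + 1)   # b(a(x)) = 2 * (x + 1)
assert double_after_inc(3) == 8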
|
87dcd18d9e4938eb59796fe0bb4edb5f1ced8c6e
| 435,890
|
def parse_valid_bb_file(valid_bb_path):
"""
Parse a file which contains valid basic block addresses.
The file is expected to contain a hexadecimal address per line.
"""
with open(valid_bb_path, "r") as f:
return set(int(l, 16) for l in f.readlines() if l.strip())
|
b04a2fcfd8ef3338e57927526073047b306d727a
| 545,694
|
import pkg_resources
async def version():
"""Get dependency versions
"""
versions = {}
for name in ('instagrapi', ):
item = pkg_resources.require(name)
if item:
versions[name] = item[0].version
return versions
|
b89ce1ae5472b7e6df3d3151b2781ed872c2c0f4
| 616,298
|
def populate_cells(worksheet, bc_cells=()):
    """
    Populate a worksheet with bc_cell object data.
    """
for item in bc_cells:
if item.cellref:
worksheet[item.cellref].value = item.value
else:
worksheet.cell(
row=item.row_num, column=item.col_num, value=item.value)
return worksheet
|
4b5529ebbb63ebb3d0904d218807166af4bbff38
| 659,883
|
def HasValue(entity, property_name):
"""Returns whether `entity` has a property value with the provided name."""
return property_name in entity._values # pylint: disable=protected-access
|
7397b0342a051f7c96eb4f606bb02ea2ff4d523c
| 622,539
|
def nl(x, gamma):
"""
Nonlinearity of the form
.. math::
f_{\\gamma}(x) = \\frac{1}{1-\\gamma x}
Args:
x (:class:`numpy.array`): signal
gamma (float): Nonlinearity parameter
Note:
The integral of ``gamma * nl(x, gamma)`` is
.. math::
\\int \\frac{\\gamma}{1 - \\gamma x} = -\\log (1 - \\gamma x)
"""
return 1.0/(1.0-gamma*x)
|
3aad00ebc0768e7008d4991f7fe159926364e49c
| 374,918
|
def fin_de_partie(cls_grille,couleur):
"""
Cette fonction permet de vérifier la potentielle fin de partie
match nul ou un gagnant (en appelant les méthodes coup_gagnant et est_pleine de la classe Grille).
Paramètre :
cls_grille : instance de la classe Grille
Cette fonction renvoie un entier.
2 si un coup gagnant est joué, 1 si la grille est pleine sinon 0.
"""
if cls_grille.coup_gagnant(couleur) == True:
return 2
if cls_grille.est_pleine() == True:
return 1
return 0
|
ca1ad4b7456b5a799667b6100a7f83c46bea626a
| 554,295
|
def calc_actual_vapor_pressure(es_tmin, es_tmax, hmin, hmax):
"""
Calculate actual vapor pressure from saturation vapor pressure
and relative humidity. Equation 17 of Allen (1998).
Parameters
----------
es_tmin : numpy ndarray
Saturation vapor pressure at minimum air temperature.
es_tmax : numpy ndarray
Saturation vapor pressure at maximum air temperature.
hmin : numpy ndarray
Minimum relative humidity, in percent
hmax : numpy ndarray
Maximum relative humidity, in percent
"""
return ((es_tmax * hmin / 100.) + (es_tmin * hmax / 100.)) / 2.
|
7f03569d53539fce542b334ae2cade72d6c6513c
| 104,515
|
import time
def proj2time(t):
"""Convert MSProject time to Python time"""
return time.mktime(time.strptime(t.Format('%m/%d/%y %H:%M:%S'), '%m/%d/%y %H:%M:%S'))
|
461b3a108c7787c35ef83afefea3e2be9acebb6b
| 330,396
|
def scale_qtys(x, n):
"""
Will create a list of qtys on both long and short
side that scale additively i.e.
[5, 4, 3, 2, 1, -1, -2, -3, -4, -5].
x: How much of your balance to use.
n: Number of orders.
"""
n_ = x / ((n + n ** 2) / 2)
long_qtys = [int(n_ * i) for i in reversed(range(1, n + 1))]
short_qtys = [-i for i in long_qtys]
return long_qtys + short_qtys[::-1]
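# --- Added usage sketch (matches the docstring example) ---
assert scale_qtys(15, 5) == [5, 4, 3, 2, 1, -1, -2, -3, -4, -5]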
|
26de0234f4250f18b20ec1d5fe08f07562efcfd3
| 198,813
|
def aicc(log_likelihood, df, n_samples):
"""Calculate AICc given log-liikelihoood, degrees of freedom, and number of samples."""
return -2 * log_likelihood + 2 * df * n_samples / (n_samples - df - 1)
|
a623f079f95b5bc0a2f1a33d30bd7ac39edb946c
| 420,521
|
import torch
def optimizer_creator(models, config):
"""Returns optimizer."""
return torch.optim.SGD(models.parameters(), lr=0.1)
|
af15cd568f77d6220dbc58b7f1ad7c635bb30002
| 233,999
|
def is_master_manifest(manifest_content):
"""
Parse the m3u8 manifest to see if this is the master manifest that points to other manifests.
:param manifest_content:
:return: True if it's the master manifest
"""
manifest_lines = manifest_content.split('\n')
for line in manifest_lines:
if ".m3u" in line:
return True
return False
|
e36f29dfb44cfd705233893866ddce36aad43929
| 182,531
|
def trail_vector_out(tvec_in, q_vec, rd_vec, ri_vec):
"""
Calculate an outgoing trail vector.
Parameters
----------
    tvec_in : ``numpy.ndarray``
        An incoming trail vector.
    q_vec : ``numpy.ndarray``
        A load vector.
    rd_vec : ``numpy.ndarray``
        The direct deviation edges resultant force vector.
    ri_vec : ``numpy.ndarray``
        The indirect deviation edges resultant force vector.
    Returns
    -------
    tvec_out : ``numpy.ndarray``
        An outgoing trail vector (the negated elementwise sum of the inputs).
"""
return -1.0 * (tvec_in + q_vec + rd_vec + ri_vec)
|
ac6c82c83fba84f036aa204156ebf09d8fb7b408
| 536,358
|
import inspect
import re
def get_functions_from_module(mod, pattern=None):
"""Get all the function in a module.
Parameters
----------
mod : module
An instance of a module.
pattern : str, optional
Only get functions whose name match a regular expression.
Returns
-------
dict
Dictionary of functions contained in the module. Keys are the
function names, values are the functions themselves.
"""
funcs = {}
for name, func in inspect.getmembers(mod, inspect.isroutine):
if pattern is None or re.match(pattern, name):
funcs[name] = func
return funcs
|
ac143f3dd22f1564736c0ef1bd90a3f87f1de31d
| 564,683
|
def rowMatch(df, col, rex, addedSpec="(?i)"):
    """
    Select rows in a DataFrame using a regular expression as mask. The
    difficulty is that we may need to work with NA/NaN which are not
    allowed in masks
    """
    # inline flags such as (?i) must appear at the start of the pattern
    msk = df[col].str.contains(addedSpec + rex)
    try:
        ret = df[msk]
    except Exception:
        # NA entries are not allowed in a boolean mask; treat them as non-matches
        msk[msk.isna()] = False
        ret = df[msk]
    return ret
|
7c964c42c8af36b0659603a753afb2b0530e99ec
| 130,881
|
def has_key(dictionary, key) -> bool:
"""Dictionary contain the key."""
return key in dictionary.keys()
|
a71063972d88d533a6aa16349ef0522a615fa443
| 621,506
|
def find_namespaced_pods(namespace, job_name, api_client):
"""find pod by namespace and job name
Args:
namespace (str) -
job_name (str) -
api_client (CoreV1Api) -
Returns:
        list - Names of the pods whose job_instance_id label matches job_name
"""
api_response = api_client.list_namespaced_pod(namespace)
ret_val = []
for i in api_response.items:
if i.metadata.labels.get("job_instance_id", "") == job_name:
ret_val.append(i.metadata.name)
return ret_val
|
54c090d243aafb9b0c1c75477ca2b1cabe38a3f2
| 601,061
|
def get_cache_key(key):
"""
Generates a prefixed cache-key for ultimatethumb.
"""
return 'ultimatethumb:{0}'.format(key)
|
43d82dced6371742a58d18b1cbc1f3a9a48aae5c
| 93,706
|
from typing import Dict
from typing import Optional
from typing import Any
def __group_member_contribution(res_act: Dict[str, Dict[str, int]], act_res: Dict[str, Dict[str, int]],
groups: Dict[str, Dict[str, int]], parameters: Optional[Dict[Any, str]] = None) -> Dict[
str, Dict[str, Dict[str, int]]]:
"""
Calculates the member contribution metric
GROUP MEMBER CONTRIBUTION of a member of a resource group with respect to the given type of work specifies how
much of this type of work by the group was performed by the member. It can be used to measure how the workload
of the entire group devoted to a certain type of work is distributed over the group members.
The approach implemented is the one described in:
Yang, Jing, et al. "OrgMining 2.0: A Novel Framework for Organizational Model Mining from Event Logs."
arXiv preprint arXiv:2011.12445 (2020).
Parameters
----------------
res_act
Dictionary resources-activities-occurrences
act_res
Dictionary activities-resources-occurrences
groups
Dictionary groups-resources-occurrences
parameters
Parameters of the algorithm
Returns
-----------------
metric
Metric value
"""
ret = {}
for g in groups:
ret[g] = {}
for r in groups[g]:
ret[g][r] = res_act[r]
return ret
|
ff969ce9d91adad9fc16bfb8a49c18a70481f9e4
| 285,065
|
def _mklist(values):
"""Convert tuple to list, and anything else to a list with just that thing.
This is a helper to fix an inconsistency with the group keys in a
pandas.groupby object. When grouping by multiple columns, the keys are
tuples of values. When grouping by a single column, even if specified as a
single-element list, the keys are single values. We always want a list,
which is what this function accomplishes."""
if isinstance(values, tuple):
return list(values)
else:
return [values]
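# --- Added usage sketch (illustrative, not part of the original snippet) ---
assert _mklist(("a", "b")) == ["a", "b"]   # tuple key from a multi-column groupby
assert _mklist("a") == ["a"]               # scalar key from a single-column groupby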
|
d713ee076d2de182bebb30ef94ebab8c8ba71a40
| 204,907
|
def parse_stop_num(stop_name, stop_id):
"""
The 2021 Dublin Bus data uses Stop IDs instead of the actual stop
numbers presented to the public. This method tries to extract the stop
number from the stop name and if this fails it tries to extract it from
the ID instead.
Args
---
stop_name: str
The stop name as a string
stop_id: str
The stop ID as a string
Returns
---
An int for the extracted stop number
"""
try:
stop_num = int(stop_name.split(" ")[-1])
except ValueError:
# stop number isn't in the name
# try parse out of ID instead
stop_num = int(stop_id.split("DB")[-1])
return stop_num
|
f62290c2e1164b4df86a8c5c922b15ae321da711
| 557,786
|
def input_tensor(interpreter):
"""Returns the input tensor view as numpy array."""
tensor_index = interpreter.get_input_details()[0]['index']
return interpreter.tensor(tensor_index)()[0]
|
101a4ed2f9815c768ac481313602bd8608afc2fa
| 538,186
|
def detailed_knowledge(maze, population):
"""
Analyze the population of classifiers to determine what classifiers cover
all possible condition-action tuples in the maze.
For every condition-action tuple (i.e., every situation together with
all allowed actions), print the classifiers that match it.
:param maze: The maze to analyze
:param population: The classifier population to analyze
:return: String describing the population's knowledge as in the original
C++ implementation
"""
result = ""
transitions = maze.env.get_all_possible_transitions()
# For all possible destinations from each path cell
for start, action, end in transitions:
p0 = maze.env.maze.perception(*start)
p1 = maze.env.maze.perception(*end)
result += "\n{}-{}-\n{}".format("".join(p0), action, "".join(p1))
result += "\n"
result += str(population.form_match_set(p0).form_action_set(action))
result += "\n"
return result
|
a5955b5adcb524bd35ed3e4b8dba0d30f4091f68
| 261,220
|
from typing import Tuple
def _validate_added_file(file: str) -> Tuple[bool, str]:
"""
Checks whether an added file detected by container-diff is located under /opt/pht_train.
:param file: line of output generated by container diff containing info on the added file
:return: whether the file is correctly located or not
"""
path = file.split(" ")[0]
valid = False
print(f"Validate called with file: {file}")
if not file:
return True, path
if len(path) > 1:
path_dir = path.split("/")[1:]
if path_dir[0] == "opt":
if path_dir[1] == "pht_results":
valid = True
if path_dir[1] == "pht_train":
valid = True
if path_dir[1] == "train_config.json":
valid = True
if path_dir[1] == "user_sym_key.key":
valid = True
if not valid:
print(f"Invalid file detected: {path}")
return valid, path
|
3574b76dea0dba10579d375aed4d1e24c1f8c945
| 499,431
|
def serialize_numpy_array(nda):
""" Returns dict for numpy array data and metadata.
nda.dtype is like (str) 'uint32'
"""
return {'type' : 'nd',
'shape': str(nda.shape),
'size' : str(nda.size),
'dtype': str(nda.dtype),
'data' : nda.tobytes() # (bytes)
}
|
300fabc9321666320b8de8993e2394d566412044
| 483,825
|
def program_entry(program):
"""
Template tag {% program_entry program %} is used to display a single
program.
Arguments
---------
program: Program object
Returns
-------
A context which maps the program object to program.
"""
return {'program': program}
|
e43075dfbe3bde21e2b5f90d317ff09f4d7cdc76
| 622,585
|
def _convert_to_boolean(value):
"""
Converts string input to boolean assuming "Yes/No" inputs
Parameters:
value: (string) Input value
"""
return value == "Yes"
|
aecb151e057a2c26a5d5ebbd11cfda686f9c1733
| 178,760
|
def _fixed_threshold(errors, k=4):
"""Calculate the threshold.
The fixed threshold is defined as k standard deviations away from the mean.
    Args:
        errors (ndarray):
            Array of errors.
        k (int):
            Number of standard deviations away from the mean. Defaults to 4.
Returns:
float:
Calculated threshold value.
"""
mean = errors.mean()
std = errors.std()
return mean + k * std
|
386386699e2429350d5b8af5bc51d01f8778460e
| 340,825
|
def _adj_column_names(ts):
"""
ta-lib expects columns to be lower case; to be consistent,
change date index
"""
ts.columns = [col.lower().replace(' ','_') for col in ts.columns]
ts.index.names = ['date']
return ts
|
a3c27b48884b35af3171ba9f5699238c57d9d890
| 205,342
|