content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def in_rectangle(pos, rect):
    """Return True when the 2-D point `pos` lies in the axis-aligned
    rectangle `rect`, given as [bottom_left, top_right] corner arrays.
    Bounds are inclusive on both sides.
    """
    lo, hi = rect
    return lo[0] <= pos[0] <= hi[0] and lo[1] <= pos[1] <= hi[1]
|
fa20f325403caeaa817e614df45db930a5099e43
| 58,564
|
def list_check_value(value, lista):
    """Check whether index `value` is valid for `lista`.

    Args:
        value (int): Index into the list (negative indices allowed, as
            with normal Python indexing).
        lista (list): The list to probe.

    Returns:
        bool: True when `lista[int(value)]` exists, False otherwise.
    """
    try:
        # Indexing alone suffices: it raises IndexError when out of range.
        # The old `lista[i] in lista` membership re-check was a redundant
        # O(n) scan, and the function fell through returning None instead
        # of a bool when the (always-true) condition was skipped.
        lista[int(value)]
        return True
    except IndexError:
        return False
|
9d9c75e2f524c5d542e04f2aad70da1db94c4cf5
| 58,566
|
def _get_qpoint_pos(pos):
"""Return the coordinates of a QPointF object."""
return pos.x(), pos.y()
|
5f7c850599bacc335dbcba73cb23922abccc0107
| 58,567
|
from typing import Union
import inspect
def get_model_name(cls: Union[type, object] = None) -> str:
    """Return the fully-qualified "module.ClassName" for a class or instance."""
    target = cls if inspect.isclass(cls) else cls.__class__
    return f"{target.__module__}.{target.__name__}"
|
89563af6ae82bb06c5a2cad8456b89c0d0639c43
| 58,570
|
def _split_chunks(l):
"""
Generates a list of lists of neighbouring ints. `l` must not be empty.
>>> _split_chunks([1,2,3,5,6,7,9])
[[1,2,3],[5,6,7],[9]]
:type l: list[int]
:rtype list[list[int]]
"""
ret = [[l[0]]]
for c in l[1:]:
if ret[-1][-1] == c - 1:
ret[-1].append(c)
else:
ret.append([c])
return ret
|
81f2adc2e6026edec5a6f520f506a1617a81af02
| 58,571
|
def sign(val):
    """Return 1 for positive numbers, -1 for negative numbers, 0 for 0."""
    if not val:
        return 0
    return -1 if val < 0 else 1
|
04c96849f99c76165c0f9070c000ca7d8a65dad1
| 58,576
|
import torch
def cwl2_lossf(logits, target, kappa=0):
    """
    Compute the non-norm portion of the C&W L_2 attack loss.

    Implements ``max(max_{i != t} Z_i - Z_t, -kappa)`` from page 9 of
    "Towards Evaluating the Robustness of Neural Networks".

    Args:
        logits (torch.Tensor): 2-D tensor of output logits (no softmax
            applied to the model).
        target (int): the target class whose score should be increased.
        kappa (float): the confidence parameter kappa (paper, pg. 9).

    Returns:
        torch.Tensor: per-row clamped loss values, shape (batch,).
    """
    # Bug fix: the old all-ones mask selected *every* logit, so the
    # "max non-target" could be the target itself, and a single global
    # max was taken across the whole batch instead of per row.
    mask = torch.ones_like(logits, dtype=torch.bool)
    mask[:, target] = False
    target_val = logits[:, target]
    non_targets = logits[mask].view(logits.shape[0], -1)
    max_non_target = non_targets.max(dim=1).values
    return torch.clamp(max_non_target - target_val, min=-kappa)
|
7ec7f29388374760ed24df4d84bd7330f13378ce
| 58,577
|
import inspect
def get_class_attributes(cls) -> list[tuple]:
    """Return (name, value) pairs of all non-dunder, non-method attributes."""
    members = inspect.getmembers(cls, lambda member: not inspect.isroutine(member))

    def _is_dunder(name: str) -> bool:
        return name.startswith("__") and name.endswith("__")

    return [pair for pair in members if not _is_dunder(pair[0])]
|
49d7760272d77079ed36848d7f3ccf43994a6e8d
| 58,579
|
def most_frequent_frame(vector):
    """Return the most common `frame` among the unit vectors in `vector.dict`.

    Ties break toward the frame that compares greatest.
    """
    counts = {}
    for unit in vector.dict:
        counts[unit.frame] = counts.get(unit.frame, 0) + 1
    return max(counts, key=lambda frame: (counts[frame], frame))
|
006f81dbe23df6976759d35a3853752b0fecb4ab
| 58,580
|
def filter_visibility(entries, visibilities):
    """
    Keep only entries whose applied visibility is in `visibilities`.

    Args:
        entries: An iterable of Entry nodes
        visibilities: An iterable of visibilities to filter against

    Yields:
        An iterable of the matching Entry nodes
    """
    return (node for node in entries if node.applied_visibility in visibilities)
|
b4c63ca38c61b33db2f8487c34962a9a0b6000cf
| 58,582
|
def bubble_sort(nums: list[float]) -> list[float]:
    """Sort a list in place using bubble sort and return it.

    Time complexity: O(n^2) best, worst, and average.

    Args:
        nums: A list of numbers (mutated in place).

    Returns:
        The same list, sorted ascending.
    """
    size = len(nums)
    for offset in range(size - 1):
        for i in range(size - 1 - offset):
            if nums[i] > nums[i + 1]:
                nums[i], nums[i + 1] = nums[i + 1], nums[i]
    return nums
|
76eaee36c5f5f57f058c9b57d96e77ccf35f32b5
| 58,583
|
import copy
def merge_dicts(a, b):
    """
    Recursively merge ``b`` into a deep copy of ``a`` and return the result.

    Values from ``b`` win on conflicts at *every* nesting level; neither
    input is modified.

    :param dict a: base mapping (required).
    :param dict b: overriding mapping (required).
    """
    result = copy.deepcopy(a)
    for key, value in b.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            # Bug fix: the old code called merge_dicts(value, a.get(key, {}))
            # with the arguments swapped, so nested keys from `a` overrode
            # those from `b` -- the opposite of the top-level rule. It also
            # crashed when a[key] existed but was not a dict.
            result[key] = merge_dicts(result[key], value)
        else:
            # Deep-copy so the result never aliases b's mutable values.
            result[key] = copy.deepcopy(value)
    return result
|
3b93d60c63a65bff24ea3646311e3008c9ffcd2d
| 58,588
|
def identity(obj):
    """The identity function: return `obj` unmodified."""
    return obj
|
17ac77ff54951876961420786a24834de957d1b3
| 58,593
|
def get_tag(tag):
    """
    Get the rule tag with any trailing revision number stripped, as needed
    by the search function.

    :param tag: tag id, typically of the form ``"<tag>.<revision>"``
    :return: the tag without its revision, or None when `tag` is falsy
    :rtype: str
    """
    if not tag:
        return None
    # Bug fix: the old two-way unpack of tag.split('.') raised ValueError
    # (and silently returned None) for tags with no dot or with more than
    # one dot. partition keeps everything before the first dot instead.
    base, _sep, _version = tag.partition('.')
    return base
|
e3461260456b92a35f546d6e3c293ea60fb1805b
| 58,598
|
import itertools
import copy
def make_opt_list(opts, group):
    """Build a one-element list of (group, flattened_opts) with deep-copied
    option objects.

    :param opts: option lists associated with a group
    :type opts: list
    :param group: name of an option group
    :type group: str
    :return: a list of (group_name, opts) tuples
    :rtype: list
    """
    flattened = list(itertools.chain.from_iterable(opts))
    return [(group, copy.deepcopy(flattened))]
|
9d8cfdaaadb73a8114f9a2b9d4373a2dbf832889
| 58,602
|
def default_Name(x, world):
    """The default name of a thing is the string form of its id, X.

    `world` is accepted for interface parity but not used here.
    """
    name = str(x)
    return name
|
504b252b78461713023d57d22112d973dde5b43a
| 58,604
|
def _remove_nulls(output):
"""Remove nulls from dict. Temporary until we fix this in cosmos
:param output: dict with possible null values
:type output: dict
:returns: dict without null
:rtype: dict
"""
return {k: v for k, v in output.items() if v}
|
1165256adbb22a33f872ac81ce440936c44775ce
| 58,609
|
def clean_config(raw_str: str):
    """
    Parse a comma-separated config string into stripped name tokens.

    :param raw_str: String to parse from config
    :return: a list of all names that were separated by commas
    """
    names = raw_str.replace("\n", "").split(",")
    return [name.strip() for name in names]
|
2dcf38bf2da89c38351c54656b87ea9ab46b8911
| 58,613
|
def calc_check_digit(number):
    """Calculate the check digit. The number passed should not include the
    check digit itself."""
    weights = (3, 7, 13, 17, 19, 23, 29, 37, 41, 43, 47, 53, 59, 67, 71)
    total = 0
    for weight, digit in zip(weights, reversed(number)):
        total += weight * int(digit)
    return '01987654321'[total % 11]
|
1d81230bdaec0c9d9ba382e9c6596c79c7ae7579
| 58,616
|
def field_name_modifier(_, __, event_dict):
    """Rename the log-level field 'level' to 'severity' to match Cloud
    Logging's data model.

    Make sure to call this processor after structlog.stdlib.add_log_level.
    https://cloud.google.com/logging/docs/reference/v2/rpc/google.logging.v2?hl=en#google.logging.v2.LogEntry
    """
    event_dict["severity"] = event_dict.pop("level")
    return event_dict
|
90c9fa94f4f6ea9aa39d4887720fd70b0366d8f9
| 58,619
|
def map1to8(v):
    """
    Map `v` onto the range 1-8 (or pass None through): non-positive values
    get 8 added, so 0 becomes 8 (straight ahead). This is needed because
    the back-calculation to degree7 can yield non-positive values, yet
    calculate_average_instruction must use 1-8 to weight forward
    instructions correctly.

    :param v: step value or None
    :return: v, in the range 1-8, or None
    """
    if v is None:
        return None
    if v > 0:
        return v
    return v + 8
|
c7470cd7b27708b0a4f4a73216dc3cacfa50f036
| 58,630
|
def primary_secondary(url):
    """Return just the `secondary.primary` domain part as a single string.

    Inputs with fewer than two dot-separated labels come back unchanged.
    """
    # Bug fix: the old guard tested len(url) -- the *character* count --
    # rather than the number of dot-separated labels. Joining the last two
    # labels is already a no-op for shorter inputs, so no guard is needed.
    labels = url.split('.')
    return '.'.join(labels[-2:])
|
82b0f5e7ff2f07e4dca0d4bf8f9a2430b9c62047
| 58,631
|
def multiplicative_process(n, rate, dt):
    """
    Simulate a constant-growth-rate multiplicative process.

    Parameters
    ----------
    n : int
        Number of time steps
    rate : float
        Constant growth rate (per-step multiplier)
    dt : float
        Delta time between time steps

    Returns
    -------
    tuple(list, list)
        Time points and wealth values, each of length n + 1.
    """
    time, wealth = [0], [1]
    for _ in range(n):
        time.append(time[-1] + dt)
        wealth.append(wealth[-1] * rate)
    return time, wealth
|
8de2c1e8d858839ab1c076a644984a86ccd42b99
| 58,632
|
def flatten_dict(nested_dict, lead_key=None, unwind_arrays=True):
    """
    Recursively flatten a nested dictionary into dot-notation keys,
    e.g. {"a": {"b": 1, "c": 2}} becomes {"a.b": 1, "a.c": 2}.

    Args:
        nested_dict ({}): nested dictionary to flatten
        lead_key (str): string prefixed to all keys, used primarily by the
            recursion itself
        unwind_arrays (bool): whether to flatten lists/tuples with
            numerically indexed dot notation, defaults to True

    Returns:
        non-nested dictionary
    """
    flat = {}
    for key, value in nested_dict.items():
        full_key = "{}.{}".format(lead_key, key) if lead_key else key
        if isinstance(value, dict):
            flat.update(flatten_dict(value, full_key, unwind_arrays))
        elif unwind_arrays and isinstance(value, (list, tuple)):
            indexed = dict(enumerate(value))
            flat.update(flatten_dict(indexed, full_key, unwind_arrays))
        else:
            flat[full_key] = value
    return flat
|
8747016db002cac92dfd9db8703803f27ac60cc8
| 58,635
|
def _raw_ptr_ops(in_params):
"""Generate pointers to an array to use in place of CArray indexing.
The ptr will have a name matching the input variable, but will have
_data appended to the name.
As a concrete example, `_raw_ptr_ops('raw X x, raw W w')` would return:
['X x_data = (X*)&(x[0]);', 'W w_data = (W*)&(w[0]);']
"""
ops = []
for in_p in in_params.split(","):
in_p = in_p.strip()
if in_p.startswith("raw "):
_, typ, name = in_p.split(" ")
ops.append(f"{typ}* {name}_data = ({typ}*)&({name}[0]);")
return ops
|
df5d676eb224776cdc597630a673e5f62a0de430
| 58,637
|
def loadWeights(model, outputFolder):
    """
    Load saved weights into `model`.

    Note: mainly exists so the weights filename is specified in a single
    place for all cases.

    Parameters
    ----------
    model : keras model
        Model to load weights into.
    outputFolder : string
        Path where data was written to.

    Returns
    -------
    model : keras model
        Model with loaded weights.
    """
    import os.path

    # Bug fix: bare string concatenation required `outputFolder` to end in
    # a path separator; os.path.join works with or without one.
    model.load_weights(os.path.join(outputFolder, 'modelWeights.h5'))
    return model
|
29981bcb77ea5dc76127e8cb331789965a0eb71f
| 58,643
|
def chunker(blob):
    """
    Chunk a blob of text into an iterable of its non-blank lines.
    """
    chunks = []
    for line in blob.split('\n'):
        if line.strip():
            chunks.append(line)
    return chunks
|
d926f2d5c13d04900cfc0ed68c7e9e95d316b5d5
| 58,644
|
def get_or_create_db(server, name):
    """
    Return the existing db instance named `name`, creating it on `server`
    first when it does not exist yet.
    """
    exists = name in server
    if not exists:
        server.create(name)
    return server[name]
|
7631da98262e36ed8cab327a275dbf78b9d08b0d
| 58,647
|
def weight_for_program_to_even(programs, needed, current, program):
    """Calculate the weight `program` would need so all programs balance."""
    correction = needed - current
    return programs[program]['weight'] + correction
|
18c95aa6bff04508888cec956c4008411d07cc8f
| 58,648
|
def path_to_image_name(path, class_name):
    """
    Extract the bare image name from paths of the following structure:
    E:\\ILSVRC2017\\images\\original\\n01531178\\n01531178_2421.JPEG
    returning e.g. n01531178_2421.
    """
    after_class = path.split(class_name, 1)[1]
    without_ext = after_class.split('.', 1)[0]
    return without_ext.split('\\', 1)[1]
|
b5e306b7d2a2c516b5f304375f793fd966843e5c
| 58,651
|
def prompt(self):
    """Return the interpreter prompt string."""
    return ">>>"
|
59357c9346936be04b0da704363a5ed20bad8557
| 58,652
|
def change_number_formats(tables):
    """
    Render every column with thousands separators.

    Args:
        tables: a pandas table of numbers.

    Returns:
        The same table with each value formatted as a separated string.
    """
    for name in tables.columns:
        tables[name] = tables[name].map(lambda value: f'{value:,}')
    return tables
|
aa21aae81a0c3142dc0147fc734e226333ab3d5c
| 58,653
|
def _color_code(row):
"""Returns the DMR Color Code from the WWARA record"""
color_code = row.get("DMR_COLOR_CODE").lstrip("CC")
return color_code or "0"
|
850c95357ffa75308d6b34e54d21979f980f4d7c
| 58,655
|
def compute_delta(orig_pos, new_pos, strand):
    """Return the signed distance between two positions.

    The sign indicates whether the new point is downstream (positive) or
    upstream (negative) of the original with respect to `strand`.

    Raises:
        ValueError: when strand is neither "+" nor "-".
    """
    if strand not in ("+", "-"):
        raise ValueError("Strand must be either + or -")
    delta = new_pos - orig_pos
    return delta if strand == "+" else -delta
|
ae4659a5f3240efff12fbda3087f857056bf3801
| 58,656
|
def split_number_and_unit(s):
    """
    Split a string into (number_prefix, suffix) parts.

    The split happens just after the *last* digit in the string, so the
    prefix may include non-digit characters as long as at least one digit
    follows them.
    """
    if not s:
        return (s, '')
    cut = len(s)
    while cut > 0 and not s[cut - 1].isdigit():
        cut -= 1
    return (s[:cut], s[cut:])
|
148315baa03e51d8b7962d6c227fafc2e0940d3d
| 58,657
|
def _get_fzn_array(fzo):
""" Get the list of objects of an FZN object
Args:
fzo: FZN object
Returns:
Array of FZN objects
"""
return fzo if isinstance(fzo, list) else fzo.value
|
6680d4751defadfe1d1703f119831d05613a8b7d
| 58,658
|
def pipeline_code_wrapper(pipeline_code):
    """Generate code that executes the sklearn pipeline.

    Parameters
    ----------
    pipeline_code: str
        Code that defines the final sklearn pipeline

    Returns
    -------
    Source code for the sklearn pipeline plus calls to fit and predict
    """
    return (
        f"exported_pipeline = {pipeline_code}\n"
        "exported_pipeline.fit(training_features, training_target)\n"
        "results = exported_pipeline.predict(testing_features)\n"
    )
|
aeed55ee2afb1fbc3d5f466d807f0deb02be2134
| 58,660
|
from datetime import datetime
import pytz
def localized_date_to_utc(date):
    """
    Return a timezone-unaware UTC time from a timezone-aware localized
    datetime object. Non-datetime inputs are returned unchanged.
    """
    if not isinstance(date, datetime):
        return date
    # Improvement: stdlib datetime.timezone.utc replaces the third-party
    # pytz dependency; astimezone yields the identical instant for any
    # aware datetime.
    from datetime import timezone
    return date.astimezone(timezone.utc).replace(tzinfo=None)
|
63ccd11c27645d56b479b01245703038d58d215e
| 58,662
|
def rank_authors_by_publications(author_pub, weighted=True, equal_contribution=True):
    """
    :param author_pub: the output of publications_per_author()
    :param weighted: if True, rank by weighted publication scores
        (pub[2] = weight); otherwise by publication counts.
    :param equal_contribution: if True, each paper's credit is divided
        evenly among its authors (pub[3] = author count).
    :return: list of (author, score) pairs in descending score order.
    """
    def score(pubs):
        if equal_contribution and weighted:
            return sum(p[2] / p[3] for p in pubs)
        if equal_contribution:
            return sum(1 / p[3] for p in pubs)
        if weighted:
            return sum(p[2] for p in pubs)
        return len(pubs)

    ranked = [(author, score(pubs)) for author, pubs in author_pub.items()]
    ranked.sort(key=lambda pair: pair[1], reverse=True)
    return ranked
|
49734afe9eec79fafa9db030f2bcdc7e2b778a3c
| 58,663
|
def tabToCatalogList(tab):
    """Converts an :obj:`astropy.table.Table` object into a list of dicts.

    Args:
        tab (:obj:`astropy.table.Table`): Catalog as an astropy Table.

    Returns:
        A list of dictionaries, each representing one object (row) of the
        catalog keyed by the table's column names.
    """
    columns = list(tab.keys())
    return [{name: row[name] for name in columns} for row in tab]
|
894cbb45120df1f782d84b3b7ebdd4c046561a50
| 58,664
|
def min_max_rescale(data, data_min, data_max):
    """Linearly rescale `data` so data_min maps to 0 and data_max maps to 1."""
    span = data_max - data_min
    return (data - data_min) / span
|
e1b2ce9fed92d79c50b3a648aac3e2d12d8d2fb7
| 58,666
|
def branchwise(tree):
    """Return sets of nodes and edges from a dict-based scenario tree.

    Parameters
    ----------
    tree: dict
        Scenario tree as a dictionary with scenario-tuple keys and
        probability values.

    Returns
    -------
    nodes: set
        Nodes of the tree, each represented by a scenario tuple.
    edges: set
        Edges of type ``(nodes[i], nodes[j])`` of the tree.
    """
    edges = set()
    for scenario in tree:
        for depth in range(1, len(scenario)):
            edges.add((scenario[:depth], scenario[:depth + 1]))
    nodes = set()
    for parent, child in edges:
        nodes.add(parent)
        nodes.add(child)
    return nodes, edges
|
37fe25a94ddb499bffed1be2bed1eab77a8a8d30
| 58,668
|
def handler_from_scope_params(permission, extra_params):
    """
    Construct the permission's handler using the scope-specific extra
    arguments (received from the API), keeping only the keys the handler
    factory __call__() actually accepts (permission.CALL_ARGS).
    """
    accepted = set(extra_params) & set(permission.CALL_ARGS)
    kwargs = {name: extra_params[name] for name in accepted}
    return permission(**kwargs)
|
e702ce6257e46eae4567c63550e9ab7ad4718c96
| 58,671
|
def save(level, save_type):
    """
    Calculate a character's base save from level.

    level: character's level
    save_type: "good" or "poor"
    returns a number representing the current base save
    """
    if save_type == "good":
        base = round(level / 2) + 2
    else:
        base = round(level / 3)
    return int(base)
|
fe00890585bd0036ec8cb05f209a9950afcbbaeb
| 58,673
|
def is_literaldata(val):
    """
    Return True when the provided value is a string (includes unicode).
    """
    return isinstance(val, str)
|
ddfdfc5e6789130d2a1ec1cc719646b4c368abb4
| 58,675
|
def relevant_files(files, root):
    """Split `files` into expected and unexpected groups.

    Expected files live under |root| and are returned relative to it;
    files not based inside |root| are unexpected. Both results are sorted
    and de-duplicated.
    """
    expected = set()
    unexpected = set()
    for path in files:
        if not path.startswith(root):
            unexpected.add(path)
            continue
        relative = path[len(root):]
        assert relative
        expected.add(relative)
    return sorted(expected), sorted(unexpected)
|
4fc8c93214a02899f0fe48463dd9b63c1c59653e
| 58,676
|
def sanitize_class_name(class_name):
    """Lower-case the first letter to match the deserializer's class name.

    Empty strings are returned unchanged (the old code raised IndexError
    on them).
    """
    if not class_name:
        return class_name
    return class_name[0].lower() + class_name[1:]
|
cfda4e702c3ee9e103108ebf24d37122a802680f
| 58,678
|
def _missing_count(album):
"""Return number of missing items in `album`.
"""
return (album.albumtotal or 0) - len(album.items())
|
8d7969b9ead10d2baf3d19a1aa53dfb4a9d1d6fd
| 58,686
|
def c_rot32(i, n):
    """
    Rotate *i* left by *n* bits within the uint32 value range (negative
    *n* rotates right).
    >>> c_rot32(0xF0000000, 4)
    15
    >>> c_rot32(0xF0, -4)
    15
    """
    n %= 32
    return ((i << n) & 0xFFFFFFFF) | (i >> (32 - n))
|
022a484517659a05e629075975c1bd2934914ea8
| 58,687
|
def plot(self, view_id):
    """Get a particular plot by providing its view id.

    Arguments:
        view_id(int): view id

    Returns:
        :class:`rips.generated.generated_classes.Plot`, or None when no
        plot has that id.
    """
    for candidate in self.plots():
        if candidate.id == view_id:
            return candidate
    return None
|
f35f9f53c152953778893d3aa68b0b51232f23e4
| 58,696
|
def get_child(parent, selector, index=0):
    """Get a child element of `parent` matching the given CSS selector.

    `index` picks the zero-based match (negative indices count from the
    end); None is returned when no such match exists.
    """
    matches = parent.cssselect(selector)
    if -len(matches) <= index < len(matches):
        return matches[index]
    return None
|
cde3aca2c3698c09806a92f457acf5407eeddc78
| 58,697
|
def wrapTheta(theta):
    """ Wrap an angle from the real line onto (-180, 180].
    Parameters
    ----------
    theta : float
        Angle, in degrees.
    Returns
    -------
    theta_wrapped : float
        The input, projected into (-180, 180].
    """
    # Bug fix: the old `(theta + 90) % 360 - 90` wrapped onto (-90, 270],
    # contradicting the documented (-180, 180] interval. (180 - theta) % 360
    # lies in [0, 360), so the expression below lands in (-180, 180].
    return -((180.0 - theta) % 360.0 - 180.0)
|
bc4fa0c44fc53fdea8bfb61b53d674132299b254
| 58,698
|
def is_leap(year: int) -> bool:
    """Return True if `year` is a leap year.

    Leap years are divisible by 4 and not by 100, unless they are also
    divisible by 400.

    :param:
        year: Represents a year
    :returns:
        bool: True if year is a leap year
    """
    if year % 400 == 0:
        return True
    return year % 4 == 0 and year % 100 != 0
|
2a414ebae6a80139c1614853cbd25c1520149a88
| 58,699
|
import re
def get_emoji_in_message(message):
    """
    Scan a message for any emoji or reactji.

    Emoji come in two ways: reactji found in the "reactions" group, and
    in-text emoji found by searching the text for :something: groups.

    Returns an array of emoji names, no order or uniqueness guarantees.

    message: a single message dict.
    """
    emoji_names = []
    # Bug fix: .get('reactions', None) made this loop raise TypeError on
    # any message without reactions (or with reactions set to None);
    # fall back to an empty list instead.
    for reaction in message.get('reactions') or []:
        emoji_names.append(reaction['name'])
    emoji_names.extend(re.findall(r':([^:\s]+):', message['text']))
    return emoji_names
|
8fc1530e3de5596873e8b15678fe795c1e58743c
| 58,701
|
import itertools
def gen_architectures(lengths, values, decreasing_only=True):
    """
    Generate a list of architectures given layer counts and the possible
    neuron counts per layer.

    :param lengths: Numbers of layers (list of int).
    :param values: Neurons per layer (list of int).
    :param decreasing_only: If True, each layer positioned after another
        has at most the same number of neurons. If False, every
        combination is returned.
    :return: A list of architectures, to fill in the "architecture"
        argument of tune_params.
    """
    ordered_values = sorted(values, reverse=True)
    architectures = []
    for size in lengths:
        if decreasing_only:
            combos = itertools.combinations_with_replacement(ordered_values, size)
        else:
            combos = itertools.product(ordered_values, repeat=size)
        architectures.extend(sorted(combos))
    return architectures
|
b7740fdba5dea2ee32a3947409899c014d4cad31
| 58,702
|
def is_valid_model_filters(model, filters):
    """Return True if every filter key exists as an attribute on the model.

    :param model: a Cinder model
    :param filters: dictionary of filters
    """
    return all(hasattr(model, key) for key in filters)
|
1bcedf2b2d050f82b575416d1b653a4b7b509984
| 58,711
|
def converts(*args):
    """ Decorator for the ModelConverter marking which method converts
    which MongoEngine field(s); stores them on the function as
    `_converter_for`.
    """
    def mark(func):
        func._converter_for = frozenset(args)
        return func
    return mark
|
cd999d76c896630d9afa87244a195029dec75281
| 58,715
|
import json
def change_user_details(client, userid, **kwargs):
    """
    Change the details of an existing user via the auth API.

    :param client: the client to make the request
    :param userid: the user whose details need changing
    :param kwargs: the details that need to be changed
    :return: the client's response
    """
    url = f'/api/auth/user/{userid}'
    payload = json.dumps(kwargs)
    return client.patch(url, data=payload, content_type='application/json')
|
06b8c7953b0d96702ec78516232a78e67f40b7e2
| 58,721
|
def generer_grille_pions(n, char):
    """
    Generate an n-by-n grid filled with the character `char`.

    :param n: grid dimension
    :type n: int
    :param char: character to place in every cell
    :type char: str
    :return: a grid of dimension n (list of n rows of n chars)
    :rtype: list
    """
    return [[char] * n for _ in range(n)]
|
05ad687989b8dafe0496d0b29e2b432712d68a86
| 58,722
|
from typing import Dict
def add_resources(dict1: Dict[str, float],
                  dict2: Dict[str, float]) -> Dict[str, float]:
    """Add the values in two dictionaries key-wise.

    Returns:
        dict: A new dictionary (inputs remain unmodified).
    """
    combined = dict(dict1)
    for key, amount in dict2.items():
        combined[key] = combined.get(key, 0) + amount
    return combined
|
501a30b9e71049d9418dd6daf7bd98d96ea8fe0f
| 58,723
|
def check_num_of_letters(num_of_letters: int) -> int:
    """Accepts `num_of_letters` to check if it is less than 3.

    If `num_of_letters` is greater than or equals to 3, return it as-is.
    Otherwise, print a notice and return the default value of 6.

    Args:
        num_of_letters (int)

    Returns:
        num_of_letters (int)
    """
    if num_of_letters < 3:
        # Bug fix: the old message used a backslash line continuation
        # *inside* the string literal, which injected a long run of stray
        # indentation spaces into the printed text.
        print("Number of letters defined by user is less than 3. "
              "Using default value of 6.")
        return 6
    return num_of_letters
|
ba829b3fc380ed7d4ec14c717fbb589597fc6e58
| 58,724
|
def ranking_suffix(rank: int) -> str:
    """Append the ordinal suffix to a rank, e.g. 1st, 11th, 21st.

    Args:
        rank (int): rank or placing

    Returns:
        str: the rank with its ordering suffix
    """
    text = str(rank)
    # 11, 12, 13 (and 111, 212, ...) always take "th".
    if len(text) > 1 and text[-2:] in ("11", "12", "13"):
        return text + "th"
    suffix = {"1": "st", "2": "nd", "3": "rd"}.get(text[-1], "th")
    return text + suffix
|
2190a4b109ed2e1394106552f891da5a335c6440
| 58,727
|
def isExistingSameDevice(config_db, deviceName, table):
    """Check if the given device name conflicts with an existing device
    (one already configured as `remote_device` in `table`)."""
    entries = config_db.get_table(table)
    return any(
        "remote_device" in entry and entry["remote_device"] == deviceName
        for entry in entries.values()
    )
|
e1b3bd608125d75e4ce3c90a7373d2eee161c41d
| 58,728
|
def _Normalize(s, max_len=8):
"""Get the first few alphanum chars of a string"""
result = ''
for c in s:
if ('a' <= c <= 'z' or 'A' <= c <= 'Z' or '0' <= c <= '9'):
result += c
if len(result) >= max_len:
break
return result
|
ba23626234a4c9f44cb8cb951e574e1fbbd6473e
| 58,729
|
def in_to_mm(inches):
    """
    Convert a length in inches to millimeters (1 in = 25.4 mm).
    """
    mm_per_inch = 25.4
    return inches * mm_per_inch
|
3e5be1fe8badc0b92de67dc770a7f6aaa482222f
| 58,731
|
def strip_space(dfcol):
    """Strip leading/trailing spaces (but not newlines) from a string column."""
    return dfcol.str.strip(' ')
|
1a3ec2b1dc5eaf4ff2fb7104b3db48c71db20ab0
| 58,732
|
def to_pos(i, j):
    """Convert a 0-based board coordinate (0,0 at bottom left) to the
    standard algebraic representation.
    >>> to_pos(0,0)
    'a1'
    >>> to_pos(3,3)
    'd4'
    """
    file_letter = 'abcdefgh'[i]
    rank_number = j + 1
    return f"{file_letter}{rank_number}"
|
c001bafb7294b07d493352894860fd1c22de6bb9
| 58,737
|
def greeting(greeting, name):
    """ Returns a greeting to a person.

    Args:
        greeting (str): A greeting.
        name (str): A person's name.

    Returns (str):
        A greeting to a person.
    """
    # str.title() capitalizes the first letter of each word of `greeting`.
    return f"{greeting.title()}, {name}!"
|
5ab8c58ab91e71d14cdd05da007bfd93d862a44c
| 58,749
|
def inss(valor=0):
    """
    Compute the INSS (Brazilian social security) contribution for a salary.

    :param valor: salary value.
    :return: list of [contribution amount, rate string used].
    """
    resultado = []
    if valor < 1751.81:
        resultado.append((valor * 8) / 100)   # 8% bracket
        resultado.append('8')
    elif valor <= 2919.72:
        # Reaching here already implies valor >= 1751.81.
        resultado.append((valor * 9) / 100)   # 9% bracket
        resultado.append('9')
    else:
        resultado.append((valor * 11) / 100)  # 11% bracket
        resultado.append('11')
    return resultado
|
71055acab478e878d17b8a68c5aa8bc2d52cc7b8
| 58,753
|
def decode_var_len_uint8(br):
    """Decode a number in the range [0..255] by reading 1 - 11 bits."""
    if not br.read_bits(1):
        return 0
    nbits = br.read_bits(3)
    if nbits == 0:
        return 1
    return (1 << nbits) + br.read_bits(nbits)
|
9ff653a3781d8b48e2b971661dffba0d3a2ec6ff
| 58,758
|
def read_flist(listpath, delimiter='|'):
    """Read a file list from a delimited text file.

    :param listpath: path to the list file
    :param delimiter: field separator within each line, default '|'
    :return: list of per-line field lists
    """
    entries = []
    # Bug fix: the old code opened the file without ever closing it;
    # `with` guarantees the handle is released.
    with open(listpath, 'r') as handle:
        for line in handle:
            entries.append(line.rstrip().split(delimiter))
    return entries
|
02f7a617a625841db03336a439bbe4278fd25eb9
| 58,763
|
def calculate_L(W, frequency, S):
    """
    Calculate L, which is subsequently used to perform rotational
    temperature analysis.

    Parameters
    ----------
    W - float
        Integrated flux of a line
    frequency - float
        Frequency of a transition in MHz
    S - float
        Intrinsic linestrength of the transition

    Returns
    -------
    L - float
    """
    return (2.04e20 * W) / (S * frequency**3)
|
5dc4153b2990cc7f71bb2a45d2692a90bbd0fb82
| 58,764
|
def clamp(value, minval, maxval):
    """Return `value` clamped to [minval, maxval] (bounds auto-ordered)."""
    lo, hi = sorted((minval, maxval))
    return min(max(value, lo), hi)
|
e3e0457fc052e21d092df189f61c089d8585284d
| 58,766
|
import json
import pkg_resources
def load_json(filename: str):
    """Load a JSON-formatted package resource into a native data structure."""
    raw = pkg_resources.resource_string(__name__, filename)
    return json.loads(raw)
|
525c68c21638444b529165870d05027c39472460
| 58,772
|
import ctypes
def pycapsule_new(ptr, name, destructor=None) -> ctypes.py_object:
    """
    Wraps a C function pointer into an XLA-compatible PyCapsule.

    Args:
        ptr: A CFFI pointer to a function
        name: A binary string
        destructor: Optional destructor passed through to PyCapsule_New
    Returns
        a PyCapsule (ctypes.py_object)
    """
    capsule_new = ctypes.pythonapi.PyCapsule_New
    # Bug fixes: without restype, ctypes treated the returned PyObject*
    # as a plain C int instead of the capsule object the annotation
    # promises; and the `destructor` argument was silently dropped.
    capsule_new.restype = ctypes.py_object
    return capsule_new(ptr, name, destructor)
|
dbfb92a8e60af149aecdcefac66aeb5befc05517
| 58,774
|
def clean_row(review):
    """
    Cleans out a review and converts it to a list of words.

    Currently only removes the empty tokens left behind after some
    punctuation was stripped.

    Args:
        review(str): The review in string format.

    Returns:
        List of words in the accepted review, cleaned.
    """
    return list(filter(None, review.split(' ')))
|
e3b16c4b709ec16469ae8ac34ae2a04694882ba8
| 58,776
|
def reduceDimensions(matrix):
    """
    Decrease a list/matrix's dimensions by one.

    Vectors (1-dimensional lists) are not accepted and are returned
    without changes.

    Parameters
    ----------
    matrix: Multidimensional Array

    Returns
    -------
    list: The matrix after reducing its dimensions.
    """
    if type(matrix[0]) is not list:
        return matrix
    flattened = []
    for row in matrix:
        flattened.extend(row)
    return flattened
|
c861937238de53d3da35782ed2c8f377604a19d0
| 58,777
|
def fib(n):
    """Calculate the n-th Fibonacci number.
    >>> fib(0)
    0
    >>> fib(15)
    610
    >>>
    """
    current, nxt = 0, 1
    for _ in range(n):
        current, nxt = nxt, current + nxt
    return current
|
2b70b1ae94556e26644d7394df603fdfc5ad6553
| 58,784
|
import math
def get_upscaled_rgb_tuple(downscaled_rgb_tuple):
    """Scales RGB color values from decimal 0.0-1.0 to int 0-255.

    Based on source by Greg Taylor in the python-colormath library
    https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/colormath/color_objects.py#L565
    and used under the BSD license."""
    def upscale(channel):
        return int(math.floor(0.5 + channel * 255))

    red, green, blue = downscaled_rgb_tuple
    return upscale(red), upscale(green), upscale(blue)
|
b0ef1e370f97228ab78e501161bc392dc5a1b7e0
| 58,793
|
def is_readable(fp, size=1):
    """
    Check if `size` bytes can be read from the file-like object, restoring
    the stream position afterwards.

    :param fp: file-like object
    :param size: byte size
    :return: bool
    """
    data = fp.read(size)
    fp.seek(-len(data), 1)
    return len(data) == size
|
7fb9c1c87ea169d266a29e2414224c27b086fcae
| 58,803
|
def count_frames_manual(video):
    """
    Count frames in a video by looping through each frame.

    This method comes from https://www.pyimagesearch.com/2017/01/09/
    count-the-total-number-of-frames-in-a-video-with-opencv-and-python/
    written by Adrian Rosebrock. Much slower than reading the codec
    metadata, but also more reliable.
    """
    total = 0
    while True:
        grabbed, _frame = video.read()
        if not grabbed:
            # End of video reached.
            return total
        total += 1
|
89bd20ff5239219d13024d808d18ad7cc9ac07be
| 58,804
|
def get_alternatives_for_square(board, nrow, ncolumn):
    """Return the sorted sequence of valid digits for square
    (nrow, ncolumn) of a 9x9 sudoku board."""
    def _box_range(idx, size=3):
        """Indexes covering the 3x3 box (sub-matrix) containing idx."""
        first = (idx // size) * size
        return range(first, first + size)

    used = set()
    for r in _box_range(nrow):
        for c in _box_range(ncolumn):
            used.add(board[r][c])
    for k in range(9):
        used.add(board[nrow][k])
        used.add(board[k][ncolumn])
    return sorted(set(range(1, 10)) - used)
|
616f675cc7c3fd81d5bbc59e09b026b27b88ef1e
| 58,808
|
import re
def _json_remove_excessive_newlines(jstr, ind, n):
"""Remove newline before lines with a certain indent.
Problem:
The JSON writer either inserts no newline characters at all, or
after every entry. The former is impractical, the latter blows up
JSON files containing large sets of data (e.g. contour coordinates).
Solution:
To keep the JSON file structure clear, keep newlines before lines
with an indent of up to N spaces. Newline characters before every
line with more indent are removed.
Arguments:
- jstr: Indented JSON string.
- ind: Number of spaces per level indent.
- n: Lowest level for which newlines are retained.
"""
# Remove all newlines for >N spaces indent.
rx1 = "\n {{{n}, }}(?=[^ ])".format(n=n * ind)
# Remove newline before closing bracket of list entries at indent N
rx2 = "\n {{{n}}}(?=[\]}}])".format(n=(n - 1) * ind)
jstr = re.sub(rx1, "", jstr)
jstr = re.sub(rx2, "", jstr)
return jstr
|
7d51d63ce054fd04c86efe6883f7b5c3d7219309
| 58,815
|
def get_index_previous_timestamp(data_in, datetime_target, idx_start, idx_limit=0):
    """
    Returns index of the image with a timestamp previous of the input target
    :param data_in: Dictionary with exif/precise data
    :param datetime_target: Target datetime object in UTC
    :param idx_start: Sample from where the search is initiated. It goes backwards in the list
    :param idx_limit: Last valid sample in the list. To limit the search to part of the data
    :return: Index of data_in list
    """
    # Never search below index 0.
    idx_limit = max(idx_limit, 0)
    if idx_start <= idx_limit:
        return idx_limit
    timestamps = data_in['timestampMs']
    # Walk backwards from idx_start (exclusive of idx_limit); return the
    # first sample whose timestamp is at or before the target.
    for idx in range(idx_start, idx_limit, -1):
        if timestamps[idx] <= datetime_target:
            return idx
    # No matching sample above the limit: fall back to the limit index.
    return idx_limit
|
347b8f30901591f596fbf9049f8450fabe862999
| 58,818
|
import requests
import json
from typing import OrderedDict
def get_model(uri):
    """
    Return a capture model as ordered JSON object
    :param uri: URI for capture model
    :return: ordered JSON object
    """
    if not uri:
        return None
    response = requests.get(uri)
    if response.status_code != requests.codes.ok:
        return None
    # object_pairs_hook preserves the key order of the source document.
    return json.loads(response.text, object_pairs_hook=OrderedDict)
|
0c57c3a3aabfdd2e0dbbc229fee1cf39c63b6dd7
| 58,821
|
def determine_name(func):
    """
    Given a function, returns the name of the function.
    Ex::
        from random import choice
        determine_name(choice) # Returns 'choice'
    Args:
        func (callable): The callable
    Returns:
        str: Name string
    """
    # Prefer the callable's own name; fall back to its class name.
    try:
        return func.__name__
    except AttributeError:
        pass
    try:
        return func.__class__.__name__
    except AttributeError:
        pass
    # This shouldn't be possible, but blow up if so.
    raise AttributeError("Provided callable '{}' has no name.".format(func))
|
e70be86e30e628dccfa8cbcbebc8ade6d2fcc4ae
| 58,822
|
def string_to_bool(value):
    """
    Convert the given unicode string ``value`` to a boolean object.
    If ``value`` is ``'1'``, ``True`` is returned. If ``value`` is ``'0'``,
    ``False`` is returned. Any other value raises a
    :exc:`~exceptions.ValueError`.
    """
    if value == '1':
        return True
    if value == '0':
        return False
    raise ValueError('Not a boolean value: {0!r}'.format(value))
|
ce2059ea9d05a39d9c12a77330db1234e661b0c8
| 58,825
|
def remove_workflow_name(name):
    """ Remove the workflow name from the beginning of task, input and output names (if it's there).
    E.g. Task names {workflowName}.{taskName} => taskName
    Input names {workflowName}.{inputName} => inputName
    Output names {workflowName}.{taskName}.{outputName} => taskName.outputName
    """
    prefix, _sep, remainder = name.partition('.')
    # If nothing follows the first dot (or there is no dot), keep the
    # original leading part unchanged.
    return remainder if remainder else prefix
|
00efd3c6d900ca7e99178bd02021644789802fd5
| 58,826
|
def gen_len(gen):
    """
    Get generator's length by exhausting it and counting the items.
    """
    count = 0
    for _item in gen:
        count += 1
    return count
|
3927b72c4768b5d9536382ac6724459ef50052ec
| 58,829
|
def key_of_max(d):
    """Return key associated with maximum value in d.

    Ties are broken by the smallest key: keys are scanned in sorted
    order and ``max`` keeps the first maximum it encounters.

    >>> key_of_max({'a':1, 'b':2})
    'b'
    """
    return max(sorted(d), key=lambda k: d[k])
|
01ee05b37d8c8bbaa12c184aba422c2e3ac2e438
| 58,831
|
import uuid
def is_valid_guid(to_test: str) -> bool:
    """
    Determine if value is a valid UUID.
    Parameters
    ----------
    to_test : str
        value to test
    """
    try:
        parsed = uuid.UUID(to_test)
    except ValueError:
        return False
    # Round-trip check: only the canonical hyphenated, lowercase form
    # is accepted as valid.
    return str(parsed) == to_test
|
69d4c838cba54b8b1d4821dda937deb9c0d35a6f
| 58,832
|
import re
def count_occurrences(path):
    """Count how many times the Huffington Post pattern appears and how
    many URLs lead to a PDF document in the given file.

    Keyword arguments:
    path -- path of the file containing the patterns to analyse.
    """
    # Compile the patterns once, outside the per-line loop.
    huff_pattern = re.compile(r"Huffington post\b", flags=re.IGNORECASE)
    pdf_pattern = re.compile(r"https?:.*\.pdf/*$", flags=re.MULTILINE)
    huff_count = 0
    pdf_count = 0
    # Read the file line by line, bumping the counters on each match.
    with open(path, "r") as infile:
        for line in infile:
            if huff_pattern.search(line):
                huff_count += 1
            if pdf_pattern.search(line):
                pdf_count += 1
    print('El patró Huffington_post apareix %s vegades' % huff_count)
    print('El patró url_pdf apareix %s vegades' % pdf_count)
    return huff_count, pdf_count
|
a4c12364ac61fa9c721b98133c177eaecc36c674
| 58,833
|
import random
def mark(character, diacritic_options):
    """
    Combine character with a random diacritic.
    """
    # Pick one diacritic at random; surrounding whitespace is stripped
    # before attaching it to the character.
    chosen = random.choice(diacritic_options)
    return character + chosen.strip()
|
67917dbfcf0539ef168158536593ee006ebd9d66
| 58,837
|
def get_scale(beam, scale):
    """
    Maps a scale factor name to the underlying beam quantity
    """
    # 'charge' reads the attribute, 'number' uses item access; anything
    # else is an unknown scale factor.
    if scale == 'charge':
        return beam.q
    if scale == 'number':
        return beam['n_particle']
    raise ValueError('Could not get scale factor for plot.')
|
a5f8b2fe626f23dd9e73cb0b7fbf901ce227fd83
| 58,842
|
def boolean_mask(value):
    """Mask a value to boolean. This can be used for sensitive fields.
    :param value: Any value.
    :returns boolean: The input value casted to boolean.
    """
    # Double negation yields the truthiness of the value as a bool.
    return not not value
|
3a1e143684d8e7bc47a4c81b185d1833c7d2b59b
| 58,844
|
import csv
def _load_key_value_csv(csv_path, key_type=str, value_type=str, ignore_header=False):
"""
Loads a CSV file that contains key, value pairs as rows into a dictionary.
:param csv_path: Path to the CSV file.
:type csv_path: str
:param key_type: Type of the keys (first column), to cast them when creating the dictionary. Optional, default: str
:type key_type: type
:param value_type: Type of the values (second column), to cast them when creating the dictionary. Optional,
default: str
:type value_type: type
:para, ignore_header: Set to true to ignore the first row of the CSV file, which is assumed to be a header
with metadata (usually the name of the columns). Optional, default: False
:raises ValueError: If the type conversion is not possible.
:raises OSError: If there is a problem reading the file.
:return: A dictionary that maps each element of the first column in the CSV to its corresponding pair in the
second column for the same row. The type of the keys and of the values is defined by the corresponding
parameters.
:rtype: dict
"""
with open(csv_path, newline='') as csv_file:
reader = csv.reader(csv_file)
if ignore_header:
# Skip first row.
next(reader)
csv_dict = {key_type(row[0]): value_type(row[1]) for row in reader}
return csv_dict
|
4efc93e8261c699a13f068babfe927fcee655c80
| 58,845
|
def is_non_neg_int(val):
    """Check if value is non negative integer"""
    if not isinstance(val, int):
        return False
    return val >= 0
|
9a01e9dd9704b1000906a8a1b9e2d33d8f031e53
| 58,847
|
def sort_by(items, order):
    """Sort the items into the given order. Return a new list.
    All items which appear in order are sorted first, in that order,
    followed by the remaining items, sorted in natural order.
    """
    def rank(element):
        # Items present in `order` sort by their position there;
        # everything else sorts after, in natural order.
        if element in order:
            return (order.index(element), element)
        return (len(order), element)
    return sorted(items, key=rank)
|
bae223f743fc5d8d20e68d44d6aa28dd740db4b4
| 58,850
|
def mse_loss(y_true, y_predicted):
    """Return the mean squared error between y_true and y_predicted."""
    squared_errors = (y_true - y_predicted) ** 2
    return squared_errors.mean()
|
5beda4a9263a53f0e596dc13ee7f2a49b14d1be4
| 58,851
|
import functools
import time
def timeit(runtime_attr: str):
    """
    Decorate a function to compute its run time and update the \
    :class:`IRLSState` instance attribute called ``runtime_attr`` with the run \
    time of the function.
    The first argument of ``fct`` should be an :class:`IRLSState` instance.
    """
    def decorator(fct):
        @functools.wraps(fct)
        def wrapper(*args, **kwargs):
            begin = time.perf_counter()
            result = fct(*args, **kwargs)
            # Store the elapsed time on the state object (first argument).
            setattr(args[0], runtime_attr, time.perf_counter() - begin)
            return result
        return wrapper
    return decorator
|
c4ba2c1d015de7fe756c3cf83cddcd4c991271b7
| 58,852
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.