| content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k) |
|---|---|---|
def get_list_string(data: list, delim: str = '\t', fmt: str = '{}') -> str:
"""Converts a 1D data array into a [a0, a1, a2,..., an] formatted string."""
result = "["
first = True
for i in data:
if not first:
result += delim + " "
else:
first = False
result += fmt.format(i)
result += "]"
return result
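# Example usage (illustrative sketch, not part of the original snippet):
print(get_list_string([1.0, 2.5, 3.25], delim=',', fmt='{:.2f}'))  # -> [1.00, 2.50, 3.25]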
|
25e9bb0e0220776ff79e121bab3bddf99168602d
| 41,631
|
def _is_int(val):
"""Check if a value looks like an integet."""
try:
return int(val) == float(val)
except (ValueError, TypeError):
return False
|
d6ec5ca2717026b4c333711f477fa7fae558d26f
| 501,760
|
from typing import Container
from typing import List
import inspect
def get_unannotated_params(func, annotations: Container) -> List[str]:
""" Return a list of ``func`` argument names which are not type annotated.
``annotations`` should be a result of get_type_hints call for ``func``.
>>> from typing import get_type_hints
>>> def foo(x, y: str, *, z, w: int, **kwargs): pass
>>> annotations = get_type_hints(foo)
>>> get_unannotated_params(foo, annotations)
['x', 'z']
"""
ARGS_KWARGS = {
inspect.Parameter.VAR_POSITIONAL, # *args argument
inspect.Parameter.VAR_KEYWORD # **kwargs argument
}
res = []
for name, param in inspect.signature(func).parameters.items():
if name in annotations or param.kind in ARGS_KWARGS:
continue
res.append(name)
return res
|
f4e5164ec0b0666cb2f329e0ea9c482b8a2b6c60
| 432,763
|
from typing import List
def read_config_list(config_file: str) -> List[str]:
"""
Reads a file and returns a list with one item per line.
"""
return_list = []
with open(config_file) as config_list:
for line in config_list:
return_list.append(line.strip())
return return_list
|
554cd4e66292480761c794b74e0207f3ff032970
| 264,562
|
def get_shared_prefix(word1: str, word2: str) -> str:
"""
    Get the common prefix of word1 and word2, i.e. the substring at the beginning of both words.
Parameters
----------
word1 : str
word2 : str
Returns
-------
shared_prefix : str
Examples
--------
>>> get_shared_prefix("foo", "bar")
''
>>> get_shared_prefix("foobar", "bar")
''
>>> get_shared_prefix("foobar", "foo")
'foo'
>>> get_shared_prefix("steamship", "steampowered")
'steam'
"""
shared_prefix = ""
for char1, char2 in zip(word1, word2):
if char1 == char2:
shared_prefix += char1
else:
break
return shared_prefix
|
578ffed289bed22c439734fe0695a72f108b898e
| 557,737
|
def render_to_log_kwargs(wrapped_logger, method_name, event_dict):
"""
Render `event_dict` into keyword arguments for :func:`logging.log`.
The `event` field is translated into `msg` and the rest of the `event_dict`
is added as `extra`.
This allows you to defer formatting to :mod:`logging`.
.. versionadded:: 17.1.0
"""
return {"msg": event_dict.pop("event"), "extra": event_dict}
|
72849c01beb311405a2a85d179445196f2127a50
| 441,048
|
def fixup_cell_names(design):
"""
    Scans Yosys' JSON data structure and replaces dots in cell instance
    names with another character (an underscore).
"""
# Process modules
modules = design["modules"]
for mod_name, mod_data in modules.items():
print(mod_name)
# Process cells
cells = mod_data["cells"]
for cell_name in list(cells.keys()):
# Fixup name
if "." in cell_name:
new_name = cell_name.replace(".", "_")
assert new_name not in cells, new_name
cells[new_name] = cells[cell_name]
del cells[cell_name]
return design
|
55978578226538860c3968f84b14560304ea23af
| 66,610
|
def find_pos(lst, element):
"""Find position of element in list"""
for i in range(len(lst)):
if lst[i] == element:
return i
return None
|
545e876a19ffb68f53c7e3495af3793bba38282c
| 540,151
|
def fexists(sftp, path):
"""os.path.exists for paramiko's SCP object
"""
try:
sftp.stat(path)
except IOError:
return False
else:
return True
|
3cff765bbc8cc3f5ed3a3165473961ebfc04ec94
| 28,038
|
def split_by_first_occurence(string, character):
"""
    Split a string at the first occurrence of a character or a substring,
    and then strip the results.
    For example, many song names have artist names separated by an '&':
    for the string 'Artist1 & Artist2' and character = '&', the
    function would return ['Artist1', 'Artist2']. This function
    also strips bracketed content, to provide better search results.
:param string: String in which to search
:param character: character or substring which is to be searched
:returns: A list of 2 strings, stripped of spaces at the beginning and end
"""
if character not in string:
return [string]
first_half = string[:string.find(character)]
first_half = first_half.strip()
last_half = string[string.find(character) + len(character):]
if '(' in last_half:
# part after the brackets is mostly not required for song name
last_half = last_half[:last_half.find('(')]
elif '[' in last_half:
last_half = last_half[:last_half.find('[')]
last_half = last_half.strip()
return [first_half, last_half]
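# Example usage (illustrative sketch, not part of the original snippet):
print(split_by_first_occurence('Artist1 & Artist2 (feat. X)', '&'))  # -> ['Artist1', 'Artist2']
print(split_by_first_occurence('Solo Artist', '&'))                  # -> ['Solo Artist']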
|
b5aeaed6c5bbabacd19a0cc24bda8d590eff3f1b
| 273,686
|
import math
def poisson_prob(n, _lambda):
"""The probability of k occurrences of a random variable having poisson distribution with expected value mu
:param n: number of occurrences
:type n: int
:param _lambda: expected value
:type _lambda: int
    :return: probability of n occurrences
:rtype: float
"""
return math.exp(-_lambda) * pow(_lambda, n) / math.factorial(n)
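# Worked example (illustrative sketch):
# P(N = 2) for a Poisson variable with mean 2 is e**-2 * 2**2 / 2! ≈ 0.2707.
print(round(poisson_prob(2, 2), 4))  # -> 0.2707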
|
378e327a1d3524bccb900fc61585514ebc999d61
| 73,975
|
def reverse( sequence ):
"""Return the reverse of any sequence
"""
return sequence[::-1]
|
f08ae428844347e52d8dbf1cd8ad07cfbf4ef597
| 707,020
|
def get_attribute(s, ob):
"""
Break apart a string `s` and recursively fetch attributes from object `ob`.
"""
    # Walk the dotted path, skipping empty segments (e.g. leading dots).
    parts = s.split('.')
    f = ob
    for part in parts:
        if not part:
            continue
        f = getattr(f, part)
    return f
|
bdbd92e0ef1d70f4e031ee0f66292c0de93d8813
| 684,650
|
def make_portfolio(df, date_level='date'):
"""
Make a portfolio of assets by grouping by date and summing all columns.
Note: the caller is responsible for making sure the dates line up across
assets and handling when they don't.
"""
return (
df
.groupby(level=date_level)
.sum()
)
|
000b543d712c6074a2d447d6ab66071a85d0c1e4
| 624,902
|
def binarize_images(x):
"""
Convert images to range 0-1 and binarize them by making
0 the values below 0.1 and 1 the values above 0.1.
"""
x /= 255
x[x >= 0.1] = 1
x[x < 0.1] = 0
return x
|
5410d58c1111b730cd7b4d46bac28aa1dacf9ba6
| 428,121
|
def getlambda(pixel, lo, hi):
#-----------------------------------------------------------------------------
"""
Small utility to calculate lambda on a line for given position
in pixels
"""
#-----------------------------------------------------------------------------
if pixel is None:
return 0.5
delta = hi - lo
if delta == 0.0:
return 0.5
return (pixel-lo)/(delta)
|
6245ff3ec09db39d913175e58b9cec08888aa66e
| 29,957
|
def multifilter(filters, result):
""" Applies multiple filters to `result` .
Returns:
list:
result, reduced by each filter.
"""
if not filters:
return result
for f in filters:
result = filter(f, result)
return result
|
7cf2234e5e21650a06d8bdaf560af5ca8bfb90a5
| 477,592
|
def calc_integration_time(num_groups, frame_time, frames_per_group, num_skips):
"""Calculates the integration time.
Parameters
----------
num_groups : int
        Groups per integration.
frame_time : float
Frame time (in seconds)
frames_per_group : int
Frames per group -- always 1 except maybe brown dwarves
num_skips : int
Skips per integration -- always 0 except maybe brown dwarves
Returns
-------
integration_time : float
Integration time (in seconds)
"""
integration_time = (num_groups * (frames_per_group + num_skips) - num_skips) * frame_time
return integration_time
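# Worked example (illustrative sketch):
# 5 groups, 1 frame per group, no skips, 10.0 s frames: (5 * (1 + 0) - 0) * 10.0 = 50.0 s.
print(calc_integration_time(5, 10.0, 1, 0))  # -> 50.0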
|
450bdc2f209eafcb0f6621a3eb5a6b5bff8eddc3
| 239,940
|
def reshape_axis(ax, axis_size_pix):
"""reshape axis to the specified size in pixels
    this will reshape an axis so that it is the specified size in pixels. we use this to make sure
    that an axis is the same size as (or an integer multiple of) the array we're trying to
    display, in order to prevent aliasing.
    NOTE: this can only shrink a big axis, not make a small one bigger, and will throw an exception
    if you try to do that.
Arguments
---------
    ax : `matplotlib.pyplot.axis`
the axis to reshape
axis_size_pix : `int`
the target size of the axis, in pixels
Returns
-------
ax : `matplotlib.pyplot.axis`
the reshaped axis
"""
if ax.bbox.width < axis_size_pix[1] or ax.bbox.height < axis_size_pix[0]:
raise Exception("Your axis is too small! Axis size: ({}, {}). Image size: ({}, {})".format(
ax.bbox.width, ax.bbox.height, axis_size_pix[1], axis_size_pix[0]))
bbox = ax.figure.get_window_extent().transformed(ax.figure.dpi_scale_trans.inverted())
fig_width, fig_height = bbox.width*ax.figure.dpi, bbox.height*ax.figure.dpi
rel_axis_width = axis_size_pix[1] / fig_width
rel_axis_height = axis_size_pix[0] / fig_height
ax.set_position([*ax.get_position().bounds[:2], rel_axis_width, rel_axis_height])
return ax
|
5a029753014ebb4af4683be3a1d50ae4130ccc32
| 35,885
|
def toa_rad_clr(swdn_toa_clr, swup_toa_clr, olr_clr):
"""Clear-sky TOA downward radiative flux."""
return swdn_toa_clr - swup_toa_clr - olr_clr
|
2e6f746cf21e32f7b0d6c94a3d43706d057194e9
| 352,532
|
def goal_must_be_positive(cls, value):
"""Validate that goal is a positive number."""
assert value >= 0, f'goal == {value}, must be >= 0'
return value
|
cef66f0faeabea1761bf9af5d001a1b4ee430576
| 515,890
|
def device_extents(devmem):
"""Find the extents (half open begin and end pointer) of the underlying
device memory allocation.
    NOTE: it always returns the extents of the allocation, not the extents
    of the device memory view, which can be a subsection of the entire allocation.
"""
return devmem.ptr, devmem.ptr + devmem.size
|
44c9f7ec6b0efccf5798123617de686f1ac9bd54
| 467,666
|
def decode(obj, encoding='utf-8'):
"""Decode the bytes of an object to an encoding"""
try:
return obj.decode(encoding)
except (AttributeError, UnicodeDecodeError):
return obj
|
7c5c5e908f11ee6682b3b81abdf588056298b371
| 180,864
|
def freq(mylist, item):
"""Return the relative frequency of an item of a list.
:param mylist: (list) list of elements
:param item: (any) an element of the list (or not!)
:returns: frequency (float) of item in mylist
"""
return float(mylist.count(item)) / float(len(mylist))
|
5d00eb08bd2225b0e2433876bb3d6202aa9b5d05
| 598,370
|
from collections import OrderedDict
def get_chunks_stats(chunks):
"""Iterates over chunks and collects info about main features, such
as:
- labels
- classes
- tracks
Args:
        chunks (list): Tuples of chunks with video data
Returns:
OrderedDict: Statistics of chunks
"""
stats = OrderedDict()
classes = OrderedDict()
counted_tracks = []
labels = []
chunk_classes = []
for chunk in chunks:
if chunk['track'] not in counted_tracks:
stats['tracks_in_script_total'] = stats.setdefault('tracks_in_script_total', 0) + 1
counted_tracks.append(chunk['track'])
if chunk['label'] not in labels:
labels.append(chunk['label'])
if chunk['class'] not in chunk_classes:
chunk_classes.append(chunk['class'])
classes[chunk['class']] = classes.setdefault(chunk['class'], 0) + 1
stats['labels'] = labels
stats['classes'] = classes
stats['tracks_used'] = counted_tracks
return stats
|
1e6b69e22316108b0d1e33c84804598e4d5dca62
| 76,879
|
def sysmeta_prefix(resource):
"""
Returns the system metadata prefix for given resource type.
"""
if resource == 'object':
return 'x-object-sysmeta-swift3-'
else:
return 'x-container-sysmeta-swift3-'
|
912eba8c9c1875074a1c03a2a5e3fa4451025ac1
| 508,265
|
def pos2iso6709(lat: float, lon: float, alt: float, crs: str = "WGS_84") -> str:
"""
convert decimal degrees and alt to iso6709 format.
:param float lat: latitude
:param float lon: longitude
:param float alt: altitude
    :param str crs: coordinate reference system (default = WGS_84)
:return: position in iso6709 format
:rtype: str
"""
if not (
isinstance(lat, (float, int))
and isinstance(lon, (float, int))
and isinstance(alt, (float, int))
):
return ""
lati = "-" if lat < 0 else "+"
loni = "-" if lon < 0 else "+"
alti = "-" if alt < 0 else "+"
iso6709 = (
lati
+ str(abs(lat))
+ loni
+ str(abs(lon))
+ alti
        + str(abs(alt))
+ "CRS"
+ crs
+ "/"
)
return iso6709
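# Example usage (illustrative sketch, using the corrected altitude handling above):
print(pos2iso6709(53.0, -2.5, 100.0))  # -> +53.0-2.5+100.0CRSWGS_84/
print(pos2iso6709("53", -2.5, 100.0))  # -> '' (non-numeric input returns an empty string)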
|
e1dbf684402504f93f44f7607d755bb17c6c5546
| 625,690
|
def split_sents(notes, nlp):
"""
Split the text in pd.Series into sentences.
Parameters
----------
notes: pd.Series
series with text
nlp: spacy language model
Returns
-------
notes: pd.DataFrame
df with the sentences; a column with the original note index is added
"""
print(f'Splitting the text in "{notes.name}" to sentences. This might take a while.', flush=True)
to_sentence = lambda txt: [str(sent) for sent in nlp(txt).sents]
sents = notes.apply(to_sentence).explode().rename('text').reset_index().rename(columns={'index': 'note_index'})
print(f'Done! Number of sentences: {sents.shape[0]}')
return sents
|
0aae3af46a2a0c29fff9c3bb5725b0ddcb8ed796
| 17,354
|
import math
def vecteur_norme(vec):
"""
    Return the norm of a (2D) vector.
"""
return math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
|
f2b347a9214bfa410695acc282a769acd6c6201e
| 248,208
|
import json
def _jsonify_exception(excep):
"""Given an exception object return json string {"error": str(e)} """
return json.dumps({"error": str(excep)})
|
ee999ca23324134cdd849a0d1a50f44288f3faa2
| 564,745
|
def faceAreaE3(face):
"""Computes the area of a triangular DCEL face with vertex coordinates given
as PointE3 objects.
Args:
face: A triangular face of a DCEL with vertex .data given by PointE3 objects.
Returns:
The area of the triangular face.
"""
p0, p1, p2 = [v.data for v in face.vertices()]
v0 = p1 - p0
v1 = p2 - p0
return v0.cross(v1).norm() * 0.5
|
412031297213a702f0579f8c86c23c93baaff8c8
| 696,250
|
from collections import OrderedDict
def group_events_by_year(events):
"""
Given a queryset of event objects,
    returns an ordered dictionary mapping years to lists of event objects.
"""
years = OrderedDict()
for event in events:
year = event.start_date.year
if year not in years:
years[year] = [event]
else:
years[year].append(event)
return years
|
4dc3bf0361f09bac13685c4bb0a5c5967484143c
| 304,474
|
def i18n_representer(dumper, data):
"""oslo_i18n yaml representer
    Returns a string translated to the default locale, for use with yaml.safe_dump
:param dumper: a SafeDumper instance passed by yaml.safe_dump
:param data: a oslo_i18n._message.Message instance
"""
serializedData = str(data.translation())
return dumper.represent_str(serializedData)
|
f94e74e7310f5a5c5d617eabdcb2b512072360b7
| 306,179
|
from typing import Iterable
def all_equal(iterable: Iterable):
"""
Utility function for checking if all the elements in an iterable are equal
"""
if isinstance(iterable, list):
return not iterable or iterable.count(iterable[0]) == len(iterable)
elif isinstance(iterable, tuple):
return iterable[1:] == iterable[:-1]
else:
# This is a generic solution for all iterables, the type checks above are
# optimized for the respective types.
it = iter(iterable)
try:
first = next(it)
except StopIteration:
return True
return all(first == rest for rest in it)
|
59bcc2825ab74c347ec2bb61f101601155f7df01
| 140,918
|
def to_month_only_dob(date):
"""
    Convert a date of birth into the month and year of birth, anchored to the 1st of the month, for anonymity.
    Returns an empty string if the input is not in the right format.
"""
try:
date = date.strftime("%Y-%m-01")
    except (ValueError, TypeError, AttributeError):
        date = ""
return date
|
d6e38593cb5f636c5d93e7cf5901056196d712c2
| 60,753
|
def is_set_policy(policy):
""" Checks if the current policy action is a set action. """
    return 'setIamPolicy' in policy['action'] and 'metadata' not in policy
|
33bf358ec78a1ddf023b2fb890f4b067b6263273
| 57,216
|
import string
from typing import Optional
from typing import List
def normalize_name(character_name: str, allowed_double_names: Optional[List[str]] = None) -> str:
"""
Ensures consistency for a character's name. For some scripts, "Jaime Lannister" is listed as "Jaime", "JAIME",
etc.
Parameters
----------
character_name : string
The original name of the character as it appears in the script.
allowed_double_names : list of strings, optional
Some characters are allowed to have two names (e.g., "The Hound"). This parameter specifies those names. If
``None``, then uses the default values (see source code).
Returns
-------
character_name : string
The normalized name of the character.
"""
# First be consistent and capitalize the first letter in all names of a character.
# Use `capwords` rather than `title` because `title` capitalizes letters after
# apostrophes.
character_name = string.capwords(character_name)
# There are only a handful of names we (by default) allow to be multiple words.
if allowed_double_names is None:
allowed_double_names = [
"The Hound", "Khal Drogo", "Maester Luwin", "Septa Mordane",
"Waymar", "Grand Maester Pycelle", "Maester Pycelle", "Street Urchin",
"King's Landing Baker", "Hot Pie", "Ser Alliser",
"Maryn Trant", "King Joffrey", "King's Landing Page",
"Wine Merchant", "Stable Boy", "Old Nan", "Little Bird",
"The Group", "The Others At The Table", "Gold Cloak", "Crowd",
"Black Lorren", "The Mountain", "Pyatt Pree", "Eddison Tollett",
"Kraznys Mo Nakloz", "Grey Worm", "Ser Dontos", "Dying Man", "Old Man",
"Blone Prostitute", "Black Haired Prostitute", "Sand Snakes", "High Sparrow",
"Slave Owner", "Night's Watchman", "Khal Moro", "Young Rodrik", "Young Ned",
"Three-Eyed Raven", "Young Lyanna", "Young Hodor", "Lady Walda", "Lady Crane",
"Maester Aemon", "Ser Vardis", "Maester Walkan", "Maester Pycelle",
"High Septon", "Black Walder"
]
# Populate a bunch of <House> <scout/warrior/guards>.
houses = [
"Lannister", "Stark", "Tyrell", "Baratheon", "Kings", "Nights Watch",
"Kings Landing", "Wounded", "Frey"
]
NPC_classes = ["Soldier", "soldier", "Scout", "Warrior", "Guards", "Bannerman", "Bannermen",
"Guard", "Boy"]
random_NPCs = []
for house in houses:
for NPC_class in NPC_classes:
random_NPCs.append(f"{house} {NPC_class}")
allowed_double_names = allowed_double_names + random_NPCs
# Now if a character's name is not allowed to be double, we will split it into two and
# take the first name.
if character_name not in allowed_double_names:
character_name = character_name.split()[0]
# We also map some names explicitly to others...
name_map = {
"Three-eyed": "Three-Eyed Raven",
"Three-Eyed": "Three-Eyed Raven",
"Three": "Three-Eyed Raven",
"Eddard": "Ned",
"Samwell": "Sam",
"Maester Aemon": "Aemon",
"Royce": "Waymar",
"Sandor": "The Hound",
"Hound": "The Hound",
"Luwin": "Maester Luwin",
"Drogo": "Khal Drogo",
"Grand Maester Pycelle": "Pycelle",
"Maester Pycelle": "Pycelle",
"King Joffrey": "Joffrey",
"Samwell": "Sam",
"Ser Alliser": "Alliser",
"Baelish": "Littlefinger",
"Petyr": "Littlefinger",
"Mountain": "The Mountain",
"Gregor": "The Mountain",
"Sparrow": "High Sparrow",
"Blackfish": "Brynden",
"Twyin": "Tywin", # Spelling lul.
"Rodrick": "Rodrik", # Spelling.
"Oberyon": "Oberyn",
}
if character_name in name_map:
character_name = name_map[character_name]
return character_name
|
c8dc419d2cc58c8286835972928ec4394262a0b4
| 601,445
|
import json
def try_to_replace_line_json(line, json_type, new_json, json_prefix=""):
"""Attempts to replace a JSON declaration if it's on the line.
Parameters
----------
line: str
A line from a JavaScript code file. It's assumed that, if it declares
a JSON, this declaration will only take up one line (i.e. it
will be of the form
"[whitespace?]var [JSON prefix?][JSON name] = {JSON contents};").
If a replacement is made, everything on and after the { in this line
will be replaced with the contents of the new JSON, followed by
";\n".
json_type: str
One of "rank", "sample", or "count". Other values will result in a
ValueError being thrown.
new_json: dict
A JSON to try replacing the current variable declaration (if present)
on the input line with.
json_prefix: str (default value: "")
An optional prefix that will be appended to any JSON names we try to
replace. If this is anything but "", this *won't replace normal JSON
lines* (e.g. "var rankPlotJSON = {") -- instead, this will only
replace lines with the given prefix (e.g. if the prefix is "SST",
then only JSON lines of the format "var SSTrankPlotJSON = {" will be
replaced.
Returns
-------
(line, replacement_made): str, bool
If no replacement was made, replacement_made will be False and line
will just equal to the input line.
If a replacement was made, replacement_made will be True and line
will be equal to the new line with the JSON replaced.
"""
prefixToReplace = ""
if json_type == "rank":
prefixToReplace = "var {}rankPlotJSON = {{"
elif json_type == "sample":
prefixToReplace = "var {}samplePlotJSON = {{"
elif json_type == "count":
prefixToReplace = "var {}countJSON = {{"
else:
raise ValueError(
"Invalid json_type argument. Must be 'rank', "
"'sample', or 'count'."
)
prefixToReplace = prefixToReplace.format(json_prefix)
if line.lstrip().startswith(prefixToReplace):
return (
(
line[: line.index("{")]
+ json.dumps(new_json, sort_keys=True)
+ ";\n"
),
True,
)
return line, False
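# Example usage (illustrative sketch, not part of the original snippet):
line = '    var rankPlotJSON = {"old": 1};\n'
new_line, replaced = try_to_replace_line_json(line, "rank", {"new": 2})
print(replaced)        # -> True
print(repr(new_line))  # -> '    var rankPlotJSON = {"new": 2};\n'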
|
602897349b52be3f10a41cf90d211ad70a6d4cc2
| 704,988
|
def if_statement(printer, ast):
"""Prints "if (cond) {thenBody} [else {elseBody}]"."""
cond_str = printer.ast_to_string(ast["cond"])
then_body_str = printer.ast_to_string(ast["thenBody"])
else_body_str = f'else {printer.ast_to_string(ast["elseBody"])}' if ast.get("elseBody") else ''
return f'if ({cond_str}) {then_body_str} {else_body_str}'
|
a50dc848cb72b873afce68102f9e3d8e6640e1df
| 416,987
|
def dominate(s1, s2):
""" Returns true if S1 dominates S2. S1 dominates S2 if S1 is better
(it our case lower) in the two fitness functions """
score1 = s1.get_fitness_score()
score2 = s2.get_fitness_score()
return False if (score1[0] > score2[0] or score1[1] > score2[1] or
(score1[0] == score2[0] and score1[1] == score2[1])) else True
|
ab3a7df8a55fe451ffa5ecda85935aeada70a829
| 648,285
|
def color_to_str(C):
"""Convert color value to hex string."""
# Scale from [0-1] to [0-255]
V = [int(round(c * 255)) for c in C]
# Clamp values to [0-255]
for i in range(len(V)):
V[i] = max(min(V[i], 255), 0)
return '#{:02X}{:02X}{:02X}'.format(V[0], V[1], V[2])
|
6b93f2484fabb237f1d9cf2c505c986043648a95
| 645,947
|
from typing import Dict
from typing import Tuple
from typing import List
def schedule_ordering(schedules_by_cohort: Dict) -> Tuple[List, Dict]:
"""Assigns a canonical ordering to unique schedules across cohorts.
Multiple cohorts may share the same schedule. In order to minimize the size
of the assignment problem, we assign an ID to each unique schedule.
Args:
schedules_by_cohort: A dictionary with cohort names as the keys and
lists of schedules as the values.
Returns:
A tuple containing:
* A list of unique schedules, indexed by schedule IDs.
* A modified version of `schedules_by_cohort`. Each schedule
is replaced with a wrapper dictionary with an `id` field
(the unique ID of the schedule) and a `blocks` field
(the original schedule data).
"""
schedule_ids: Dict[Tuple, int] = {}
schedules_by_id = []
schedules_by_cohort_with_id = {}
sched_id = 0
for cohort, schedules in schedules_by_cohort.items():
with_ids = []
for schedule in schedules:
uniq = []
for block in schedule:
block_hash = (block['start'], block['end'], block['site'])
uniq += list(block_hash)
uniq_hash = tuple(uniq)
if uniq_hash in schedule_ids:
with_ids.append({
'id': schedule_ids[uniq_hash],
'blocks': schedule
})
else:
schedule_ids[uniq_hash] = sched_id
schedules_by_id.append(schedule)
with_ids.append({'id': sched_id, 'blocks': schedule})
sched_id += 1
schedules_by_cohort_with_id[cohort] = with_ids
return schedules_by_id, schedules_by_cohort_with_id
|
57187a45b55afb1fadc9a552e902f7becd76e20f
| 428,044
|
def quantify(iterable, pred=bool):
"""Count how many times the predicate is true."""
return sum(map(pred, iterable))
|
79b4763b06304160e107cf09e210221be8e873d4
| 543,176
|
def partial_perms_ok(ctx):
"""
Returns whether the user can use command on self without full permissions.
True if user doesn't tag anyone or tags themselves only.
"""
mentions = ctx.message.mentions
return (len(mentions) == 1 and mentions[0] == ctx.author
or not bool(mentions))
|
a7a407167c126b285c44fde70bb0e747a9d594be
| 558,037
|
import re
def _UnixName(name):
"""Returns the unix_style name for a given lowerCamelCase string.
"""
# First replace any lowerUpper patterns with lower_Upper.
s1 = re.sub('([a-z])([A-Z])', r'\1_\2', name)
# Now replace any ACMEWidgets patterns with ACME_Widgets
s2 = re.sub('([A-Z]+)([A-Z][a-z])', r'\1_\2', s1)
# Finally, replace any remaining periods, and make lowercase.
return s2.replace('.', '_').lower()
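# Example usage (illustrative sketch, not part of the original snippet):
print(_UnixName('lowerCamelCase'))     # -> lower_camel_case
print(_UnixName('parseHTTPResponse'))  # -> parse_http_response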
|
4f844877cdfa8c8b15534322bae5c362c71ac925
| 514,885
|
def regroupRDDs(rdd, numGroup=10):
""" regroup an rdd using a new key added that is 0-numGtoup-1
:param rdd: input rdd as a (k,v) pairs
:param numGroup: number of groups to concatenate to
:return: a new rdd in the form of (groupNum, list of (k, v) in that group) pairs
"""
rdd = rdd.map(lambda kv: (kv[0] % numGroup, (kv[0], kv[1])), preservesPartitioning=True)
return rdd.groupByKey().mapValues(list)
|
6f9692b2374adf881ca81b993856f4f33ddc16b5
| 627,367
|
def to_celsius(fahrenheit):
"""
Accepts degrees Fahrenheit (fahrenheit argument)
Returns degrees Celsius
"""
celsius = (fahrenheit - 32) * 5/9
return celsius
|
ddb0b75550d623802bcbde70be39ed53d7c3d0c6
| 681,511
|
import re
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)
|
ae312d1f3519171161ce394770c5d7115d8ac348
| 683,422
|
import shutil
import click
def _copy(file, to_dir, message) -> str:
""" Copies a file to the given directory.
If the file exists, it is overwritten.
Returns:
The absolute path of the newly copied file.
"""
newly_created_path = shutil.copy(src=file, dst=to_dir)
click.echo(f'{message} {newly_created_path}')
return newly_created_path
|
005bfc520e1dc4f549d80aa6cb97302a1fa5e32a
| 423,342
|
import re
def BCA_formula_from_str(BCA_str):
"""
Get chemical formula string from BCA string
Args:
BCA_str: BCA ratio string (e.g. 'B3C1A1')
"""
if len(BCA_str)==6 and BCA_str[:3]=='BCA':
# format: BCAxyz. suitable for single-digit integer x,y,z
funits = BCA_str[-3:]
else:
# format: BxCyAz. suitable for multi-digit or non-integer x,y,z
funits = re.split('[BCA]',BCA_str)
funits = [u for u in funits if len(u) > 0]
components = ['BaO','CaO','Al2O3']
formula = ''.join([f'({c}){n}' for c,n in zip(components, funits)])
return formula
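# Example usage (illustrative sketch, not part of the original snippet):
print(BCA_formula_from_str('BCA311'))  # -> (BaO)3(CaO)1(Al2O3)1
print(BCA_formula_from_str('B3C1A1'))  # -> (BaO)3(CaO)1(Al2O3)1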
|
36375e62d70995628e253ba68ba8b777eb88d728
| 2,570
|
def sum_square_difference(n: int) -> int:
"""
Returns the "sum square difference" of the first n natural numbers.
"""
square_of_sum = (n*(n+1) // 2)**2
sum_of_squares = n*(n+1)*(2*n+1) // 6
return square_of_sum - sum_of_squares
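# Worked example (illustrative sketch):
# For n = 10: (1 + ... + 10)**2 = 3025 and 1**2 + ... + 10**2 = 385, so the difference is 2640.
print(sum_square_difference(10))   # -> 2640
print(sum_square_difference(100))  # -> 25164150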
|
a2a48f442dc6334809d4043f6c839631ccf0b65f
| 536,330
|
import requests
import json
def _query_ID_converter(ext_id):
"""
Converts ePMC ext_id into PMID , API description here - https://www.ncbi.nlm.nih.gov/pmc/tools/id-converter-api/
Parameters
----------
ext_id : String
ePMC identifier used to retrieve the relevant entry. Format is prefix of 'PMC'
followed by an integer.
Returns
-------
response_json : dict
json returned by the API containing the relevant information. Can be passed to
:func:`~pyre.convert_PMCID_to_PMID`
See Also
--------
* :func:`~pyre.convert_PMCID_to_PMID`
"""
service_root_url = "https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?ids="
request_url = service_root_url + ext_id
fmt = "json"
request_url = request_url + r"&format=" + fmt
tool = "pyresid"
request_url = request_url + r"&tool=" + tool
email = "robert.firth@stfc.ac.uk"
request_url = request_url + r"&email=" + email
r = requests.get(request_url)
response_json = json.loads(r.text)
return response_json
|
c7e19dbe162cdcb22d51e12013f3645f8baf67d1
| 692,138
|
from typing import Optional
import base64
import binascii
import logging
def slug_to_int(slug: str) -> Optional[int]:
"""
Convert the permalink slug back to the integer id.
Returns ``None`` if slug is not well-formed.
"""
byt = slug.encode("utf-8")
try:
int_bytes = base64.urlsafe_b64decode(byt)
return int(int_bytes)
except (binascii.Error, ValueError):
logging.getLogger(__name__).error("Unable to interpret slug: %s", slug)
return None
|
011239a2cbc9c53ce8dc9244668e614a1a79657f
| 132,735
|
def ensure_exists(path):
"""
Make sure the given path exists
:param path: The path that should be created if not exists
:type path: pathlib.PosixPath
:return: The existing path
:rtype: pathlib.PosixPath
"""
path.mkdir(parents=True, exist_ok=True)
return path
|
eb8abdfd9ea45ce6c472e19cbf8871cc9bf5c525
| 531,377
|
def osabi_to_string(elf_header):
"""Convert the EI_OSABI field to a string."""
    osabi_map = {
        b'\x00': 'System V',
        b'\x01': 'HP-UX',
        b'\x02': 'NetBSD',
        b'\x03': 'Linux',
        b'\x04': 'GNU Hurd',
        b'\x06': 'Solaris',
        b'\x07': 'AIX',
        b'\x08': 'IRIX',
        b'\x09': 'FreeBSD',
        b'\x0A': 'Tru64',
        b'\x0B': 'Novell Modesto',
        b'\x0C': 'OpenBSD',
        b'\x0D': 'OpenVMS',
        b'\x0E': 'NonStop Kernel',
        b'\x0F': 'AROS',
        b'\x10': 'Fenix OS',
        b'\x11': 'CloudABI',
    }
    osabi = elf_header['EI_OSABI']
    if osabi not in osabi_map:
        raise ValueError
    return osabi_map[osabi]
|
e26878ce8b08ca64325c8ccad757fead2b009979
| 176,609
|
def alias(language='en', value=''):
"""Create and return an alias (dict)"""
a = {}
if len(value) == 0:
a[language] = [{'language': language, 'value': ''}]
else:
a[language] = [{'language': language, 'value': val} for val in value]
return a
|
588899670ea80f992df101cde9686826c5af4ff8
| 276,209
|
def get_permutations(word):
"""
Gets the permutations of word recursively
:param word: the word to get permutations of
    :return: a set of all possible permutations of the word
:rtype: set
"""
# base case
if len(word) <= 1:
return {word}
all_chars_except_last = word[:-1]
last_char = word[-1]
# recursive call: get all possible permutations except last
permutations_of_all_chars_except_last = get_permutations(all_chars_except_last)
# get the last character in all possible positions in each of the above permutations
permutations = set()
for permutation_of_all_chars_except_last in permutations_of_all_chars_except_last:
for position in range(len(all_chars_except_last) + 1):
permutation = permutation_of_all_chars_except_last[
:position] + last_char + permutation_of_all_chars_except_last[position:]
permutations.add(permutation)
return permutations
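# Example usage (illustrative sketch, not part of the original snippet):
print(sorted(get_permutations('abc')))  # -> ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
print(len(get_permutations('aab')))     # -> 3 (duplicates collapse because a set is used)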
|
e85ec60c5125321e5ec3142af93ff2806636fdb0
| 453,769
|
def stag_pressure_ratio(M, gamma):
"""Stagnation pressure / static pressure ratio.
Arguments:
M (scalar): Mach number [units: dimensionless].
gamma (scalar): Heat capacity ratio [units: dimensionless].
Returns:
scalar: the stagnation pressure ratio :math:`p_0 / p` [units: dimensionless].
"""
return (1 + (gamma - 1) / 2 * M**2)**(gamma / (gamma - 1))
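# Worked example (illustrative sketch):
# For air (gamma = 1.4) at Mach 1: (1 + 0.2)**3.5 ≈ 1.893.
print(round(stag_pressure_ratio(1.0, 1.4), 3))  # -> 1.893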
|
351b14716077386eadea04a4717ea9afec8fdcaf
| 696,481
|
def _listrepr(x):
""" Represent a list in a short fashion, for easy representation. """
try:
len(x)
except TypeError:
return None
else:
return '<%d element%s>' % (len(x), 's'[:len(x) >= 2])
|
b05baac1c9969153b14d46276f3ac7b897449bc5
| 339,418
|
def decode_extra_distance(bits, dist):
"""Decode extra bits for a match distance symbol."""
assert dist <= 29
if dist >= 4:
        extra = (dist - 2) // 2
if extra:
ebits = bits.read(extra)
dist = 2**(extra+1) + ((dist % 2) * (2**extra)) + ebits
dist += 1
return dist
|
3fa6c7612a7f6d53bac737a8db56834fb953b261
| 372,642
|
def decodeXMLName(name):
"""
Decodes an XML (namespace, name) pair from an ASCII string as
encoded by encodeXMLName().
"""
def invalid():
raise ValueError("Invalid encoded name: %r" % (name,))
if not name:
invalid()
    if name[0] == "{":
index = name.find("}")
        if (index == -1 or not len(name) > index):
invalid()
namespace = name[1:index].decode("utf-8")
localname = name[index + 1:].decode("utf-8")
if not namespace:
namespace = None
if not localname:
invalid()
else:
namespace = None
localname = name.decode("utf-8")
if "{" in localname or "}" in localname:
invalid()
return (namespace, localname)
|
39f0f8977b2c897b80b4c022d4a474a16fa1832a
| 616,067
|
def filter_traceback(tb_list):
"""Given a traceback as a list of strings, looks for common boilerplate and removes it."""
# List of boiler plate pattern to look for, with example before each
# The match pattern is just some string that the line needs to contain
"""
File "asynq/async_task.py", line 169, in asynq.async_task.AsyncTask._continue
File "asynq/async_task.py", line 237, in asynq.async_task.AsyncTask._continue_on_generator
File "asynq/async_task.py", line 209, in asynq.async_task.AsyncTask._continue_on_generator
"""
TASK_CONTINUE = (
[
"asynq.async_task.AsyncTask._continue",
"asynq.async_task.AsyncTask._continue_on_generator",
"asynq.async_task.AsyncTask._continue_on_generator",
],
"___asynq_continue___",
)
"""
File "asynq/decorators.py", line 161, in asynq.decorators.AsyncDecorator.__call__
File "asynq/futures.py", line 54, in asynq.futures.FutureBase.value
File "asynq/futures.py", line 63, in asynq.futures.FutureBase.value
File "asynq/futures.py", line 153, in asynq.futures.FutureBase.raise_if_error
File "<...>/python3.6/site-packages/qcore/errors.py", line 93, in reraise
six.reraise(type(error), error, error._traceback)
File "<...>/python3.6/site-packages/six.py", line 693, in reraise
raise value
"""
FUTURE_BASE = (
[
"asynq.decorators.AsyncDecorator.__call__",
"asynq.futures.FutureBase.value",
"asynq.futures.FutureBase.value",
"asynq.futures.FutureBase.raise_if_error",
"reraise",
"six.reraise",
"reraise",
"value",
],
"___asynq_future_raise_if_error___",
)
"""
File "asynq/decorators.py", line 153, in asynq.decorators.AsyncDecorator.asynq
File "asynq/decorators.py", line 203, in asynq.decorators.AsyncProxyDecorator._call_pure
File "asynq/decorators.py", line 203, in asynq.decorators.AsyncProxyDecorator._call_pure
File "asynq/decorators.py", line 204, in asynq.decorators.AsyncProxyDecorator._call_pure
File "asynq/decorators.py", line 275, in asynq.decorators.async_call
"""
CALL_PURE = (
[
"asynq.decorators.AsyncDecorator.asynq",
"asynq.decorators.AsyncProxyDecorator._call_pure",
"asynq.decorators.AsyncProxyDecorator._call_pure",
"asynq.decorators.AsyncProxyDecorator._call_pure",
"asynq.decorators.async_call",
],
"___asynq_call_pure___",
)
REPLACEMENTS = [TASK_CONTINUE, FUTURE_BASE, CALL_PURE]
# iterate through the lines of the traceback and replace multiline
# segments that match one of the replacements
output = []
i = 0
while i < len(tb_list):
did_replacement = False
# for each replacement, try checking if all lines match
# if so, replace with the given replacement
for (text_to_match, replacement) in REPLACEMENTS:
matches = True
j = 0
while j < len(text_to_match) and (i + j) < len(tb_list):
if text_to_match[j] not in tb_list[i + j]:
matches = False
break
j += 1
if matches and j == len(text_to_match):
# formatted to match default indentation level.
output.append(" " + replacement + "\n")
i = i + j
did_replacement = True
break
if not did_replacement:
output.append(tb_list[i])
i += 1
return output
|
ce883c3d2c9118125b41701b1c981bfa8583011b
| 658,304
|
def ChapmanKolmogorovTest(MSM_object, nsets, memberships=None, error_estimation=False, mlags=2):
    """ Perform the Chapman-Kolmogorov test to validate the MSM"""
    return MSM_object.cktest(nsets, memberships=memberships, err_est=error_estimation, mlags=mlags)
|
23c0560c16f78ce0562f82ed642dea78f6c14289
| 76,813
|
def array_is_in_list(array, l):
"""Checks if an np.array 'array' is in the list 'l'."""
for item in l:
if (array == item).all():
return True
return False
|
c74c80f34cfda528f98b4b7558710906e71f62a5
| 299,380
|
def concatenated(lst, element):
"""
    appends `element` to `lst` in place and
    returns lst
"""
lst.append(element)
return lst
|
66b68bc4c043c8cd223f8f39766c01ec6f39ddc4
| 692,233
|
import random
def create_coordinates(n, coordsmin, coordsmax):
""" Returns a list of n distinct numbers in the range coordsmin <= c <= coordsmax. min/max should be integers."""
return random.sample(range(coordsmin, coordsmax + 1), n)
|
8a37ebb1fc239fa09e2e18e15a06fb6e20d9078e
| 400,977
|
import importlib
def conditional_jit(function=None, **kwargs): # noqa: D202
"""Use numba's jit decorator if numba is installed.
Notes
-----
If called without arguments then return wrapped function.
@conditional_jit
def my_func():
return
else called with arguments
@conditional_jit(nopython=True)
def my_func():
return
"""
def wrapper(function):
try:
numba = importlib.import_module("numba")
return numba.jit(**kwargs)(function)
except ImportError:
return function
if function:
return wrapper(function)
else:
return wrapper
|
b197fbf68d5ba42b932fa6e0190d235949fcd36c
| 366,223
|
def ConstructNameFilterExpression(requested_name_regexes):
"""Construct a name filter expression.
Args:
requested_name_regexes: A list of name regular expressions that can
be used to filter the resources by name on the server side.
Returns:
A string expression suitable for the requested names, or None if
requested_name_regexes is None.
"""
if requested_name_regexes:
if len(requested_name_regexes) == 1:
return 'name eq {0}'.format(requested_name_regexes[0])
else:
regexes = []
for regex in requested_name_regexes:
regexes.append('({0})'.format(regex))
return 'name eq {0}'.format('|'.join(regexes))
else:
return None
|
dc9fc587abab52e400866b6675102a48533431a1
| 668,090
|
def construct_absolute_url(url):
"""Turn a relative URL into an absolute URL"""
return "https://www.consumerfinance.gov" + url
|
7bffc3906a6106c24bd93a260ad71d73321ad7ff
| 481,247
|
def get_device(module):
"""return device of module."""
return next(module.parameters()).device
|
f4bf30cffd4a6dc318cb40516b3b411fe2f7c517
| 167,864
|
def get_vars(triple):
"""Get variables in a triple pattern"""
return set([v for k, v in triple.items() if v.startswith('?')])
|
c47c74d30fdfa655a5bc8404119fa183cf9713c8
| 393,021
|
def get_loc_from_dict(dct, loc):
"""
Take a string loc and return a sub-dict corresponding to that loc.
i.e. "foo.bar" would return dict['foo']['bar']
empty string returns top-level dict.
"""
if loc == "":
        return dct
else:
locs = loc.split(".")
d = dct
for ll in locs:
try:
d = d[ll]
except KeyError:
raise KeyError(f"loc {loc} does not exist in dict {dct}")
return d
|
5762ef16c289dcc6770236e4a864799bdb7749f9
| 349,133
|
import math
def dp(
emission1: float,
gndaz1: float,
emission2: float,
gndaz2: float,
radar=False,
):
"""Returns the Parallax/Height Ratio (dp) as detailed in
Becker et al.(2015).
The input angles are assumed to be in radians. If *radar* is true,
then cot() is substituted for tan() in the calculations.
Physically, dp represents the amount of parallax difference
that would be measured between an object in the two images, for
unit height.
"""
def emission_trig(emi: float):
e = math.tan(emi)
if radar:
# Cotangent is 1 / tan()
return 1 / e
else:
return e
def px(emi: float, scazgnd: float):
return -1 * emission_trig(emi) * math.cos(scazgnd)
def py(emi: float, scazgnd: float):
return emission_trig(emi) * math.sin(scazgnd)
px1 = px(emission1, gndaz1)
px2 = px(emission2, gndaz2)
py1 = py(emission1, gndaz1)
py2 = py(emission2, gndaz2)
return math.sqrt(math.pow(px1 - px2, 2) + math.pow(py1 - py2, 2))
|
7bebf9843cde714de72ea2345c66acf7beb0e5b2
| 497,073
|
from typing import List
def generate_conclusion(definition: str) -> List[str]:
"""Generate header file conclusion lines"""
return [
'\n',
'#ifdef __cplusplus\n',
'}\n',
'#endif\n',
'\n',
f'#endif // {definition}\n'
]
|
e94714bdc9c8efe47e8d97b9029b9e8bed92bab7
| 588,571
|
def _tosigfigs(x, num_figs):
"""Rounds x to the specified number of significant figures."""
num_figs_str = '%d' % num_figs
return float(('%.' + num_figs_str + 'g') % x)
|
61506a42fc025ba1af5a5686420afc4f90c7cd81
| 438,123
|
def to_dict(_str, pair_sep=',', kv_sep='='):
"""
Convert string to dict
:param _str: str
Example:
a=5,b=6,c=abc
:param pair_sep: separator for each pair
:param kv_sep: separator for key and value
:return: dict
"""
if not isinstance(_str, str):
raise TypeError
return {item[0].strip(): item[1].strip() for item in
[_item.strip().split(kv_sep, 1) for _item in _str.split(pair_sep) if kv_sep in _item]}
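# Example usage (illustrative sketch, not part of the original snippet):
print(to_dict('a=5, b=6, c=abc'))                    # -> {'a': '5', 'b': '6', 'c': 'abc'}
print(to_dict('x:1;y:2', pair_sep=';', kv_sep=':'))  # -> {'x': '1', 'y': '2'}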
|
05575396886d3780164161606912b40e1e914e56
| 280,687
|
import csv
def file_to_dicts_lists(file_path):
"""Function make list of dictionaries with quotes from given file
Args:
file_path (str): Path to file with qoutes
Returns:
list: List with dictionaries with qoutes
"""
with open(file_path, "r") as f:
dicts_list = []
reader = csv.DictReader(f)
for line in reader:
dicts_list.append(line)
return dicts_list
|
a74622ec09b2cdf1a1c463c77f540507df25fb84
| 232,557
|
def progessbar(new, tot):
"""Builds progressbar
Args:
new: current progress
tot: total length of the download
Returns:
progressbar as a string of length 20
"""
length = 20
progress = int(round(length * new / float(tot)))
percent = round(new/float(tot) * 100.0, 1)
bar = '=' * progress + '-' * (length - progress)
return '[%s] %s %s\r' % (bar, percent, '%')
|
119cec7121167c5bcb23571cab4f33e4a816e521
| 675,881
|
def clean_string(input_str: str) -> str:
"""Format a string by removing invisible caracters and trailing spaces
Args:
input_str (str): input string
Returns:
str: trimed and cleaned string
"""
return ''.join(c for c in input_str if c.isprintable()).strip(" ")
|
c62911743ec5e711fe3fa7cc78e565e8443bdf14
| 221,333
|
def bbox_from_planet_feature(planet_feature):
"""Extract the tile footprint from a planet feature
Args:
planet_feature (dict): planet feature to extract bbox from
Returns:
list: coords of the feature's bbox
"""
# it's a geojson polygon, so drill in
coords = planet_feature['geometry']['coordinates'][0]
xs = [p[0] for p in coords]
ys = [p[1] for p in coords]
min_x = min(xs)
max_x = max(xs)
min_y = min(ys)
max_y = max(ys)
return [[[[min_x, min_y],
[max_x, min_y],
[max_x, max_y],
[min_x, max_y],
[min_x, min_y]]]]
|
8ceb650720d6b0fa62c45bd6e1ada74c2df5e0dd
| 575,732
|
import math
import random
def random_dna(length, a=0.25, c=0.25, g=0.25, t=0.25):
"""
Generates random nucleotide sequence.
Parameters
----------
+ length `int` nucleotide sequence
+ a= `float` probability of A
+ c= `float` probability of C
+ g= `float` probability of G
+ t= `float` probability of T
"""
assert(math.isclose(a+c+g+t, 1.0))
seq = ''
for i in range(length):
r = random.random()
if r < a: seq += 'A'
elif r < a+c: seq += 'C'
elif r < a+c+g: seq += 'G'
else: seq += 'T'
return seq
|
53951403723e0235289e2c8f4fcd872cc2fe0f9a
| 420,345
|
def dec2bin(d, nb=8):
"""
Convert dec to binary
"""
if d == 0:
return "0".zfill(nb)
if d < 0:
d += 1 << nb
b = ""
while d != 0:
d, r = divmod(d, 2)
b = "01"[r] + b
return b.zfill(nb)
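# Example usage (illustrative sketch, not part of the original snippet):
print(dec2bin(5))      # -> 00000101
print(dec2bin(-1, 4))  # -> 1111 (two's complement in 4 bits)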
|
b3cd5b09dec612256d158e770e3c666ae037cbd6
| 261,772
|
def avg_word_len(input):
"""Find the average sentence length."""
lens = [len(x) for x in input.split()]
return sum(lens) / len(lens)
|
1760eb51e5e7df9f8aebb65f29eee961af10286d
| 355,615
|
def dec2hex(number):
"""return a decimal number into its hexadecimal form"""
return hex(number)[2:]
|
89caaaebe3b6964aa74e886336d1a3ada5fcbfc0
| 384,992
|
def recursiveIndex(nestedList, query):
"""
Find index of element (first occurrence) in an arbitrarily nested list.
Args:
nestedList(list): list object to search in
query: target element to find
Returns:
list: Position indices
"""
for index, element in enumerate(nestedList):
if isinstance(element, (list, tuple)):
path = recursiveIndex(element, query)
if path:
return [index] + path
if element == query:
return [index]
return []
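# Example usage (illustrative sketch, not part of the original snippet):
print(recursiveIndex([1, [2, [3, 4]], 5], 4))  # -> [1, 1, 1]
print(recursiveIndex([1, 2, 3], 99))           # -> [] (not found)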
|
6386feee441e6c687f1b0b68e8e319ca79653041
| 109,796
|
def get_color_points(surface, color, bounds_rect=None, match_color=True):
"""Get all the points of a given color on the surface within the given
bounds.
If bounds_rect is None the full surface is checked.
If match_color is True, all points matching the color are returned,
otherwise all points not matching the color are returned.
"""
get_at = surface.get_at # For possible speed up.
if bounds_rect is None:
x_range = range(surface.get_width())
y_range = range(surface.get_height())
else:
x_range = range(bounds_rect.left, bounds_rect.right)
y_range = range(bounds_rect.top, bounds_rect.bottom)
surface.lock() # For possible speed up.
if match_color:
pts = [(x, y) for x in x_range for y in y_range if get_at((x, y)) == color]
else:
pts = [(x, y) for x in x_range for y in y_range if get_at((x, y)) != color]
surface.unlock()
return pts
|
c8a0598a556a00fa28c38fb4558d99575f2aeb32
| 582,051
|
def to_binary(num, bitsNum):
""" Convert a interger to a binary string
Args:
num: the interger to be converted to binary
bitsNum: the number of bits of the binary string
Returns:
the binary string
"""
    return format(num, '0>' + str(bitsNum) + 'b')
|
990f359c79ee403f85e63fa2485b5bfcea7bbc16
| 272,432
|
def sentence_from_ids(vocabulary, ids):
"""
Convenience method, for converting a sequence of ids to words.
    :param vocabulary: Language, the language object used for the lookup.
:param ids: ids, representations of words.
:return: list, containing the ids (int) of the sentence in the same order.
"""
return [vocabulary(word_id) for word_id in ids]
|
12a62d92922991ab7dc5110a5dee6ff38c93757b
| 267,476
|
def get_gui_widgets(gui, **kwargs):
""" Returns the GUI widget objects specified in kwargs
:param gui: (Window) main window gui object containing other widgets
:param kwargs: keyword arguments with argument name being the name
of the widget (str, widget_name) and argument value an integer specifying the
number of copies of that widget
For more than 1 widget copy, assumes the name is assigned as
widget_name_1, widget_name_2, etc.
:return: (dict) dictionary with keywords as widget name and values
as either individual widgets or list of widgets in case of multiple
similarly named widgets
"""
widgets = dict()
for widget_name, widget_number in kwargs.items():
# Check if it is multiple named widgets
if widget_number > 1:
widget_list = []
for widget_index in range(widget_number):
widget_list.append(getattr(
gui,
f'{widget_name}_{widget_index+1}'
))
widgets[widget_name] = widget_list
else:
widgets[widget_name] = getattr(gui, widget_name)
return widgets
|
35ef63e667f88b5a56e21b168a1e343cb59e72f6
| 465,691
|
def close(value: float, target: float = 0.0, epsilon: float = 0.001) -> bool:
""" returns True is value is within epsilon of target """
if (target - epsilon) <= value <= (target + epsilon):
return True
return False
|
b1264b3413f62331e9a8e6ec581b662025ebb1cb
| 484,240
|
def create_data_string(num_bytes):
"""
    Returns an array of '1' bytes of size 'num_bytes'.
:param num_bytes: Number of desired bytes, must be greater than 0.
:type num_bytes int
:return: num_bytes <=0 would return an empty binary string.
:rtype: bytearray
"""
if num_bytes <= 0:
return b''
return bytearray([1] * num_bytes)
|
dc4062dca86bd3f5e1c0570c2cf8f8fb48753000
| 146,007
|
def get_symbol_module(sym):
""" Get the module name belonging to a symbol """
return sym.namespace.name[0]
|
542311fd785df4474df6012576d95278e6805b2d
| 84,471
|
def parse_input(instructions):
"""Parse the parentheses into floor-change deltas (1, -1)."""
instruction_values = {"(": 1, ")": -1}
return [instruction_values[char] for char in instructions]
|
5307e55c01ddf24ebb7589da5be26aaf7c5a4756
| 171,907
|
def get_child_processes(process):
"""Get all running child processes recursively"""
child_processes = set()
for p in process.children(recursive=True):
child_processes.add(p)
return child_processes
|
1dc691e0b91aa0c292975f4ebc5b244e3ec5acb2
| 292,324
|
def repeat_interleave(input, repeats, dim=0):
"""
Repeat and interleave a tensor along a given dimension. The current PyTorch
implementation of `repeat_interleave` is slow and there is an open ticket for it:
https://github.com/pytorch/pytorch/issues/31980
Args:
input (torch.Tensor): Tensor containing the input data.
repeats (int): The number of repetitions for each element.
dim (int). The dimension along which to repeat values.
Returns:
torch.Tensor: Repeated tensor which has the same shape as input, except along
the given axis.
"""
    # Insert a new axis right after `dim`, expand it to `repeats` copies, then fold it into `dim`.
    output = input.unsqueeze(dim + 1).expand(
        *input.shape[:dim + 1], repeats, *input.shape[dim + 1:]
    )
    output_shape = input.shape[:dim] + (input.shape[dim] * repeats,) + input.shape[dim + 1:]
    return output.reshape(output_shape)
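# Example usage (illustrative sketch; assumes PyTorch is available):
import torch
t = torch.tensor([[1, 2], [3, 4]])
print(repeat_interleave(t, 2, dim=0))
# -> tensor([[1, 2],
#            [1, 2],
#            [3, 4],
#            [3, 4]])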
|
a35411fb46a48b4a485604106298bb54e9c94a50
| 624,232
|
def cleanup_ip(ip):
"""
Given ip address string, it cleans it up
"""
ip = ip.strip().lower()
if (ip.startswith('::ffff:')):
return ip.replace('::ffff:', '')
return ip
|
f5b47a03ff2919775b0bcb6a3c74688271ab9f50
| 124,286
|
def get_jittering(seed_rng, scale_size, translate_size, rotation_size, img_width):
"""
This method returns uniform random values for scaling, translation, and rotation
in the range given by user.
Parameters:
-------------
scale_size: float in [0,1]
the maximum ratio by which image can be scaled.
        a value of 0.2 returns a random scale in [0.8, 1.2]
    translate_size:
        the relative maximum number of pixels by which the image can be translated.
        a value of 0.2 returns a random translate size in
        [-0.2 * img_width, 0.2 * img_width]
    rotation_size:
        the maximum degree by which the image can be rotated.
returns:
---------
scale: float
the random scale value in range [1 - scale_size, 1 + scale_size]
transalte: float
the random translate pixel size in range
[- translate_size * img_width, translate_size * img_width]
rotate: int
the random rotation degree in the range [-rotation_size, rotation_size]
"""
# getting random scaling size
scale_jitter = seed_rng.uniform(-scale_size, scale_size)
scale = 1.0 + scale_jitter
# getting random translation size
translate_jitter = seed_rng.uniform(-translate_size, translate_size, size=(2))
translate = img_width * translate_jitter
# getting random rotation size
rotate = seed_rng.random_integers(-rotation_size, rotation_size)
return [scale, translate, rotate]
|
d257377cfcf88a50011b2c477ef6968ba8598c9b
| 654,806
|
def GetByPath(d, field_list):
"""Returns the value of a nested field of a dict of dicts."""
for i, f in enumerate(field_list):
try:
d = d[f]
except (KeyError, ValueError):
# Re-raise with a more sensible error message.
raise KeyError('Can\'t find field %s.' % '.'.join(field_list[:i+1]))
return d
|
e441bc9602776871fb9be435ab415c9e1c972fd1
| 261,383
|
def getMultiMessageAttributeIndex(multiMessageAttr, incomingAttr):
"""Returns the index of the incoming attribute connection to the given multi attribute."""
connectionList = multiMessageAttr.listConnections(plugs=True)
for connection in connectionList:
if connection == incomingAttr:
return connectionList.index(connection)
|
8bee962c511c24899ac32eb52dcac9f5b0862a75
| 208,168
|
def is_half(char):
"""是否半角字符
Examples:
>>> is_half(',')
True
>>> is_half('。')
False
"""
if isinstance(char, str):
char = ord(char)
return 0x0020 <= char <= 0x007E
|
04f2b1bbd765aba1a95db2b7b2cc9e1cd2b1659f
| 132,603
|