content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import re
def load(filename):
    """Load the set of hand-tagged jargon terms from *filename*.

    Each line matching the ``<JARGON ... text="..." .../>`` pattern
    contributes its captured ``text`` attribute.

    Args:
        filename (str): Path to the hand-tagged file.

    Returns:
        set[str]: The unique extracted terms.
    """
    pattern = re.compile(r'<JARGON.*text=\"(.*?)\".*/>')
    terms = set()
    # `with` guarantees the handle is closed even if iteration raises;
    # the original leaked the file object on error.
    with open(filename) as f:
        for line in f:
            m = pattern.match(line)
            if m:
                terms.add(m.group(1))
    return terms
def get_covid_articles_count(articles_date, conn):
    """Return the number of covid-related articles published on a date.

    Args:
        articles_date: Date (or ISO date string) to match against the
            ``published`` column.
        conn: Open DB-API connection (psycopg2-style ``%s`` paramstyle).

    Returns:
        int: The article count.
    """
    cur = conn.cursor()
    try:
        # Parameterized query: interpolating articles_date into the SQL
        # string (as the original f-string did) allowed SQL injection.
        cur.execute(
            "SELECT count(*) as cnt FROM articles "
            "WHERE published::date = %s AND covid_related = true;",
            (articles_date,),
        )
        result = cur.fetchone()
    finally:
        # Close the cursor even when execute/fetch raises.
        cur.close()
    return result[0]
def response_topic_name(topic, id):
    """Build the response-topic name for *topic* and correlation *id*."""
    return '{}.response.{}'.format(topic, id)
def is_cybox(entity):
    """Return True when *entity* is defined in a ``cybox.`` module."""
    module_name = getattr(entity, "__module__", "")
    try:
        return module_name.startswith("cybox.")
    except AttributeError:
        # __module__ exists but is not string-like.
        return False
def lang_normalize(lang):
    """Return the canonical (lower-case) representation of a language code."""
    normalized = lang.lower()
    return normalized
def dump_result(filename, every=10):
    """Return a ``check_convergence`` routine that periodically dumps the result.

    For long-running optimizations it is useful to persist the current
    :class:`.Result` every so often so that a crash does not lose progress.
    The returned routine does nothing except call :meth:`.Result.dump`
    every `every` iterations; a failed write stops the optimization.

    Args:
        filename (str): Name of the dump file. May contain an ``{iter}``
            field, formatted with the current iteration number via
            :meth:`str.format`. Existing files are overwritten.
        every (int): Dump interval in iterations. Choose it so dumps happen
            at most once every few minutes; dumping each iteration adds
            I/O overhead.

    Raises:
        ValueError: If `every` is not a positive integer.
    """
    every = int(every)
    if every <= 0:
        raise ValueError("every must be > 0")

    def _dump_result(result):
        iteration = result.iters[-1]
        # Not a dump iteration: report "no convergence reason".
        if iteration % every:
            return None
        outfile = filename.format(iter=iteration)
        try:
            result.dump(outfile)
        except IOError as exc_info:
            # Returning a message signals the optimization to stop.
            return "Could not store %s: %s" % (outfile, exc_info)
        return None

    return _dump_result
def _calc_pred_mse(tau, level):
"""Calculate a scaled predicted MSE based on
the predicted noise variance and the level of wavelet decomposition.
Args:
tau (list): predicted noise variance in each subband in the "wavelet" format.
Returns:
pred_mse (float): scaled predicted MSE.
Note:
The predicted MSE is used to determine whether to stop D-VDAMP early.
"""
pred_mse = 0
pred_mse += tau[0] * (4 ** (-level))
for b in range(level):
weight = 4 ** (b - level)
for s in range(3):
pred_mse += tau[b + 1][s] * weight
return pred_mse | 44a008601f8aea96ea701d7b9b4585559c9e0690 | 126,576 |
import select
import errno
def _eintr_retry(func, *args):
"""restart a system call interrupted by EINTR"""
while True:
try:
return func(*args)
except (OSError, select.error) as e:
if e.args[0] != errno.EINTR:
raise | 2a09f08315ddc24ae7c2acbb716a0e1186a6f728 | 126,577 |
def dictvals(dictionary):
    """Return the dictionary's values, unpacking each from a containing list.

    If the values are not subscriptable (or are empty sequences), the
    values are returned as-is.

    :param dictionary: the dictionary to be unpacked
    :returns: [list]
    """
    values = dictionary.values()
    try:
        return [item[0] for item in values]
    except (IndexError, TypeError):
        # Values are empty or not indexable: fall back to the raw values.
        return list(values)
def to_boolean(input_string: str):
    """Map "true"/"false" (any capitalization) to True/False; anything else to None."""
    lowered = input_string.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    return None
def getIndexName(strTableName, index):
    """Return the index's explicit name, or derive ``idx_<tablename>_<colNames>``.

    Args:
        strTableName: Name of the table the index belongs to.
        index: DOM-like element exposing ``getAttribute`` for the
            ``name`` and ``columns`` attributes.

    Returns:
        str: The index name.
    """
    strIndexName = index.getAttribute("name")
    if strIndexName and len(strIndexName) > 0:
        return strIndexName
    cols = [col.strip() for col in index.getAttribute("columns").split(',')]
    # Join table name and columns with underscores.  The original omitted
    # the separator between the table name and the first column, producing
    # e.g. "idx_tablecol1_col2" instead of the documented form.
    return "idx_" + "_".join([strTableName] + cols)
import gzip
def _extract_fileobj(filepath):
"""
Checks to see if a file is compressed, and if so, extract it with gzip
so that the uncompressed file can be returned.
It returns a file object containing XML data that will be ingested by
``xml.etree.ElementTree.iterparse``.
Args:
filepath: A path-like object or str.
Returns:
_io.BufferedReader or gzip.GzipFile: A file object containing XML data.
"""
with open(filepath, "rb") as gzip_file:
header = gzip_file.read(2)
gzip_magic_number = b"\x1f\x8b"
return (
gzip.GzipFile(filepath) if header == gzip_magic_number else open(filepath, "rb")
) | fcdae8bdde223d289cc5058fa75323f01422c43c | 126,587 |
import inspect
def from_module(module, object):
    """
    Return true if the given object is defined in the given module.
    """
    # No module given: everything is considered a match.
    if module is None:
        return True
    found = inspect.getmodule(object)
    if found is not None:
        return module is found
    # getmodule failed: fall back to cheaper heuristics, in order.
    if inspect.isfunction(object):
        return module.__dict__ is object.__globals__
    if inspect.isclass(object) or hasattr(object, '__module__'):
        return module.__name__ == object.__module__
    if isinstance(object, property):
        return True  # [XX] no way not be sure.
    raise ValueError("object must be a class or function")
from typing import ValuesView
def create_characters_list_text(characters_list: ValuesView) -> str:
    """Render a sequence of character dicts as one line per character."""
    parts = [
        f'{character["name"]} - {character["height"]} см: {character["url"]}'
        for character in characters_list
    ]
    return '\n'.join(parts)
def parse_first_line_header(line):
    """Extract the station code from the first header line (field 1, before '_')."""
    station_code_name = line[1]
    return station_code_name.split('_')[0]
import math
def closest_column(board, mouse_pos, settings):
    """
    Return the column number in `board` closest to the given mouse position,
    or None when the click falls outside the board area.

    Args:
        board: Object exposing ``cell_length`` (pixel width of one cell).
        mouse_pos: (x, y) pixel coordinates of the click.
        settings: Object exposing ``padding_left``, ``padding_top`` and
            ``board_size`` as (width, height).
    """
    x_pos = mouse_pos[0] - settings.padding_left
    y_pos = mouse_pos[1] - settings.padding_top
    # Restrict the clicked position to be within the board.  The original
    # compared the y coordinate against the board *width* (board_size[0]);
    # it must be checked against the height (board_size[1]).
    if x_pos < 0 or x_pos > settings.board_size[0]:
        return None
    if y_pos < 0 or y_pos > settings.board_size[1]:
        return None
    return int(math.floor(x_pos / board.cell_length))
import socket
def in6_isvalid(address):
    """Return True if 'address' is a valid IPv6 address string, False
    otherwise."""
    try:
        socket.inet_pton(socket.AF_INET6, address)
    except Exception:
        # Any parse/type failure means the address is not valid.
        return False
    return True
def join_endpoints(endpoints, A, B):
    """
    Join B's segment onto the end of A's and return the merged segment.

    Args:
        endpoints: dict mapping an endpoint node to the segment (list)
            it terminates; each segment is referenced from both of its
            end nodes.
        A: node that must end up at the tail of its segment before joining.
        B: node that must end up at the head of its segment before joining.

    Returns:
        tuple: (merged segment, the updated ``endpoints`` dict).

    Note:
        Mutates the segment lists and ``endpoints`` in place; A and B
        become interior nodes and are removed from ``endpoints``.
    """
    Asegment, Bsegment = endpoints[A], endpoints[B]
    # A must be at the end of Asegment
    if Asegment[-1] is not A:
        Asegment.reverse()
    # B must be at the beginning of Bsegment
    if Bsegment[0] is not B:
        Bsegment.reverse()
    Asegment.extend(Bsegment) # connect
    del endpoints[A], endpoints[B] # A and B are no longer endpoints
    # register the merged segment under its two (possibly new) endpoints
    endpoints[Asegment[0]] = Asegment
    endpoints[Asegment[-1]] = Asegment
    return Asegment, endpoints
def parse_ped(pedfile):
    """Parse a PED file into a dictionary of {lineid: [PED_data]}.

    The line ID is taken from the second whitespace-separated column.
    """
    ped_data = {}
    with open(pedfile, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.strip().split()
            # Column 1 holds the line ID; keep the full record as value.
            ped_data[fields[1]] = fields
    return ped_data
def _find_continuous_segment(numbers):
"""Find the longest continuous segment in a list of numbers.
For example:
input:
1, 2, 3, 4, 5, 6
22,70,23,24,25,26
output:
number_list_sorted:
1, 3, 4, 5, 6, 2
22,23,24,25,26,70
segments:
0, 1, 5, 6
which means there are 3 segment with start and end indices
on the number_list_sorted to be: (0, 1), (1, 5), (5, 6)
Args:
numbers: List of pairs of number
Returns:
segment: a list containing the indices of the segment start point
and end point.
number_list: sorted by the first element version of the input.
"""
segments = [0]
number_list_sorted = sorted(numbers, key=lambda elem: elem[0])
for i in range(len(number_list_sorted) - 1):
# continuous segment is a segment which the number in pair is 1 unit
# more than the previous pair
if (number_list_sorted[i + 1][0] - number_list_sorted[i][0] != 1
or number_list_sorted[i + 1][1] - number_list_sorted[i][1] != 1):
segments.append(i + 1)
segments.append(len(number_list_sorted))
return segments, number_list_sorted | 85f083f88ad8f72b06583315a9b74850e47bfb0a | 126,607 |
def rec_copy(d: dict) -> dict:
    """Recursively copy a dict (local stand-in for copy.deepcopy, since
    transcrypt cannot import the copy module).

    Only handles values that are dicts or scalar types; non-dict values
    are shared, nested dicts are copied.
    """
    return {
        key: rec_copy(value) if isinstance(value, dict) else value
        for key, value in d.items()
    }
def quote(s):
    """
    Encodes a byte string using URL encoding. This replaces bytes with their
    URL-encoded equivalent %XX where XX is the hexadecimal value of the byte.
    In particular, the bytes that are encoded include:
    1. Bytes < 0x20 or >= 0x7F (C0 and C1 control codes)
    2. Quote/apostrophe (0x27), double quote (0x22), and percent (0x25)
    This will always return a byte string. If the argument is a unicode string
    then it will be utf-8 encoded before being quoted.
    """
    if not isinstance(s, bytes):
        if isinstance(s, str):
            # Unicode input is quoted over its UTF-8 encoding.
            s = s.encode("utf-8")
        else:
            raise TypeError("Not a string: %s" % str(s))
    # Printable specials that must still be escaped.
    specials = {0x25: "%25", 0x27: "%27", 0x22: "%22"}
    out = []
    for byte in s:
        if byte < 0x20 or byte >= 0x7F:
            # Control characters and multibyte-sequence bytes.
            out.append("%%%02X" % byte)
        elif byte in specials:
            out.append(specials[byte])
        else:
            out.append(chr(byte))
    return "".join(out)
def is_redirect(page):
    """
    Check whether a parsed wiki page is a redirect.

    Returns
    --------
    bool
        True if the page contains a <redirect> element, else False.
    """
    return len(page.getElementsByTagName('redirect')) > 0
from typing import List
def reconstruct(nums: List[str]) -> List[int]:
    """
    Assume the first number is 0 and track running extremes:
    every "+" bumps the current maximum and records it, every "-"
    drops the current minimum and records it.  Shifting the whole
    list by -min_num at the end yields the reconstructed sequence.
    """
    hi = lo = 0
    res = [0] * len(nums)
    for index, token in enumerate(nums):
        if index == 0:
            continue  # position 0 is the assumed starting value
        if token == "+":
            hi += 1
            res[index] = hi
        elif token == "-":
            lo -= 1
            res[index] = lo
    return [value - lo for value in res]
def rk2_first_order_method(f, y, dx, range):
    """Approximate a first-order ODE solution with the midpoint (RK2) method.

    :param f: Derivative function f(x, y) to approximate a solution for.
    :type f: callable
    :param y: Initial value of y at the left end of the domain.
    :type y: float, int
    :param dx: Step size; smaller is more accurate.
    :type dx: float
    :param range: Two-element list giving the domain endpoints.
        (The name shadows the builtin but is kept for interface
        compatibility.)
    :type range: list
    :return: (x coordinates, y coordinates) approximating the solution.
    :rtype: tuple(list, list)
    """
    x = min(range)
    x_space = [x]
    y_space = [y]
    upper = max(range)
    while x <= upper:
        # Midpoint slope estimate (classic RK2).
        yp_mid = f(x + 1/2*dx, y + 1/2*dx*f(x, y))
        y += yp_mid*dx
        x += dx
        x_space.append(x)
        y_space.append(y)
    return (x_space, y_space)
def extract_mid(a, npixel):
    """
    Extract a section from middle of a map
    Suitable for zero frequencies at npixel/2. This is the reverse
    operation to pad.
    .. note::
        Only the two innermost axes are transformed
    :param npixel: desired size of the section to extract
    :param a: grid from which to extract
    """
    ny, nx = a.shape[-2:]
    cy = ny // 2
    cx = nx // 2
    s = npixel // 2
    # Axis -2 has size ny and must be sliced around cy; axis -1 has size nx
    # and must be sliced around cx.  The original swapped the two centres,
    # which silently broke extraction from non-square grids.
    if npixel % 2 != 0:
        return a[..., cy - s:cy + s + 1, cx - s:cx + s + 1]
    else:
        return a[..., cy - s:cy + s, cx - s:cx + s]
def check_if_packs_can_be_played(packs, possible_plays):
    """
    Check which packs of cards the player can play this turn.

    :param packs: list with packs of cards on hand
    :param possible_plays: list with all possible cards to play
    :return: list with possible to play packs (unique, unspecified order)
    """
    # A pack is playable when its value matches the value (card[1]) of any
    # playable card.  The original built the result via a list comprehension
    # used only for its append side effect, scanning possible_plays once per
    # pack; precomputing the value set is both idiomatic and O(n + m).
    playable_values = {card[1] for card in possible_plays}
    return list({pack for pack in packs if pack in playable_values})
def _threshold_calc(random_edge, max_edge, vertex_degree):
"""
Calculate threshold for branch_gen function.
:param random_edge: number of vertex edges
:type random_edge: int
:param max_edge : maximum edge number
:type max_edge : int
:param vertex_degree: vertex degree
:type vertex_degree: int
:return: threshold as int
"""
threshold = min(random_edge, abs(max_edge - vertex_degree))
return threshold | be50dd97241b2f5aa58c2c67bf9c52f3bde6b361 | 126,633 |
def get_project_subject_session(nrrd_file):
    """
    (str) -> tuple(str, str, str)
    Return (project, subject, session) extracted from path components
    3, 4 and 5 of the given nrrd file path.
    >>> nrrd_file = '/paulsen/MRx/PHD_032/0454/31774/ANONRAW/0454_31774_DWI-31_7.nrrd'
    >>> get_project_subject_session(nrrd_file)
    ('PHD_032', '0454', '31774')
    """
    parts = nrrd_file.split("/")
    project, subject, session = parts[3], parts[4], parts[5]
    return (project, subject, session)
import json
def get_config(config_file):
    """
    Read the config JSON file and return its "standingOrder" section
    (amount of euros to spend per trade, currencies to trade, etc.).

    Returns:
        json: the standing-order sub-object of the config file.
    """
    with open(config_file) as handle:
        config_content = json.load(handle)
    return config_content["standingOrder"]
def _get_level(path):
"""Determine the number of sub directories `path` is contained in
Args:
path (string): The target path
Returns:
int: The directory depth of `path`
"""
normalized = path
# This for loop ensures there are no double `//` substrings.
# A for loop is used because there's not currently a `while`
# or a better mechanism for guaranteeing all `//` have been
# cleaned up.
for i in range(len(path)):
new_normalized = normalized.replace("//", "/")
if len(new_normalized) == len(normalized):
break
normalized = new_normalized
return normalized.count("/") | 155c7056866163e49b023193d2ee85f583985ff6 | 126,645 |
import torch
from typing import List
def gpu_pareto_front(samples: torch.Tensor, fronts_number=None) -> List[torch.Tensor]:
    """
    Non-dominated sorting algorithm for GPU
    Args:
        samples (torch.Tensor): m x n tensor
                m: the number of samples
                n: the number of objectives.
        fronts_number (int): number of the top fronts.
                None for all the fronts.
    Returns:
        fronts (List): a list of ordered Pareto fronts
    """
    # dominate_each[i, j] == 1 when sample i is >= sample j in every
    # objective AND > in at least one (i.e. i dominates j, maximization).
    dominate_each = (samples.unsqueeze(1) >= samples.unsqueeze(0)).all(-1)
    dominate_some = (samples.unsqueeze(1) > samples.unsqueeze(0)).any(-1)
    dominate_each = (dominate_each & dominate_some).to(torch.int16)
    fronts = []
    # The diagonal doubles as a "processed" flag: it is set to -1 once a
    # sample has been assigned to a front, so the loop ends when all
    # samples are assigned.
    while (dominate_each.diagonal() == 0).any():
        # count[j] = number of samples currently dominating sample j.
        count = dominate_each.sum(dim=0)
        front = torch.where(count == 0)[0]
        fronts.append(front)
        # Remove the new front's domination influence and mark it processed.
        dominate_each[front, :] = 0
        dominate_each[front, front] = -1
        if fronts_number and len(fronts) >= fronts_number:
            break
    return fronts
import math
def min_max_nonsingleton_sizes(sets):
    """Find the minimum and maximum sizes among non-singleton sets.

    Returns (math.inf, -math.inf) when every set has size <= 1.
    """
    low, high = math.inf, -math.inf
    for size in sets.sizes():
        if size <= 1:
            continue  # singletons are ignored
        if size < low:
            low = size
        if size > high:
            high = size
    return low, high
def _uuid_and_vector_from_descriptor(descriptor):
"""
Given a descriptor, return a tuple containing the UUID and associated
vector for that descriptor
:param descriptor: The descriptor to process.
:type descriptor: smqtk.representation.descriptor_element.DescriptorElement
:return: Tuple containing the UUID and associated vector for the given
descriptor
:rtype: tuple[collections.Hashable, numpy.ndarray]
"""
return (descriptor.uuid(), descriptor.vector()) | d1d2de8e633c99ce2837a94481c9c22f635796a3 | 126,648 |
def renderable(tiddler, environ=None):
    """
    Return true if the provided tiddler's type is one
    that can be rendered by the wikitext render subsystem.
    """
    environ = environ or {}
    render_map = environ.get('tiddlyweb.config', {}).get(
        'wikitext.type_render_map', [])
    # Untyped tiddlers (or the literal string 'None') are always renderable.
    return (not tiddler.type
            or tiddler.type == 'None'
            or tiddler.type in render_map)
def fromCSV(s):
    """Convert a comma-separated string to a list of floats ([] for empty input)."""
    if not s:
        return []
    return [float(field) for field in s.split(',')]
def z2v(z, zc):
    """Convert the redshift to km/s relative to the cluster center"""
    c_km_s = 2.99792458e5  # speed of light in km/s
    return c_km_s * (z - zc) / (1 + zc)
def _unflatten(flat_data):
"""Converts a flat dict of numpy arrays to the batch tuple structure."""
o_tm1 = {
'penalties': flat_data['penalties_tm1'],
'position': flat_data['position_tm1'],
'velocity': flat_data['velocity_tm1'],
}
a_tm1 = flat_data['action_tm1']
r_t = flat_data['reward_t']
d_t = flat_data['discount_t']
o_t = {
'penalties': flat_data['penalties_t'],
'position': flat_data['position_t'],
'velocity': flat_data['velocity_t'],
}
return (o_tm1, a_tm1, r_t, d_t, o_t) | 8865d5cfc1f6f3bc62ca2704f6b5664da119d13b | 126,667 |
def comp_dice_score(tp, fp, tn, fn):
    """
    Binary dice score: 2*TP / (2*TP + FP + FN).

    Returns 1 by convention when prediction and ground truth are both
    empty (tn is unused but kept for a uniform confusion-matrix API).
    """
    if tp + fp + fn == 0:
        return 1
    return (2 * tp) / (2 * tp + fp + fn)
import math
def rotate_around_point_highperf(xy, radians, origin=(0, 0)):
    """Rotate a point around a given origin.

    The "high performance" variant: the sin/cos of the angle and the
    offset-adjusted coordinates are each computed exactly once.
    """
    px, py = xy
    origin_x, origin_y = origin
    dx = (px - origin_x)
    dy = (py - origin_y)
    cos_rad = math.cos(radians)
    sin_rad = math.sin(radians)
    rotated_x = origin_x + cos_rad * dx + sin_rad * dy
    rotated_y = origin_y + -sin_rad * dx + cos_rad * dy
    return rotated_x, rotated_y
def cn(uc, w, eis, frisch, vphi):
    """Return optimal c, n as function of u'(c) given parameters"""
    c = uc ** (-eis)
    n = (w * uc / vphi) ** frisch
    return c, n
def guide(request):
    """
    Guide page: no template context is needed, so return an empty dict.
    """
    return dict()
def get_vertices_from_edge_list(graph, edge_list):
    """Transform a list of edge ids into the list of nodes those edges connect.

    Returns a list of unique nodes (empty list for an empty edge list).
    """
    nodes = set()
    for edge_id in edge_list:
        a, b = graph.get_edge(edge_id)['vertices']
        nodes.update((a, b))
    return list(nodes)
from enum import Enum
def dict_to_enum(name, cfg_data):
    """Dynamically create a ModelZoo enum type from model-zoo YAML data.

    :param name: (str) name of the enum
    :param cfg_data: (dict) keys become upper-cased members, values are
        their positions in the dict
    :returns: (Enum)
    """
    members = {}
    for position, key in enumerate(cfg_data):
        members[key.upper()] = position
    return Enum(name, members)
import logging
def load_joint_order(handle):
    """
    Load a joint order (one joint name per line) from the given handle.

    :param io.file handle
    :rtype: list[str]
    """
    logging.info('Loading joint order')
    joints = []
    for line in handle:
        joints.append(line.strip())
    logging.info('Loaded {} joints'.format(len(joints)))
    return joints
def safe_index(elements, value):
    """Find the location of `value` in `elements`, return -1 if `value` is
    not found instead of raising ``ValueError``.

    Parameters
    ----------
    elements : Sequence
    value : object

    Returns
    -------
    location : object

    Examples
    --------
    >>> sequence = [1, 2, 3]
    >>> safe_index(sequence, 2)
    1
    >>> safe_index(sequence, 4)
    -1
    """
    try:
        location = elements.index(value)
    except ValueError:
        location = -1
    return location
def flatten(x):
    """Flatten an array in "C" order, preserving the batch dimension.

    Mirrors TensorFlow's flatten: the first axis (batch size) is kept and
    every remaining axis is collapsed into one.

    Parameters
    ----------
    x : numpy.ndarray
        Input data.

    Returns
    -------
    numpy.ndarray
        Flattened array of shape (batch, -1).
    """
    batch = x.shape[0]
    return x.reshape((batch, -1), order="C")
from unittest.mock import patch
def patch_bond_device_properties(return_value=None):
    """Patch the Bond API device-properties endpoint (defaults to an empty dict)."""
    return patch(
        "homeassistant.components.bond.Bond.device_properties",
        return_value={} if return_value is None else return_value,
    )
def isNull(text):
    """
    Test whether a value 'looks' like null (useful for querying the API
    against a null key).

    Args:
        text: Input text (any type; converted with str()).

    Returns:
        True if the text looks like a null value.
    """
    null_words = {'top', 'null', 'none', 'empty', 'false', '-1', ''}
    return str(text).strip().lower() in null_words
import random
def sample_independently(L, p):
    """
    Return a sample of the given precincts, each selected *independently*
    with its own probability.

    L = input list of precincts; each is a (size, name) pair.
    p = input list of selection probabilities, aligned with L.
    """
    chosen = []
    # One random draw per precinct, in list order.
    for precinct, probability in zip(L, p):
        if random.random() <= probability:
            chosen.append(precinct)
    return chosen
def fitfunc_e2(epsilon, a, b):
    """
    Fit function for the epsilon dependence of NSPT results.

    The Runge-Kutta scheme used in the calculations cancels the term
    linear in epsilon by construction, so the model is quadratic plus
    a constant.

    Parameters
    ----------
    epsilon : float
        NSPT time step epsilon
    a : float
        Fit parameter for the quadratic term
    b : float
        Fit parameter for the constant term

    Returns
    -------
    float
        a*epsilon**2 + b
    """
    return b + a * epsilon ** 2
def _get_value(lst, row_name, idx):
"""
Helper function to parse the ics lines using a key to get the corresponding metadata.
Args:
----------
lst: list
Ics lines
row_name: string
Key
idx: int
Index of the metadata
Returns:
----------
val : string
Metadata
"""
val = None
for l in lst:
if not l:
continue
if l[0] == row_name:
try:
val = l[idx]
except Exception:
print (row_name, idx)
print (lst)
val = None
break
return val | 66c207201556ae4c4aa057ae4ba4a6606fa85af8 | 126,710 |
def sign(number):
    """Return the sign of a number (zero is treated as positive)."""
    if number >= 0:
        return 1
    return -1
import functools
import time
def time_print(f):
    """It provides a wrapper to print the time cost on a process."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = f(*args, **kwargs)
        print(f.__name__, " cost time:", time.time() - start)
        return result
    return wrapper
from xml.dom import minidom
def pretty_xml(xml):
    """Given a string with XML, return a string with the XML formatted pretty"""
    return minidom.parseString(xml).toprettyxml()
def score_progress(board):
    """Return # of candidates remaining in board."""
    total = 0
    for row in board:
        for cell in row:
            total += len(cell)
    return total
import string
def format_sample(sample):
    """
    Render a single FewRel sample as a human-readable sentence.

    Parameters
    ----------
    sample: dict{'tokens': list[str], 'h': [str, str, list[list[int]]], 't': [str, str, list[list[int]]]}
        A single sentence from the FewRel dataset; ``h``/``t`` carry the
        token index spans of the head/tail entity mentions.

    Returns
    ----------
    sentence : str
        The sentence with head-entity spans wrapped in ``{}`` and
        tail-entity spans wrapped in ``[]``.

    Note
    ----
    Mutates ``sample['tokens']`` in place by attaching the bracket markers.
    """
    # Mark head-entity spans with curly braces.
    for idx in sample['h'][2]:
        sample['tokens'][idx[0]] = '{' + sample['tokens'][idx[0]]
        sample['tokens'][idx[-1]] = sample['tokens'][idx[-1]] + '}'
    # Mark tail-entity spans with square brackets.
    for idx in sample['t'][2]:
        sample['tokens'][idx[0]] = '[' + sample['tokens'][idx[0]]
        sample['tokens'][idx[-1]] = sample['tokens'][idx[-1]] + ']'
    sentence = ''
    sz = len(sample['tokens'])
    for i in range(sz):
        cur = sample['tokens'][i]
        nxt = None
        if i + 1 < sz:
            nxt = sample['tokens'][i + 1]
        sentence += cur
        # No space before punctuation or after an opening bracket/hyphen.
        if nxt is not None and nxt not in string.punctuation and cur not in '([-':
            sentence += ' '
    return sentence
def scale_features(X, approach='standard'):
    """Scale a feature tensor.

    Parameters
    ----------
    X : torch.Tensor
        Tensor of shape (n_samples, n_channels, lookback, n_assets). Unscaled.
    approach : str, {'standard', 'percent'}
        'standard' normalizes each (sample, channel) slice by its own mean
        and std (over lookback and assets); 'percent' multiplies by 100.

    Returns
    -------
    torch.Tensor
        Tensor of shape (n_samples, n_channels, lookback, n_assets). Scaled.

    Raises
    ------
    ValueError
        If `approach` is not one of the supported values.
    """
    n_samples, n_channels, lookback, n_assets = X.shape
    if approach == 'percent':
        return X * 100
    if approach != 'standard':
        raise ValueError('Invalid scaling approach {}'.format(approach))
    # Per-(sample, channel) statistics over the lookback and asset axes.
    means = X.mean(dim=[2, 3])
    stds = X.std(dim=[2, 3]) + 1e-6  # epsilon guards against zero std
    broadcast_shape = (n_samples, n_channels, 1, 1)
    means_rep = means.view(*broadcast_shape).repeat(1, 1, lookback, n_assets)
    stds_rep = stds.view(*broadcast_shape).repeat(1, 1, lookback, n_assets)
    return (X - means_rep) / stds_rep
def removeprefix(target, prefix):
    """Remove a prefix from a string, based on 3.9 str.removeprefix()"""
    # Always return a copy-like slice, matching str.removeprefix semantics.
    return target[len(prefix):] if target.startswith(prefix) else target[:]
def _parse_impedance_ranges(settings):
    """Parse the selected electrode impedance ranges from the header.

    Parameters
    ----------
    settings : list
        The header settings lines fom the VHDR file.

    Returns
    -------
    electrode_imp_ranges : dict
        A dictionary of impedance ranges for each type of electrode.
    """
    # Header lines announcing an impedance measurement range.
    impedance_ranges = [item for item in settings if
                        "Selected Impedance Measurement Range" in item]
    electrode_imp_ranges = dict()
    if impedance_ranges:
        if len(impedance_ranges) == 1:
            # Single line: one shared range for Data/Reference/Ground.
            # Bounds and unit are read from fixed token positions at the
            # end of the line — assumes the VHDR wording is stable
            # (NOTE(review): confirm against the BrainVision format spec).
            img_range = impedance_ranges[0].split()
            for electrode_type in ['Data', 'Reference', 'Ground']:
                electrode_imp_ranges[electrode_type] = {
                    "imp_lower_bound": float(img_range[-4]),
                    "imp_upper_bound": float(img_range[-2]),
                    "imp_range_unit": img_range[-1]
                }
        else:
            # One line per electrode type: token 0 is the type name;
            # bounds/unit sit at fixed positions 6, 8 and 9.
            for electrode_range in impedance_ranges:
                electrode_range = electrode_range.split()
                electrode_imp_ranges[electrode_range[0]] = {
                    "imp_lower_bound": float(electrode_range[6]),
                    "imp_upper_bound": float(electrode_range[8]),
                    "imp_range_unit": electrode_range[9]
                }
    return electrode_imp_ranges
def one_hot(elements, classes):
    """
    Transform a list of labels into a matrix whose rows are one-hot
    encodings over `classes`.  A label absent from `classes` yields an
    all-zero row (and prints an error).

    :param elements: list of labels to transform into one-hot encoding.
    :param classes: list of known labels.
    :return: a matrix (list of lists) of one-hot rows.
    """
    encoded = []
    for label in elements:
        if label not in classes:
            print('Error. Class {} not recognized in: {}'.format(label, elements))
        encoded.append([1 if cls == label else 0 for cls in classes])
    return encoded
import math
def get_direct_radiation(normal_surface_direct_radiation: float, solar_altitude: float, solar_azimuth: float,
                         surface_tilt_angle: float, surface_azimuth: float) -> float:
    """
    Compute the direct solar radiation incident on a tilted surface.

    :param normal_surface_direct_radiation: direct normal irradiance, W/m2
    :param solar_altitude: solar altitude angle, degree
    :param solar_azimuth: solar azimuth angle, degree
    :param surface_tilt_angle: surface tilt angle, degree
    :param surface_azimuth: surface azimuth angle, degree
    :return: direct irradiance on the tilted surface, W/m2
    """
    altitude = math.radians(solar_altitude)
    tilt = math.radians(surface_tilt_angle)
    # Cosine of the incidence angle between the sun ray and the surface.
    sunlight_incidence_angle \
        = math.sin(altitude) * math.cos(tilt) \
        + math.cos(altitude) * math.sin(tilt) \
        * math.cos(math.radians(solar_azimuth - surface_azimuth))
    return normal_surface_direct_radiation * sunlight_incidence_angle
def STR_CASE_CMP(x, y):
    """
    Build the MongoDB ``$strcasecmp`` aggregation operator: case-insensitive
    comparison of two strings, yielding 1 / 0 / -1 for greater / equal / less.
    See https://docs.mongodb.com/manual/reference/operator/aggregation/strcasecmp/

    :param x: The first string or expression of string
    :param y: The second string or expression of string
    :return: Aggregation operator
    """
    operands = [x, y]
    return {'$strcasecmp': operands}
def lin_comb(cofs, vals):
    """
    Return the linear combination of the given values with the given
    coefficients.

    :param cofs: Array of variable coefficients
    :param vals: Array of variable values
    :return: Variable representing the linear combination
    """
    return sum(c * v for c, v in zip(cofs, vals))
from pathlib import Path
def message_files(messages_dir):
    """ A test fixture returning a list of Harmony message files. """
    return [path for path in Path(messages_dir).rglob("*.msg")]
def palindrome(my_str):
    """
    Returns True if an input string is a palindrome. Else returns False.
    Case and non-alphabetic characters are ignored.
    """
    letters = [ch.lower() for ch in my_str if ch.isalpha()]
    return letters == letters[::-1]
def evaluate(out, labels):
    """
    Calculate the accuracy of predictions against the ground truth.

    :param out: predicted class scores (argmax over dim 1 gives the class)
    :param labels: ground-truth class labels
    :returns: float accuracy in [0, 1]
    """
    predictions = out.argmax(dim=1)
    matches = predictions == labels
    return int(matches.sum()) / int(matches.size(0))
import torch
def get_kl(mu: torch.Tensor, logsigma: torch.Tensor):
    """Calculate KL(q||p) where q = Normal(mu, sigma) and p = Normal(0, I).

    Args:
        mu: the mean of the q distribution.
        logsigma: the log of the standard deviation of the q distribution.

    Returns:
        KL(q||p), summed over the last dimension.
    """
    logvar = 2 * logsigma  # log(sigma^2)
    kl = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(-1)
    return kl
def solution(A):  # overall O(N log N), dominated by the sort
    """
    Merge overlapping intervals and return the non-overlapping result.

    >>> solution([(1, 3), (2, 6), (8, 10), (15, 18)])
    [(1, 6), (8, 10), (15, 18)]
    >>> solution([(1, 4), (4, 8), (2, 5), (14, 19)])
    [(1, 8), (14, 19)]
    """
    A.sort(key=lambda interval: interval[0])  # in-place sort, as before
    merged = []
    for interval in A:
        if merged and merged[-1][1] >= interval[0]:
            # Overlaps (or touches) the last merged interval: extend it.
            merged[-1] = (merged[-1][0], max(merged[-1][1], interval[1]))
        else:
            merged.append(interval)
    return merged
def get_negations(tokens):
    """Identify, color and count negations (nicht)."""
    negations = [tok for tok in tokens
                 if tok.full_pos == 'PTKNEG' and tok.lemma == "nicht"]
    # Handling of 'kein' (PIAT) is intentionally disabled, as in the original.
    for tok in negations:
        tok.pattern_color.append('Negations')
    return len(negations)
def is_hashable(arg):
    """Determine whether `arg` can be hashed."""
    try:
        hash(arg)
        return True
    except Exception:
        # A custom __hash__ may raise anything, so the broad catch is
        # deliberate.
        return False
def get_distances(sequence):
    """
    For each unique element of `sequence`, return the distance between its
    last two occurrences (0 if the element occurs only once).

    :param sequence: list
    :return: dict mapping element -> distance
    """
    last_index = {}
    gaps = {}
    for position, element in enumerate(sequence):
        # First sighting: distance defaults to 0 (position - position).
        gaps[element] = position - last_index.get(element, position)
        last_index[element] = position
    return gaps
def quat_from_pose(pose):
    """Extract the orientation quaternion from a pose.

    Parameters
    ----------
    pose : sequence
        Pose whose second element is the quaternion (four floats).

    Returns
    -------
    Quaternion stored at index 1 of ``pose``.
    """
    quaternion = pose[1]
    return quaternion
def when(b, x):
    """
    Emulate the `if` filter of a comprehension as an expression: return `x`
    when `b` is truthy, otherwise an empty tuple (which vanishes when
    flattened into a surrounding sequence). Named for Clojure's :when.

    >>> when(True, 'ab')
    'ab'
    >>> when(False, 'ab')
    ()
    """
    if b:
        return x
    return ()
def is_span(node) -> bool:
    """Check whether a node is a span node.

    A node counts as a span when it is a plain string, an object exposing a
    ``marks`` attribute, or a dict whose ``_type`` is ``'span'``.
    """
    # Test the cheap non-dict cases first: the original evaluated
    # `node.get(...)` before the isinstance check, so a plain-string node
    # raised AttributeError despite the explicit str branch.
    if isinstance(node, str):
        return True
    if hasattr(node, 'marks'):
        return True
    return node.get('_type', '') == 'span'
from typing import Optional
from pathlib import Path
def _find_in_cwd_or_parents(name: str) -> Optional[Path]:
"""Finds a file with the given name starting in the cwd and working up to root."""
for parent in (Path().absolute() / name).parents:
path = parent / name
if path.is_file():
return path
return None | 0bfbe6d1e660ed79c5356b2ae83bd987db3f389c | 126,779 |
def binary_search(seq, target):
    """
    Classic iterative binary search.

    :param seq: ascending sequence
    :param target: number to search for
    :return: index of `target` if found, None otherwise
    """
    low, high = 0, len(seq) - 1
    while low <= high:
        middle = (low + high) // 2
        candidate = seq[middle]
        if candidate == target:
            return middle
        if candidate < target:
            low = middle + 1
        else:
            high = middle - 1
    return None
def or_options(values, initial_value=0):
    """
    Combine all given values using binary OR, starting from `initial_value`.
    """
    combined = initial_value
    for flag in values:
        combined = combined | flag
    return combined
def _has_renderer(renderer_name):
"""Returns a function that returns whether a RendererItem has the given renderer name."""
return lambda item: item.get_renderer_name() == renderer_name | 2eaa3195ea60dda8240a91b555549296811a756c | 126,798 |
def rm_key(data, key):
    """Function: rm_key

    Description: Return a shallow copy of `data` with `key` removed when
        present; the original dictionary is left untouched.

    Arguments:
        (input) data -> Original dictionary.
        (input) key -> Name of key to be removed.
        (output) mod_data -> Modified copy of the original dictionary.
    """
    mod_data = dict(data)
    # pop with a default is a no-op when the key is absent.
    mod_data.pop(key, None)
    return mod_data
def alias(name):
    """
    Create a filesystem-friendly alias from a string: spaces become
    underscores, every other non-alphanumeric character is dropped,
    and the result is lowercased.
    """
    underscored = name.replace(" ", "_")
    kept = [ch for ch in underscored if ch.isalnum() or ch == "_"]
    return "".join(kept).lower()
def _convert(msg):
"""Convert a graphite key value string to pickle."""
path, timestamp, value = msg.split(' ')
m = (path, (timestamp, value))
return m | 6f2fdca3dbb9fbf3691d216b33ac5058b290a3c5 | 126,808 |
import calendar
def next_leap_year(current_year: int) -> int:
    """ Return the first leap year strictly after current_year.

    >>> next_leap_year(2019)
    2020
    >>> next_leap_year(2018)
    2020
    >>> next_leap_year(2020)
    2024
    """
    year = current_year
    while True:
        year += 1
        if calendar.isleap(year):
            return year
def _get_keywords_from_textmate(textmate):
"""Return keywords from textmate object.
"""
return [
kw["match"] for kw in textmate["repository"]["language_keyword"]["patterns"]
] | 938dbeb8b9456c1914ca73e4a72150ee4f16696b | 126,813 |
def checker(wordlist, filename):
    """ Return True if the file contains every word in `wordlist`.

    The file is read once, lowercased and whitespace-split; each word in
    `wordlist` must appear as a whole token (words are expected lowercase).
    """
    # `with` guarantees the handle is closed even if reading fails;
    # the original leaked the file descriptor.
    with open(filename) as handle:
        content = handle.read().lower().split()
    return all(word in content for word in wordlist)
def str_count(string: str, letter: str) -> int:
    """ Return how many characters of `string` are equal to `letter`. """
    # Per-character comparison (not substring counting), exactly as the
    # original loop did.
    return sum(1 for ch in string if ch == letter)
from typing import Sequence
from typing import Mapping
from typing import Any
from typing import MutableMapping
from typing import AbstractSet
def consistent_refvars(events: Sequence[Mapping[str, Any]]) -> bool:
    """Detect whether all refvars with identical IDs use the same constraints.

    Args:
        events: Event dicts; each may carry an ``"args"`` list whose entries
            can reference a variable via ``"refvar"`` plus ``"constraints"``.

    Returns:
        False as soon as two args share a refvar ID but disagree on their
        constraint sets; True otherwise.
    """
    seen: MutableMapping[str, AbstractSet[str]] = {}
    for event in events:
        # A missing or falsy "args" entry contributes nothing.
        for arg in event.get("args") or []:
            ref_id = arg.get("refvar")
            if ref_id is None:
                continue
            constraints = set(arg["constraints"])
            previous = seen.get(ref_id)
            if previous is None:
                seen[ref_id] = constraints
            elif previous != constraints:
                return False
    return True
def drop_bias(matrix):
    """Remove the bias-unit column (column 0) from a weight or delta matrix."""
    trimmed = matrix[:, 1:]
    return trimmed
def update_average(old_avg: float, old_num: int, new_avg: float, new_num: int):
    """Merge two running averages into one.

    Params:
        - old_avg (float): The current average value
        - old_num (int): The number of elements contributing to the current average
        - new_avg (float): The new average value
        - new_num (int): The number of elements contributing to the new average
    """
    # Re-expand both averages to sums, then re-average over all elements.
    combined_sum = (old_avg * old_num) + (new_avg * new_num)
    combined_count = old_num + new_num
    return combined_sum / combined_count
def getname(method):
    """Descriptive name for the function.

    Combines the last component of the function's module with the function
    name, e.g. ``"math (sqrt)"``.

    Parameters
    ----------
    method: callable

    Returns
    -------
    name: str
    """
    group = method.__module__.split(".")[-1]
    return f"{group} ({method.__name__})"
def get_predict(Y, X, coef):
    """ Get predictions and residuals from a linear model.

    Parameters
    ----------
    Y : matrix
        N x 1 matrix of the dependent variable.
        N is the number of observations.
    X : matrix
        N x K matrix of the independent variables.
        N is the number of observations.
        K is the number of independent variables.
    coef : matrix
        K x 1 matrix of coefficients.

    Returns
    -------
    (predict, resid)
    predict : matrix
        N x 1 matrix of predicted values (X @ coef).
        (The original docstring wrongly said K x 1.)
    resid : matrix
        N x 1 matrix of residuals (Y - predict).
    """
    predict = X.dot(coef)
    resid = Y - predict
    return predict, resid
def split_comments(line, comment_char=';'):
    """
    Split `line` at the first occurrence of `comment_char`.

    Parameters
    ----------
    line: str
    comment_char: str

    Returns
    -------
    tuple[str, str]
        The stripped text before and after `comment_char`. When `line`
        contains no `comment_char`, the second element is the empty string.
    """
    # partition never raises and returns '' for the tail when the
    # separator is absent, matching the original split-based logic.
    data, _, comment = line.partition(comment_char)
    return data.strip(), comment.strip()
import json
def load_json(filename: str):
    """Load and return the contents of a JSON file."""
    with open(filename, encoding="utf-8", mode="r") as file:
        return json.load(file)
def within_bounds(pixel_x, pixel_y, bounds):
    """Check whether x and y pixel coordinates lie strictly inside bounds,
    where bounds = (height, width)."""
    return 0 < pixel_y < bounds[0] and 0 < pixel_x < bounds[1]
import re
def camel_to_snake(s, separator='_'):
    """
    Convert a camelCase/PascalCase string to snake (or kebab) case.

    >>> camel_to_snake('CamelCase')
    'camel_case'
    >>> camel_to_snake('Camel_Case')
    'camel_case'
    >>> camel_to_snake('camelCase')
    'camel_case'
    >>> camel_to_snake('USA')
    'usa'
    >>> camel_to_snake('TeamUSA')
    'team_usa'
    >>> camel_to_snake('Team_USA')
    'team_usa'
    >>> camel_to_snake('R2D2')
    'r2_d2'
    >>> camel_to_snake('ToilPre310Box',separator='-')
    'toil-pre-310-box'
    >>> camel_to_snake('Toil310Box',separator='-')
    'toil-310-box'
    """
    # First break lower/digit -> upper boundaries, then lower -> upper/digit.
    separated = re.sub('([a-z0-9])([A-Z])', rf'\1{separator}\2', s)
    separated = re.sub('([a-z])([A-Z0-9])', rf'\1{separator}\2', separated)
    return separated.lower()
def multiply_and_round(num: float, factor: float = 100, precision: int = 2) -> float:
    """
    Scale `num` (presumably between 0 and 1) by `factor` and round the
    product to `precision` decimal places.

    :param num: number to scale and round
    :param factor: multiplying factor (default = 100, to create percentages)
    :param precision: rounding precision
    :return: product rounded with precision
    """
    scaled = num * factor
    return round(scaled, precision)
def filter_cmd(cmd):
    """Remove some non-technical option strings from `cmd`, which is assumed
    to be a string containing a shell command.
    """
    # Same substrings removed in the same order as before.
    for noise in (' -outputname /dev/null', ' -q'):
        cmd = cmd.replace(noise, '')
    return cmd
def check_cell(y, x, field):
    """Return True when position (y, x) is inside `field` and holds a
    living (truthy) cell."""
    # Chained comparisons keep the original short-circuit evaluation order.
    if not (-1 < x < len(field[0]) and -1 < y < len(field)):
        return False
    return bool(field[y][x])
import torch
def align_coordinates(boxes):
    """Reorder box corners so that (x1, y1) <= (x2, y2), as required by
    torchvision's `box_iou` op.

    Arguments:
        boxes (Tensor[N, 4])
    Returns:
        boxes (Tensor[N, 4]): aligned box coordinates
    """
    front, back = boxes[:, :2], boxes[:, 2:]
    lower = torch.min(front, back)
    upper = torch.max(front, back)
    return torch.cat([lower, upper], dim=1)
def is_valid_positive_float(in_val):
    """Validates the floating point inputs

    Args:
        in_val (string): The string to check
    Returns:
        bool: True if the string can be converted to a valid non-negative float
    """
    try:
        value = float(in_val)
    except (TypeError, ValueError):
        # float() raises ValueError for unparseable strings and TypeError
        # for incompatible types (e.g. None). The original bare `except:`
        # also swallowed KeyboardInterrupt/SystemExit -- too broad.
        return False
    # NOTE: mirrors the original semantics exactly -- special values such
    # as "nan" or "inf" are not less than zero and therefore pass.
    if value < 0.0:
        return False
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.