content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import json
def read_json(filename):
    """Load the contents of a JSON file.

    Args:
        filename (str): path of the JSON file to read.

    Returns:
        (dict): parsed JSON data.

    Example:
        >>> import log
        >>> data = log.read_json("test.json")
    """
    with open(filename) as handle:
        parsed = json.load(handle)
    return parsed
def general_constructor(loader, tag_suffix, node):
    """
    String flattener for custom YAML tags such as the ones that occur in CFN files.

    :param loader: the ruamel.yaml loader instance (unused; required by the constructor signature)
    :param tag_suffix: the tag name (unused; required by the constructor signature)
    :param node: the tag node from the loaded yaml file
    :return: just the simple node value as a string
    :rtype: string
    """
    return node.value | f3bf3f7beec43df70fefbb367d17773871ec9fc9 | 114,148 |
def _to_int(x: int) -> int:
"""Convert a signed completion and error code to the proper value.
This function is necessary because the VISA specification is flawed: It defines
the VISA codes, which have a value less than zero, in their internal 32-bit
signed integer representation. However, this is positive. ctypes doesn't
care about that and (correctly) returns the negative value, which is left as
such by Python.
Parameters
----------
x : int
Value in 32-bit notation as listed in the VPP-4.3.2 specification
Returns
-------
int
Properly signed value
"""
if x > 0x7FFFFFFF:
return int(x - 0x100000000)
else:
return int(x) | 6af30858ec1e74ab1ebffc254bb9bdd4b8e1fc56 | 114,152 |
def reduce_dam(grand_proc, new_class):
    """Create a collapsed main-use category that lumps dams listed as
    Recreation, Fisheries, and Other into a generic "Other expanded" class.

    Parameters
    -----------
    grand_proc : geodataframe
        Processed geodataframe to add new column
    new_class : string
        Name of new dam main use column to be created

    Returns
    --------
    grand_proc : geodataframe
        The geodataframe with the added newly created column
    """
    grand_proc[new_class] = grand_proc['MAIN_USE']
    # Bug fix: write the collapsed label into the requested `new_class`
    # column (the original hard-coded 'MAIN_RED', ignoring the argument).
    lumped = grand_proc['MAIN_USE'].isin(['Recreation', 'Fisheries', 'Other'])
    grand_proc.loc[lumped, new_class] = 'Other expanded'
    return grand_proc
def combine_posts(posts):
    """
    Combine all posts by a user into a single text.

    :param posts: all posts by a user (iterable of strings)
    :return: (String) the posts joined with '.' separators
    """
    return '.'.join(posts)
def read_file(filename):
    """Read the contents of a UTF-8 text file.

    Args:
        filename (str): Filename to read

    Returns:
        list: lines of text in the file (newline characters preserved)
    """
    with open(filename, mode="r", encoding="utf-8") as handle:
        return list(handle)
import math
def exp_pdf(value, rate):
    """
    Probability density of the exponential distribution with intensity
    ``rate``, evaluated at ``value``.

    The density is rate * exp(-rate * value) for value >= 0 and 0.0 for
    negative values (the original returned a spurious positive density
    for value < 0).
    """
    if value < 0:
        return 0.0
    return float(rate) * math.exp(-1.0 * rate * value)
import struct
def read_body_array(packet_body):
    """
    Reads array formatted struct packet body. On error returns None.

    The body is a sequence of length-prefixed items: a 4-byte big-endian
    signed length followed by that many raw bytes.

    :param packet_body: Body of packet (bytes-like)
    :return: Array of items (bytes objects), or None on error or empty body
    """
    size_fmt = '!i'
    string_fmt = '!%ds'
    body_array = []
    body_len = len(packet_body)
    offset = 0
    if body_len <= 0:
        return None
    try:
        while offset < body_len:
            # Read size of item (4-byte big-endian signed int)
            item_size = struct.unpack_from(size_fmt, packet_body, offset)[0]
            offset += 4
            # Read string of `item_size` raw bytes
            item_string = struct.unpack_from(string_fmt % item_size, packet_body, offset)[0]
            body_array.append(item_string)
            offset += item_size
        return body_array
    except (struct.error, TypeError):
        # Truncated/malformed body: signal failure with None.
        return None | 857a92972d898056b513c3561fb4103c6e844d22 | 114,176 |
import hmac
import hashlib
import struct
def bin_pbkdf2_hmac(hashname, password, salt, rounds, dklen=None):
    """Password-Based Key Derivation Function (PBKDF) 2"""
    mac = hmac.new(key=password, digestmod=lambda d=b'': hashlib.new(hashname, d))
    if dklen is None:
        dklen = mac.digest_size

    def prf(data):
        # HMAC(password, data) as a mutable bytearray.
        clone = mac.copy()
        clone.update(data)
        return bytearray(clone.digest())

    derived = bytearray()
    block_index = 1
    while len(derived) < dklen:
        # U1 = PRF(salt || INT_32_BE(i)); T = U1 ^ U2 ^ ... ^ Urounds
        u = prf(salt + struct.pack('>i', block_index))
        t = u
        for _ in range(rounds - 1):
            u = prf(u)
            t = bytearray(a ^ b for a, b in zip(t, u))
        derived += t
        block_index += 1
    return bytes(derived[:dklen])
def get_max(dic):
    """
    Given a dictionary of keys with related values, return the entry with the
    highest value as a one-item dict (ties favour the earliest key in
    iteration order). Returns None for an empty dict.

    Example: A scoreboard of players and their scores.
    """
    if not dic:
        return None
    best_key = max(dic, key=dic.get)
    return {best_key: dic[best_key]}
import calendar
def _timestamp_ms_for_datetime(datetime_obj):
    """Return time since the epoch in integer milliseconds for the given
    UTC datetime object.

    Uses floor division for the microsecond part so the result is an int,
    as the name promises (the original `/ 1000` produced a float).
    """
    return (
        int(calendar.timegm(datetime_obj.timetuple()) * 1000) +
        datetime_obj.microsecond // 1000)
def splitList(content, cut):
    """splits an array in two.

    arguments :
        `content` : (ndarray/list)
            the array to split in two.
        `cut` : (float) in [0:1]
            the ratio by which we will split the array.
    """
    pivot = int(len(content) * cut)
    head = content[:pivot]
    tail = content[pivot:]
    return (head, tail)
def _float_almost_equal(float1, float2, places=7):
    """Return True if two numbers are equal up to the
    specified number of "places" after the decimal point
    (same rounding rule as unittest's assertAlmostEqual).
    """
    return round(abs(float2 - float1), places) == 0
def unique(sequence):
    """
    Return a list of unique items found in sequence. Preserve the original
    sequence order. Membership is tested with ==, so unhashable items
    (e.g. lists) are supported too.

    For example:
    >>> unique([1, 5, 3, 5])
    [1, 5, 3]
    """
    result = []
    for element in sequence:
        if element in result:
            continue
        result.append(element)
    return result
import torch
def calc_batch_accuracy(output: torch.Tensor,
                        target: torch.Tensor) -> float:
    """Compute the fraction of correct predictions in a batch.

    Args:
        output: Network output scores, shape (batch, classes).
        target: Ground-truth class indices for the predictions.
    """
    predictions = output.argmax(1)
    num_correct = (predictions == target).sum()
    return float(num_correct) / float(target.shape[0])
import asyncio
def create_task(coro, loop):
    # pragma: no cover
    """Compatibility wrapper for the loop.create_task() call introduced in
    3.4.2.

    Falls back to constructing asyncio.Task directly on loops that predate
    create_task(). NOTE(review): the `loop=` argument to asyncio.Task is
    deprecated/removed in modern Python; the fallback only matters for very
    old event-loop classes.
    """
    if hasattr(loop, 'create_task'):
        return loop.create_task(coro)
    return asyncio.Task(coro, loop=loop) | 8f41d15f1d7a9394b5e9ad7bb31aca83f81999d3 | 114,192 |
def get_subclasses(klass):
    """Return a list of all subclasses of `klass`, recursing into
    subclasses; each direct subclass is followed by its own descendants
    (depth-first order).
    """
    found = []
    for child in klass.__subclasses__():
        found.append(child)
        found.extend(get_subclasses(child))
    return found
def is_leap_year(year: str) -> bool:
    """
    Helper function used to determine if a string is a leap year or not.

    :param year: The year to check.
    :return: True if a leap year, false otherwise.
    """
    y = int(year)
    # Divisible by 4, except century years, unless divisible by 400.
    return y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
def arrival_filter(row):
    """ Fill in a missing arrival time from the departure time, in place. """
    missing_arrival = not row["arrival_time"]
    if missing_arrival and row["departure_time"]:
        row["arrival_time"] = row["departure_time"]
    return row
def sentence_tokenize(sentence, spec_chars='.,;!?'):
    """ Lowercase a sentence and split it into words, dropping punctuation.

    :param str sentence:
    :return [str]:

    >>> s = 'Hi there, how are you?'
    >>> sentence_tokenize(s)
    ['hi', 'there', 'how', 'are', 'you']
    """
    cleaned = ''.join(' ' if ch in spec_chars else ch for ch in sentence)
    return cleaned.lower().split()
import torch
def real_to_complex(X):
    """A version of X that's complex in the old-style layout (last
    dimension of size 2, with a zero imaginary channel).

    Parameters:
        X: (...) tensor
    Return:
        X_complex: (..., 2) tensor
    """
    imag = torch.zeros_like(X)
    return torch.stack([X, imag], dim=-1)
import json
def process_pair(path):
    """
    Reading a json file with a pair of graphs.

    :param path: Path to a JSON file.
    :return data: Dictionary with data.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original `json.load(open(path))` leaked the handle to the GC).
    with open(path) as handle:
        return json.load(handle)
def binary_search(a, k):
    """
    Do a binary search in an array of objects ordered by '.key'
    returns the largest index for which: a[i].key <= k
    (-1 when every key is greater than k)
    like c++: a.upperbound(k)--
    """
    first, last = 0, len(a)
    while first < last:
        # Midpoint via right-shift (same as // 2).
        mid = (first + last) >> 1
        if k < a[mid].key:
            last = mid
        else:
            first = mid + 1
    # `first` is now one past the last element with key <= k.
    return first - 1 | 1a6477f0774470d2bf2025525f769f6a4b48a060 | 114,209 |
def season_id_to_int(season_id):
    """
    Util to convert a season_id to an int (parses its first four characters).
    """
    first_four = season_id[:4]
    return int(first_four)
def get_mean_degree_for_edges(edge_index, degree):
    """Finds the mean degree for each edge, i.e. the average of the degrees
    at the two ends of the edge.

    Parameters
    ----------
    edge_index : array
        A 2 dimensional array, containing the node indexes on either side of each edge.
    degree : array
        The degree for each node in the graph.

    Returns
    -------
    mean_degree : array
        The mean degree for each edge.
    """
    ends_a = degree[edge_index[0]]
    ends_b = degree[edge_index[1]]
    return 0.5 * (ends_a + ends_b)
def monkey_patch_override_instance_method(instance):
    """
    Override an instance method with a new version of the same name. The
    original method implementation is made available within the override method
    as `_original_<METHOD_NAME>`.

    Usage: decorate a function whose name matches the method to replace with
    @monkey_patch_override_instance_method(obj).
    NOTE(review): the inner decorator returns None, so the decorated name in
    the defining scope becomes None -- confirm callers don't rely on it.
    """
    def perform_override(override_fn):
        fn_name = override_fn.__name__
        original_fn_name = '_original_' + fn_name
        # Override instance method, if it hasn't already been done
        if not hasattr(instance, original_fn_name):
            original_fn = getattr(instance, fn_name)
            setattr(instance, original_fn_name, original_fn)
            # __get__ binds the plain function to this instance so `self`
            # is supplied automatically when the override is called.
            bound_override_fn = override_fn.__get__(instance)
            setattr(instance, fn_name, bound_override_fn)
    return perform_override | 013bbcff43dc0b66ba78c3519ef1370be5650467 | 114,213 |
def checkpoint_test_loss(filename):
    """
    Reads the checkpoint test loss off of the filename and returns it.

    The loss is expected in the 4th-from-last '-'-separated field, after a
    one-character prefix (e.g. "model-l0.25-a-b-c" -> 0.25). Returns
    float('inf') whenever the name does not follow that pattern.
    """
    parts = filename.split('-')
    if len(parts) < 5:
        return float('inf')
    try:
        # Skip the single-character tag in front of the number.
        return float(parts[-4][1:])
    except ValueError:
        # Narrowed from a bare `except:` which also hid unrelated errors
        # (e.g. KeyboardInterrupt).
        return float('inf')
def isprimeF(n: int, b: int) -> bool:
    """True if n is prime or a Fermat pseudoprime to base b.

    Fermat test: b**(n-1) % n == 1 holds for all primes n with gcd(b, n) == 1,
    but some composites ("pseudoprimes") pass too, so this is probabilistic.
    """
    return pow(b, n - 1, n) == 1 | 15d85be3790a0d3c11f62845b1ec1e4ffc04c088 | 114,217 |
def cross2d(v0, v1):
    """Cross product for 2D vectors. Right corner from 3D cross product."""
    x0, y0 = v0[0], v0[1]
    x1, y1 = v1[0], v1[1]
    return x0 * y1 - y0 * x1
def extract_star_names(file):
    """Extracts star names from a text file, one name per line.

    Arguments:
        file {str} -- the path to the text file

    Returns:
        star_names {list} -- a list of star names from the text file

    Fixes two defects in the original: the file handle was never closed,
    and `line[:-1]` chopped the final character of a last line that had
    no trailing newline.
    """
    with open(file, 'r') as names:
        return [line.rstrip('\n') for line in names]
import torch
def select_topk(prob_tensor: torch.Tensor, topk: int = 1, dim: int = 1) -> torch.Tensor:
    """
    Convert a probability tensor to binary by selecting top-k highest entries.

    Args:
        prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the
            position defined by the ``dim`` argument
        topk: number of highest entries to turn into 1s
        dim: dimension on which to compare entries

    Output:
        A binary tensor of the same shape as the input tensor of type torch.int32

    Example:
        >>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]])
        >>> select_topk(x, topk=2)
        tensor([[0, 1, 1],
                [1, 1, 0]], dtype=torch.int32)
    """
    zeros = torch.zeros_like(prob_tensor)
    # Bug fix: scatter along `dim` (the original hard-coded dimension 1,
    # which broke any call with dim != 1).
    topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)
    return topk_tensor.int()
def display_fav_sub() -> str:
    """
    081
    Ask the user to type in their favourite school subject. Display it
    with “-” after each letter, e.g. S-p-a-n-i-s-h-.
    """
    fav_sub = input("Enter your favorite subject: ")
    # Plain loop: the original used a list comprehension evaluated only
    # for its print side effects, an anti-pattern.
    for letter in fav_sub:
        print(letter, end="-")
    return ""
import math
def dms(deg):
    """Convert degree to degree-minute-second

    If deg is negative, all values of tuple are negative or 0.

    Returns:
        tuple: (int degrees, int minutes, float seconds)
    """
    seconds = deg * 3600
    d = seconds / 3600
    # math.fmod keeps the sign of `seconds`, so negative input yields
    # non-positive components throughout.
    m = math.fmod(seconds, 3600) / 60
    s = math.fmod(seconds, 60)
    return (int(d), int(m), s) | 8c880d77bab007f103062eb77de421decb42d15b | 114,231 |
import random
def generate_nested_sequence(length, min_seglen=5, max_seglen=10):
    """Generate low-high-low sequence, with indexes of the first/last high/middle elements

    Returns [sequence, index of first high element, index of last high element].
    NOTE(review): if `length` is smaller than the generated low+high+low run,
    the padding multiplier is negative, producing no padding, and the result
    exceeds `length` -- confirm callers always pass length >= 3 * max_seglen.
    """
    # Low (1-5) vs. High (6-10)
    seq_before = [(random.randint(1,5)) for x in range(random.randint(min_seglen, max_seglen))]
    seq_during = [(random.randint(6,10)) for x in range(random.randint(min_seglen, max_seglen))]
    seq_after = [random.randint(1,5) for x in range(random.randint(min_seglen, max_seglen))]
    seq = seq_before + seq_during + seq_after
    # Pad it up to max len with 0's
    seq = seq + ([0] * (length - len(seq)))
    return [seq, len(seq_before), len(seq_before) + len(seq_during)-1] | f99b3becad6a30daf77eb4b231178a945fb6ec1e | 114,236 |
def order_field(field):
    """
    Decorator factory that sets the 'admin_order_field' attribute on the
    wrapped function (this attribute is used by list_display).
    """
    def attach(func):
        setattr(func, 'admin_order_field', field)
        return func
    return attach
def dict_to_rank_order(vote_dict):
    """
    Convert a vote of the form {candidate: rank, ...} (1 best, 2 second-best, etc.)
    to [{set of candidates 1}, {set of candidates 2, ...}] (best first)

    :param vote_dict: vote as dictionary
    :return: vote as ranked list of tied sets
    """
    by_rank = {}
    for candidate, rank in vote_dict.items():
        by_rank.setdefault(rank, set()).add(candidate)
    return [by_rank[rank] for rank in sorted(by_rank)]
def state_to_index(grid_cells, state_bounds, state):
    """Transforms the state into the index of the nearest grid.

    Args:
        grid_cells (tuple of ints): where the ith value is the number of
            grid_cells for ith dimension of state
        state_bounds (list of tuples): where ith tuple contains the min and
            max value in that order of ith dimension
        state (np.ndarray): state to discretize

    Returns:
        tuple: state discretized into appropriate grid_cells
    """
    index = []
    for i in range(len(state)):
        lower_bound = state_bounds[i][0]
        upper_bound = state_bounds[i][1]
        # Out-of-range states are clamped to the first/last cell.
        if state[i] <= lower_bound:
            index.append(0)
        elif state[i] >= upper_bound:
            index.append(grid_cells[i] - 1)
        else:
            # Linear bucketing: fraction of the range times the cell count.
            index.append(
                int(((state[i] - lower_bound) * grid_cells[i]) //
                    (upper_bound-lower_bound))
            )
    return tuple(index) | ab5eb2138b49d15141b5bdc8936be17910990809 | 114,240 |
def _oid_key(qcl):
    """Build oid key from qualified class name."""
    return 'oid({})'.format(qcl)
def getWordIdx(token, word2Idx):
    """Return the word index for a token from the word2Idx table, falling
    back to the token's lowercase form and finally to UNKNOWN_TOKEN."""
    for candidate in (token, token.lower()):
        if candidate in word2Idx:
            return word2Idx[candidate]
    return word2Idx["UNKNOWN_TOKEN"]
import six
def _resolve_name(val):
    """Takes an object or a name and returns the name.

    Strings (py2/py3 string types via six) pass through unchanged; any
    other object is assumed to expose a `.name` attribute.
    """
    return val if isinstance(val, six.string_types) else val.name | d9459a218757f41f05f49a21285111868e72cfad | 114,251 |
def __get_beta(beta, eta):
    """
    Get and check the beta argument. The argument can be None (which then uses 0.4/eta) or a single
    value. The value must be positive and less than 1/eta.
    """
    if beta is None:
        return 0.4 / eta
    if not 0 < beta < 1 / eta:
        raise ValueError('beta')
    return beta
import requests
def upload_via_lewla(url_to_upload):
    """Upload video via https://lew.la

    Posts the source URL to lew.la's reddit downloader and builds the
    public clip URL from the response body (presumably the clip id --
    TODO confirm the API contract; no error handling for failed requests).
    """
    site_url = "https://lew.la/reddit/download"
    response = requests.post(site_url, data={
        'url': url_to_upload
    })
    uploaded_link = f"https://lew.la/reddit/clips/{response.text}.mp4"
    return uploaded_link | 947a19030da8ab2f45927094dc5f128bba55a72f | 114,253 |
def get_lines_as_list(filename, to_del='\n'):
    """Returns all the lines of the file in a list, with `to_del`
    characters stripped from each line's right end.

    Args:
        filename: path of the text file.
        to_del: trailing characters to strip (default: newline).
    """
    # The original called f.close() inside the `with` block, which is
    # redundant -- the context manager already closes the handle.
    with open(filename, "r") as handle:
        return [line.rstrip(to_del) for line in handle]
def lfff_name(lt):
    """Create mch-filename for icon ctrl run for given leadtime.

    Args:
        lt (int or float): leadtime in hours

    Returns:
        str: filename of icon output simulation in netcdf, following mch-convention
    """
    whole_hours = int(lt)
    day, hour = divmod(whole_hours, 24)
    leftover_seconds = int(round((lt - whole_hours) * 3600))
    minute, second = divmod(leftover_seconds, 60)
    return f"lfff{day:02}{hour:02}{minute:02}{second:02}.nc"
def binary_recursive_fibonacci(n: int) -> int:
    """
    Returns the nth Fibonacci number.

    Uses the fast-doubling identities, recursing once on n // 2 instead of
    making two independent recursive calls (the original recomputed F(k)
    and F(k+1) separately, giving exponential running time; this version
    needs O(log n) multiplications):
    - F(2k+1) = F(k)**2 + F(k+1)**2
    - F(2k)   = F(k) * (2*F(k+1) - F(k))

    >>> [binary_recursive_fibonacci(i) for i in range(20)]
    [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181]
    """
    def _fib_pair(k: int):
        """Return the pair (F(k), F(k+1)) by fast doubling."""
        if k == 0:
            return (0, 1)
        a, b = _fib_pair(k >> 1)
        even = a * (2 * b - a)   # F(2m)
        odd = a * a + b * b      # F(2m+1)
        if k & 1:
            return (odd, even + odd)
        return (even, odd)

    return _fib_pair(n)[0]
def decode_IP(IP_address):
    """
    Returns a long int for a dotted-decimal address.

    @param IP_address : like "192.168.1.101"
    @type IP_address : str of decimal separated ints
    @return: int

    NOTE(review): despite the original claim, the 6-part branch does NOT
    implement IPv6 (IPv6 uses colon-separated 16-bit hex groups); it merely
    packs six dot-separated bytes. Confirm whether any caller relies on it.
    """
    parts = IP_address.split('.')
    if len(parts) == 4 or len(parts) == 6:
        ipvalue = 0
        for i in range(len(parts)):
            # Each part occupies 8 bits, most-significant part first.
            shift = 8*(len(parts)-1-i)
            ipvalue += int(parts[i]) << shift
        return ipvalue
    else:
        raise RuntimeError("Invalid IP address: %s" % IP_address) | 23e2a7d236337ce7121e92a7541c4394ae32abab | 114,263 |
def extract_coordinates(user_information):
    """
    Extracts the turkers' coordinates: (latitude, longitude)

    Args:
        user_information: dict with the user information of the turkers (returned from @extract_information_per_turker

    Returns:
        coordinates: list of (latitude, longitude) pairs
    """
    return [
        (info['latitude'], info['longitude'])
        for info in user_information.values()
    ]
def mix(x, y, a):
    r"""Performs a linear interpolation between `x` and `y` using
    `a` to weight between them. The return value is computed as
    :math:`x\times (1-a) + y\times a` (matching GLSL's `mix`; the previous
    docstring stated the inverted formula, and its non-raw ``\t`` was a
    literal tab escape).

    The arguments can be scalars or :class:`~taichi.Matrix`,
    as long as the operation can be performed.

    Args:
        x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify
            the start of the range in which to interpolate.
        y (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify
            the end of the range in which to interpolate.
        a (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify
            the weight to use to interpolate between x and y.

    Returns:
        (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): The linear
        interpolation of `x` and `y` by weight `a`.

    Example::

        >>> x = ti.Vector([1, 1, 1])
        >>> y = ti.Vector([2, 2, 2])
        >>> a = ti.Vector([1, 0, 0])
        >>> ti.mix(x, y, a)
        [2, 1, 1]
        >>> x = ti.Matrix([[1, 2], [2, 3]], ti.f32)
        >>> y = ti.Matrix([[3, 5], [4, 5]], ti.f32)
        >>> a = 0.5
        >>> ti.mix(x, y, a)
        [[2.0, 3.5], [3.0, 4.0]]
    """
    return x * (1.0 - a) + y * a | 5885a1b4ad3d465af0dc3e8bdb099f114f440471 | 114,268 |
def normalize_index(i):
    """Clamp i into the inclusive range [0, 2].

    (The previous docstring claimed `0 <= i < 2`, but `min(2, i)` allows 2.)
    """
    return max(0, min(2, i)) | aa70e9e5e704bdebfa7f5f43c4ed6ad32b8e29ee | 114,274 |
import pickle
def load_model(filename):
    """
    Function to load an HMM model from a pickle file.

    :param filename: full path or just file name where the model was saved
    :type filename: str
    :return: the trained HMM that was in the file
    :rtype: object
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def getBasesLinear(cls, stop_at=object):
    """Return a list of the linear tree of base classes of a given class.

    Follows only the FIRST base at each level, so multiple-inheritance
    branches are ignored. The walk ends at `stop_at` (inclusive);
    NOTE(review): if `stop_at` is not on the first-base chain, the loop
    eventually hits `object.__bases__[0]` and raises IndexError.
    """
    bases = [cls]
    next_base = cls.__bases__[0]
    while next_base != stop_at:
        bases.append(next_base)
        next_base = next_base.__bases__[0]
    bases.append(next_base)
    return bases | f49a191ddc6b49e0835f5b03d5420e0297212666 | 114,279 |
def createXYlabels(x, y, xlabel, xunits, ylabel, yunits):
    """
    Checks that x and y labels are appropriate and tries to make some if they are not.

    When x (or y) is None the axis is assumed to be an index axis (i/j);
    otherwise geographic defaults (Longitude/Latitude with degree units)
    are used for any label/unit still set to None.
    """
    if x is None:
        xlabel = 'i' if xlabel is None else xlabel
        xunits = '' if xunits is None else xunits
    else:
        xlabel = 'Longitude' if xlabel is None else xlabel
        xunits = r'$\degree$E' if xunits is None else xunits
    if y is None:
        ylabel = 'j' if ylabel is None else ylabel
        yunits = '' if yunits is None else yunits
    else:
        ylabel = 'Latitude' if ylabel is None else ylabel
        yunits = r'$\degree$N' if yunits is None else yunits
    return xlabel, xunits, ylabel, yunits
def V_0(phi_0, k, l, om, f):
    """Meridional velocity amplitude. Wavenumber and frequency should be in
    angular units.

    Computes (l*om - i*k*f) / (om**2 - f**2) * phi_0; undefined (division
    by zero) at the inertial frequency om == f.
    """
    return ((l*om - 1j*k*f)/(om**2 - f**2))*phi_0 | 5a0f6dc8d5db41c20249f5c215824aa0cec523de | 114,284 |
from bs4 import BeautifulSoup
def parse_html(html):
    """ Parse html string with BeautifulSoup, using the stdlib 'html.parser' backend. """
    return BeautifulSoup(html, 'html.parser') | cad4da5607dfce8a0b4b41155d283f945a322f7a | 114,285 |
def render_with_errors(bound_field):
    """
    Usage: {{ field|render_with_errors }} as opposed to {{ field }}.

    If the field (a BoundField instance) has errors on it, and the associated widget implements
    a render_with_errors method, call that; otherwise, call the regular widget rendering mechanism.
    """
    widget = bound_field.field.widget
    if bound_field.errors and hasattr(widget, "render_with_errors"):
        # Custom widgets can render their own inline error markup.
        return widget.render_with_errors(
            bound_field.html_name,
            bound_field.value(),
            attrs={"id": bound_field.auto_id},
            errors=bound_field.errors,
        )
    else:
        return bound_field.as_widget() | d1e5cffabac9834c5cde5c94227a2510baf1768d | 114,287 |
def replace_all(text, dict):
    """Replace all occurrences using a {find: replace} dictionary.

    NOTE(review): the parameter shadows the builtin `dict`; renaming it
    would break keyword callers, so it is only flagged here.
    """
    for k, v in dict.items():
        text = text.replace(k, v)
    return text | b209a1c9c67ec9017c38a9bc953f46659db26437 | 114,289 |
def group_bases(dna_seq, step=2):
    """
    Group the DNA bases from a sequence into chunks of length "step"
    (the final chunk may be shorter).

    :return: a list of groups
    """
    return [dna_seq[start:start + step] for start in range(0, len(dna_seq), step)]
def is_parent(t, s):
    """Whether t is s's parent (identity comparison, not equality)."""
    return t is s.parent | 4bab879707814f537c71a84e40b6dbeb94fd0c4a | 114,294 |
def yes_or_no(prompt: str) -> bool:
    """Get a yes or no answer from the CLI user.

    Re-prompts until a recognised answer is entered; Ctrl-C is treated
    as "no".
    """
    yes = ("y", "yes")
    no = ("n", "no")
    try:
        while True:
            # Normalise the answer: trim whitespace, lowercase.
            choice = input(prompt + " [{}/{}]: ".format(yes[0], no[0])).strip().lower()
            if choice in yes:
                return True
            if choice in no:
                return False
            print(
                "Please respond with {} or {}".format(
                    "/".join("'{}'".format(y) for y in yes), "/".join("'{}'".format(n) for n in no)
                )
            )
    except KeyboardInterrupt:
        # Add a newline to mimic a return when sending normal inputs.
        print()
        return False | d1d8fdba55f71c7e72a65d72e6fa8335f228221e | 114,295 |
import yaml
def read_config_file(config_file_path):
    """Read configuration file in YAML

    Args:
        config_file_path (str): the path to the configuration file

    Returns:
        dict: The dictionary with file contents

    NOTE(review): full_load resolves all standard YAML tags -- only use on
    trusted configuration files.
    """
    with open(config_file_path, 'r') as config_file:
        config = yaml.full_load(config_file)
    return config | ef8bab82521da44f72ee8ccce875852eaf0ebb56 | 114,297 |
from typing import Optional
def int_append_int(int_a: int, int_b: int, l_shift_a: int = 0, bits_oi_b: Optional[int] = None) -> int:
    """
    Shift int_a left by l_shift_a bits and append the low bits of int_b.

    :param int_a: integer
    :param int_b: integer to append
    :param l_shift_a: left shift applied to int_a
    :param bits_oi_b: number of low ("of interest") bits of int_b to append;
        defaults to l_shift_a so int_b exactly fills the freed bits
    :return: integer
    """
    bits_oi_b = bits_oi_b if bits_oi_b is not None else l_shift_a
    # 2 ** bits_oi_b - 1 builds a mask of bits_oi_b one-bits.
    return int_a << l_shift_a | (int_b & 2 ** bits_oi_b - 1) | bc86eec06ae7434a9903bab12cf990c67b851533 | 114,305 |
import random
def _random_tracking_rating() -> int:
    """
    Return a random tracking rating in the inclusive range 0..5.
    """
    return random.randrange(0, 6)
import pytz
def get_tz(tz):
    """
    Pass a string and return a pytz timezone object.

    If `tz` is not a valid zone name, search pytz.common_timezones for the
    first zone containing it as a substring (e.g. 'Dubai' -> 'Asia/Dubai').

    Raises pytz.UnknownTimeZoneError when nothing matches (the original
    crashed with UnboundLocalError on that path, and its search condition
    only worked by accident of reassigning `tz`).
    """
    try:
        return pytz.timezone(tz)
    except pytz.UnknownTimeZoneError:
        print("Searching for TZ...")
        for zone in pytz.common_timezones:
            if tz in zone:
                print(f"Timezone: {zone}")
                return pytz.timezone(zone)
        raise
def get_url(ticker, interval):
    """Sets the URL

    Args:
        ticker (str): Ticker Symbol
        interval (str): Time interval

    Returns:
        str: The final URL for the API

    SECURITY NOTE(review): the API key is hard-coded in source; move it to
    configuration or an environment variable before sharing this code.
    """
    # Get the final API url
    alpha_api = 'O1C7ECLZQODUYN6D'
    url = 'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY'
    outputsize = 'full'
    return url + '&symbol=' + ticker + '&outputsize=' + outputsize + \
        '&interval=' + interval + '&apikey=' + alpha_api | 861044ee2922cf1420639e5ada1e73583d5b1539 | 114,314 |
def get_sampling_weights(alternatives, sampling_weights_col=None):
    """
    Returns sampling weights for the alternative set.

    Parameters:
    -----------
    alternatives: pandas.DataFrame
        Data frame of alternatives to sample from.
    sampling_weights_col: str
        Name of column containing sampling weights.

    Returns:
    --------
    pandas.Series, None if `sampling_weights_col` is None
    """
    has_weights = sampling_weights_col is not None
    return alternatives[sampling_weights_col] if has_weights else None
def get_zero_measure_date(dates, start_date):
    """
    Given a start date, return the date of the measure closest in the past.

    Falls back to the earliest date when no measure precedes start_date
    (matching the original behaviour). Unlike the original, the result no
    longer silently depends on `dates` being sorted ascending.

    dates: iterable of datetime.dates (non-empty)
    start_date: datetime.date
    """
    past_dates = [date for date in dates if date <= start_date]
    if past_dates:
        return max(past_dates)
    return min(dates)
import bz2
import pickle
def save_dataset_dump(input_space, labels, feature_names, label_names, filename=None):
"""Saves a compressed data set dump
Parameters
----------
input_space: array-like of array-likes
Input space array-like of input feature vectors
labels: array-like of binary label vectors
Array-like of labels assigned to each input vector, as a binary
indicator vector (i.e. if 5th position has value 1
then the input vector has label no. 5)
feature_names: array-like,optional
names of features
label_names: array-like, optional
names of labels
filename : str, optional
Path to dump file, if without .bz2, the .bz2 extension will be
appended.
"""
data = {'X': input_space, 'y': labels, 'features': feature_names, 'labels': label_names}
if filename is not None:
if filename[-4:] != '.bz2':
filename += ".bz2"
with bz2.BZ2File(filename, "wb") as file_handle:
pickle.dump(data, file_handle)
else:
return data | 9278016145cba2c49a45a97154158d49d6f95c66 | 114,318 |
from datetime import datetime
def text_to_datetime(dt):
"""Convert text date and time to datetime format then put in ISO8601 format."""
converted = datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
reformat_iso = datetime.strftime(converted, "%Y-%m-%dT%H:%M:%S")
return reformat_iso | 5d7e9c2a744e4b56d815987cd5110fccd0476cc8 | 114,320 |
def convert_adc_samples(unit='ADC Counts'):
    """
    Provides the conversion factor to go from raw ADC samples or Streamer counts
    to a voltage output at the SQUID in Volts.

    Parameters
    ----------
    unit : string ['ADC Counts', 'Streamer'], default='ADC Counts'
        Starting unit. 'ADC Counts' to convert from adc units such as those
        from get_fast_samples. 'Streamer' to convert from the units used in the
        demodulator streamer channels (ledgerman, parser, get_samples, etc)

    Returns
    -------
    float
        Value in volts at the SQUID Output.
    """
    # Any unit other than 'ADC Counts' (case-insensitive) is treated as
    # streamer data.
    if not unit.upper()=='ADC COUNTS':
        prefactor = 1/128. # This came from a demod streamer -- between the
                           # 16-vs-24 bits scaling and the presence of the
                           # mixer, this is a factor of 128 scaling
    else:
        prefactor = 1.
    gain_1st_stage = 16 # On SQCB (topology: 1+G1/G2)
    gain_2nd_stage = 4 # On SQCB (inverting topology: G1/G2)
    gain_3rd_stage = 4 # On Mezz, with 50ohm on SQCB and 50ohn on Mezz (inverting topology: G1/G2)
    gain_4th_stage = 0.5 # Passive network between last amplifier and ADC chip
    adc_bits = 16 # SQUID has P-P voltage of 2 Volts. AROCs are in Peak Amplitudes
    # Volts at SQUID output per ADC count: inverse total gain over half the
    # ADC's signed full-scale range.
    vsq_per_aroc = ( (1./ (gain_1st_stage*gain_2nd_stage*gain_3rd_stage*gain_4th_stage))
                    /(2**(adc_bits-1)) )
    return vsq_per_aroc*prefactor | 55d06862cecfafe24d96f47b8ea43ff447514fa7 | 114,326 |
def value(dictionary, key):
    """ Utility function to be used as jinja filter, to ease extraction of values from dictionaries,
    which is sometimes necessary.

    Args:
        dictionary (dict): The dictionary from which a value is to be extracted
        key (str): Key corresponding to the value to be extracted

    Returns:
        any: The value stored in the key (raises KeyError if absent)
    """
    result = dictionary[key]
    return result
import math
def jump_search(arr: list, x: int) -> int:
    """
    Pure Python implementation of the jump search algorithm over a sorted list.

    Scans in blocks of ~sqrt(len(arr)), then linearly inside the candidate
    block. (The original sized its blocks with sqrt(x) -- the *target
    value* -- which raised a math domain error for negative targets, could
    produce a zero step for small ones, and confused indices with values.)

    Examples:
    >>> jump_search([0, 1, 2, 3, 4, 5], 3)
    3
    >>> jump_search([-5, -2, -1], -1)
    2
    >>> jump_search([0, 5, 10, 20], 8)
    -1
    >>> jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610], 55)
    10
    """
    n = len(arr)
    # Edge cases: empty list, or x outside the list's value range.
    if n == 0 or arr[0] > x or arr[-1] < x:
        return -1
    step = max(1, math.isqrt(n))
    low = 0
    # Jump block by block while the block's last value is still below x.
    while low < n and arr[min(low + step, n) - 1] < x:
        low += step
    # Linear scan inside the candidate block.
    for index in range(low, min(low + step, n)):
        if arr[index] == x:
            return index
    return -1
def _tree_query_radius_parallel_helper(tree, *args, **kwargs):
    """Helper for the Parallel calls in RadiusNeighborsMixin.radius_neighbors.

    The Cython method tree.query_radius is not directly picklable by
    cloudpickle under PyPy, so this module-level wrapper is pickled instead.
    """
    return tree.query_radius(*args, **kwargs) | 283d0a6dee0ae42986a74190e0e1d5397cad0336 | 114,334 |
def generate_grid(obj, size):
    """Returns an array of x,y pairs representing all the coordinates in a
    square (2*size+1 per side, clipped to the room) grid centered around a
    RoomPosition object.

    NOTE(review): `obj.pos or obj` accepts either an object carrying .pos
    or a position itself (Transcrypt/Screeps-style duck typing); in plain
    Python a pos-less object would raise AttributeError instead.
    """
    pos = obj.pos or obj
    # set boundaries to respect room position limits (coordinates 0..49)
    left, right = max(0, pos.x - size), min(49, pos.x + size) + 1
    top, bottom = max(0, pos.y - size), min(49, pos.y + size) + 1
    return [[x, y] for x in range(left, right) for y in range(top, bottom)] | 50024640d5b59320d594ec4bfccb241657294e01 | 114,336 |
import math
def get_equidistant_circle_points(r, num_points=8):
    """Gets equidistant [x, y] points on a circle of radius r."""
    return [
        [r * math.cos((index * 2 * math.pi) / num_points),
         r * math.sin((index * 2 * math.pi) / num_points)]
        for index in range(num_points)
    ]
import types
def _needs_add_docstring(obj):
    """
    Returns true if the only way to set the docstring of `obj` from python is
    via add_docstring.

    This function errs on the side of being overly conservative.
    """
    # Heap types (classes created at runtime) have a writable __doc__.
    Py_TPFLAGS_HEAPTYPE = 1 << 9
    if isinstance(obj, (types.FunctionType, types.MethodType, property)):
        return False
    if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
        return False
    return True | 7994ffb2a11e1eb8681556c138251c1ce21f617a | 114,347 |
def _none_to_neg_1(x):
    """Swaps None to -1 for torch tensor compatibility; any other value
    passes through unchanged."""
    return -1 if x is None else x
def _get_units(target, av_corr, flux_cal=None):
    """Determine the amplitude units of a target.

    Parameters
    ----------
    target : :class:`katpoint.Target`
        target whose amplitude units are to be obtained
    av_corr : dict
        dict with model flux densities, keyed by target.name + '_model'
    flux_cal : dict, optional
        dict of flux calibrated targets (defaults to empty)

    Returns
    -------
    str : units ('Jy' when flux-calibrated, else 'Counts')
    """
    # Avoid the mutable-default-argument pitfall: default was `{}`.
    if flux_cal is None:
        flux_cal = {}
    # targets with these tags are implicitly flux-calibrated
    # by the pipeline if a flux model is provided
    implicit_tags = ['bpcal', 'bfcal']
    implicit_gain = any(t in target.tags for t in implicit_tags)
    has_model = target.name + '_model' in av_corr
    is_flux = target.name in flux_cal
    if (implicit_gain and has_model) or is_flux:
        return 'Jy'
    return 'Counts'
from typing import Tuple
import re
def split_house_number(house_number: str) -> Tuple[int, str]:
    """Splits house_number into a numerical prefix and a remainder part.

    Returns (0, remainder) when there is no leading digit run.
    """
    match = re.search(r"^([0-9]*)([^0-9].*|)$", house_number)
    if not match:  # pragma: no cover
        return (0, '')
    digits, remainder = match.group(1), match.group(2)
    # Group 1 is `[0-9]*`, so it is either a valid integer or empty.
    numeric = int(digits) if digits else 0
    return (numeric, remainder)
def get_endpoint(query):
    """
    Build the Salesforce REST endpoint (API v51.0) that executes the given
    SOQL query.

    NOTE(review): the query is appended verbatim -- presumably callers
    URL-encode it beforehand; confirm before passing raw SOQL containing
    spaces or special characters.
    """
    return f'/services/data/v51.0/query/?q={query}'
from pathlib import Path
def example_repo_path(root_path: str) -> str:
    """Fixture that returns the path of the example feature repository
    (``<root_path>/example_feature_repo``) as a string."""
    return str(Path(root_path, "example_feature_repo"))
import re
def read_joblist(filelist):
    """
    Read all the files in the filelist, strip all the comments, and return
    the list of test jobs. Each test job is identified by a string.

    A "#" starts a comment that runs to the end of the line.  Lines that are
    blank after comment removal and whitespace stripping are ignored.
    """
    joblist = []
    for filename in filelist:
        # Context manager guarantees the file is closed even on errors
        # (original opened/closed manually and leaked on exception).
        with open(filename, "r") as fp:
            for line in fp:
                # Remove everything from the first "#" onwards, then strip.
                job = line.split("#", 1)[0].strip()
                # If there is anything left add it to the joblist
                if job:
                    joblist.append(job)
    return joblist
def logistic_map(x, r=4.-1./32):
    """Evaluate the logistic map r*x*(1-x) at x (default r = 3.96875)."""
    complement = 1. - x
    return r * x * complement
def achromatic_lightness_correlate(Q):
    r"""
    Returns the *achromatic Lightness* correlate :math:`L_p^\star`.

    Parameters
    ----------
    Q : numeric
        Achromatic response :math:`Q`.

    Returns
    -------
    numeric
        *Achromatic Lightness* correlate :math:`L_p^\star`.

    Examples
    --------
    >>> Q = -0.000117024294955
    >>> achromatic_lightness_correlate(Q)  # doctest: +ELLIPSIS
    49.9998829...
    """
    # Fix: docstring is now a raw string so ``\s`` in ``\star`` is no longer
    # an invalid escape sequence (DeprecationWarning/SyntaxWarning on modern
    # Python). The correlate is simply the response offset by 50.
    return Q + 50
def checkGenbankFile(filin):
    """Check GenBank annotation file given by user.

    Reads one line from the open file object and returns 0 when it begins
    with 'LOCUS' (a valid GenBank header), otherwise 1.
    """
    first_line = filin.readline()
    return 0 if first_line.startswith('LOCUS') else 1
import re
def dot_escape(s):
    """Return s in a form suitable for dot: every character other than
    ASCII alphanumerics, double quotes and spaces is prefixed with a
    backslash."""
    return re.sub(r'[^a-zA-Z0-9" ]', lambda m: "\\" + m.group(0), s)
def match(parsed_spec, dt):
    """
    Returns true if parsed_spec would trigger on the datetime dt.

    Checks are short-circuited in order: minute, hour, day-of-month,
    month, day-of-week.
    """
    if dt.minute not in parsed_spec.minute:
        return False
    if dt.hour not in parsed_spec.hour:
        return False
    if dt.day not in parsed_spec.dom:
        return False
    if dt.month not in parsed_spec.month:
        return False
    # dt.weekday() is 0 for Monday; the spec's dow is 1-based.
    return dt.weekday() + 1 in parsed_spec.dow
def check_binary(check_number):
    """
    Check whether the input consists only of the digits 0 and 1.

    Binary numbers can only contain 0 or 1, so this must hold before any
    binary arithmetic is attempted on the value.

    :param check_number: the number entered by the user
    :return: True/False
    """
    distinct_digits = {int(ch) for ch in set(str(check_number))}
    print(f'\nchecking {check_number}')
    return distinct_digits.issubset({0, 1})
def write_kr_titl_string(trs):
    """
    write_kr_titl_string(trs)

    Build a title string naming the k-ratio / uncertainty columns produced
    by the compKRs function for the given transition set.

    Input
    -----
    trs   The transition set used to compute the K-ratios

    Return
    ------
    A string with the title, e.g. "[[Fe.mu, Fe.unc], [Cu.mu, Cu.unc]]"
    """
    entries = []
    for transition in trs:
        element = transition.toString().split("-")[0]
        entries.append("[" + element + ".mu," + element + ".unc],")
    title = "[" + "".join(entries) + "]"
    # Post-process exactly as callers expect: collapse the trailing "],]",
    # strip spaces, map the light-element "All" family to "K", then add a
    # space after every comma.
    title = title.replace("],]", "]]")
    title = title.replace(" ", "")
    title = title.replace("All", "K")
    title = title.replace(",", ", ")
    return title
def _category_errmsg(particle, require, exclude, any_of, funcname) -> str:
    """
    Build an error message describing why *particle* fails the categorical
    requirements (required / any-of / excluded category sets) of *funcname*.
    """
    message = (
        f"The particle {particle} does not meet the required "
        f"classification criteria to be a valid input to {funcname}. "
    )
    # Only mention the requirement kinds that were actually specified.
    for criteria, phrase in ((require, "must belong to all"),
                             (any_of, "must belong to any"),
                             (exclude, "cannot belong to any")):
        if criteria:
            message += (
                f"The particle {phrase} of the following categories: "
                f"{criteria}. "
            )
    return message
from typing import Optional
from typing import Union
from typing import List
from typing import Any
def get_key_str(
    dictionary: dict,
    key: Optional[Union[str, List[str]]],
) -> Any:
    """
    Look up *key* in *dictionary* and return the stored value.

    Args:
        dictionary: mapping to look in
        key: key to fetch

    Returns:
        the value stored under *key*; raises KeyError when absent.
    """
    value = dictionary[key]
    return value
def map_resource_instance_type(data_type: str, default_instance_definition: str) -> str:
    """
    Map a resource to its C++ client instance type.

    In the C++ client, instances are SINGLE, MULTIPLE, or PTR. SINGLE and
    MULTIPLE are LwM2M standards and arrive via `default_instance_definition`
    (the MultipleInstances tag of a standard xml doc). PTR is specific to the
    custom C++ lwm2m client and optimises read-only strings and pointers to
    executables; it is selected from the `data_type` argument.

    :param data_type: the data type string returned by `map_to_cpp_data_type()`
    :param default_instance_definition: default value from the standard xml doc
    :return: the C++ defined instance type string
    :rtype: str
    """
    # Executables and pointer (e.g. char*) data types use the PTR kind.
    is_pointer_like = data_type == "executable" or "*" in data_type
    if is_pointer_like:
        return "PTR"
    return default_instance_definition
def factors_of(number: int) -> list[int]:
    """
    Return every positive divisor of ``number``, in ascending order.

    Note: the list contains *all* divisors (e.g. 6 -> [1, 2, 3, 6]), so the
    product of the list is generally NOT equal to ``number``; the previous
    description claiming that was incorrect.
    Returns [] for number <= 0 (the range below is then empty).
    """
    # Trial division over 1..number; O(number) time.
    return [i for i in range(1, number + 1) if number % i == 0]
def _determine_output(iteration) -> str:
    """Return the FizzBuzz token for *iteration*: 'FizzBuzz' for multiples
    of 15, 'Fizz' for multiples of 3, 'Buzz' for multiples of 5, otherwise
    the number itself as a string."""
    divisible_by_3 = iteration % 3 == 0
    divisible_by_5 = iteration % 5 == 0
    if divisible_by_3 and divisible_by_5:
        return 'FizzBuzz'
    if divisible_by_3:
        return 'Fizz'
    if divisible_by_5:
        return 'Buzz'
    return str(iteration)
def format_code(code):
    """
    Format *code* with 1-based line numbers.

    Parameters
    ----------
    code : str
        Code

    Returns
    -------
    str
        Code prefixed with line numbers; the result starts with a newline
        because of the leading empty entry.
    """
    numbered = [""]
    for lineno, text in enumerate(code.split('\n'), 1):
        # Left-justify the line number in a 5-character field.
        numbered.append(f"{lineno:<5} {text}")
    return '\n'.join(numbered)
import json
def artists(sjsonfile, index):
    """Generates a string with a list of artists in a given index in a sjson.

    Args:
        sjsonfile (string): Entire sjson from Spotify
        index (int): Index of the track from which will be extracted the artists

    Returns:
        string: artist names, each prefixed (not separated) by one space, so
        the result begins with a leading space — matching callers' expectation.
    """
    track_artists = json.loads(sjsonfile)[index]['track']['artists']
    return "".join(" " + entry['name'] for entry in track_artists)
def remove_managed_variant(store, managed_variant_id):
    """Remove a managed variant from *store*, returning whatever the store's
    delete call returns."""
    return store.delete_managed_variant(managed_variant_id)
def get_language(languages, language_id):
    """Retrieve the first language in *languages* whose 'id' equals
    *language_id*, or None when no entry matches."""
    return next(
        (entry for entry in languages if entry['id'] == language_id),
        None,
    )
def iso_string_to_sql_utcdate_sqlite(x: str) -> str:
    """
    Provides SQLite SQL to convert a column to a ``DATE``. The argument
    ``x`` is the SQL expression to be converted (such as a column name).

    NOTE(review): SQLite's DATE() performs no timezone conversion by itself;
    presumably the stored values are already UTC — confirm against callers.
    """
    return "DATE({})".format(x)
import functools
import operator
def get_in(keys, coll, default=None):
    """ Reaches into nested associative data structures. Returns the value for path ``keys``.

    If the path doesn't exist returns ``default``.

    >>> transaction = {'name': 'Alice',
    ...                'purchase': {'items': ['Apple', 'Orange'],
    ...                             'costs': [0.50, 1.25]},
    ...                'credit card': '5555-1234-1234-1234'}
    >>> get_in(['purchase', 'items', 0], transaction)
    'Apple'
    >>> get_in(['name'], transaction)
    'Alice'
    >>> get_in(['purchase', 'total'], transaction, 0)
    0
    """
    current = coll
    try:
        # Walk the key path one step at a time; any missing or
        # non-subscriptable link aborts the walk.
        for step in keys:
            current = current[step]
    except (KeyError, IndexError, TypeError):
        return default
    return current
def generate_traefik_path_labels(url_path, segment=None, priority=2,
                                 redirect=True):
    """Generates traefik labels routing a path, with optional redirects.

    :url_path: path that should be used for the site
    :segment: Optional traefik segment when using multiple rules
    :priority: Priority of frontend rule
    :redirect: Redirect to path with trailing slash
    :returns: list of labels for traefik
    """
    # Common label prefix; a segment inserts an extra ".<segment>" part.
    # The "$$" below is docker-compose escaping for a literal "$".
    frontend = 'traefik.frontend' if segment is None \
        else f'traefik.{segment}.frontend'
    labels = [f'{frontend}.priority={priority}']
    if redirect:
        labels.append(f'{frontend}.redirect.regex=^(.*)/{url_path}$$')
        labels.append(f'{frontend}.redirect.replacement=$$1/{url_path}/')
        labels.append(
            f'{frontend}.rule=PathPrefix:/{url_path};'
            f'ReplacePathRegex:^/{url_path}/(.*) /$$1')
    else:
        labels.append(f'{frontend}.rule=PathPrefix:/{url_path}')
    return labels
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.