# Dataset header: columns are `content` (string, 39–14.9k chars), `sha1` (40-char hex string), `id` (int64, 0–710k).
def column_convertor(x):
    """
    Reshape a 1d array, in place, to shape ``(1, n)``.

    NOTE(review): despite the name/docstring, ``(1, n)`` is a *row*
    vector, not a column vector ``(n, 1)`` — confirm with callers
    before changing. Mutates ``x.shape`` in place and returns the
    same array object.
    """
    x.shape = (1, x.shape[0])
    return x | 7f9af6e42c36e7ebeee2fb53e4730ee36cc6cfd0 | 92,411 |
def format_number(num, digits=0):
    """Give long numbers an SI prefix (k, M, G, ...).

    Args:
        num: Number to format.
        digits: Decimal places in the mantissa.

    Returns:
        str: e.g. ``format_number(2500, 1) -> '2.5k'``. Values below 1
        (including 0) are rendered without a prefix — previously this
        case fell off the end of the loop and returned None.
    """
    formatstring = '{{:.{}f}}{{}}'.format(digits)
    prefixes = [(1e24, 'Y'), (1e21, 'Z'), (1e18, 'E'), (1e15, 'P'),
                (1e12, 'T'), (1e9, 'G'), (1e6, 'M'), (1e3, 'k'), (1, '')]
    for magnitude, label in prefixes:
        if num >= magnitude:
            return formatstring.format(num / magnitude, label)
    # Fallback for num < 1 (0, fractions, negatives): plain fixed-point.
    return formatstring.format(num, '')
def parse_curl_error(proc_stderr):
    """Extract a human-readable failure reason from curl stderr.

    Args:
        proc_stderr (str or bytes): Stderr captured from a curl command.

    Returns:
        str: The message after curl's "curl: (N)" prefix; if stderr has
        fewer than three whitespace-separated fields, the stripped
        stderr itself (which is "" for empty input).
    """
    if isinstance(proc_stderr, bytes):
        proc_stderr = proc_stderr.decode("utf-8")
    stripped = proc_stderr.rstrip("\n")
    parts = stripped.split(None, 2)
    # curl errors look like "curl: (6) Could not resolve host"; the
    # third field is the message. Short output falls back to itself,
    # matching the original try/except IndexError behavior.
    return parts[2] if len(parts) > 2 else stripped
def ApplyCompression(params):
    """Returns a bool indicating whether compression library is to be used."""
    if params.apply_pruning:
        return False
    return params.pruning_hparams_dict is not None
def _get_filename(dataset_name, config):
    """Build a filename encoding the model and dataset configuration."""
    model_cfg = config['model_config']
    suffix = '_{}_epochs_{}_{}_layers_{}_timesteps'.format(
        config['epochs'], model_cfg['hidden_size'],
        model_cfg['n_layers'], config['sequence_length'])
    return dataset_name + suffix
def _get_kwargs(**kwargs):
    """
    Collect the relevant training parameters from **kwargs.

    Args:
        **kwargs: keyword arguments.

    Returns:
        dict: each known parameter taken from kwargs, or its default.
    """
    defaults = {
        'model': None,
        'train_ds': None,
        'eval_ds': None,
        'epochs': 100,
        'batch_size': 32,
        'valid_size': 0.2,
        'patience': 32,
        'lr_decay': 0.0,
        'lr': 0.001,
        'beta_1': 0.9,
        'beta_2': 0.999,
        'eps': 1e-08,
        'weight_decay': 0.0,
        'hidden_dim': 128,
        'n_hidden': 2,
        'dropout': 0.0,
        'amsgrad': False,
    }
    return {key: kwargs.get(key, default) for key, default in defaults.items()}
import functools
import warnings
def deprecated(message=None):
    """
    Return a decorator that wraps a function so each call emits a
    DeprecationWarning.

    Args:
        message: Optional warning text; defaults to a message naming
            the deprecated function.
    """
    def mk_deprecated_wrapper(function):
        msg = (message
               if message is not None
               else f'`{function.__module__}.{function.__name__}` '
               'is deprecated')

        # functools.wraps preserves the wrapped function's __name__,
        # __doc__ and signature metadata; without it the decorator
        # broke introspection and documentation tools.
        @functools.wraps(function)
        def wrap_deprecated(*args, **kwds):
            warnings.warn(DeprecationWarning(msg), stacklevel=2)
            return function(*args, **kwds)
        return wrap_deprecated
    return mk_deprecated_wrapper
import struct
def unpack(msg):
    """
    Unpack a Garmin device communication packet.

    Layout: uint16 pid, uint16 length, then ``length`` payload bytes,
    all little-endian.
    """
    header = struct.Struct("<HH")
    pid, length = header.unpack(msg[:header.size])
    payload = msg[header.size:header.size + length]
    return pid, length, payload
def format_dict_str(record):
    """Coerce every value of *record* to str, in place, and return it."""
    for key in record:
        record[key] = str(record[key])
    return record
def guess_casing(s: str) -> str:
    """Guess whether *s* is snake_case or PascalCase.

    An underscore always means snake; otherwise any uppercase letter
    means pascal; everything else defaults to snake.
    """
    if "_" not in s and any(ch.isupper() for ch in s):
        return "pascal"
    return "snake"
def merge_shortcuts_dicts(dicts_list):
    """Merge all shortcut dictionaries in a list; later dicts win on
    duplicate keys."""
    merged = {}
    for shortcuts in dicts_list:
        merged.update(shortcuts)
    return merged
from typing import List
def build_pagination_list(
    current_page: int,
    max_page: int,
    num_end_pages: int = 3,
) -> List[str]:
    """
    Build the page-number labels for a pagination widget.

    Keeps the first/last ``num_end_pages`` pages plus the pages within
    1 of ``current_page``, inserting ``"..."`` wherever a run of pages
    is skipped.

    Returns:
        A list of page-number strings and ``"..."`` separators, e.g.::

            >>> build_pagination_list(current_page=7, max_page=12)
            ['1', '2', '3', '...', '6', '7', '8', '...', '10', '11', '12']
    """
    labels: List[str] = []
    previous = 0
    for page in range(1, max_page + 1):
        near_edge = page <= num_end_pages or page > max_page - num_end_pages
        near_current = abs(page - current_page) <= 1
        if near_edge or near_current:
            # A gap since the last kept page gets an ellipsis marker.
            if page != previous + 1:
                labels.append("...")
            labels.append(str(page))
            previous = page
    return labels
def format_datetime(obj):
    """Render a datetime as ``YYYY-MM-DD HH:MM``."""
    return "{:%Y-%m-%d %H:%M}".format(obj)
def read_until(steg_bytes: bytes, offset: int, ending: str):
    """
    Read bytes from *offset* until the *ending* byte sequence
    (UTF-8 encoded) is found.

    Returns:
        tuple: (bytes read, offset of the ending sequence). If the
        ending never occurs, returns all remaining bytes and
        ``len(steg_bytes)`` — previously this case fell off the end of
        the loop and returned None.
    """
    marker = ending.encode()
    bytes_read = b""
    while offset < len(steg_bytes):
        # Stop just before the ending sequence when it is found.
        if steg_bytes[offset:offset + len(marker)] == marker:
            return bytes_read, offset
        bytes_read += steg_bytes[offset:offset + 1]
        offset += 1
    return bytes_read, offset
from typing import List
from typing import Optional
def _get_next(next_urls: List[str]) -> Optional[str]:
    """
    Pick a safe "next" URL from a list of candidates.

    Only the first candidate is considered; it is accepted only if it
    is a non-None local path (starts with '/') after stripping.

    :param List[str] next_urls: List of URLs to check
    :rtype: Optional[str]
    :return: A URL or None if invalid
    """
    if not next_urls:
        return None
    candidate = next_urls[0]
    if candidate is None:
        return None
    candidate = candidate.strip()
    return candidate if candidate.startswith('/') else None
def style(style_type, content, title=None, summary=None):
    """Android/Amazon notification style builder.

    :keyword style_type: one of "big_text", "big_picture", "inbox".
    :keyword content: str, or a list of str when style_type == "inbox".
    :keyword title: optional notification title override.
    :keyword summary: optional notification summary override.
    :raises ValueError: if style_type is not a known style.
    """
    # Maps the public style name to the payload key holding the content.
    mapping = {"big_text": "big_text", "big_picture": "big_picture", "inbox": "lines"}
    if style_type not in mapping:
        raise ValueError(
            "style_type must be one of {}.".format(", ".join(mapping.keys()))
        )
    payload = {
        "type": style_type,
        mapping[style_type]: content,
        "title": title,
        "summary": summary,
    }
    # Drop unset optional fields.
    return {key: val for key, val in payload.items() if val is not None}
def list_order_by(l, firstItems):
    """Return a copy of *l* reordered so that each item of *firstItems*
    (when present) appears at the front, in firstItems order."""
    result = list(l)
    # Walk firstItems backwards so the first item ends up frontmost.
    for item in reversed(firstItems):
        if item in result:
            result.remove(item)
            result.insert(0, item)
    return result
def pre_id_to_post(pre_id):
    """
    Convert a synset id of the format n88888888 to 88888888-n.
    """
    pos, offset = pre_id[0], pre_id[1:]
    return f"{offset}-{pos}"
import random
def space() -> str:
    """Return ' ' with 90% probability, else a unicode figure space."""
    pool = [" "] * 90 + ["\u2007"] * 10
    return random.choice(pool)
def DUMMY(_workflow, view):
    """Never takes any action: maps every element of *view* to None."""
    return dict.fromkeys(view)
def build_filter_query(key, values):
    """Create a text query that matches a union of all values for a key.

    build_filter_query("foo", ["x", "y"])
    => foo = |("x"c, "y"c)
    build_filter_query("~#foo", ["1"])
    => #(foo = 1)
    """
    if not values:
        return u""
    # Numeric keys ("~#...") get one #(...) clause per value.
    if key.startswith("~#"):
        numeric_key = key[2:]
        clauses = ["#(%s = %s)" % (numeric_key, value) for value in values]
        if len(clauses) == 1:
            return clauses[0]
        return u"|(%s)" % ", ".join(clauses)
    # Text values: escape backslashes and quotes, compare case-folded.
    escaped = ["'%s'c" % v.replace("\\", "\\\\").replace("'", "\\'")
               for v in values]
    joined = ", ".join(escaped)
    if len(values) == 1:
        return u"%s = %s" % (key, joined)
    return u"%s = |(%s)" % (key, joined)
def escape(text: str) -> str:
    """
    Escape HTML special characters: '&' -> '&amp;', '<' -> '&lt;',
    '>' -> '&gt;'.

    :param text: the text to escape
    :return: the escaped text
    """
    # '&' must be replaced first so the '&' inside '&lt;'/'&gt;' is not
    # double-escaped. (The previous mapping had been mangled by an
    # HTML-unescaping pass and replaced each character with itself,
    # making the function a no-op.)
    chars = {"&": "&amp;", "<": "&lt;", ">": "&gt;"}
    for old, new in chars.items():
        text = text.replace(old, new)
    return text
def get_evidence_for_item(search_results, item):
    """
    Return the evidence entries (with their scores attached in place)
    that have a positive score for the given search-result item.
    """
    evidence_list = search_results.get('evidence')
    if evidence_list is None:
        return []
    collected = []
    for idx, score in enumerate(item['scores']):
        if score > 0:
            entry = evidence_list[idx]
            entry['score'] = score
            collected.append(entry)
    return collected
def _pad_plot_frame(ax, pad=0.01):
    """
    Expand both axis limits of *ax* by *pad* fraction of each range.
    """
    x_lo, x_hi = ax.get_xlim()
    y_lo, y_hi = ax.get_ylim()
    x_pad = (x_hi - x_lo) * pad
    y_pad = (y_hi - y_lo) * pad
    ax.set_xlim(x_lo - x_pad, x_hi + x_pad)
    ax.set_ylim(y_lo - y_pad, y_hi + y_pad)
    return ax
def replace(group):
    """
    Fill the null entries of a pandas group, in place, with the mean of
    its non-null entries, and return the group.
    """
    missing = group.isnull()
    group[missing] = group[~missing].mean()
    return group
def support(model, state, formula):
    """Return True iff *state* in *model* supports the proposition
    expressed by *formula*."""
    support_set = formula.getSupportSet(model)
    return state in support_set
import math
def sine_easein(pos):
    """
    Easing function for animations: Sine Ease In.

    Maps pos=0 -> 0 and pos=1 -> 1 along a quarter sine wave.
    """
    return 1 + math.sin(0.5 * math.pi * (pos - 1))
def get_closest_point(x0, y0, a, b, c):
    """
    Return the point on the line ax + by + c = 0 closest to (x0, y0).
    """
    # Orthogonal projection of (x0, y0) onto the line.
    denom = a ** 2 + b ** 2
    x = (b * (b * x0 - a * y0) - a * c) / denom
    y = (a * (-b * x0 + a * y0) - b * c) / denom
    return x, y
def print_list(label, array):
    """
    Pretty-print *array* (sorted, comma-separated) prefixed by *label*.

    Note: sorts *array* in place (preserved from the original).
    Always returns True.
    """
    # Renamed the accumulator: the original shadowed the builtin `list`.
    joined = ""
    if array:
        array.sort()
        joined = ", ".join(array)
    print("%s: %s" % (label, joined))
    return True
def db_list_maker(info):
    """
    Collect the first element of each row into a list.

    :param info: tuple/list of sequences (e.g. DB rows)
    :return: list of each row's first element
    """
    return [row[0] for row in info]
import math
def angle_rad(coord, origin=(0.0, 0.0)):
    """Absolute angle (radians) of *coord* with respect to *origin*."""
    dy = coord[1] - origin[1]
    dx = coord[0] - origin[0]
    return math.atan2(dy, dx)
def vector_inverse(v):
    """
    Return the additive inverse (component-wise negation) of vector *v*.

    :param v: Vector (any iterable of numbers)
    :return: list with each component negated
    """
    inverted = []
    for component in v:
        inverted.append(-component)
    return inverted
import requests
def get_tor_session(port):
    """
    Build a requests session that routes HTTP traffic through the local
    Tor SOCKS proxy (requests does not speak to Tor natively).

    Heavily influenced by: https://stackoverflow.com/a/33875657/5843840

    param: port : port which tor is listening on (often 9050, 9051, or 9151)
    returns: a requests.session object.
    """
    session = requests.Session()
    session.proxies = {
        'http': 'socks5h://127.0.0.1:{}'.format(port)
    }
    return session
import unicodedata
def normalize(s, allow_nonascii=True):
    """Apply Unicode normalization; optionally strip non-ASCII characters.

    With allow_nonascii, NFKC-normalize; otherwise NFKD-decompose and
    drop anything that does not encode to ASCII.
    """
    if not allow_nonascii:
        return (unicodedata.normalize('NFKD', s)
                .encode('ascii', 'ignore')
                .decode('ascii'))
    return unicodedata.normalize('NFKC', s)
from typing import Sequence
def lcsubstring(A, B, traceback=True):
    """
    Computes the longest common, contiguous substring in both
    sequences `A`, `B`. The elements in `A` and `B` must be comparable
    with these comparison functions:
    https://docs.python.org/3/reference/datamodel.html#object.__lt__
    :param A
        The first sequence of elements.
    :param B
        The second sequence of elements.
    :param traceback = True
        Whether to gather a list of all longest substrings in the
        resulting computation.
    :return
        if traceback:
            (<list:substrings>, <int:longest_size>)
        else:
            <int:longest_size>
    """
    # verification
    if not isinstance(A, Sequence) or not isinstance(B, Sequence):
        raise ValueError("Sequences A, B must be of Sequence type")
    La = len(A)
    Lb = len(B)
    # maxlen, table & stack
    # W[Ti][Tj] holds the length of the common suffix of A[:Ti] and
    # B[:Tj] (classic O(La*Lb) DP); S records each (Ti, Tj) where the
    # running maximum was matched or beaten, so the tail of S holds all
    # endpoints of maximal substrings.
    maxlen = 0
    W = [ [ 0 for _ in range(Lb + 1) ] for _ in range(La + 1) ]
    S = []
    for i, a in enumerate(A):
        Ti = i + 1
        for j, b in enumerate(B):
            Tj = j + 1
            if a == b:
                k = W[Ti-1][Tj-1]
                W[Ti][Tj] = k + 1
                if k + 1 >= maxlen:
                    S.append((Ti, Tj))
                    maxlen = k + 1
    def _traceback(Ti, Tj):
        # Walk the diagonal back from a maximal endpoint, collecting
        # the matched elements in order.
        nonlocal W, S, A
        result = []
        while W[Ti][Tj] > 0:
            result.insert(0, A[Ti-1])
            Ti -= 1
            Tj -= 1
        return result
    if traceback:
        traceback = []
        # S is ordered by non-decreasing substring length, so reading it
        # backwards yields all maximal endpoints until a shorter one.
        for pos in reversed(S):
            Ti, Tj = pos
            if W[Ti][Tj] < maxlen:
                break
            traceback.append(_traceback(Ti, Tj))
        return traceback, maxlen
    return maxlen | 77cde389d6aadbef1b7cb6326b092258909d7bc7 | 92,534 |
def sparse_dot(full_matrix, sparse_matrix):
    """
    Compute ``full_matrix.dot(sparse_matrix)`` without explicit
    transposes at the call site.

    :param full_matrix: dense matrix
    :type full_matrix: ndarray
    :param sparse_matrix: sparse matrix
    :type sparse_matrix: csc_matrix
    :return: full_matrix.dot(sparse_matrix)
    :rtype: ndarray
    """
    # (S^T · F^T)^T == F · S
    product = sparse_matrix.T.dot(full_matrix.T)
    return product.T
def error_max_retry(num_retries, err_string):
    """Build the error_max_retry message."""
    return ("Unable to retrieve artifact after {} retries: {}"
            .format(num_retries, err_string))
def get_restaurants(restaurants, category=None):
    """
    Extract restaurant names, optionally filtered by category.

    Parameters:
        restaurants (list): dicts with at least 'Name' and 'Category' keys
        category (list|None): categories to keep; falsy keeps everything

    Returns:
        restaurants_names (list): the matching restaurants' names
    """
    if not category:
        return [r['Name'] for r in restaurants]
    return [r['Name'] for r in restaurants if r['Category'] in category]
def remove_multiple_elements_from_list(a_list, indices_to_be_removed):
    """
    Return a copy of *a_list* without the elements at the given indices.

    :param a_list: list
        list to be processed
    :param indices_to_be_removed: list
        indices of the elements that are no longer needed
    """
    # Converting to a set makes each membership test O(1); the previous
    # list lookup made this O(len(a_list) * len(indices_to_be_removed)).
    drop = set(indices_to_be_removed)
    return [item for i, item in enumerate(a_list) if i not in drop]
def divide_vector(vector: list, number: float) -> list:
    """Return *vector* with every component divided by *number*."""
    return [component / number for component in vector]
def generate_sess_end_map(sess_end, sessId, time):
    """
    Record the latest end time seen for a session.

    :param sess_end: dict mapping sessId -> end time (mutated in place)
    :param sessId: session id of the new action
    :param time: time of the new action
    :return: the same sess_end dict
    """
    sess_end[sessId] = max(time, sess_end[sessId]) if sessId in sess_end else time
    return sess_end
def get_options(value):
    """Returns all options for the given option group.

    NOTE(review): ``value`` is presumably a Django model instance with
    a reverse relation ``option_set`` (an option group) — confirm the
    concrete model with callers.
    """
    return value.option_set.all() | 50d030128054a0380bd24a26a02c9a9e85a289a4 | 92,546 |
def get_nm(ltag):
    """Return the value of the NM tag, or None if it is absent."""
    return next((value for name, value in ltag if name == "NM"), None)
def subsetindex(full, subset):
    """
    Return the indices in *full* where each element of *subset* occurs.

    *subset* may be a single string or a list of strings; indices are
    grouped by subset element, in subset order.
    """
    if isinstance(subset, str):
        subset = [subset]
    indices = []
    for target in subset:
        indices.extend(i for i, x in enumerate(full) if x == target)
    return indices
import requests
def api_query(data_code, params):
    """
    Retrieve a query result from the World Bank Data API.

    Args:
        data_code: (str) World Bank reference code for data source.
        params: (dict) Query parameters.

    Returns:
        Parsed JSON response object.
    """
    base = 'https://api.worldbank.org/v2/en/country/all/indicator/'
    response = requests.get(base + data_code, params=params)
    return response.json()
from datetime import datetime, timezone
def unix_to_human_time(timestamp):
    """
    Return a human-readable UTC string for the given Unix timestamp.

    :param timestamp: Unix timestamp (seconds since the epoch).
    :return: str in 'YYYY-MM-DD HH:MM:SS' format (UTC).
    """
    # datetime.utcfromtimestamp() is deprecated since Python 3.12;
    # an aware UTC datetime formats identically here.
    return datetime.fromtimestamp(timestamp, tz=timezone.utc).strftime(
        '%Y-%m-%d %H:%M:%S')
def get_pecha_num_list(pechas):
    """Extract all the pecha numbers from catalog data.

    Args:
        pechas (list): pecha metadata rows; each row's first field holds
            an id whose characters [2:8] encode the pecha number.

    Returns:
        list: pecha numbers as ints.
    """
    numbers = []
    for pecha in pechas:
        numbers.append(int(pecha[0][2:8]))
    return numbers
def goto_column(column_num):
    """Move the cursor to a given column on the **current line**.

    Emits ``0`` (jump to column 1) followed by a count and ``l``
    (move right), e.g. ``goto_column(5)`` -> ``"04l"``.

    Usage:
        `In a config file:`
        .. code-block:: yaml
            - goto_column: 5
        `Using the API:`
        .. code-block:: python
            ezvi.tools.goto_column(5)

    :type column_num: int
    :param column_num: The number of the column to move the cursor to.
    :rtype: str
    :return: ``0`` plus a column position appended with ``l``.
    """
    # This would be much cleaner if the cursor's position were available.
    return "0{}l".format(column_num - 1)
def GetExpandedList(l, s):
    """Return a list where every element in *l* is duplicated *s* times."""
    return [x for x in l for _ in range(s)]
def is_number(s: str) -> bool:
    """
    Return True if *s* can be parsed as a float, else False.
    """
    try:
        float(s)
    except (TypeError, ValueError):
        # Narrowed from a bare `except`, which also swallowed
        # KeyboardInterrupt/SystemExit. TypeError keeps the original
        # False result for non-string/non-number inputs like None.
        return False
    return True
def get_location_coordinates_from_address(
    self,
    address: str,
) -> list:
    """Look up latitude/longitude candidates for an address.

    Calls ``GET /location/addressToLocation?address={address}``. The
    returned list is ordered best match first, so use the first element
    for the most likely location. Each entry is a dict containing
    ``address_components`` (list of formatted address parts),
    ``formatted_address`` (str), and ``geometry`` (dict) whose
    ``location`` key holds ``lat``/``lng`` floats.

    :param address: Address to find latitude and longitude coordinates,
        e.g. ``3333 Scott Blvd Santa Clara, CA 95054``
    :type address: string
    :return: list of location-candidate dictionaries
    :rtype: list
    """
    endpoint = "/location/addressToLocation?address={}".format(address)
    return self._get(endpoint)
def keep_within_range(minimum, maximum, x):
    """
    Clamp *x* to the inclusive range [minimum, maximum]: values at or
    below *minimum* become *minimum*, values at or above *maximum*
    become *maximum*.
    """
    # Check order matches the original (minimum bound wins first).
    if x <= minimum:
        return minimum
    return maximum if x >= maximum else x
def is_monotonic(a, b, c):
    """
    Determine whether the three numbers are in sequence, either
    low-to-high or high-to-low (ties acceptable).

    Parameters
    ----------
    a : float
    b : float
    c : float

    Returns
    ----------
    boolean
    """
    # A tie with the middle value is monotonic by definition.
    if b == a or b == c:
        return True
    # Otherwise a and c must lie on opposite sides of b.
    return (a > b) != (c > b)
def best1bin(i, F, X, gbest):
    """
    Differential-evolution strategy best/1/bin: mutate the best vector
    by adding one scaled difference vector, gbest + F * (X[i0] - X[i1]).
    """
    difference = X[i[0]] - X[i[1]]
    return gbest + F * difference
def get_letter_count(message):
    """Return a dict mapping each (uppercased) character in *message*
    to its number of occurrences, ignoring spaces, commas and periods."""
    counts = {}
    for ch in message.upper():
        if ch not in (' ', ',', '.'):
            counts[ch] = counts.get(ch, 0) + 1
    return counts
def filterEvent(evt_src=-1, evt_type=-1, evt_value=-1, filters=()):
    """Pipe an event through *filters* in strict order.

    Each filter receives ``(evt_src, evt_type, evt_value)`` and returns
    either None (discard the event) or a possibly-transformed tuple of
    the same shape.

    :param evt_src: (object) object that triggered the event
    :param evt_type: (TaurusEventType) type of event
    :param evt_value: (object) event value
    :param filters: (sequence<callable>) filter callables
    :return: (None or tuple) the final event tuple, or None if any
        filter discarded the event.
    """
    event = (evt_src, evt_type, evt_value)
    for flt in filters:
        event = flt(*event)
        if event is None:
            return None
    return event
def CleanWords(WordList):
    """
    Return WordList with surrounding whitespace stripped from each word.
    """
    return list(map(str.strip, WordList))
def int_to_string(number, alphabet, padding=None):
    """
    Convert *number* to a string using *alphabet* as the digit set,
    most significant digit first; optionally left-pad with the zero
    digit (alphabet[0]) to *padding* characters.

    Adapted from shortuuid.
    """
    base = len(alphabet)
    reversed_digits = ""
    while number:
        number, rem = divmod(number, base)
        reversed_digits += alphabet[rem]
    if padding:
        # Zero digits appended here end up at the front after reversal.
        reversed_digits += alphabet[0] * max(padding - len(reversed_digits), 0)
    return reversed_digits[::-1]
from datetime import datetime
def format_date(date):
    """Convert an ISO 'YYYY-MM-DD' string to zero-padded 'mm/dd/yyyy'."""
    parsed = datetime.strptime(date, '%Y-%m-%d')
    return parsed.strftime("%m/%d/%Y")
def get_ecs_config_in_s3(cluster):
    """Get the name of the ECS config file in S3.

    Args:
        cluster: the name of the cluster it's for.

    Returns:
        The name of the file in S3 (minus the bucket),
        ``"<cluster>/ecs.config"``.
    """
    return "/".join([str(cluster), "ecs.config"])
def add_to_list(a_list: list, element: object):
    """
    Append *element* to *a_list* unless it is already contained.

    Returns 1 if the element was appended, 0 if it was already present.
    """
    if element in a_list:
        return 0
    a_list.append(element)
    return 1
def is_fin_st(id):
    """Used in p_one_line()
    ---
    Check whether *id* begins with f or F, or with "if" or "IF".
    """
    # A slice shorter than 2 chars can never equal 'if'/'IF', so the
    # explicit length guard of the original is unnecessary.
    return id[0] in {'f', 'F'} or id[:2] in ('if', 'IF')
def npv(Rn, i, i0, pe=0):
    """Net present value (NPV): the present value of cash inflows minus
    the initial amount invested.

    Args:
        Rn: Expected return list (one entry per period)
        i: Discount rate
        i0: Initial amount invested
        pe: Profit or expense realized at the end of the investment
            (added to the final period's cash flow)

    Returns:
        Net present value

    Example:
        >>> import malee
        >>> malee.npv([5000, 8000, 12000, 30000], 0.05, 40000)
        7065.266015703324
    """
    npv_sum = 0
    last = len(Rn) - 1
    for idx, Ri in enumerate(Rn):
        cash = Ri
        # `pe` belongs to the final period only. The previous version
        # (a) triggered on any element merely *equal in value* to the
        # last one and (b) discounted only `pe`, leaving that period's
        # Ri undiscounted (`Ri + pe / (1+i)**t`).
        if idx == last and pe != 0:
            cash += pe
        npv_sum += cash / ((1 + i) ** (idx + 1))
    return npv_sum - i0
def find_empty(board):
    """
    Find the first empty cell (represented by 0), scanning row-major
    from [0][0].

    :param board: the actual sudoku board (2D list)
    :return: (row, col) of the first 0, or None if the board is full
    """
    for row_idx, row in enumerate(board):
        for col_idx, value in enumerate(row):
            if value == 0:
                # Position handed to the next valid() check.
                return row_idx, col_idx
    return None
def to_numpy(trials, space):
    """Convert a trials DataFrame to a numpy array of (params + objective)."""
    columns = list(space.keys()) + ["objective"]
    return trials[columns].to_numpy()
def is_a_pooling_layer_label(layer_label):
    """ Returns true if a pooling layer.

    Detection is purely substring-based: any label containing 'pool'
    (e.g. 'max-pool', 'avg-pool') matches.
    """
    return 'pool' in layer_label | bcc4a82e15a9addf2ad9f73b02f6984786ab8a1f | 92,603 |
def calculate_fuel(mass):
    """Calculate the fuel required for a module: floor(mass / 3) - 2.

    >>> calculate_fuel(12)
    2
    >>> calculate_fuel(14)
    2
    >>> calculate_fuel(1969)
    654
    >>> calculate_fuel(100756)
    33583
    """
    thirds = mass // 3
    return thirds - 2
import re
def parse_converged_genscfman(i, data, offset=0):
    """ Extract the total energy from converged SCF cycles.

    Scans the lines of ``data`` starting at index ``i + offset`` until
    either a converged-SCF line or the "Timing for Total SCF:" marker
    is found; returns the matched energy (0.0 if the marker is hit
    first). NOTE(review): if neither string ever appears, the loop
    walks past the end of ``data`` and raises IndexError — confirm
    callers guarantee the marker is present.
    """
    j = offset
    # group(1) is the total energy; the trailing literal text marks a
    # cycle that met the convergence criterion.
    patt = r"\s+\d+\s+(-?\d+\.\d+)\s+(\d+\.\d+[e]-\d+)\s+\d{5}\s(Convergence criterion met)"
    end_string = "Timing for Total SCF:"
    SCF_conv = 0
    while True:
        if end_string in data[i+j]:
            break
        match = re.search(patt, data[i+j])
        if match:
            SCF_conv = match.group(1)
            break
        j += 1
    return float(SCF_conv) | acdd4895faef58ac131fa5bfc9139d282c9b4e3b | 92,612 |
from typing import Any
def binary_search(L: list, v: Any) -> int:
    """Return the index of the first occurrence of value in sorted list
    L, or -1 if value is not in L.

    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], 4)
    2
    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], 1)
    0
    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], 10)
    7
    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], 2)
    -1
    >>> binary_search([], -3)
    -1
    """
    lo = 0
    hi = len(L) - 1
    # Invariant: L[k] < v for all k < lo; L[k] >= v for all k > hi.
    while lo <= hi:
        mid = (lo + hi) // 2
        if L[mid] < v:
            lo = mid + 1
        else:
            hi = mid - 1
    # lo is now the leftmost position where v could be.
    if lo < len(L) and L[lo] == v:
        return lo
    return -1
import unicodedata
def replace_fw(text):
    """Replace full-width characters with their half-width equivalents
    via NFKC normalization, e.g. a full-width '?' becomes '?'.

    Parameters
    ----------
    text : str
        Text to be processed.

    Returns
    -------
    str
        Processed text.
    """
    normalized = unicodedata.normalize('NFKC', text)
    return normalized
import random
def generate_lat_lon(country, main_dict):
    """
    Generate randomized (lat, lon) string pairs for a country's city.

    Each of the ``number_of_agents`` pairs is drawn uniformly within
    ±0.25 degrees of the city's coordinates from *main_dict*, rounded
    to 4 decimal places.
    """
    lats = []
    lons = []
    n_agents = main_dict[country]['number_of_agents']
    print("n_agents: ", n_agents)
    base_lat = float(main_dict[country]['city_coordinates'][0])
    base_lon = float(main_dict[country]['city_coordinates'][1])
    dev = 0.25
    round_to = 4
    for _ in range(n_agents):
        # Draw lat then lon to keep the original RNG call order.
        sampled_lat = round(random.uniform(base_lat - dev, base_lat + dev), round_to)
        sampled_lon = round(random.uniform(base_lon - dev, base_lon + dev), round_to)
        lats.append(str(sampled_lat))
        lons.append(str(sampled_lon))
    return lats, lons
def details_changed(prev_details, new_details):
    """
    Check whether at least one of the main details of a subscription
    has changed.

    Arguments:
        prev_details - previous details of a subscription
        new_details - new details of a subscription

    Returns:
        True if any watched attribute differs, else False.
    """
    watched = (
        'handout_status',
        'subscription_name',
        'subscription_status',
        'subscription_expiry_date',
        'handout_budget',
        'subscription_users',
    )
    return any(
        getattr(prev_details, attr) != getattr(new_details, attr)
        for attr in watched
    )
def run_episode(env):
    """Run one episode by executing a random policy.

    Parameters
    ----------
    env : ObstacleTowerEnv
        game environment with OpenAI gym wrapper

    Returns
    -------
    float
        reward earned in the episode
    """
    total_reward = 0.0
    done = False
    while not done:
        # Sample a random action; step() advances one timestep and
        # returns (observation, reward, done flag, diagnostic info).
        action = env.action_space.sample()
        _obs, reward, done, _info = env.step(action)
        total_reward += reward
    return total_reward
def parse_excel_row(row):
    """
    Parse a row in the sheet and return (number, title, doctype).

    Note: the sheet columns are ordered (number, doctype, title); the
    return value reorders them to (number, title, doctype).
    """
    number, doctype, title = (cell.value for cell in row[:3])
    return number, title, doctype
def get_default(dictionary, attr, default):
    """
    Like :meth:`dict.get`, except that a stored value of None also
    falls back to *default*.

    Parameters
    ----------
    dictionary: dict
    attr: collections.abc.Hashable
    default

    Returns
    -------
    object
        ``dictionary[attr]`` if present and not None, else *default*.
    """
    value = dictionary.get(attr)
    return default if value is None else value
def to_str(to_convert: list) -> list:
    """
    Convert band/index/alias values to their string names.

    Accepts a single value or a list. Strings pass through unchanged;
    other objects contribute their ``name`` attribute (enum members),
    falling back to ``__name__`` (classes/functions).

    .. code-block:: python
        >>> to_str(["NDVI", "GREEN", RED, "VH_DSPK", "SLOPE", DEM, "CLOUDS", CLOUDS])
        ['NDVI', 'GREEN', 'RED', 'VH_DSPK', 'SLOPE', 'DEM', 'CLOUDS', 'CLOUDS']

    Args:
        to_convert (list): Values to convert into str

    Returns:
        list: str bands
    """
    if not isinstance(to_convert, list):
        to_convert = [to_convert]

    def _name_of(value):
        if isinstance(value, str):
            return value
        try:
            return value.name
        except AttributeError:
            return value.__name__

    return [_name_of(value) for value in to_convert]
import math
def fancy(lat, lon):
    """Stringify a point in a rather fancy way, e.g. ``44°35'27.6"N
    100°21'53.1"W`` — degrees with arc minutes and seconds."""
    # Both coordinates are formatted identically; only the hemisphere
    # suffix letter differs.
    def _dms(value, positive, negative):
        suffix = positive if value > 0 else negative
        magnitude = abs(value)
        degrees = math.floor(magnitude)
        magnitude = (magnitude - math.floor(magnitude)) * 60
        minutes = math.floor(magnitude)
        # Seconds rounded to one decimal place.
        seconds = round((magnitude - math.floor(magnitude)) * 600) / 10
        return f"{degrees}°{minutes}'{seconds}\"{suffix}"
    return f"{_dms(lat, 'N', 'S')} {_dms(lon, 'E', 'W')}"
def get_server_modules(cfg):
    """
    Get list of modules to add at users request.
    Parameters
    __________
    cfg: ServerModules.cfg path for the module being added.
    Returns
    _______
    List of module names that need to be added to the main .cfg file to be built
    """
    mod_list = []
    with open(cfg) as f:
        for line in f.readlines():
            # Strip trailing "#" comments, then keep only the last path
            # component of what remains; blank results are skipped.
            m = line.split("#")[0].split("/")[-1].strip()
            mod_list += [m] if m else []
    return mod_list | e0bce3ca0f0630a11c23fd639fd13aea18ee4c9e | 92,636
import pickle
def loads(string):
    """
    Wraps pickle.loads.
    Parameters
    ----------
    string : str
    Returns
    -------
    object
    Examples
    --------
    >>> from libtbx.easy_pickle import dumps, loads
    >>> print(loads(dumps([1, 2, 3])))
    [1, 2, 3]
    """
    # SECURITY: pickle.loads can execute arbitrary code; never call this
    # on data from an untrusted source.
    return pickle.loads(string) | c49992ade74561fa7f60472a9e27ba36c471f370 | 92,645
def read_uic2tag(fh, byteorder, dtype, planecount, offsetsize):
    """Read MetaMorph STK UIC2Tag from file and return as dict.

    Reads 6 little-endian uint32 values per plane and returns per-plane
    arrays. ZDistance is the ratio of the first two fields; the remaining
    fields are raw julian-day / millisecond timestamps.
    """
    # The tag is only defined for little-endian '2I' data.
    assert dtype == '2I' and byteorder == '<'
    values = fh.read_array('<u4', 6*planecount).reshape(planecount, 6)
    return {
        'ZDistance': values[:, 0] / values[:, 1],
        'DateCreated': values[:, 2],  # julian days
        'TimeCreated': values[:, 3],  # milliseconds
        'DateModified': values[:, 4],  # julian days
        'TimeModified': values[:, 5]} | c191e6d1c0a8ef5405fdc1b35dd5d6d813b50f71 | 92,650
def score_function(word1, word2):
    """A score_function that computes the edit distance between word1 and word2.

    This is the classic Levenshtein recurrence: base cases charge the length
    of the remaining word, and each step either consumes a matching character
    for free or pays 1 for an insert/delete/substitute. The naive recursion
    is exponential in the word lengths (no memoization).
    """
    if not word1:  # Fill in the condition
        # BEGIN Q6
        # word1 exhausted: insert all remaining characters of word2.
        return len(word2)
        # END Q6
    elif not word2:  # Feel free to remove or add additional cases
        # BEGIN Q6
        # word2 exhausted: delete all remaining characters of word1.
        return len(word1)
        # END Q6
    elif word1[0] == word2[0]:
        # Matching heads cost nothing; recurse on the tails.
        return score_function(word1[1:], word2[1:])
    else:
        add_char = score_function(word1, word2[1:])  # Fill in these lines
        remove_char = score_function(word1[1:], word2)
        substitute_char = score_function(word1[1:], word2[1:])
        # BEGIN Q6
        return 1 + min(add_char, remove_char, substitute_char)
        # END Q6 | 5d88a008bdaae87e9541451922d0b8099d028b21 | 92,652
import requests
def url_exists(url):
    """Check if the URL exists.

    Issues a HEAD request; on a 302 Found the Location header is followed
    recursively. Returns True only for a final 200 OK.

    NOTE(review): there is no guard against redirect cycles, so a loop of
    302 responses would recurse until RecursionError — confirm acceptable.
    """
    resp = requests.head(url)
    if resp.status_code == requests.codes.found:
        return url_exists(resp.headers['Location'])
    return resp.status_code == requests.codes.ok | c06dea3515366478b0fd12dc186d9c7438b482b6 | 92,653
def clamp(value, min_value, max_value):
    """Clamps the given value between min and max.

    Returns max_value if value exceeds it, min_value if value is below it,
    and value itself otherwise. Assumes min_value <= max_value.
    """
    if value > max_value:
        return max_value
    if value < min_value:
        return min_value
    return value | c721f748d1a1cd7d40c42348d436657058fe8d21 | 92,663
def ts_train_test_split(df_input, n, time_colname, ts_id_colnames=None):
    """
    Group data frame by time series ID and split on last n rows for each group.
    :param df_input: input data frame
    :param n: number of observations in the test set
    :param time_colname: time column
    :param ts_id_colnames: (optional) list of grain column names
    :return train and test data frames
    """
    if ts_id_colnames is None:
        ts_id_colnames = []
    # Remember whether the caller supplied grain columns, so any synthetic
    # one added below can be removed before returning.
    ts_id_colnames_original = ts_id_colnames.copy()
    if len(ts_id_colnames) == 0:
        # No grain given: add a constant dummy column so groupby still works
        # (single-series case). NOTE(review): this mutates df_input in place.
        ts_id_colnames = ['Grain']
        df_input[ts_id_colnames[0]] = 'dummy'
    # Sort by ascending time
    df_grouped = (df_input.sort_values(time_colname).groupby(ts_id_colnames, group_keys=False))
    # Per series: everything but the last n rows is train, the last n is test.
    df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])
    df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])
    # drop group column name if it was not originally provided
    if len(ts_id_colnames_original) == 0:
        df_head.drop(ts_id_colnames, axis=1, inplace=True)
        df_tail.drop(ts_id_colnames, axis=1, inplace=True)
    return df_head, df_tail | 54da7cb563503a53d57fe3e0adaba24da941d284 | 92,664
def switch_turn(match, team):
    """
    Change the team who can pick.
    :param match: Match object
    :param team: The team who is currently picking
    :return: Next team to pick
    """
    # Toggle turn
    team.captain.is_turn = False
    # Get the other team
    # NOTE(review): assumes match.teams is indexed so that
    # teams[team.id - 1] resolves to the OPPOSING team — verify the
    # id/index convention used by Match before relying on this.
    other = match.teams[team.id - 1]
    other.captain.is_turn = True
    return other | 6d487e70c0a7b700c80679f8171d1526701e50b1 | 92,670
def rescale(value, orig_min, orig_max, new_min, new_max):
    """
    Rescales a `value` in the old range defined by
    `orig_min` and `orig_max`, to the new range
    `new_min` and `new_max`. Assumes that
    `orig_min` <= value <= `orig_max`.
    Parameters
    ----------
    value: float
        The value to be rescaled.
    orig_min: float
        The minimum of the original range.
    orig_max: float
        The maximum of the original range.
    new_min: float
        The minimum of the new range.
    new_max: float
        The maximum of the new range.
    Returns
    ----------
    new_value: float
        The rescaled value.
    """
    orig_span = orig_max - orig_min
    new_span = new_max - new_min
    try:
        # Normalize value into [0, 1] relative to the original range.
        scaled_value = float(value - orig_min) / float(orig_span)
    except ZeroDivisionError:
        # Degenerate range (orig_min == orig_max): widen it by a tiny
        # epsilon instead of failing.
        orig_span += 1e-6
        scaled_value = float(value - orig_min) / float(orig_span)
    return new_min + (scaled_value * new_span) | 427a17bfcfd28fa99000eada732e874bb6e60bcd | 92,680
def dec_to_bin(number: int, bits: int) -> str:
    """Converts an integer to a binary string with a certain bit length
    Parameters
    ----------
    number: int
        The integer to convert
    bits: int
        The amount of bits that can be used
    Returns
    -------
    str
        The binary string
    """
    # Left-pad with zeros then keep the last `bits` characters.
    # NOTE(review): assumes 0 <= number < 2**bits; larger values are
    # silently truncated and negatives lose their sign — confirm callers
    # only pass values in range.
    return ('0' * bits + bin(number)[2:])[-bits:] | 581acd5145d13efac4c6d6c375630037eaee5e16 | 92,684
from typing import Set
def arg_to_set(arg: str) -> Set[str]:
    """
    Convert given comma separated string to set.
    Non-string input yields an empty set rather than raising.
    Note: items are not stripped, so "a, b" yields {'a', ' b'}.
    Args:
        arg (str): list as comma separated string.
    Returns: Set
    """
    if not isinstance(arg, str):
        return set()
    arg_list = arg.split(',')
    return set(arg_list) | 07b2e245eb7e284c8ab4a5fc4929a97461b6a3d5 | 92,693
import base64
def base64_to_base32(base64_string):
    """Converts base64 string to base32 string.

    Decodes the base64 input to raw bytes, re-encodes those bytes as
    base32, and returns the result as an ASCII str.
    """
    b32_string = base64.b32encode(base64.b64decode(base64_string)).decode('ascii')
    return b32_string | 1bcb3a71c4c06e590a97408ecf1771109a29f1cb | 92,696
def get_key_ordered_dictionary_repr(dictionary, delimiter=': ',
                                    decorator_in='{', decorator_out='}'):
    """Get a string representation of a dictionary ordered by keys.
    Parameters
    ----------
    dictionary : dictionary
        Dictionary to be represented as a string.
    delimiter : string
        Delimiter to put between every key-value pair.
    decorator_in : string
        Decorator to put at the beginning of the string.
    decorator_out : string
        Decorator to put at the end of the string.
    Returns
    -------
    dict_str : string
        String representation of the key-ordered dictionary.
    Notes
    -----
    In the string representation the dictionary keys are displayed ordered by
    their value and properly indented, e.g.
    {
        'key1': value1,
        'key2': value2,
    }
    """
    dict_str = decorator_in
    # One "\n\t'key'<delimiter>value" entry per key, in sorted-key order.
    for key in sorted(dictionary.keys()):
        dict_str += "\n\t'{}'{}{}".format(key, delimiter, dictionary[key])
    dict_str += '\n{}'.format(decorator_out)
    return dict_str | 25ea171bfc59268bdfb4f590231e1c7c00b64193 | 92,698
def get_last(l, default=''):
    """ pop from list and return default if empty list """
    # NOTE: mutates the caller's list — the last element is removed.
    return l.pop() if len(l) else default | d477b41dbaf4d1d7b6f49dff4ee546b967109c96 | 92,701
def _convert_nested_dict_values_to_to_sets(nested_dict):
    """
    Given a nested dictionary from kmer -> ID -> list of positions
    convert it to a dictionary from kmer -> ID -> set of positions.
    This simplifies code which wants to use these positions as a set
    against which membership is repeatedly checked.
    """
    # Rebuild both levels of the mapping; only the innermost list->set
    # conversion changes anything. The input is left unmodified.
    return {
        kmer: {
            key: set(position_list)
            for (key, position_list) in id_to_positions_dict.items()}
        for (kmer, id_to_positions_dict)
        in nested_dict.items()
    } | db955fc5ab0b76d761bdb72ed41e901e3804e38c | 92,703
def CmpThumb(first, second):
    """ Compares two thumbs in terms of file names and ids.

    Python-2 style cmp comparator: negative if first sorts before second,
    zero-or-signed id difference on equal file names, positive otherwise.
    """
    if first.GetFileName() < second.GetFileName():
        return -1
    elif first.GetFileName() == second.GetFileName():
        # Tie-break equal file names by id; the sign carries the ordering.
        return first.GetId() - second.GetId()
    return 1 | 37f615ed6e2d5dc90efd0679a2458a29531431fc | 92,706
def crit_func(test_statistic, left_cut, right_cut):
    """
    A generic critical function for an interval,
    with weights at the endpoints.
    ((test_statistic < CL) + (test_statistic > CR) +
     gammaL * (test_statistic == CL) +
     gammaR * (test_statistic == CR))
    where (CL, gammaL) = left_cut, (CR, gammaR) = right_cut.
    Parameters
    ----------
    test_statistic : np.float
        Observed value of test statistic.
    left_cut : (float, float)
        (CL, gammaL): left endpoint and value at exactly the left endpoint (should be in [0,1]).
    right_cut : (float, float)
        (CR, gammaR): right endpoint and value at exactly the right endpoint (should be in [0,1]).
    Returns
    -------
    decision : np.float
    """
    CL, gammaL = left_cut
    CR, gammaR = right_cut
    # Outside the interval the decision is a hard 1; `* 1.` coerces the
    # boolean sum to float.
    value = ((test_statistic < CL) + (test_statistic > CR)) * 1.
    # Endpoint weights are only added when nonzero, skipping needless work.
    if gammaL != 0:
        value += gammaL * (test_statistic == CL)
    if gammaR != 0:
        value += gammaR * (test_statistic == CR)
    return value | 0616d81c46b01944173dee3a4b076e1c9c3f9527 | 92,712
import requests
def get_gbif_tax_from_id(gbifid: int):
    """
    Obtaining extensive taxonomic information from the gbif API, when we have the taxonKey of the gbif backbone
    Parameters
    ----------
    gbifid: int
        taxonKey (speciesKey, parentKey etc.) of the taxon
    Returns
    ----------
    Dictionary containing all the information of the taxon available in the API. Note: the information here is more complete than when we search the taxon through its name
    """
    api = f"https://api.gbif.org/v1/species/{gbifid}"
    # NOTE(review): no status-code check here — a 404/500 response would
    # surface as whatever JSON (or decode error) the API body yields.
    response = requests.get(api)
    content = response.json()
    return content | 7c1a900d3e9fd40e289bd94c0f28c083feab8e4f | 92,714
def createFile(quant, conceal, splitLayer):
    """Creates a data file based on the desired options.

    Builds an .npy filename of the form
    ``<splitLayer>_[<n>BitQuant_][EC].npy``, where the quantization and
    error-concealment segments appear only when enabled.

    NOTE(review): quant/conceal are compared against the string sentinels
    "noQuant"/"noConceal"; otherwise quant is assumed to expose an
    ``nBits`` attribute — confirm the caller's contract.
    """
    fileName = splitLayer+"_"
    if quant!="noQuant":
        fileName += f"{quant.nBits}BitQuant_"
    if conceal!="noConceal":
        fileName += "EC"
    fileName += ".npy"
    return fileName | a649c8171db24f3b7454e67009aa9c66ad90a0ae | 92,717
def load_astropy_angle_dataset(h_node,base_type,py_obj_type):
    """
    loads astropy angle
    Parameters
    ----------
    h_node (h5py.Dataset):
        the hdf5 node to load data from
    base_type (bytes):
        bytes string denoting base_type
    py_obj_type :
        final type of restored dtype
    Returns
    -------
    resulting py_obj_type
    """
    # The angle's unit is stored as an HDF5 attribute; the value itself
    # is the dataset's scalar/array content (read via h_node[()]).
    unit = h_node.attrs["unit"]
    q = py_obj_type(h_node[()], unit)
    return q | d21e7ed42c69e51c5c06a7281aa074c055fbe241 | 92,733
def starts_bilabial(text: str) -> bool:
    """
    Checks if the word starts with b, m, or p.
    The check is case-insensitive; an empty string returns False.
    :param text: the string to check
    :return: true if the input starts with one of 'bmp'
    """
    return len(text) > 0 and text.lower()[0] in "bmp" | f321dbcf1373d131e4d599f1a9bcadae015614e4 | 92,736
import json
def load_document_stats(filepath):
    """Loads document level stats which were collected
    during index creation.

    :param filepath: path to a JSON file of per-document statistics
    :return: the deserialized JSON content
    """
    with open(filepath, 'r') as f:
        # Equivalent to json.load(f); reads the whole file then parses it.
        return json.loads(f.read()) | dcc46711fb81b8cc057ff39583ac50c062c6a396 | 92,742
def find_axes(header):
    """Idenfities how many axes are present in a FITS file, and which is the
    Faraday depth axis. Necessary for bookkeeping on cube dimensionality,
    given that RM-clean only supports 3D cubes, but data may be 4D files.

    Returns (Ndim, FD_axis); if no CTYPEn contains 'FDEP', FD_axis
    defaults to the last axis (Ndim).
    """
    Ndim=header['NAXIS']
    FD_axis=Ndim
    #Check for FD axes:
    for i in range(1,Ndim+1):
        # NOTE(review): bare except also hides non-KeyError failures
        # (e.g. a non-string CTYPE value) — consider narrowing.
        try:
            if 'FDEP' in header['CTYPE'+str(i)].upper():
                FD_axis=i
        except:
            pass #The try statement is needed for if the FITS header does not
                 # have CTYPE keywords.
    return Ndim,FD_axis | 8704f807a41325147bb66b248ac9e1f6a15e58fb | 92,743
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.