content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import numpy
def bytscl(array, maxv=None, minv=None, top=255, nan=False):
    """
    Replicates the bytscl function available within IDL
    (Interactive Data Language, EXELISvis).
    Scales all values of array in the range
    (minv <= value <= maxv) to (0 <= scl_value <= top).
    :param array:
        A numpy array of any type.
    :param maxv:
        The maximum data value to be considered.
        Otherwise the maximum data value of array is used.
    :param minv:
        The minimum data value to be considered.
        Otherwise the minimum data value of array is used.
    :param top:
        The maximum value of the scaled result. Default is 255.
        The mimimum value of the scaled result is always 0.
    :param nan:
        type Bool. If set to True, then NaN values will be ignored.
    :return:
        A numpy array of type byte (uint8) with the same dimensions
        as the input array.
    Example:
        >>> a = numpy.random.randn(100,100)
        >>> scl_a = bytscl(a)
    :author:
        Josh Sixsmith; josh.sixsmith@gmail.com; joshua.sixsmith@ga.gov.au
    :history:
        * 2013/10/24: Created
    :copyright:
        Copyright (c) 2014, Josh Sixsmith
        All rights reserved.
        Redistribution and use in source and binary forms, with or without
        modification, are permitted provided that the following conditions are met:
        1. Redistributions of source code must retain the above copyright notice, this
        list of conditions and the following disclaimer.
        2. Redistributions in binary form must reproduce the above copyright notice,
        this list of conditions and the following disclaimer in the documentation
        and/or other materials provided with the distribution.
        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
        ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
        WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
        DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
        ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
        (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
        LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
        ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
        (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
        SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
        The views and conclusions contained in the software and documentation are those
        of the authors and should not be interpreted as representing official policies,
        either expressed or implied, of the FreeBSD Project.
    """
    # Default the scale limits to the data extremes; nanmax/nanmin skip NaNs.
    if (maxv is None):
        if (nan):
            maxv = numpy.nanmax(array)
        else:
            maxv = numpy.amax(array)
    if (minv is None):
        if (nan):
            minv = numpy.nanmin(array)
        else:
            minv = numpy.amin(array)
    # Output is uint8, so the ceiling can never exceed 255.
    if (top > 255):
        top = 255
    # A numpy dtype compares equal to its type-name string, so membership in
    # these name lists selects the integer/float scaling branch.
    int_types = ['int', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16',
                 'uint32', 'uint64']
    flt_types = ['float', 'float16', 'float32', 'float64']
    # NOTE(review): maxv == minv (constant array) divides by zero here — confirm
    # callers never hit that case.
    if (array.dtype in int_types):
        # IDL's integer scaling formula.
        rscl = numpy.floor(((top + 1.) * (array - minv) - 1.) / (maxv - minv))
    elif (array.dtype in flt_types):
        # IDL's floating-point scaling formula.
        rscl = numpy.floor((top + 0.9999) * (array - minv) / (maxv - minv))
    else:
        msg = ("Error! Unknown datatype. "
               "Supported datatypes are: "
               "int8, uint8, int16, uint16, int32, uint32, int64, uint64, "
               "float32, float64.")
        raise ValueError(msg)
    # Check and account for any overflow that might occur during
    # datatype conversion
    numpy.clip(rscl, 0, top, out=rscl)
    scl = rscl.astype('uint8')
    return scl | 45b17cfdb22acbef15efcb3d824ee0c08e9d5be0 | 47,641 |
def resolve_from_path(path):
    """Resolve an object from a dotted path such as ``pkg.mod.attr``.

    The final dotted component is looked up as an attribute of the module
    named by everything before it.
    """
    module_name, attribute = path.rsplit(".", 1)
    module = __import__(module_name, fromlist=[attribute])
    return getattr(module, attribute)  # | fffda7518a78c72a441547116f2b33ed459adb05 | 47,642 |
import math
def svo_angle(mean_allocation_self, mean_allocation_other):
    """
    Return the social value orientation angle (slider measure), in degrees.

    params: mean allocation to self and to other, from the six primary
        items of the SVO slider measure
    returns: the person's SVO angle
    effects: None
    """
    # The slider's circle is centred at (50, 50), so shift both means by 50
    # to measure the angle from the circle centre rather than the origin.
    delta_self = mean_allocation_self - 50
    delta_other = mean_allocation_other - 50
    return math.degrees(math.atan2(delta_other, delta_self))  # | 0b3f39309c44d6e3fee893debb54ef00786d321e | 47,643 |
def power(base_int: int, power_int: int) -> int:
    """
    Return ``base_int`` raised to ``power_int``.
    Parameters:
        base_int (int) : the base
        power_int (int) : the exponent
    """
    return pow(base_int, power_int)  # | 826d227998bb1c7782682037af3e3b6b84948c5a | 47,644 |
def validate_mp_xy(fits_filenames, control_dict):
    """Quick sanity check of the MP XY entries in the control dict.

    :param fits_filenames: FITS filenames actually present.
    :param control_dict: dict whose 'mp xy' entry is a list of
        (filename, x, y) tuples.
    :return: 2-tuple (mp_xy_files_found, mp_xy_values_ok), both True if OK.
    """
    entries = control_dict['mp xy']
    files_found = all(entry[0] in fits_filenames for entry in entries)
    # Only the coordinates of the first two entries are validated, matching
    # the original behaviour.
    coordinates = [value for entry in entries[:2] for value in (entry[1], entry[2])]
    values_ok = all(value > 0.0 for value in coordinates)
    return files_found, values_ok  # | 2d129eae8e7fed9f6a11ae4d5bc2a2e285c1f694 | 47,645 |
def search_link_when(page_offset):
    """Factory for a pagination-link predicate.

    :param page_offset: negative for a "previous" link, positive for "next".
    :return: function of a search dict that is False only when the
        requested neighbouring page does not exist.
    """
    def _inner(search_dict):
        pagination = search_dict['_pagination']
        if page_offset < 0:
            return pagination.prev_page is not None
        if page_offset > 0:
            return pagination.next_page is not None
        return True
    return _inner  # | d138a08a8efe1881a4687af730c1c85e137c952a | 47,646 |
import uuid
def GetRandomUUID():
    """Generate a random (version 4) UUID.

    Returns:
        str: the UUID in canonical hyphenated form.
    """
    random_uuid = uuid.uuid4()
    return str(random_uuid)  # | a1984535b67775c31235979f315f8e46f1fdd53b | 47,647 |
def get_hosts(path, url):
    """
    Build a Windows hosts-file entry mapping *url* to localhost.
    :param path: project path, recorded in the generated comment line
    :param url: hostname to point at 127.0.0.1
    :return: string
    """
    template = """
    # host for %%path%%
    127.0.0.1\t%%url%%
    """
    return template.replace("%%url%%", url).replace("%%path%%", path)  # | 5e606c6dd36706f5c0fe1e492143231c4bd229ee | 47,648 |
def boom(iterable, target: int) -> str:
    """Scan *iterable* for the digits of *target*.

    Return 'Boom!' when the decimal form of *target* appears inside any
    element's string form, otherwise f"there is no {target} in the list".
    """
    needle = str(target)
    if any(needle in str(item) for item in iterable):
        return "Boom!"
    return f"there is no {target} in the list"  # | 2161ad0e165180cc54649a0078c9899f65bcb714 | 47,649 |
def _normalized_import_cycle(cycle_as_list, sort_candidates):
    """Given an import cycle specified as a list, return a normalized form.
    You represent a cycle as a list like so: [A, B, C, A].  This is
    equivalent to [B, C, A, B]: they're both the same cycle.  But they
    don't look the same to python `==`.  So we normalize this list to
    a data structure where different representations of the cycle
    *are* equal.  We do this by rearranging the cycle so that a
    canonical node comes first.  We pick the node to be the node in
    cycle_as_list that is also in sort_candidates.  If there are
    multiple such nodes, we take the one that's first alphabetically.
    We assume a simple cycle (that is, one where each node has only
    one incoming edge and one outgoing edge), which means that each
    node only occurs here once, so the sort order is uniquely defined.
    """
    # Candidate anchors: nodes of the cycle that are also sort candidates.
    sort_elts = [node for node in cycle_as_list if node in sort_candidates]
    if not sort_elts:    # probably impossible, but best to be safe
        sort_elts = cycle_as_list
    # Rotate so the alphabetically-smallest anchor comes first.
    min_index = cycle_as_list.index(min(sort_elts))
    # The weird "-1" here is because A occurs twice in the input
    # cycle_as_list, but we want min_elt to occur twice in the output.
    return tuple(cycle_as_list[min_index:-1] + cycle_as_list[:min_index + 1]) | 328046069999c8f3f960cc526b277a21c7daab5d | 47,650 |
def expand_features_and_labels(x_feat, y_labels):
    """
    Flatten per-example segment lists into individually-labelled examples.

    Every segment of x_feat[i] becomes its own training example carrying
    y_labels[i]; returns the pair (expanded features, expanded labels).
    """
    x_expanded = [segment for segments in x_feat for segment in segments]
    y_expanded = [label
                  for segments, label in zip(x_feat, y_labels)
                  for _ in segments]
    return x_expanded, y_expanded  # | 5cd5dbe18285fdcbc633809cd95dde17e32dd82b | 47,651 |
def rle(seq):
    """Run-length encode *seq*.

    Each run of identical items is emitted once, followed by the run length
    when it is greater than 1 (e.g. 'aaab' -> 'a3b').

    :param seq: string (or sequence of single-character strings)
    :return: the encoded string; '' for an empty input
    """
    if not seq:
        # BUG FIX: the original indexed seq[0] unconditionally and crashed
        # on an empty sequence.
        return ''
    run = [seq[0]]
    howmany = 1
    for index in range(1, len(seq)):
        if seq[index] == run[-1]:
            howmany += 1
        elif howmany != 1:
            # Close the previous multi-item run before starting a new one.
            run.append(str(howmany))
            howmany = 1
            run.append(seq[index])
        else:
            run.append(seq[index])
    if howmany != 1:
        run.append(str(howmany))
    return ''.join(run)  # | 9b333dcc92fa51b2261b8167da9ef6375e022792 | 47,652 |
def pl_max(pos_list):
    """Solution to exercise R-7.11.

    Implement a function, with calling syntax max(L), that returns the
    maximum element from a PositionalList instance L containing comparable
    elements (iterating the list yields the elements themselves).
    """
    best = pos_list.first().element()
    for candidate in pos_list:
        if candidate > best:
            best = candidate
    return best  # | 2e4f0c3dbb3984245dcde819a91c0e2f425b30b0 | 47,653 |
def add_extension(file_name, ext='py'):
    """
    Ensure the file name carries the given extension.
    :param file_name: <str> the file name to check for extension.
    :param ext: <str> extension (without the leading dot) to append.
    :return: <str> file name with valid extension.
    """
    suffix = '.{}'.format(ext)
    # BUG FIX: compare against '.<ext>', not the bare ext — otherwise a name
    # like 'happy' was treated as already ending in the 'py' extension.
    if not file_name.endswith(suffix):
        return file_name + suffix
    return file_name  # | 123a8a01c70cd3bf98f189c9582796fcdfa97ee3 | 47,654 |
def drop_empty(df):
    """Remove rows with missing values from a dataframe, reporting which
    index labels were dropped."""
    cleaned = df.dropna(axis=0)
    removed = set(df.index).symmetric_difference(cleaned.index)
    if not removed:
        print("No empty rows found")
    else:
        print("\nIndexes removed from data because of missing data:\n",
              removed)
    return cleaned  # | 5ad672c1cd6980cdd5d96e5506dcfa912331b509 | 47,655 |
def join_rsc_table_captions(document):
    """Move each RSC table caption inside its following table wrapper.

    Arguments:
        document -- lxml tree containing ``div.table_caption`` elements,
            each expected to be immediately followed by a
            ``div.rtable__wrapper`` element.
    Returns the same document, modified in place.
    """
    for el in document.xpath('//div[@class="table_caption"]'):
        # NOTE(review): getnext() can return None when a caption is the last
        # sibling — the attribute access below would then raise. Confirm the
        # input markup always pairs captions with a following wrapper.
        next_el = el.getnext()
        if next_el.tag == 'div' and next_el.attrib['class'] == 'rtable__wrapper':
            caption_el = el
            table_el = next_el
            # Re-inserting the caption as the wrapper's first child also
            # removes it from its old position in the tree.
            table_el.insert(0, caption_el)
    return document | 5cc717fedafbe11a83d3a7da82616d942eb4d052 | 47,656 |
def largest_number_of_edges():
    """Solution to exercise R-14.2.
    If G is a simple undirected graph with 12 vertices and 3 connected com-
    ponents, what is the largest number of edges it might have?
    ---------------------------------------------------------------------------
    Solution:
    ---------------------------------------------------------------------------
    According to Proposition 14.10, a simple undirected graph G with n vertices
    must have m <= n(n-1)/2 edges.
    Technically, a subgraph consisting of 1 vertex is connected. Knowing this,
    we can maximize m by putting 10 vertices in one connected component, and a
    single vertex in the other two connected components. These latter two
    connected components will have zero edges, and so the largest number of
    edges is thus m for n = 10:
        m <= 10*(10-1)/2
        m <= 10*9/2
        m <= 90/2
        m <= 45
    The largest number of edges is 45.
    """
    # BUG FIX: return the derived answer instead of a bare True, so the
    # function's value matches its name and docstring (45 is still truthy
    # for any caller that only checked truthiness).
    return 45  # | f4c6f4bb7144058bb8c323e0c8584dcff1be7507 | 47,658 |
def get_url_with_query_params(request, location, **kwargs):
    """
    Returns link to given location with query params.
    Usage:
        get_url_with_query_params(request, location, query_param1=value1, query_param2=value2)
    returns:
        http(s)://host/location/?query_param1=value1&query_param2=value2
    Values are URL-encoded. (BUG FIX: the original left a dangling '&'
    after the last parameter and did no encoding.)
    """
    from urllib.parse import urlencode

    url = request.build_absolute_uri(location)
    if kwargs:
        url += "?" + urlencode(kwargs)
    return url  # | 350003c0c86ff80db5f70ba16de6d86edadbf6e4 | 47,659 |
def is_hex_digit(char):
    """Checks whether char is a hexadecimal digit (0-9, a-f, A-F).

    BUG FIX: the original rejected uppercase A-F, which are valid
    hexadecimal digits.
    """
    return '0' <= char <= '9' or 'a' <= char <= 'f' or 'A' <= char <= 'F'  # | 172ae4a57bd77e7e33237bec77831417e961babd | 47,660 |
def is_valid_term(iterms):
    """ Checks if term is correct """
    # Whitelist of GROMACS energy-file term names. NOTE(review): the entries
    # ' Constr.-rmsd' and ' Box-Z' carry a leading space — confirm whether
    # that is intentional or a transcription slip.
    cterms = ['Angle', 'Proper-Dih.', 'Improper-Dih.', 'LJ-14', 'Coulomb-14', 'LJ-(SR)', 'Coulomb-(SR)', 'Coul.-recip.', 'Position-Rest.', 'Potential', 'Kinetic-En.', 'Total-Energy', 'Temperature', 'Pressure', ' Constr.-rmsd', 'Box-X', 'Box-Y', ' Box-Z', 'Volume', 'Density', 'pV', 'Enthalpy', 'Vir-XX', 'Vir-XY', 'Vir-XZ', 'Vir-YX', 'Vir-YY', 'Vir-YZ', 'Vir-ZX', 'Vir-ZY', 'Vir-ZZ', 'Pres-XX', 'Pres-XY', 'Pres-XZ', 'Pres-YX', 'Pres-YY', 'Pres-YZ', 'Pres-ZX', 'Pres-ZY', 'Pres-ZZ', '#Surf*SurfTen', 'Box-Vel-XX', 'Box-Vel-YY', 'Box-Vel-ZZ', 'Mu-X', 'Mu-Y', 'Mu-Z', 'T-Protein', 'T-non-Protein', 'Lamb-Protein', 'Lamb-non-Protein']
    # True only when every requested term appears in the whitelist.
    return all(elem in cterms for elem in iterms) | 882570c2865aedd3698317153a974a92d9966f1c | 47,661 |
import os
def list_dir(path) -> list:
    """List all files/directories under the given directory path.

    Thin wrapper around os.listdir. (FIX: the original annotated the
    return type as ``str`` although a list of names is returned.)
    """
    return os.listdir(path)  # | a500a828a74b6db64574806887c115aaccf91815 | 47,663 |
import socket
def get_ip(domain):
    """
    Resolve *domain* and return the first address found.
    :param domain: hostname (or address literal) to resolve
    :return: IP address string
    """
    first_result = socket.getaddrinfo(domain, "http")[0]
    sockaddr = first_result[4]
    return sockaddr[0]  # | e9e9186debebbf885bd284abc65346b65b0117b8 | 47,664 |
def is_valid_optimizer(optimizer):
    """
    Return True when *optimizer* exposes a well-formed get_optimizer().

    The object must provide get_optimizer() returning a dict whose
    'keyword_arguments' entry is a dict (or falsy) and whose 'optimizer'
    entry is truthy; any other shape — or any raised exception — yields
    False.
    """
    if not optimizer:
        return False
    try:
        details = optimizer.get_optimizer()
        if not isinstance(details, dict):
            return False
        keyword_arguments = details["keyword_arguments"]
        function_ref = details["optimizer"]
        # A falsy keyword_arguments (e.g. None) is acceptable; anything
        # truthy must be a dict.
        if keyword_arguments and not isinstance(keyword_arguments, dict):
            return False
        if not function_ref:
            return False
        return True
    # pylint: disable=broad-except
    except Exception:
        # Missing keys, bad attribute, etc. all count as "not valid".
        return False  # | 6e45704c2663b8bc8deed47bb6e31cd4d316d516 | 47,665 |
def subtracttime(d1, d2):
    """Return the absolute difference between two dates, in whole seconds
    (sub-second parts are ignored)."""
    delta = max(d1, d2) - min(d1, d2)
    return delta.days * 86400 + delta.seconds  # | bae7668ef9b593c7ebe072d789e1b298f81cda3e | 47,667 |
def _unary_op(fn):
    """Wrapper that restricts `fn` to have the correct signature."""
    # Pins the (x, name=None) calling convention; arguments are forwarded
    # to `fn` untouched.
    def unary_op_wrapper(x, name=None):
        return fn(x, name=name)
    return unary_op_wrapper | 5fa43c8495b2ad1cdc7e815e2fe7220917da59ad | 47,670 |
def removeWords(answer):
    """Strip common qualifier words from a lowercased input or answer,
    to allow for more leniency when comparing."""
    cleaned = answer.lower()
    for noise in (' town', ' city', ' island', ' badge', 'professor ', 'team '):
        cleaned = cleaned.replace(noise, '')
    return cleaned  # | 088debb26571dc31415591e5026972b057987229 | 47,671 |
def process_cabin(df):
    """Bin the Cabin column into its deck letter.

    Adds a ``Cabin_type`` column holding the first character of ``Cabin``
    ('Unknown' where missing) and drops the original ``Cabin`` column.

    Usage
    ------
    train = process_cabin(train)
    """
    df["Cabin_type"] = df["Cabin"].str[0].fillna("Unknown")
    return df.drop('Cabin', axis=1)  # | 2c8585b158908a8ab0749142ec0e2ea1b22f3089 | 47,672 |
def get_device_properties(node, device_index=1):
    """Return a tuple (motor_device, resolution, joint_parameters, joint_position)
    extracted from a joint node.

    Parameters
    ----------
    node: (str, node)
        The joint node that contains the device to extract properties from
    device_index: int
        Use 1 for device and 2 for device2
    """
    if device_index < 1 or device_index > 2:
        raise RuntimeError(f"Invalid device_index: {device_index}")
    # Defaults when no matching fields are found: no motor, resolution -1.
    motor_device = None
    resolution = -1
    dev_key = "device"
    jp_key = "jointParameters"
    pos_key = "position"
    # The second device's fields live under "device2"/"jointParameters2"/etc.
    if device_index == 2:
        dev_key += "2"
        jp_key += "2"
        pos_key += "2"
    # NOTE(review): node[1][key][1] indexing assumes the parsed proto-node
    # layout (field dict at index 1, value at index 1) — confirm against the
    # parser that produces these nodes.
    joint_position = node[1][pos_key][1]
    for field in node[1][dev_key][1]:
        if field["__type"] == "RotationalMotor":
            motor_device = field
        # Only a PositionSensor that explicitly sets a resolution overrides
        # the -1 default.
        if field["__type"] == "PositionSensor" and field.get("resolution") is not None:
            resolution = field.get("resolution")[1]
    return (motor_device, resolution, node[1][jp_key][1], joint_position) | 1921ae114338aa102c8e6ddbb932715cac1ff937 | 47,673 |
def _adjust_n_months(other_day, n, reference_day):
    """Adjust how many times a monthly offset applies, based on the
    day-of-month of the anchor date versus the provided reference day."""
    if n > 0 and other_day < reference_day:
        return n - 1
    if n <= 0 and other_day > reference_day:
        return n + 1
    return n  # | e7a46f8923fb57985e3f32a1130f34e703a58627 | 47,674 |
def mcf_to_boe(mcf=0, conversion_factor=6):
    """Convert mcf of gas to barrels of oil equivalent, using the standard
    6 mcf/boe factor by default."""
    barrels = mcf / conversion_factor
    return barrels  # | e7f7b984ec0e537512cf2b926c72c25c83a3507b | 47,675 |
def symp_h_ratio(counts_coords):
    """example_meyers_demo.yaml from SEIRcity v2"""
    # Hard-coded per-age-group ratios lifted from the demo YAML; the
    # counts_coords argument is currently ignored.
    return [0.00070175, 0.00070175, 0.04735258, 0.16329827, 0.25541833] | 03d1f010d91e56bb91233dc17dafb0d493c1381f | 47,677 |
def check_won(grid):
    """Return True if a value >= 32 is found anywhere in the 4x4 grid,
    otherwise False."""
    return any(grid[i][j] >= 32 for i in range(4) for j in range(4))  # | f93751aa8073bc3e1b3965bd598a17f0c98da967 | 47,680 |
import re
def any_char_matches(substring: str, mainString: str):
    """Report whether the pattern *substring* matches anywhere in *mainString*.

    Parameters
    ----------
    substring : str
        Regular-expression pattern searched for in `mainString`.
    mainString : str
        The string being scanned.

    Returns
    -------
    bool
        True when the pattern matches, else False.
    """
    return re.search(substring, mainString) is not None  # | b0db76f9f7ed34cd45ba118c758944afbb7b0090 | 47,681 |
def getDens(mCM):
    """
    Return the material density.

    mCM['dens'] may be a plain number or a callable taking the
    multi-component material dict itself.
    Parameters:
        mCM, multi-component material dictionary - see init
    """
    dens_spec = mCM['dens']
    if type(dens_spec) in (float, int):
        return dens_spec
    return dens_spec(mCM)  # | 60e6baa70f5c6cd90cf4578bd9f348e947d18979 | 47,683 |
def parse_sqlplus_arg(database):
    """Parse an sqlplus connection string ``user[/password]@host``.

    :param database: sqlplus-like connection string
    :return: (user, password-or-None, host)
    :raises ValueError:
        when database is not of the form <user>/<?password>@<host>
    """
    try:
        credentials, host = database.split('@')
        if '/' not in credentials:
            return (credentials, None, host)
        user, password = credentials.split('/')
        return (user, password, host)
    except ValueError:
        raise ValueError('Invalid sqlplus connection string {}: expected <user>/?<pass>@<host>'.format(database))  # | 51bb3304458d4b3d3a69c694b13066e1d713a272 | 47,685 |
def is_palindrome(input_string):
    """
    Checks if a string is a palindrome.
    :param input_string: str, any string
    :return: boolean, True if palindrome else False
    >>> is_palindrome("madam")
    True
    >>> is_palindrome("aabb")
    False
    >>> is_palindrome("race car")
    False
    >>> is_palindrome("")
    True
    """
    if input_string is None:
        return True
    # A string is a palindrome exactly when it equals its own reverse;
    # the empty string trivially qualifies.
    return input_string == input_string[::-1]  # | 165df98dd983a2d84ad30bafbb70168d9599bd8d | 47,687 |
import codecs
import base64
def convert_r_hash_hex(r_hash_hex):
    """Convert a hex-encoded payment hash to its base64 string form.

    >>> convert_r_hash_hex("f9e328f584da6488e425a71c95be8b614a1cc1ad2aedc8153813dfff469c9584")
    '+eMo9YTaZIjkJacclb6LYUocwa0q7cgVOBPf/0aclYQ='
    """
    raw_bytes = codecs.decode(r_hash_hex, 'hex')
    encoded = base64.b64encode(raw_bytes)
    return encoded.decode()  # | 8980e43ff4f30e69e9cfc0ed7427f787ea97d701 | 47,688 |
import re
def contains_curse(sometext):
    """
    Checks a particular string to see if it contains any
    NSFW type words. curse words, things innapropriate
    that you might find in lyrics or quotes
    :param sometext: some text
    :type sometext: Str
    :returns: a boolean stating whether we have a curse word or not
    :rtype: Boolean
    """
    # FIX: the docstring promises a Boolean, but the original returned the
    # raw re.Match / None; coerce so callers get an actual bool.
    return re.search(r'hoe*|bitch*|fag*|puss*|nigg*|fuck*|cunt*|shit*|dick*|cock*', sometext.lower()) is not None  # | e979fec6ba2cd88191dde304445416129a7f5049 | 47,690 |
def get_bib_blocks(content, start_character="@", delim=("{", "}")):
    """
    Return all bib blocks (entries enclosed by the specified delimiters).

    start_character is searched backwards from each block's opening
    delimiter; each result is a 2-tuple: the text from start_character up
    to the opening delimiter, and the block content between the outermost
    delimiters.
    """
    # BUG FIX: the delimiters were previously hard-coded to "{"/"}",
    # silently ignoring the `delim` argument.
    open_delim, close_delim = delim
    blocks = []
    delimiter_stack = []
    for i, c in enumerate(content):
        if c == open_delim:
            delimiter_stack.append(i)
        elif c == close_delim and delimiter_stack:
            start = delimiter_stack.pop()
            # An empty stack means this close matched the outermost open.
            if len(delimiter_stack) == 0:
                start_index = content.rfind(start_character, 0, start)
                blocks.append(
                    (content[start_index:start], content[start + 1 : i])
                )
    return blocks  # | 9fac62c99e4c508adfa8de06887edc3fc1ae46cb | 47,692 |
def stiefel_dimension(dim_n, dim_p):
    """Return the dimension of the product manifold St(1, p)^n.

    In general dim St(d, p)^m = m*d*p - 0.5*m*d*(d + 1); with d = 1 this
    reduces to n*(p - 1).
    """
    per_factor = dim_p - 1
    return dim_n * per_factor  # | dca23894cf806fec59fabb1a2bf41a838d94e809 | 47,694 |
def _create_data_folder(path, source="NSRDB", year="2017"):
    """Create (if needed) the raw data folders for a source/year.

    Args:
        path: base directory as a pathlib.Path
        source: data source name
        year: year subfolder
    Returns:
        tuple: (timeseries_dir, meta_dir) Path objects, both created.
    """
    timeseries_dir = path / "raw" / source / "timeseries" / year
    meta_dir = path / "raw" / source / "meta" / year
    for folder in (timeseries_dir, meta_dir):
        folder.mkdir(exist_ok=True, parents=True)
    return (timeseries_dir, meta_dir)  # | 02c6e32b924968a01549e65600fcc3ba0ed3db78 | 47,695 |
def drop_id_prefixes(item):
    """Recursively rename dict keys ending in 'id' to just 'id'.

    Lists are processed element-wise and other values pass through
    untouched. Note any key ending in 'id' (including e.g. 'valid')
    collapses to 'id', so distinct keys can merge.
    """
    if isinstance(item, list):
        return [drop_id_prefixes(element) for element in item]
    if not isinstance(item, dict):
        return item
    renamed = {}
    for key, value in item.items():
        new_key = 'id' if key.endswith('id') else key
        renamed[new_key] = drop_id_prefixes(value)
    return renamed  # | 6fc752fa49771a0fc6e7e28e889cf29941a95a10 | 47,696 |
def minindex(A):
    """Return an index of the minimum entry in a list. If there are
    multiple minima, the smallest such index is returned."""
    return min(range(len(A)), key=A.__getitem__)  # | 999d9a10f0f62f01d37085efb0b14928b36cf530 | 47,697 |
import argparse
def parse_arguments():
    """Build and parse the Resource-summary command-line flags.

    :return: argparse.Namespace with one boolean attribute per flag.
    """
    parser = argparse.ArgumentParser(description='Resource-summary')
    for flag in ('--cpu-task', '--gpu-task', '--dataset-batch', '--unify',
                 '--recalculate', '--megalodon', '--na19240-winter',
                 '--na19240-sumner', '--collect-data'):
        parser.add_argument(flag, action='store_true')
    return parser.parse_args()  # | 0fee5c64c457904ea8836dd549cf7f5565c7b22e | 47,699 |
def getDescendantsTopToBottom(node, **kwargs):
    """
    Return all the descendants of a node, in hierarchical order,
    from top to bottom (as a reversed iterator over the listRelatives
    result, which yields bottom-to-top when ad=True).
    Args:
        node (PyNode): A dag node with children
        **kwargs: Kwargs given to the listRelatives command
    """
    return reversed(node.listRelatives(ad=True, **kwargs)) | bc7a7fb1ca1ab362943f024c3dd50ce40cfc0ab5 | 47,701 |
import argparse
def CommandLine():
    """Set up an argparse parser for the sidewalk-drops simulation and
    return the parsed command line."""
    parser = argparse.ArgumentParser(
        description="Cover a sidewalk with drops. 2018 by Paul H Alfille")
    parser.add_argument("M", help="Number mash squares on side",
                        type=int, nargs='?', default=100)
    parser.add_argument("D", help="Dot side", type=int, nargs='?', default=1)
    parser.add_argument("N", help="Number of passes",
                        type=int, nargs='?', default=100)
    parser.add_argument("-b", "--binwidth", help="Bin size for histogram",
                        type=int, nargs='?', default=None)
    parser.add_argument("-c", "--CSV", help="comma separated file",
                        nargs='?', type=argparse.FileType('w'))
    parser.add_argument("-q", "--quiet",
                        help="Suppress more and more displayed info (can be repeated)",
                        action="count")
    parser.add_argument("-s", "--scaled", help="Show data scaled",
                        action="store_true")
    return parser.parse_args()  # | d9e3314695478d539d0db089c331ed42164b5c19 | 47,702 |
import string
import random
def random_string_generator(size=4, chars=string.ascii_lowercase + string.digits):
    """Generate a random string.

    Args:
        size (int, optional): length of the string. Defaults to 4.
        chars (str, optional): alphabet to draw from. Defaults to
            lowercase letters plus digits.
    Returns:
        str: the generated random string.
    """
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)  # | 617e20acd54f218f65f98d89b976efc1bebc095a | 47,703 |
import torch
def cross_entropy(targ, pred):
    """
    Elementwise binary cross-entropy between prediction and target.
    Args:
        targ (torch.Tensor): target (cast to float before the loss)
        pred (torch.Tensor): predicted probabilities
    Returns:
        diff (torch.Tensor): unreduced BCE loss, same shape as the inputs.
    """
    loss_fn = torch.nn.BCELoss(reduction='none')
    return loss_fn(pred, targ.to(torch.float))  # | 22f06a7caf58710208131620bbc773d968e3f910 | 47,705 |
def Z_short(omega):
    """Impedance of an ideal short circuit: identically 0 for any `omega`.

    Exists mainly so circuit code can refer to the short by a
    descriptive name.
    """
    return 0  # | fe47f2742c715acbfa7c2cc0bb362d1ce0ccc76b | 47,706 |
def merge1(a, b):
    """Merge two sorted lists into a single sorted list.

    The two inputs are printed first (debugging aid kept from the
    original), then merged with a standard two-pointer walk; whichever
    input is exhausted last has its tail appended.
    """
    print("a : ", a)
    print("b : ", b)
    merged = list()
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    if i == len(a):
        merged.extend(b[j:])
    else:
        merged.extend(a[i:])
    return merged  # | 95ad2ee7da910a93f9e44d1b13527d6c6f52714a | 47,708 |
import torch
def tilted_loss(y_pred, y, q=0.5):
    """
    Pinball (tilted) loss used to fit quantile `q`.
    Parameters:
    - y_pred: Predicted Value
    - y: Target
    - q: Quantile
    """
    residual = y - y_pred
    over = torch.clamp_min(residual, 0)
    under = torch.clamp_min(-residual, 0)
    return q * over + (1 - q) * under  # | f43ebdee74ebe10776685634859628222a9bc9ce | 47,712 |
def generate_panel_arrays(nx, ny, panel_size, indentation, offset_x, offset_y):
    """Generate the bottom-left corners of an nx-by-ny array of equal panels.

    nx, ny: int, panel counts along the X and Y axes.
    panel_size: (int, int), dimensions (dx, dy) of each panel.
    indentation: int, per-row shift along X, applied modulo dx.
    offset_x, offset_y: int, translation applied to the whole array.
    """
    (dx, dy) = panel_size
    corners = []
    for col in range(nx):
        for row in range(ny):
            x = col * dx + (indentation * row) % dx + offset_x
            y = row * dy + offset_y
            corners.append((x, y))
    return corners  # | 4613b6d038856aca927f33b1bf60e8a3f6449406 | 47,713 |
def data_to_category_counts(df):
    """
    Extracts opportunity category counts for each NAICS code.
    Returns one row per NAICS code with a count column per category.
    """
    # NOTE(review): assumes df has 'Opportunity_NAICS' and
    # 'Opportunity__r.Category__c' columns — confirm against the upstream
    # Salesforce export schema.
    return (
        # Count rows per (NAICS, category) pair; any first column serves as
        # the count since groupby().count() counts non-null cells per column.
        df.groupby(['Opportunity_NAICS', 'Opportunity__r.Category__c'])
        .count()
        .iloc[:,0]
        .to_frame()
        .rename(columns={df.columns[0]: 'Count'})
        .reset_index()
        # Spread categories into columns, one row per NAICS code.
        .pivot(index='Opportunity_NAICS', columns='Opportunity__r.Category__c', values='Count')
        .fillna(0)
        .rename(columns={
            'Commodity': 'Commodity Count',
            'Construction': 'Construction Count',
            'Personal Services': 'Personal Services Count'
        })
        .reset_index()
    ) | fa57218d5a6f439789cb7e013892df09c8207a07 | 47,714 |
def add_zero(nb):
    """Convert a number to a string, prefixing a '0' when it is below 10.
    :Parameters:
    * **nb** (:obj:`float`): Number to be converted to a string.
    :Example:
        >>> pycof.add_zero(2)
        ... '02'
    :Returns:
        * :obj:`str`: Converted number as a string.
    """
    text = str(nb)
    return text if nb >= 10 else '0' + text  # | 754d21a11a818293ddd03d287ef992af3c89f0f4 | 47,715 |
import pandas
import numpy
def price_mean(trades: pandas.DataFrame) -> numpy.float64:
    """
    Return the mean price over all trades.
    Args:
        trades: dataFrame of trades with a ``price`` column
    """
    return trades["price"].mean()  # | 885db38ff811e2c1f79b8720e5bf435a006c853e | 47,716 |
import re
def resolve_query_res(query):
    """
    Extract the resource name from a ``query`` string.

    The name is whatever precedes the first '(' (queries with arguments)
    or '{' (queries without); when that prefix is empty the full query is
    returned rather than an empty resource name.
    """
    prefix = re.split('[({]', query, 1)[0]
    return prefix.strip() or query  # | 9f5395c8dd416643c5d8460e0fbec2f83037e4fb | 47,717 |
def calculate_plane_point(plane, point):
    """Fill in the missing coordinate of a point constrained to a 3D plane.

    :param plane: plane coefficients (a, b, c, d) for ax + by + cz + d = 0
    :param point: [x, y, z] with exactly one entry set to None
    :return: Completed [x, y, z] lying on the plane
    """
    a, b, c, d = plane
    x, y, z = point
    if x is None:
        return [-(b * y + c * z + d) / a, y, z]
    if y is None:
        return [x, -(a * x + c * z + d) / b, z]
    if z is None:
        return [x, y, -(a * x + b * y + d) / c]
    return [x, y, z]  # | c086e5181a9d595af5a0ef493a221f2166e71423 | 47,718 |
import time
import sys
def populate(db, source):
    """Populate database table annotations with disease informations.

    :param db: open DB-API connection (committed in batches and closed here).
    :param source: path to a tab-separated source file whose first line is
        a header.
    :return: the string '\\nDone' on completion.
    """
    cur = db.cursor()
    timestart = time.time()
    i = 0
    j = 0
    with open(source) as file:
        next(file)  # skip header
        for line in file:
            # remove \t and \n from string and format string to list
            params = line.strip().split('\t')
            # avoid empty lines
            try:
                # first NULL is index, second NULL is genotype placeholder
                cur.execute('INSERT INTO annotations '
                            'VALUES (NULL, %s, %s, %s, %s, %s, NULL);',
                            params)
                i += 1
            except IndexError:
                continue
            # log progress to console
            if i == 1000:
                db.commit()  # commit 1000 lines at once
                i = 0
                j += 1
                sys.stdout.write(str(j) + 'k lines inserted. Total runtime: %.2f' % round(time.time()-timestart, 2) + ' s.\r')
                sys.stdout.flush()
    db.commit()  # final commit for less than 1000 lines
    cur.connection.close()
    return '\nDone' | c64f803fd300bf1d1ee7c4f6af8a168279033311 | 47,719 |
def revert(vocab, indices):
    """Map word indices back to words, substituting 'X' for any index
    missing from the vocabulary."""
    words = []
    for index in indices:
        words.append(vocab.get(index, 'X'))
    return words  # | 457831d28d26c68b19a07585f0d4de9fe31b0203 | 47,720 |
def getSnpIndicesWithinGeneWindow(snp_chrom, snp_pos, gene_chrom, gene_start, gene_stop=None, window=0):
    """
    Boolean mask of the SNPs lying on the gene's chromosome and within
    ``window`` of the gene body.
    input:
        snp_chrom : chromosome per SNP [F]
        snp_pos : position per SNP [F]
        gene_chrom : gene chromosome (scalar)
        gene_stop : end of the gene (scalar); defaults to gene_start
        gene_start : start of the gene (scalar)
        window : padding around the gene (scalar)
    """
    if gene_stop is None:
        gene_stop = gene_start
    on_chromosome = snp_chrom == gene_chrom
    after_start = (gene_start - window) <= snp_pos
    before_stop = snp_pos <= (gene_stop + window)
    # Elementwise multiplication of boolean arrays acts as logical AND.
    return on_chromosome * after_start * before_stop  # | 1a87e20b8744c9d28e26bd189f7bdaa056238a2b | 47,722 |
import math
def scalarToVector(magnitude, angle):
    """
    Convert a magnitude and a direction into a vector array [x, y, z].
    X is the horizontal component, Y the vertical component; Z (in/out of
    screen) is always 0 for now, pending support for a second angle.
    arg magnitude - the magnitude of the vector
    arg angle - the direction in radians of the vector
    Returns an array with the x, y, z components
    """
    x_component = math.cos(angle) * magnitude
    y_component = math.sin(angle) * magnitude
    return [x_component, y_component, 0]  # | 4d82caed233a3b4df07ff42536fcffa8643e94bf | 47,724 |
def getKey(dictionary: dict, input_value):
    """Reverse lookup: return the first key whose value equals
    input_value, or None when no value matches."""
    for key, value in dictionary.items():
        if input_value == value:
            return key
    return None  # | 395de560ea200c8d5fe88d85a0d366400772e20f | 47,725 |
def getPositiveCoordinateRangeOverlap(uStart1, uEnd1, uStart2, uEnd2):
    """@return: If the two coordinate ranges overlap on the same strand,
    the overlap range (the two middle bounds); None when they do not
    overlap.
    """
    if uEnd1 < uStart2 or uEnd2 < uStart1:
        return None
    bounds = sorted([uStart1, uEnd1, uStart2, uEnd2])
    return bounds[1], bounds[2]  # | 4151a034bd582b0345e537562e3e72fda90f6b7a | 47,728 |
import subprocess
import platform
def subprocess_execute(cmd, cwd=None):
    """
    Execute a shell command and return its combined output as one string.
    :param cmd: command line to run (passed to the shell)
    :param cwd: optional working directory for the command
    :return: decoded stdout+stderr text ('gbk' on Windows, 'utf-8' on
        Linux, or an error note on other platforms)
    """
    res = subprocess.Popen(
        cmd,
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        # stderr is merged into stdout at the OS level...
        stderr=subprocess.STDOUT,
        cwd=cwd
    )
    stdout, stderr = res.communicate()
    # print(stdout, stderr)
    # if stdout:
    #     print(stdout.decode("gbk"))
    # if stderr:
    #     print(stderr.decode("gbk"))
    res_info_list = []
    platform_sys = platform.system()
    # ...so stderr below is always None here; the per-stream branches are
    # kept defensively from the original.
    if platform_sys == "Windows":
        if stdout:
            res_info = stdout.decode("gbk")
            res_info_list.append(res_info)
        if stderr:
            res_info = stderr.decode("gbk")
            res_info_list.append(res_info)
    elif platform_sys == "Linux":
        if stdout:
            res_info = stdout.decode("utf-8")
            res_info_list.append(res_info)
        if stderr:
            res_info = stderr.decode("utf-8")
            res_info_list.append(res_info)
    else:
        # Runtime message kept verbatim: "Unable to determine the OS!"
        res_info = "无法确认操作系统!"
        res_info_list.append(res_info)
    # print(res_info)
    res_info = ''.join(res_info_list)
    return res_info
    # stderr = res.stderr
    # res_info = ""
    # platform_sys = platform.system()
    # if platform_sys == "Windows":
    #     if stderr:
    #         res_info = stderr.read().decode("gbk")
    #     else:
    #         stdout = res.stdout
    #         res_info = stdout.read().decode("gbk")
    # elif platform_sys == "Linux":
    #     if stderr:
    #         res_info = stderr.read().decode("utf-8")
    #     else:
    #         stdout = res.stdout
    #         res_info = stdout.read().decode("utf-8")
    # else:
    #     print("无法确认操作系统!")
    # # print(res_info)
    # return res_info | c56b10dfef619cf13b6f33c2169e692efd9481f4 | 47,730 |
def get_home():
    """Default API path: a hint pointing clients at the robots endpoint."""
    hint = "To access the API, navigate to /api/robots"
    return hint
def exec_no_show(filepath, globals=None, locals=None):
    """Execute a Python file with every ``plt.show`` call neutralised.

    :param filepath: path of the script to execute.
    :param globals: optional globals dict; a fresh one is created when None.
    :param locals: accepted for API symmetry (unused, as before).
    :return: the globals dict after execution.
    """
    if globals is None:
        globals = {}
    # Mimic a direct ``python file.py`` invocation.
    globals["__file__"] = filepath
    globals["__name__"] = "__main__"
    with open(filepath) as handle:
        source = handle.read()
    # "plt.show()" collapses to "()" - an empty tuple, i.e. a no-op.
    source = source.replace("plt.show", "")
    exec(compile(source, filepath, "exec"), globals)
    return globals
def set_email_reply(email_from, email_to, email_cc, email_subject, html_body, attachments):
    """Build a plain-text reply block from the given email details.

    Args:
        email_from: The email author mail.
        email_to: The email recipients.
        email_cc: The email cc.
        email_subject: The email subject.
        html_body: The email HTML body.
        attachments: Optional list of attachment dicts with a 'name' key.

    Returns:
        str. Email reply.
    """
    reply = f"""
    From: {email_from}
    To: {email_to}
    CC: {email_cc}
    Subject: {email_subject}
    """
    if attachments:
        names = [item.get('name', '') for item in attachments]
        reply = reply + f'Attachments: {names}\n'
    return reply + f'\n{html_body}\n'
def inventory_file_path():
    """Return the standard (relative) path of the inventory JSON file."""
    standard_path = 'data/inventory.json'
    return standard_path
from collections import Counter


def term_freq(tokens: list[str]) -> dict:
    """Return the term frequency (count / total tokens) of each token.

    :param tokens: list of string tokens.
    :return: mapping of token -> relative frequency; empty dict for an
        empty token list (the divisor is never used in that case, since
        an empty Counter yields no items).
    """
    # NOTE: imported Counter from collections; the original used the
    # deprecated typing.Counter alias (PEP 585 deprecation).
    total = len(tokens)
    return {term: count / total for term, count in Counter(tokens).items()}
def find_loop(operation_list):
    """Run a nop/jmp/acc program until a position repeats or it terminates.

    Walks the operations sequentially, remembering the accumulator value,
    the set of visited positions and their execution order.

    :param operation_list: sequence of (opcode, argument) pairs; opcode is
        'nop', 'jmp' or 'acc', argument an int-parsable string like '+4'.
    :return: (accumulator, position, visited) - ``position`` is the first
        re-visited position, or -1 when execution fell off the end.
    """
    visited = set()
    order = []
    total = 0  # accumulator; renamed from 'sum', which shadowed the builtin
    position = 0
    while position not in visited:
        if position == len(operation_list):
            # Normal termination: ran past the last instruction.
            return total, -1, order
        # The original duplicated this bookkeeping in every opcode branch.
        visited.add(position)
        order.append(position)
        opcode = operation_list[position][0]
        if opcode == 'jmp':
            position += int(operation_list[position][1])
        else:
            if opcode == 'acc':
                total += int(operation_list[position][1])
            # 'nop' and 'acc' advance to the next instruction. Unknown
            # opcodes are treated as 'nop' (the original spun forever).
            position += 1
    return total, position, order
def parseChoices(optString):
    """Parse a recipe option string into (time, geo, obs, dimensions).

    Example:
        "time=Q4 2014 dimensions=[time,age,industry] geo=K000000001 obs=obs_col"

    :param optString: the option string; "dimensions=" is mandatory,
        "time=", "geo=" and "obs=" are optional.
    :return: tuple (time, geo, obs, dimensions) with None for absent
        optional entries.
    """
    time = None
    geo = None
    obs = None
    dimensions = []
    # Optional "time=". Time may contain whitespace (e.g. "Q1 2014"), so it
    # is isolated by stripping away the other options, whose values contain
    # no whitespace.
    if "time=" in optString:
        assert len(optString.split("time=")) == 2, \
            "You can only specifiy the time (with time=) once."
        tString = optString
        tString = [x for x in tString.split("obs=") if "time=" in x][0]
        tString = [x for x in tString.split("dimensions=") if "time=" in x][0]
        tString = [x for x in tString.split("geo=") if "time=" in x][0]
        time = tString[5:]  # drop the leading "time="
    # Optional single-token "obs=" and "geo=".
    # BUG FIX: the original assigned the raw str.split() list to ``obs``
    # (never extracting the value) and the "geo=" branch clobbered ``obs``
    # instead of setting ``geo``.
    if "obs=" in optString:
        matches = [x for x in optString.split(" ") if x.startswith("obs=")]
        assert len(matches) == 1, "You can only specify the obs column (with obs=) once"
        obs = matches[0][len("obs="):]
    if "geo=" in optString:
        matches = [x for x in optString.split(" ") if x.startswith("geo=")]
        assert len(matches) == 1, "You can only specify the geo column (with geo=) once"
        geo = matches[0][len("geo="):]
    # Mandatory "dimensions=[a,b,c]".
    assert "dimensions=" in optString, \
        "You MUST provide the required dimensions when trying to convert to V4"
    dimText = [x for x in optString.split(" ") if "dimensions=" in x]
    assert len(dimText) == 1, "You should only be specifiying 'dimensions=' once!"
    dimText = dimText[0].replace("dimensions=", "").replace("[", "").replace("]", "")
    dimensions = dimText.strip().split(",")
    return time, geo, obs, dimensions
def get_dict_query_field(dict_field_name: str, sub_field: str):
    """Build a Django ORM lookup key for searching a nested JSON field.

    e.g. ("meta", "a.b") -> "meta__a__b"
    """
    nested = sub_field.replace(".", "__")
    return f"{dict_field_name}__{nested}"
def test_arr():
    """Array indexing fixture: a source snippet with a {dest} placeholder."""
    snippet = """
    var arr: [u8] = {1, 2, 3, 4};
    fn main() {
        {dest} = arr[3];
    }
    """
    return snippet
from typing import Any
from typing import Callable


def lazy(func: Any) -> Callable:
    """Mark *func* as lazy - one that expects its arguments unparsed.

    The flag is stored as a ``lazy`` attribute on the function itself,
    which is returned unchanged.
    """
    setattr(func, "lazy", True)
    return func
def stag_temperature_ratio(M, gamma):
    """Stagnation temperature / static temperature ratio.

    Arguments:
        M (scalar): Mach number [units: dimensionless].
        gamma (scalar): Heat capacity ratio [units: dimensionless].

    Returns:
        scalar: the stagnation temperature ratio :math:`T_0 / T`
        [units: dimensionless].
    """
    half_gamma_minus_one = (gamma - 1) / 2
    return 1 + half_gamma_minus_one * M**2
def get_max_depth(es, *, index):
    """Find the max depth of the root lineage via the Elasticsearch
    ``max_nested_value`` search template.

    :param es: Elasticsearch client exposing ``search_template``.
    :param index: name of the index to query (keyword-only).
    :return: the maximum ``node_depth`` value reported by the aggregation.
    """
    request = {
        "id": "max_nested_value",
        "params": {"path": "lineage", "field": "node_depth"},
    }
    response = es.search_template(index=index, body=request)
    return response["aggregations"]["depths"]["max_depth"]["value"]
def compute_gender(last_gender: str,
                   top_gender_male: int,
                   top_gender_female: int,
                   top_gender_7_days: str):
    """Compute a visitor's gender by majority vote over three signals:
    the last recorded gender, whichever of the male/female top counts is
    larger, and the dominant gender of the last 7 days."""
    count_derived = 'M' if top_gender_male > top_gender_female else 'F'
    votes = [last_gender, count_derived, top_gender_7_days]
    # Same tie-breaking as before: max over the *set* of candidates keyed
    # by vote count.
    return max(set(votes), key=votes.count)
import re


def address_group(address, group_name=None):
    """Return the portion of an hdf address up to (and including) a group.

    When no group_name is given, the final path component is dropped
    instead.

    :param address: str hdf address (forward or back slashes).
    :param group_name: str name of the group to cut at, or None.
    :return: reduced address str.
    """
    if group_name is None:
        # Normalise separators, then drop the last component.
        components = address.replace('\\', '/').split('/')
        return '/'.join(components[:-1])
    # Lazily match everything up to the group name, stopping at the next
    # separator or the end of the address (case-insensitive).
    pattern = r'(.+?%s.*?)(?:\/|$)' % group_name
    return re.findall(pattern, address, re.IGNORECASE)[0]
def combine_dicts(d_tracker, d_return):
    """Merge new entries of d_return into d_tracker.

    Sites already present in d_tracker keep their existing values; sites
    only present in d_return are copied over.

    :param d_tracker: accumulating dict, mutated in place.
    :param d_return: dict possibly containing new sites.
    :return: d_tracker, with any new sites added.
    """
    # BUG FIX: the original computed the keys present in d_tracker but
    # missing from d_return, then indexed d_return with them - a
    # guaranteed KeyError. New sites are those d_return has and
    # d_tracker lacks.
    ls_new_sites = [site for site in d_return if site not in d_tracker]
    for new_site in ls_new_sites:
        d_tracker[new_site] = d_return[new_site]
    return d_tracker
def provide_user_with_service_type(each_func_name):
    # Input for Service Type
    """Prompt the user for a service type for the given function.

    Offered options are "IaaS, FaaS, {IaaS, FaaS}(default)".

    :param each_func_name: name of the function being configured,
        interpolated into the interactive prompt.
    :return: the service type string (currently always "FaaS" - see the
        review note below).
    """
    mixture_pragma = "IaaS, FaaS, {IaaS, FaaS}(default)"
    func_service = input(
        f"{each_func_name} configuration : " f"service type - {mixture_pragma}:\n "
    )
    # NOTE(review): the interactive answer above is immediately discarded
    # and the result hard-coded to "FaaS" - looks like a debugging
    # leftover; confirm whether the user's input should be honoured.
    func_service = "FaaS"
    return func_service
import hashlib


def get_signature(sign_param):
    """Create a jsapi_ticket signature.

    Rule (translated from the original Chinese docstring): the signed
    fields include noncestr, a valid jsapi_ticket, timestamp and url
    (without the '#' fragment). All fields are sorted by key in ASCII
    (dictionary) order, joined as ``key1=value1&key2=value2...`` with
    lower-cased key names and raw (non-URL-escaped) values, and the
    result is SHA-1 hashed.

    :param sign_param: dict of fields participating in the signature.
    :return: hex SHA-1 signature string.
    """
    pairs = ['%s=%s' % (key.lower(), sign_param[key]) for key in sorted(sign_param)]
    raw = '&'.join(pairs)
    return hashlib.sha1(raw.encode('utf-8')).hexdigest()
import sys
def _osx_mode(n):
"""
fstat(2) on UNIX sockets on OSX return different mode bits depending on
which side is being inspected, so zero those bits for comparison.
"""
if sys.platform == 'darwin':
n &= ~int('0777', 8)
return n | ba545be74cee563fe5f9c50332832dedf73a0dad | 47,752 |
import requests
import shutil
def download_file(download_URL, filename):
    """Stream a remote file to disk.

    :param download_URL: URL to fetch.
    :param filename: local path the raw response body is written to.
    :return: the ``filename`` that was written.
    """
    # stream=True + r.raw copies the body in chunks without loading it
    # all into memory.
    # NOTE(review): r.raw bypasses requests' content decoding, so a
    # gzip/deflate-encoded response is written compressed - confirm
    # that is intended.
    with requests.get(download_URL, stream=True) as r:
        with open(filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    return filename
from datetime import datetime
def parse_timestamp(timestamp: str):
    """
    Parse a timestamp returned by discord.

    Not a reliable method at all: the trailing six characters (the UTC
    offset, e.g. "+00:00") are simply cut off before parsing. If you need
    an accurate and safe way to read such values, check out the dateutil
    or arrow libraries instead.

    :param timestamp: An ISO8601 timestamp
    :type timestamp: str
    :return: A parsed (naive) datetime object with the corresponding values
    :rtype: datetime.datetime
    """
    trimmed = timestamp[:-6]  # strip the numeric offset suffix
    return datetime.strptime(trimmed, "%Y-%m-%dT%H:%M:%S.%f")
def is_pos_int(num_str):
    """Tell whether *num_str* spells a positive (non-zero) decimal integer.

    Only plain digit strings qualify: signs, decimal points, exponents,
    underscores and any other characters are rejected, as is "0".

    Args:
        num_str (str): The string that is checked.

    Returns:
        bool

    Examples:
        >>> is_pos_int("1964")
        True
        >>> is_pos_int("0")
        False
        >>> is_pos_int("-1964")
        False
        >>> is_pos_int("25.6")
        False
        >>> is_pos_int("6e5")
        False
        >>> is_pos_int("1_964")
        False
        >>> is_pos_int("NaN")
        False
        >>> is_pos_int("abcdefg")
        False
    """
    assert isinstance(num_str, str)
    return num_str.isdigit() and int(num_str) != 0
from typing import Iterable
def _not_none_count(sequence: Iterable) -> int:
"""
A helper function for counting the number of non-`None` entries in a sequence.
:param sequence: the sequence to look through.
:return: the number of values in the sequence that are not `None`.
"""
return sum(1 for _ in filter(lambda item: item is not None, sequence)) | 82fe58ef458245655feba4c0d82c1160dbfba525 | 47,757 |
import numpy
import pandas
def dx_accuracy(cm):
    """dx_accuracy returns model performance metrics in a Pandas dataframe.

    cm: sklearn confusion matrix object (2x2, rows = truth, cols = pred).

    All statistics are reported for the positive class (y=1), rounded to
    3 decimals.
    """
    true_pos = numpy.diag(cm)
    false_pos = numpy.sum(cm, axis=0) - true_pos
    false_neg = numpy.sum(cm, axis=1) - true_pos
    true_neg = numpy.sum(cm) - (false_pos + false_neg + true_pos)
    sensitivity = true_pos / (true_pos + false_neg)   # recall / hit rate / TPR
    specificity = true_neg / (true_neg + false_pos)   # TNR
    ppv = true_pos / (true_pos + false_pos)           # precision
    npv = true_neg / (true_neg + false_neg)
    fpr = false_pos / (false_pos + true_neg)          # fall-out
    fnr = false_neg / (true_pos + false_neg)
    fdr = false_pos / (true_pos + false_pos)
    accuracy = (true_pos + true_neg) / (true_pos + false_pos + false_neg + true_neg)
    stats = ['Sensitivity', 'Specificity', 'Positive Predictive Value',
             'Negative Predictive Value', 'False Positive Rate',
             'False Negative Rate', 'False Discovery Rate', 'Accuracy']
    # Index [1] selects the positive class.
    vals = [metric[1].round(3)
            for metric in (sensitivity, specificity, ppv, npv,
                           fpr, fnr, fdr, accuracy)]
    return pandas.DataFrame({'DxStatistic': stats, 'Value': vals})
def get_task_parameter(task_parameters, name):
    """Get task parameter.

    Args:
        task_parameters (list): task parameters (dicts with a 'name' key).
        name (str): parameter name.

    Returns:
        The first parameter dict whose 'name' matches, else None.
    """
    return next(
        (param for param in task_parameters if param.get('name') == name),
        None,
    )
import math
def simple_project(latitiude: float) -> float:
    """
    Projects a point to its corrected latitude for the rhumbline
    calculations (Mercator-style projection).

    :param latitiude: A float in radians.
    :return: The projected value in radians.
    """
    half_latitude = latitiude / 2
    return math.tan(math.pi / 4 + half_latitude)
def str2byte(content):
    """Encode a str to bytes using UTF-8."""
    return bytes(content, "utf-8")
def sort_group_connected(group):
    """Sort key: number of connected contacts, negated so that groups with
    more connected contacts sort first under an ascending sort."""
    return -group.get_nb_connected_contacts()
import torch
def reps_dot(sent1_reps: torch.Tensor, sent2_reps: torch.Tensor) -> torch.Tensor:
    """
    Calculate all pairwise dot products between two batches of token
    representations.

    :param sent1_reps: (N, sent1_len, reps_dim)
    :param sent2_reps: (N, sent2_len, reps_dim)
    :return: (N, sent1_len, sent2_len)
    """
    # Batched matmul against the transposed second operand.
    return torch.bmm(sent1_reps, sent2_reps.transpose(-1, -2))
def create_encode_state_fn(vae, measurements_to_include):
    """
    Returns a function that encodes the current state of
    the environment into some feature vector.

    :param vae: model exposing ``encode`` and ``decode`` (presumably a
        variational autoencoder over observation frames - TODO confirm).
    :param measurements_to_include: iterable of measurement names; only
        consulted by the commented-out measurement-appending code below,
        so it currently has no effect on the returned encoding.
    """
    # Turn into bool array for performance
    # NOTE(review): measure_flags is only referenced by the commented-out
    # block inside encode_state - it is currently dead state.
    measure_flags = ["steer" in measurements_to_include,
                     "throttle" in measurements_to_include,
                     "speed" in measurements_to_include,
                     "orientation" in measurements_to_include]
    def encode_state(env):
        # Encode image with VAE. Side effect: env.observation is replaced
        # by the VAE reconstruction of the current frame.
        # frame = preprocess_frame(env.observation)
        frame = env.observation
        encoded_state = vae.encode([frame])
        env.observation = vae.decode(encoded_state)[0]
        # # Append measurements
        # measurements = []
        # for i in range(2): # wyb
        #     if measure_flags[0]:
        #         measurements.append(env.vehicle.control.steer)
        #     if measure_flags[1]:
        #         measurements.append(env.vehicle.control.throttle)
        #     if measure_flags[2]:
        #         measurements.append(env.vehicle.get_speed())
        #     # Orientation could be usedful for predicting movements that occur due to gravity
        #     if measure_flags[3]:
        #         measurements.extend(vector(env.vehicle.get_forward_vector()))
        #     # # wyb: maneuver
        #     # measurements.extend(env.code_maneuver)
        # encoded_state = np.append(encoded_state, measurements)
        return encoded_state
    return encode_state
def headers_to_table(markdown_headers, notebook_name):
    """Produce a markdown table of contents from markdown headers.

    Two passes: the first strips exactly one '#' from every header, the
    second converts each remaining run of '#' into indentation and emits
    one markdown link per header.
    """
    if markdown_headers is None:
        return None
    # Pass 1: drop newlines and strip a single leading '#'.
    demoted = [h.replace("\n", "").replace("#", "", 1) for h in markdown_headers]
    # Pass 2: build the actual table of contents.
    table_of_contents = []
    for entry in demoted:
        # Headers start at the left margin, so the number of removed '#'
        # characters measures the nesting depth.
        title = entry.lstrip("#")
        depth = len(entry) - len(title)
        # Anchor URLs replace every space with '-' (so '--' is valid).
        anchor = title.replace(" ", "-")
        table_of_contents.append(
            u"{spaces}* [{header_name}]"
            "(http://localhost:8888/notebooks/{notebook_name}#{href_header})".format(
                spaces=" " * (depth * 4),
                header_name=title,
                notebook_name=notebook_name,
                href_header=anchor,
            )
        )
    return table_of_contents
import time
def expires_header(duration):
    """Format an HTTP Expires header value *duration* seconds from now.

    :param duration: lifetime in seconds.
    :return: e.g. "Mon, 01-Jan-2030 00:00:00 GMT".
    """
    expiry = time.gmtime(time.time() + duration)
    return time.strftime("%a, %d-%b-%Y %T GMT", expiry)
def replace_by(value, VR, action,
               default_name="John Doe",
               default_date="18000101",
               default_datetime="180001010000.000000",
               default_time="0000.000000",
               default_text="anon",
               default_code="ANON",
               default_age="000M",
               default_decimal="0.0",
               default_integer="0",
               default_uid="000.000.0"):
    """ Replace a 'value' depending of the input 'action' and the value
    representation 'VR'.
    The following action codes are:
    * D - replace with a non zero length value that may be a dummy value and
    consistent with the VR
    * Z - replace with a zero length value, or a non-zero length value that
    may be a dummy value and consistent with the VR
    * X - remove
    * K - keep (unchanged for non-sequence attributes, cleaned for sequences)
    * C - clean, that is replace with values of similar meaning known not to
    contain identifying information and consistent with the VR
    * U - replace with a non-zero length UID that is internally consistent
    within a set of Instances
    * Z/D - Z unless D is required to maintain IOD conformance (Type 2 versus
    Type 1)
    * X/Z - X unless Z is required to maintain IOD conformance (Type 3 versus
    Type 2)
    * X/D - X unless D is required to maintain IOD conformance (Type 3 versus
    Type 1)
    * X/Z/D - X unless Z or D is required to maintain IOD conformance (Type 3
    versus Type 2 versus Type 1)
    * X/Z/U* - X unless Z or replacement of contained instance UIDs (U) is
    required to maintain IOD conformance (Type 3 versus Type 2 versus Type 1
    sequences containing UID references)
    We use here the PS 3.6 convention.
    """
    if action in ["X", "X/Z", "X/D", "X/Z/D", "X/Z/U*"]:
        # Removal actions: the value is dropped entirely.
        return None
    elif action in ["U", "D", "Z", "Z/D"]:
        if VR == "DA":
            return default_date
        elif VR == "AS":
            return default_age
        elif VR == "DS":
            return default_decimal
        elif VR == "DT":
            return default_datetime
        elif VR == "TM":
            return default_time
        elif VR == "FL":
            # SECURITY FIX: float(eval(...)) replaced with float(...) -
            # eval on a caller-supplied default string is unsafe and
            # unnecessary; conversion is identical for numeric literals.
            return float(default_decimal)  # numpy.float32
        elif VR == "FD":
            return float(default_decimal)  # numpy.float64
        elif VR in ["IS"]:
            return default_integer
        elif VR in ["UL", "US", "SS", "SL"]:
            # Same eval removal as above, for the integer defaults.
            return int(default_integer)
        elif VR == "PN":
            return default_name
        elif VR == "UI":
            return default_uid
        elif VR == "CS":
            return default_code
        elif VR in ["LO", "LT", "ST", "SH"]:
            return default_text
        elif VR in ["OB", "OW"]:
            # Note: kept as the *string* default, matching the original.
            return default_integer
        else:
            raise Exception("VR '{0}' is not yet supported. Current value is "
                            "'{1}'.".format(VR, value))
    else:
        raise Exception("Action '{0}' is not yet supported.".format(action))
import requests
from bs4 import BeautifulSoup
def get_content(url):
    """
    Fetch a chapter page and return its text content.
    (Docstring translated from Chinese.)
    Parameters:
        url - download link (string)
    Returns:
        texts - chapter content (string)
    """
    req = requests.get(url=url)
    req.encoding = 'GBK' # decode the response as GBK
    bf = BeautifulSoup(req.text,features='html.parser')
    texts = bf.find_all('div',class_='showtxt')
    # Replace runs of eight non-breaking spaces (the site's paragraph
    # indentation) with blank-line paragraph breaks.
    return texts[0].text.replace('\xa0'*8,'\n\n')
def _field_update(op, path, value):
"""Return a dictionary for a field operation
:param op: *add*, *replace* or *test*
:param path: Path of field
:param value: Field value
:return: dict
"""
return {
"op": op,
"path": "/fields/{}".format(path) if "/fields/" not in path else path,
"value": value
} | 8f1b4841828f04119ca21e86b5d0eb43273ce01e | 47,773 |
import re
def verificador_dui_el_salvador(dui):
    """
    Validate a Salvadoran DUI (Documento Único de Identidad) number.
    (Docstring translated from Spanish.)

    Checks the "dddddddd-d" format with a regular expression, then
    verifies the check digit: each of the first eight digits is
    multiplied by a descending weight (9..2), the products are summed,
    and the check digit must equal (10 - sum % 10) % 10.

    :param dui: candidate DUI string.
    :return: True when both the format and the check digit are valid.
    """
    if re.fullmatch(r"^\d{8}-\d$", dui):
        multiplicador = 9
        verificador = 0
        for digito in dui[:8]:
            verificador += int(digito) * multiplicador
            multiplicador -= 1
        # BUG FIX: the original compared against (10 - sum % 10), which is
        # 10 when the weighted sum is a multiple of 10 and therefore never
        # matched a valid check digit of 0; the final % 10 fixes that.
        return (10 - verificador % 10) % 10 == int(dui[-1])
    return False
from typing import Dict
from typing import Any
def to_sanitized_dict(args) -> Dict[str, Any]:
    """Sanitized serialization of hparams.

    Values of exact type bool/int/float/str are kept as-is, nested dicts
    are sanitized recursively, and every other value is stringified.

    :param args: a dict of hyperparameters, or any object whose
        ``vars()`` provides them.
    :return: a dict containing only plain serializable types.
    """
    if type(args) in [dict]:
        d = args
    else:
        d = vars(args)
    valid_types = [bool, int, float, str]
    items = {}
    for k, v in d.items():
        if type(v) == dict:
            items[k] = to_sanitized_dict(v)
        elif type(v) in valid_types:
            items[k] = v
        else:
            # BUG FIX: the original evaluated str(v) and discarded the
            # result, silently dropping the entry instead of storing the
            # stringified value.
            items[k] = str(v)
    return items
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.