| content (string, 39-14.9k chars) | sha1 (string, 40 chars) | id (int64, 0-710k) |
|---|---|---|
def _prepare_gdal_options(options: dict, split_by_option_type: bool = False) -> dict:
"""
Prepares the options so they are ready to pass on to gdal.
- Uppercase the option key
    - Check that the option type is one of the supported ones:
- LAYER_CREATION: layer creation option (lco)
- DATASET_CREATION: dataset creation option (dsco)
- INPUT_OPEN: input dataset open option (oo)
- DESTINATION_OPEN: destination dataset open option (doo)
- CONFIG: config option (config)
- Prepare the option values
- convert bool to YES/NO
- convert all values to str
Args:
options (dict): options to pass to gdal.
split_by_option_type (optional, bool): True to split the options in a
            separate dict per option type. Defaults to False.
Returns:
dict: prepared options. If split_by_option_type: a dict of dicts for each
            occurring option type.
"""
# Init prepared options with all existing option types
option_types = [
"LAYER_CREATION",
"DATASET_CREATION",
"INPUT_OPEN",
"DESTINATION_OPEN",
"CONFIG",
]
prepared_options = {option_type: {} for option_type in option_types}
# Loop through options specified to add them
for option, value in options.items():
# Prepare option type and name
option_type, option_name = option.split(".")
option_type = option_type.strip().upper()
option_name = option_name.strip().upper()
if option_type not in option_types:
raise ValueError(
f"Unsupported option type: {option_type}, should be one of {option_types}"
)
# Prepare value
if isinstance(value, bool):
value = "YES" if value is True else "NO"
# Add to prepared options
if option_name in prepared_options[option_type]:
raise ValueError(
f"option {option_type}.{option_name} specified more than once"
)
prepared_options[option_type][option_name] = str(value)
    # If a split is asked for, return the per-type dicts; otherwise convert back to the original format
if split_by_option_type is True:
result = prepared_options
else:
result = {}
for option_type in prepared_options:
for option_name, value in prepared_options[option_type].items():
result[f"{option_type}.{option_name}"] = value
return result
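# Illustrative usage (not part of the original source); keys follow the
# "<OPTION_TYPE>.<OPTION_NAME>" convention described in the docstring:
#   _prepare_gdal_options({"LAYER_CREATION.SPATIAL_INDEX": True, "INPUT_OPEN.table": "parcels"})
#   -> {"LAYER_CREATION.SPATIAL_INDEX": "YES", "INPUT_OPEN.TABLE": "parcels"}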
|
744ef3ba48d5f5b8c7deaaf8b9fdf3c6249d6751
| 46,500
|
import random
import string
def gen_random_str(min_length, max_length, prefix=None, suffix=None,
has_letter=True, has_digit=False, has_punctuation=False):
"""
    Given a prefix/suffix, a length range, and the character types to include,
    return a randomly generated string with that prefix/suffix and length.
    :param:
        * min_length: (int) minimum string length
        * max_length: (int) maximum string length
        * prefix: (string) string prefix
        * suffix: (string) string suffix
        * has_letter: (bool) whether the string contains letters, default True
        * has_digit: (bool) whether the string contains digits, default False
        * has_punctuation: (bool) whether the string contains punctuation, default False
    :return:
        * random_str: (string) a random string following the given rules
    Example::
        print('--- gen_random_str demo ---')
        print(gen_random_str(5, 7))
        print(gen_random_str(5, 7, prefix='FISHBASE_'))
        print(gen_random_str(5, 7, prefix='FISHBASE_', suffix='.py'))
        print(gen_random_str(5, 7, has_digit=True, has_punctuation=True))
        print(gen_random_str(5, 7, prefix='FISHBASE_', has_digit=True, has_punctuation=True))
        print('---')
    Output::
        --- gen_random_str demo ---
        q4uo6E8
        FISHBASE_8uCBEUH
        FISHBASE_D4wRX2.py
        FISHBASE_65nqlNs
        FISHBASE_3"uFm$s
        ---
"""
if not all([isinstance(min_length, int), isinstance(max_length, int)]):
raise ValueError('min_length and max_length should be int, but we got {} and {}'.
format(type(min_length), type(max_length)))
if min_length > max_length:
        raise ValueError('min_length should be less than or equal to max_length')
    # avoid an empty random source
if not any([has_letter, has_digit, has_punctuation]):
raise ValueError('At least one value is True in has_letter, has_digit and has_punctuation')
random_str_len = random.randint(min_length, max_length)
random_source = ''
random_source += string.ascii_letters if has_letter else ''
random_source += string.digits if has_digit else ''
random_source += string.punctuation if has_punctuation else ''
    # avoid ValueError: Sample larger than population or is negative
if random_str_len > len(random_source):
random_source *= (random_str_len // len(random_source) + 1)
mid_random_str = ''.join(random.sample(random_source, random_str_len))
prefix = prefix if prefix else ''
suffix = suffix if suffix else ''
random_str = ''.join([prefix, mid_random_str, suffix])
return random_str
|
d7039df8299a858c6fbe62619230740e466772e2
| 46,505
|
def getURL(key):
"""Takes in Spreadsheet key and appends url attributes.
key -- Google Spreadsheet Key
"""
return "https://spreadsheets.google.com/feeds/list/"+str(key)+"/od6/public/basic?alt=json"
|
ee487fb68b7f951d28e6fa2ff04fa79df2e92c47
| 46,509
|
import configparser
def get_default_choice_index_from_config(config, section, option, choice_list, fallback=1):
"""Get index + 1 of the current choice value from cong, replacing with fallback if not found"""
try:
config_val = config.get(section, option)
return [i for i, x in enumerate(choice_list) if "name" in x and x["name"] == config_val][
0
] + 1
except (IndexError, configparser.NoSectionError, configparser.NoOptionError):
return fallback
|
10057cdfdbf1d34d742784fd8606b4359e688687
| 46,512
|
import math
def coords_to_kavrayskiy(coords):
"""Convert geographical coordinates to Kavrayskiy VII coordinates.
A Kavrayskiy VII map is defined with the following dimensions:
- Height: pi units
- Width: sqrt(3) * pi units
"""
# convert degrees to radians
lat, lng = map(lambda deg: deg * math.pi / 180, coords)
x = (3 * lng / 2) * math.sqrt((1 / 3.) - (lat / math.pi)**2)
y = lat
return (x, y)
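# Illustrative check (not part of the original source): a point on the equator at
# longitude 90°E maps to roughly 1.36 units east of the central meridian.
#   coords_to_kavrayskiy((0.0, 90.0))  # -> (~1.360, 0.0)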
|
963269e5f4bae78a536d8917c88d5c493b3d5b7f
| 46,515
|
def denormalize(data, mean, std):
"""
Invert `normalize`
:param data:
:param mean:
:param std:
:return: denormalized data
"""
return (data * std) + mean
|
e78b208e1f422c1faafc72bff97ba7049774a103
| 46,518
|
def get_point(pi, points):
"""Get a point from a numpy array.
If the numpy array has 3 dimensions, will return all three (x,y,z)
Arguments:
pi {int} -- Point index in numpy array
points {ndarray} -- Numpy array of points
Returns:
        [list] -- List of points [x, y, (z)]
"""
if points.shape[1] > 2:
return [points[pi, 0], points[pi, 1], points[pi, 2]]
else:
return [points[pi, 0], points[pi, 1]]
|
5cd2c2998db422b676efdadfb5f60d0add722c5e
| 46,520
|
def create_new_calculator(operations=None):
"""
    Creates a configuration dict for a new calculator. Optionally pre-loads an
initial set of operations. By default a calculator with no operations
is created.
:param operations: Dict with initial operations.
ie: {'sum': sum_function, ...}
"""
calc = {'operations':{}, 'history':[]}
if operations:
calc['operations'] = operations
return calc
|
a633c9b9bef89090d013640255af7294a53b59d7
| 46,523
|
def _login(item):
"""Handle login entries
Returns: title, username, password, url, notes (include any extra URLs)
"""
title = item['name']
notes = item.get('notes', '') or ''
url = None
if len(item['login'].get('uris', [])) > 0:
urls = [i['uri'] or '' for i in item['login']['uris']]
url = urls[0]
if len(urls) > 1:
notes = "{}\n{}".format(notes, "\n".join(urls[1:]))
username = item['login'].get('username', '') or ''
password = item['login'].get('password', '') or ''
return title, username, password, url, notes
|
fdb3114cac5a8e7987d3b86e55acf1f76fc5093f
| 46,527
|
def parse_values(values):
"""Create a new dictionary version from the sheet values passed in.
Arguments:
values -- (list) a 2d list of values from the google sheet
Returns:
new_sheet -- (dictionary) a dictionary representation of 'values'
"""
new_sheet = {}
header = values[0]
values = values[1:] # Shave off the first item (header)
for i in values:
proj_id = '' if i[2] is None else i[2]
folder_url = "https://www.sciencebase.gov/catalog/folder/"
item_url = "https://www.sciencebase.gov/catalog/item/"
if folder_url in proj_id:
proj_id = proj_id.replace(folder_url, '')
if item_url in proj_id:
proj_id = proj_id.replace(item_url, '')
if '/' in proj_id:
# in case there is a trailing slash
proj_id = proj_id.replace('/', '')
if proj_id != '':
new_sheet[proj_id] = {}
for n in range(0, len(header)):
headerVal = header[n]
try:
val_val = i[n]
except IndexError:
val_val = "No Info Provided"
new_sheet[proj_id][headerVal] = val_val
return new_sheet
|
5237cefbfb9cd2de2a5513e75cfdc76714c5d46f
| 46,530
|
def _get_s_exon_paths(s_exon, path_list, s_exon2path):
"""
    Return the set of paths containing a given s_exon.
It uses s_exon2path to memoize the result.
"""
if s_exon not in s_exon2path:
s_exon2path[s_exon] = set(
filter(lambda x: '/' + s_exon + '/' in x, path_list))
return s_exon2path[s_exon]
|
0ea3a1fb350393cdd6dd7a5a4ac52aa415fa04d0
| 46,545
|
import re
def parse_imports(filename):
"""
Reads the file, and scans for imports. Returns all the assumed filename
of all the imported modules (ie, module name appended with ".by")
Args:
filename (str): Path to file
Returns:
list of str: All imported modules, suffixed with '.by'. Ie, the name
the imported files must have if they are bython files.
"""
    with open(filename, 'r') as infile:
        infile_str = infile.read()
imports = re.findall(r"(?<=import\s)[\w.]+(?=;|\s|$)", infile_str)
imports2 = re.findall(r"(?<=from\s)[\w.]+(?=\s+import)", infile_str)
imports_with_suffixes = [im + ".by" for im in imports + imports2]
return imports_with_suffixes
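# Illustrative behaviour (hypothetical file contents, not from the original source):
# for a file containing "import os\nimport numpy.linalg\n", parse_imports returns
# ['os.by', 'numpy.linalg.by'].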
|
3125a187260d796d06e7bf180b304026039b59b5
| 46,547
|
def getconfirmation_input(action):
"""
Method collects user confirmation to proceed with action
INPUTS: action as str, description of the action
OUTPUT: Returns boolean, True to proceed, False to not proceed.
"""
    while True:
        user_input = input(f"Confirm to proceed with '{action}'? [y/N]: ")
        if user_input in ("Y", "y"):
            return True
        elif user_input in ("", "N", "n"):
            return False
        else:
            print(f"Invalid input '{user_input}' >> Expecting [y/Y/n/N].")
|
7d54bf72ff35afc2f180f5c9509dbdf0812118d0
| 46,549
|
def filter_songplays(df):
""" Returns songplay rows """
return df.loc[df['page'] == "NextSong"]
|
c471f3f422129f562546df416093eaf49863f0d6
| 46,550
|
from typing import List
def get_pairs_of_int_two_from_text(text: str, space: str) -> List[List[int]]:
"""Creates pairs of two from the given text
:param text: Text we want to create pairs from
:type text: str
:param space: Target space
:type space: str
:return: Pairs of two in list form
:rtype: List(List(int))
"""
return [
[space.index(text[i]), space.index(text[i + 1])]
for i in range(0, len(text) - 1, 2)
]
|
3c9e572b9cf67c6ab2b5e8fe71765b925ee63d96
| 46,551
|
def ordinal_form(n):
"""Convert number to ordinal form in English.
Parameters
----------
n : :class:`~python:int`
Returns
-------
:class:`~python:str`
Examples
--------
>>> from pprint import pprint
>>> from sknano.core import ordinal_form
>>> pprint([ordinal_form(i) for i in range(200)], width=70, compact=True)
['0th', '1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th',
'10th', '11th', '12th', '13th', '14th', '15th', '16th', '17th',
'18th', '19th', '20th', '21st', '22nd', '23rd', '24th', '25th',
'26th', '27th', '28th', '29th', '30th', '31st', '32nd', '33rd',
'34th', '35th', '36th', '37th', '38th', '39th', '40th', '41st',
'42nd', '43rd', '44th', '45th', '46th', '47th', '48th', '49th',
'50th', '51st', '52nd', '53rd', '54th', '55th', '56th', '57th',
'58th', '59th', '60th', '61st', '62nd', '63rd', '64th', '65th',
'66th', '67th', '68th', '69th', '70th', '71st', '72nd', '73rd',
'74th', '75th', '76th', '77th', '78th', '79th', '80th', '81st',
'82nd', '83rd', '84th', '85th', '86th', '87th', '88th', '89th',
'90th', '91st', '92nd', '93rd', '94th', '95th', '96th', '97th',
'98th', '99th', '100th', '101st', '102nd', '103rd', '104th', '105th',
     '106th', '107th', '108th', '109th', '110th', '111th', '112th',
     '113th', '114th', '115th', '116th', '117th', '118th', '119th',
'120th', '121st', '122nd', '123rd', '124th', '125th', '126th',
'127th', '128th', '129th', '130th', '131st', '132nd', '133rd',
'134th', '135th', '136th', '137th', '138th', '139th', '140th',
'141st', '142nd', '143rd', '144th', '145th', '146th', '147th',
'148th', '149th', '150th', '151st', '152nd', '153rd', '154th',
'155th', '156th', '157th', '158th', '159th', '160th', '161st',
'162nd', '163rd', '164th', '165th', '166th', '167th', '168th',
'169th', '170th', '171st', '172nd', '173rd', '174th', '175th',
'176th', '177th', '178th', '179th', '180th', '181st', '182nd',
'183rd', '184th', '185th', '186th', '187th', '188th', '189th',
'190th', '191st', '192nd', '193rd', '194th', '195th', '196th',
'197th', '198th', '199th']
"""
ordinal_suffix = {}
ordinal_suffix.update(dict.fromkeys(range(20), 'th'))
ordinal_suffix.update({1: 'st', 2: 'nd', 3: 'rd'})
    try:
        return ''.join((str(n), ordinal_suffix[n]))
    except KeyError:
        # numbers ending in 11-13 take 'th' (e.g. 111th); otherwise use the last digit
        if 10 <= n % 100 <= 19:
            return ''.join((str(n), 'th'))
        last_digit = n % 10
        return ''.join((str(n), ordinal_suffix[last_digit]))
|
ee9545e15c0a2a85a76da0bde38ee5342687de0e
| 46,554
|
def format_image_size(size):
"""Formats the given image size to a two-element tuple.
A valid image size can be an integer, indicating both the height and the
    width, OR can be a two-element list or tuple. Both height and width are
    assumed to be non-negative integers.
Args:
size: The input size to format.
Returns:
A two-elements tuple, indicating the height and the width, respectively.
Raises:
ValueError: If the input size is invalid.
"""
if not isinstance(size, (int, tuple, list)):
raise ValueError(f'Input size must be an integer, a tuple, or a list, '
f'but `{type(size)}` received!')
if isinstance(size, int):
size = (size, size)
else:
if len(size) == 1:
size = (size[0], size[0])
if not len(size) == 2:
            raise ValueError(f'Input size is expected to have one or two '
                             f'numbers, but `{len(size)}` numbers received!')
if not isinstance(size[0], int) or size[0] < 0:
raise ValueError(f'The height is expected to be a non-negative '
f'integer, but `{size[0]}` received!')
if not isinstance(size[1], int) or size[1] < 0:
raise ValueError(f'The width is expected to be a non-negative '
f'integer, but `{size[1]}` received!')
return tuple(size)
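# Illustrative (not part of the original source):
#   format_image_size(256)        -> (256, 256)
#   format_image_size([480, 640]) -> (480, 640)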
|
caefa0ab4c9c4cdb4bde9caf5c07b65fee58ab62
| 46,559
|
def next_player(table, from_seat, step=1, hascards=False):
""" Attempts to find the index of the next valid player from the from_seat.
If step is -1 it will search backwards on the table. Step can only be
1 or -1. We can also specify to search for the next player with cards
by setting hascards to True. If no player is found after searching
the length of the table, an exception is raised.
"""
if from_seat < -1 or from_seat >= len(table):
raise ValueError('from_seat is out of bounds!')
if abs(step) != 1:
raise ValueError('step needs to be 1 or -1.')
length = len(table)
for i in range(1, length + 1):
_seat = (from_seat + (i * step)) % length
s = table.seats[_seat]
if s.vacant():
continue
elif hascards and not s.has_hand():
continue
return _seat
raise Exception('Error finding player!')
|
9606b7b340b25d3dc277d0cfb1328e1e4d32cbb9
| 46,560
|
def strip_namespace(obj):
"""
    Returns the given object name after stripping the namespace
:param obj: str, object to strip namespace from
:return: str
"""
return obj.split(':')[-1]
|
1c34576458df1a90b6c0d075d7e54bbd7c350125
| 46,564
|
def convert_mothur_bool(item):
"""Converts python bool into a format that is compatible with mothur."""
if item is True:
return 'T'
elif item is False:
return 'F'
else:
return item
|
2729fd1a3a0d2b0d8e9c8eaf786ed137e5c6c7c8
| 46,565
|
def case_normalizer(word, dictionary):
"""In case the word is not available in the vocabulary, we can try multiple
case normalizing procedures. We consider the best substitute to be the one
with the lowest index, which is equivalent to the most frequent
alternative.
Obtained from
https://nbviewer.jupyter.org/gist/aboSamoor/6046170
Args:
word (str): The word.
        dictionary (dict): A mapping from word form to frequency rank (lower is more frequent).
Returns:
(str): The case-normalized word.
"""
w = word
lower = (dictionary.get(w.lower(), 1e12), w.lower())
upper = (dictionary.get(w.upper(), 1e12), w.upper())
title = (dictionary.get(w.title(), 1e12), w.title())
results = [lower, upper, title]
results.sort()
index, w = results[0]
if index != 1e12:
return w
return word
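# Illustrative (hypothetical frequency dictionary, not from the original source):
#   case_normalizer('paris', {'Paris': 10, 'PARIS': 5000})  -> 'Paris'
# because the title-cased form has the lowest (most frequent) index.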
|
1af2efc898777709c6bfce486dece5dbbb33f984
| 46,570
|
def get_bbox(x_start, y_start, x_end, y_end):
"""
This method returns the bounding box of a face.
Parameters:
-------------
x_start: the x value of top-left corner of bounding box
y_start: the y value of top-left corner of bounding box
    x_end: the x value of bottom-right corner of bounding box
    y_end: the y value of bottom-right corner of bounding box
returns:
--------------
    [x1, y1, x2, y2, x3, y3, x4, y4]
    the list of x and y values of the four corners, in order: top-left, top-right, bottom-right, bottom-left
"""
x1 = x_start
y1 = y_start
x2 = x_end
y2 = y_start
x3 = x_end
y3 = y_end
x4 = x_start
y4 = y_end
return [x1, y1, x2, y2, x3, y3, x4, y4]
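# Illustrative (not part of the original source):
#   get_bbox(10, 20, 110, 220) -> [10, 20, 110, 20, 110, 220, 10, 220]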
|
79e440d4875f1e32d5f678d6715777ab525f4c69
| 46,572
|
def avoids(word, forbidden):
"""Returns True if word does not contain any letter in forbidden string
>>> avoids('yummy', 'abcdefg')
True
>>> avoids('dictionary', 'abcdefg')
False
>>> avoids('crypt', 'aeiou')
True
>>> avoids('tangible', 'aeiou')
False
"""
for letter in word:
if letter in forbidden:
return False
return True
|
bc6e87afb968b7651a992400a899fb0b231c0d77
| 46,580
|
def unlock_time(web3):
"""UNIX timestamp to unlock tokens 180 days in the future."""
return web3.eth.getBlock('pending').timestamp + 180 * 24 * 60 * 60
|
fe8888b663efc101ba79fc7200a342c09f44257c
| 46,582
|
def define_result(result: str, index: int) -> str:
"""
Rather than having the Team decode this, what if we just told it
whether it had a win, loss, or draw? Then it wouldn't matter how
we came to know this, we would just have to inform the team.
The less that the Team knows about how we determine the match results,
the less likely it will be that the Team will need to be modified if
we modify how we represent match results.
:param result: Game Outcome
:param index: Team Index
:return:
"""
# remove spaces and convert chars to lower case
result = result.strip().lower()
possible_results = ('win', 'loss', 'draw')
if result not in possible_results:
raise ValueError("ERROR: this is invalid game outcome: {}".format(result))
if result == 'win' and index == 0:
return possible_results[0]
elif result == 'win' and index == 1:
return possible_results[1]
elif result == 'loss' and index == 0:
return possible_results[1]
elif result == 'loss' and index == 1:
return possible_results[0]
else:
return possible_results[2]
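# Illustrative (not part of the original source): a 'win' outcome reported to the
# team at index 1 is a loss for that team.
#   define_result(' Win ', 1) -> 'loss'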
|
6cf794f99334cbaa93139a801ada10cfec621790
| 46,585
|
def IsValueValid(value):
"""Validate the value.
    An invalid value is either an empty string or a string containing '+'.
Args:
value: string. The value in raw result.
Returns:
A boolean indicates if the value is valid or not.
"""
if value == '' or '+' in value:
return False
return True
|
bf3254371080d24c5f12188f0a52459f85480350
| 46,588
|
def is_predicate(logic, start, end):
"""Returns T/F depending on if logic string is predicate and index of predicate operator (<=)"""
equals_ind = logic.index("=")
    if equals_ind > end or equals_ind < start:
return True, equals_ind
return False, -1
|
254527bc4a5604fa9ad4f3470adcce4a846fa44f
| 46,591
|
import re
def trim_stopwords(s, stop_words):
"""Case-insensitive removal of stop phrases/words from a string
>>> trim_stopwords('Depártment de Testing Test royale', ['depártment de', 'royale'])
'Testing Test'
"""
    for stop in stop_words:
        if ' ' in stop:  # phrase
            s = re.sub(re.escape(stop), '', s, flags=re.IGNORECASE)
        else:  # individual word
            s = ' '.join(w for w in s.split() if w.lower() != stop)
    return s.strip()
|
21b161ace4dd0ea288719856c03156fc8b08ec3a
| 46,592
|
import requests
def fetch_file_content(resource_url):
"""
Fetch the XML content of a resource via its URL
    :param resource_url: the URL of the resource
:return: the text content of the file, None if the status code was not 200
"""
response = requests.get(resource_url, headers={'accept': 'application/xml'})
if response.status_code != 200:
return None
return response.text
|
98b24b181eeef1e923900c4540c41b1b9e743ee0
| 46,597
|
def value_to_string(value, fmt):
"""
Convert numerical value to string with a specific format
"""
return "{value:>{fmt}}".format(value=value, fmt=fmt).strip()
|
07437330b5c42640b48871cf37c503aa1a1b689a
| 46,599
|
def str_rgb(rgb):
"""
Returns the string representation of an RGB object without alpha
Parameter rgb: the color object to display
Precondition: rgb is an RGB object
"""
return '('+str(rgb.red)+', '+str(rgb.green)+', '+str(rgb.blue)+')'
|
2b0cba25a874605c0666e7a7ef5c3491558a6fba
| 46,600
|
from typing import Optional
from pathlib import Path
def get_home_path() -> Optional[str]:
"""get home directory path, return None if failed"""
try:
return str(Path.home())
except RuntimeError:
return None
|
05b35523084cf81c5890031ed1c5c533e6c5fe5d
| 46,601
|
from typing import Dict
def unparse_connection(dsn: Dict[str, str]) -> str:
"""Return connection string for pretty printing or copying when starting psql."""
values = dict(dsn)
for key in ("user", "port"):
if key not in values:
values[key] = "<default>"
return "host={host} port={port} dbname={database} user={user} password=***".format_map(values)
|
7b110587b5a56ed775d4e51c2bc6fe392c7bb727
| 46,610
|
import yaml
def load_yaml(filepath):
"""Load the content of a YAML file to a dictionary."""
with open(filepath, "r") as m_file:
content = yaml.safe_load(m_file)
return content
|
c5d72a0af8234d7f369359a11279ba1a07b37f54
| 46,612
|
def construct_res_tuple(res):
"""
Build the BigDFT fragment tuple given the residue of the structure
Args:
        res (Residue): A Residue instance from the Biopython package
Returns:
tuple: the (chain,fragname,fragno) tuple
"""
chain = res.full_id[2]
if len(chain.lstrip(' ')) == 0:
chain = 'A'
resname = res.resname
position = res.full_id[3][1]
return chain, resname, position
|
6091470e4a16f7b04ee9d3fcf5bf64f30f5a1b58
| 46,617
|
def get_id_from_name(name):
"""
Takes the lowercase name of a component and removes '-' and redundant spaces by splitting and
then rejoining on spaces. Spaces and underscores are finally replaced with '-'.
"""
return ' '.join(name.lower().replace('-', '').split()).replace(' ', '-').replace('_', '-')
|
252b7268b80de95920e73e1e70573d869d22e9f3
| 46,620
|
def get_prop_t(pid, i):
"""
Gets a value of 'P585' (point in time) from a Wikidata property
"""
return pid[i]["qualifiers"]["P585"][0]["datavalue"]["value"]["time"]
|
fabaf5885fa2d5543182c5f19c818da43b533886
| 46,623
|
def is_valid(n: str) -> bool:
    """Tests if n represents a float or an int"""
    try:
        float(n)
        return True
    except ValueError:
        return False
|
aa93c78f863942a3b5a2d25e3f96e83515117007
| 46,625
|
def check_if_vertically_overlapped(box_a, box_b):
"""
    Return whether box_b overlaps box_a vertically.
:param box_a:
:param box_b:
    :return: true if they overlap vertically, false otherwise
"""
return \
box_a['y_min'] < box_b['y_min'] < box_a['y_max'] or \
box_a['y_min'] < box_b['y_max'] < box_a['y_max'] or \
(box_a['y_min'] >= box_b['y_min'] and box_a['y_max'] <= box_b['y_max']) or \
(box_a['y_min'] <= box_b['y_min'] and box_a['y_max'] >= box_b['y_max'])
|
e749d5161a4dec2fb3feeb4f6c66ab604731b3e7
| 46,626
|
def ms2smp(ms, fs):
"""
Parameters
----------
ms: float
Time in milliseconds
fs: float
Sampling rate in Hz.
"""
# return corresponding length in samples
return int(float(fs) * float(ms) / 1000.0)
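# Illustrative (not part of the original source): 25 ms at a 16 kHz sampling rate
# corresponds to 400 samples.
#   ms2smp(25, 16000) -> 400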
|
80f8ca79cd4bc8dc3defefde2749c2e97c37d744
| 46,629
|
import collections
def DictFilter(alist, bits):
"""Translates bits from EDID into a list of strings.
Args:
alist: A list of tuples, with the first being a number and second a string.
bits: The bits from EDID that indicate whether each string is supported by
this EDID or not.
Returns:
A dict of strings and bools.
"""
d = collections.OrderedDict()
for x, s in alist:
d[s] = bool(bits & x)
return d
|
d314fdb4c1fd34ae9974f63d64495b8cafefbef5
| 46,630
|
import re
def is_html(first_bytes):
""" Detect whether a file contains HTML by examining its first bytes. """
BOMS = [
(b'\xef\xbb\xbf', 'utf-8'),
(b'\x00\x00\xfe\xff', 'utf-32-be'),
(b'\xff\xfe\x00\x00', 'utf-32-le'),
(b'\xff\xfe', 'utf-16-le'),
(b'\xfe\xff', 'utf-16-be'),
]
for bom, enc in BOMS:
if first_bytes.startswith(bom):
s = first_bytes[len(bom):].decode(enc, 'replace')
break
else:
s = first_bytes.decode('utf-8', 'replace')
return re.match(r'^\s*<', s)
|
5c1c9a8d097eb23b931cf4ed595585b8908f51e2
| 46,631
|
import sqlite3
def get_setting(path, name):
"""Gets a setting value from the settings database."""
    db = sqlite3.connect(path)
    try:
        r = db.execute("SELECT value FROM setting WHERE key = ?", (name,)).fetchone()
    finally:
        db.close()
    return r[0] if r else None
|
42491fdf3102ce5e0afc0c68721f8445b2718cee
| 46,633
|
def build_data_dict(stats_arr, prefix):
"""
Build a data dictionary with columns named according to (k,z) bins, a
threshold value, and some prefix.
Assumes that stats_arr has shape: (N_samp, N_thres, N_z, N_kbins)
"""
# Get no. of points in each dimension.
    N_samp, N_thres, N_z, N_kbins = stats_arr.shape
# Create dictionary with column names that can be used by ColumnDataSource
data_dict = {}
for n in range(N_thres):
for j in range(N_z):
for m in range(N_kbins):
key = "tot_%s_h%d_k%d_z%d" % (prefix, n+1, m+1, j+1)
data_dict[key] = stats_arr[:,n, j, m]
return data_dict
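# Illustrative (hypothetical shapes, not from the original source): for a stats_arr of
# shape (100, 2, 3, 4) and prefix "pk", keys run from "tot_pk_h1_k1_z1" to
# "tot_pk_h2_k4_z3", each mapping to a length-100 column of samples.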
|
808eeba9ed71b8d1420c6277d8db459d211d52bd
| 46,637
|
def text_align(keyword):
"""``text-align`` property validation."""
return keyword in ('left', 'right', 'center', 'justify')
|
71db2a5f97831e9fb4de27ba670aa1fd14913fa2
| 46,641
|
def is_nested(ctx, rp1_uuid, rp2_uuid):
"""Returns True if the two resource providers are related with a :CONTAINS
relationship. The direction of the relationship doesn't matter.
"""
query = """
MATCH p=(rp1 {uuid: '%s'})-[:CONTAINS*]-(rp2 {uuid: '%s'})
RETURN p
""" % (rp1_uuid, rp2_uuid)
result = ctx.tx.run(query).data()
return bool(result)
|
d8c8faa95600024b272dd133bb33609f6483cd33
| 46,643
|
import random
def ChooseFrom(choice_list):
"""Return a random choice from given list.
Args:
choice_list: list of possible choices.
Returns:
One random element from choice_list
"""
return random.choice(choice_list)
|
08ab9609ef974802f699803b8a7b2ff740a964d3
| 46,644
|
def validate_rule_group_type(rule_group_type):
"""
Validate Type for RuleGroup
Property: RuleGroup.Type
"""
VALID_RULE_GROUP_TYPES = ("STATEFUL", "STATELESS")
if rule_group_type not in VALID_RULE_GROUP_TYPES:
raise ValueError(
"RuleGroup Type must be one of %s" % ", ".join(VALID_RULE_GROUP_TYPES)
)
return rule_group_type
|
529d515fc1dd16e8a2e4e1d8b062096126c7c809
| 46,647
|
import torch
def get_semantic_segmentation(sem):
"""
Post-processing for semantic segmentation branch.
Arguments:
        sem: A Tensor of shape [N, C, H, W], where N is the batch size; for consistency, we only
support N=1.
Returns:
A Tensor of shape [1, H, W] (to be gathered by distributed data parallel).
Raises:
ValueError, if batch size is not 1.
"""
if sem.size(0) != 1:
raise ValueError('Only supports inference for batch size = 1')
sem = sem.squeeze(0)
return torch.argmax(sem, dim=0, keepdim=True)
|
5561f359f6a7d8acbad8d25596970aa681e43948
| 46,655
|
import torch
def getKneighborsConnections(affinity_mat: torch.Tensor, p_value: int):
"""
Binarize top-p values for each row from the given affinity matrix.
"""
binarized_affinity_mat = torch.zeros_like(affinity_mat).int()
for i in range(affinity_mat.shape[0]):
line = affinity_mat[i, :]
sorted_idx = torch.argsort(line, descending=True)
indices = sorted_idx[:p_value]
binarized_affinity_mat[indices, i] = torch.ones(indices.shape[0]).to(affinity_mat.device).int()
return binarized_affinity_mat
|
95c3b6237a34ef10e6551991f6250f5f14837059
| 46,656
|
def crop_center(img, size):
"""
Crops out the center of `img` according to the given `size` tuple.
"""
return img[(Ellipsis,) + tuple(slice((x - s) // 2, (x + s) // 2)
if s < x else slice(None)
for x, s in zip(img.shape[-len(size):],
size))]
|
36594ba9997a3e0c4eaac7feb03d2a7f9ef1ea34
| 46,658
|
import json
def GetDownloadSerializationData(src_obj_metadata, progress=0):
"""Returns download serialization data.
There are four entries:
auto_transfer: JSON-specific field, always False.
progress: How much of the download has already been completed.
total_size: Total object size.
url: Implementation-specific field used for saving a metadata get call.
        For JSON, this is the download URL of the object.
For XML, this is a pickled boto key.
Args:
src_obj_metadata: Object to be downloaded.
progress: See above.
Returns:
Serialization data for use with Cloud API GetObjectMedia.
"""
serialization_dict = {
'auto_transfer': 'False',
'progress': progress,
'total_size': src_obj_metadata.size,
'url': src_obj_metadata.mediaLink
}
return json.dumps(serialization_dict)
|
64b0594268074f92499e6db08ba047e81633d4a4
| 46,665
|
def create_crumb(title, url=None, active=False):
"""
Helper function that creates breadcrumb.
"""
active_str = ""
if active:
active_str = ' class="active"'
inner_str = "%s" % title
if url:
inner_str = '<a href="%s">%s</a>' % (url, title)
crumb = "<li%s>%s</li>" % (active_str, inner_str)
return crumb
|
c3902084627a7a80804efc1b8e97f18040b9a0f9
| 46,666
|
import pickle
def is_picklable(obj: object) -> bool:
"""Tests if an object can be pickled"""
try:
pickle.dumps(obj)
return True
except (pickle.PickleError, AttributeError):
return False
|
9246cfc006ec382620bb4fb4961a447e6ab924d3
| 46,668
|
import json
def toJson(val):
"""Return JSON string
Supported datatypes:
<class 'sqlalchemy.engine.result.RowProxy'>
<type 'dict'>
Args:
val: datatype that gets converted to JSON string
"""
if str(type(val)) == "<class 'sqlalchemy.engine.result.RowProxy'>":
return json.dumps(dict(val.items()))
return json.dumps(val)
|
9464cf94669f851588fd7b6e63159198c0eb91b9
| 46,670
|
import requests
def _is_ssl_issue(jwks_url: str) -> bool:
"""Helper method to verify if auth failed because of an ssl issue looking up the jwks
Args:
jwks_url: url to the jwks endpoint
Returns:
true if it is an ssl error, false if anything else happens (success or any other failure)
"""
result = False
try:
requests.get(jwks_url)
except requests.exceptions.SSLError:
result = True
    except Exception:
        # any non-SSL failure (or a successful request) means this is not an SSL issue
        pass
return result
|
82d2f140a40f6a80ca52d27d00dc668429786b3c
| 46,671
|
from collections import OrderedDict
def cols2dict(headers,iterable):
"""creates an ordered dictionary from sequences of keys and values
headers - a sequence of hashable items
iterable - a sequence of items (checked to be the same length as headers)
"""
    if len(headers) != len(iterable): raise ValueError('headers and iterable sequences are not of equal length')
return OrderedDict(zip(headers,iterable))
|
236065bd4654790dd86d2e5bc3c8c81e0a901ba0
| 46,673
|
def list_difference(l1,l2):
"""
return the difference between 2 lists
:param l1:
:param l2:
:return: list l1 - list l2
"""
return list(set(l1) - set(l2))
|
3ef73c1c58e27abf2a2d58c1de872cf3cb4b2a8f
| 46,677
|
def _parse_grid_origin(grid_origin, radars):
""" Parse the grid origin parameter, finding origin if not given. """
if grid_origin is None:
lat = float(radars[0].latitude['data'])
lon = float(radars[0].longitude['data'])
grid_origin = (lat, lon)
return grid_origin
|
383ec0ef5a5212ce0eec7346235a409a9d251733
| 46,681
|
import itertools
import random
def reservoir(iterator, k):
""" Performs reservoir sampling of k items in iterator. Make sure that the iterator is a once-only iterator
(ie. not created using the "range" function).
:param iterator: set of items to sample from
:param k: sample k items
:return: list of sampled items
"""
sample = list(itertools.islice(iterator, 0, k))
for i, item in enumerate(iterator):
replace = random.randint(0, i + k)
if replace < k:
sample[replace] = item
return sample
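# Illustrative (not part of the original source): sample 3 items from a once-only iterator.
#   sampled = reservoir(iter(range(100)), 3)  # e.g. [12, 47, 83]; contents vary per run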
|
c8ef11758246cbda4d07223c94daedf834513f9e
| 46,685
|
def calculate_kinetic_energy(mass, velocity):
"""Returns kinetic energy of mass [kg] with velocity [ms]."""
return 0.5 * mass * velocity ** 2
def test_calculate_kinetic_energy():
mass = 10 # [kg]
velocity = 4 # [m/s]
assert calculate_kinetic_energy(mass, velocity) == 80
|
f5552b919a671f072ae4bf1e06f2b28239f158e8
| 46,691
|
import shutil
def dependency_check(dependency):
"""
Uses shutil to check if a dependency is installed (won't check version of anything - just presence)
:param dependency: The dependency as it would be called on the command line (i.e. for blastn, would be blastn)
:return: True if dependency is present, False if it is not found.
"""
    return shutil.which(dependency) is not None
|
4bfb07814492eb7257e81653ef0ea816c71a3341
| 46,692
|
def cleanup_user(user):
"""Given a dictionary of a user, return a new dictionary for output as
JSON."""
return {"id" : str(user["_id"])}
|
4ebd6abefbac839c26ebfd9c6a0503e51b3d48a5
| 46,694
|
def _content(obj):
"""Return content of obj as bytes"""
if type(obj) is bytes:
return obj
if not isinstance(obj, memoryview):
obj = memoryview(obj)
return obj.tobytes()
|
5241b542c2d94b118c447c93a39b784bbe0d2ba5
| 46,698
|
import torch
def divide_sequence(
x: torch.Tensor,
seq_len: int,
pad: bool) -> torch.Tensor:
"""
Break full_len -> n_samples * seq_len
Args:
x:
tensor: (full_len, ...)
seq_len:
Divided sequence length, the second dimension of the output tensor
pad:
Pad with zeros or discard the remainder sequence
Returns:
tensor, where the first input dimension (full_len, ) is split into (n_samples, seq_len)
"""
full_len = x.size()[0]
k_dims = list(x.size()[1:])
remainder = full_len % seq_len
divisible = remainder == 0
if not divisible:
if pad:
pad_len = seq_len - remainder
pad_size = [pad_len] + k_dims
x_pad = torch.zeros(size=pad_size, dtype=x.dtype)
if x.is_cuda:
x_pad = x_pad.cuda()
x = torch.cat([x, x_pad], dim=0)
else: # discard remainder
x = x[0:-remainder]
new_size = [-1, seq_len] + k_dims
return x.view(*new_size)
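# Illustrative (hypothetical shapes, not from the original source): a (10, 3) tensor split
# into sequences of length 4 gives shape (3, 4, 3) with pad=True (zero-padded to 12 rows),
# or (2, 4, 3) with pad=False (remainder discarded).
#   chunks = divide_sequence(torch.randn(10, 3), seq_len=4, pad=True)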
|
c128ee735b92287b450d74f706714eb0a5dc6c25
| 46,701
|
def get_multi_values(columns_to_insert,insert_values_dict_lst):
"""
returns the values for the placeholders in query.
:param columns_to_insert:
:param insert_values_dict_lst:
:return:
"""
values = []
for value_dict in insert_values_dict_lst:
for col in columns_to_insert:
value = value_dict[col]
values.append(value)
return values
|
9a7d9213f42eee303b3845ef8d1564ef72668c89
| 46,707
|
import re
def is_literature(paragraph: str) -> bool:
"""
Check if a paragraph is a literature entry.
Parameters
----------
paragraph : str
Returns
-------
is_literature : bool
"""
doi_regex = re.compile(r"""(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\'<>])\S)+)""")
issn_regex = re.compile(r"""ISSN \d+""", re.IGNORECASE)
vol_regex = re.compile(r"""vol\. [IVCXL\d]+""", re.IGNORECASE)
return (
"ISBN" in paragraph
or bool(doi_regex.search(paragraph))
or bool(issn_regex.search(paragraph))
or bool(vol_regex.search(paragraph))
or "https://" in paragraph
or "http://" in paragraph
)
|
861c6332fb4eea0a696e1705c68dd49c6bab0885
| 46,708
|
def compute_MIF(ic):
"""
Args:
        ic (IC): independent component.
Returns:
float: Myogenic identification feature.
"""
freqs, psd = ic.psd(verbose=False)
mean_psd = psd.mean(axis=0)
return mean_psd[freqs > 20].sum() / mean_psd.sum()
|
423a96a9dfcc3c05b2fb05f97e7912a534b41c95
| 46,711
|
def preformatted(text: str) -> str:
"""Make text to pre-formatted text."""
return f'```{text}```'
|
152c9cf6ce78ffed74b23562f7f09195340ab9b0
| 46,716
|
def interest1(b, p, n):
"""
INTEREST1(b, p, n) computes the new balance after n years for an initial
balance b and an annual interest rate p in per cent
"""
return b*(1 + p/100)**n
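# Illustrative (not part of the original source): 1000 at 5% for 3 years grows to
# 1000 * 1.05**3, i.e. about 1157.63.
#   interest1(1000, 5, 3)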
|
351ec07ed8e9c12728a6ae033eaaba7585ccf29d
| 46,719
|
import functools
def chain(func):
"""
Decorator function that allows class methods to be chained by implicitly returning the object. Any method
decorated with this function returns its object.
:param func:
:return:
"""
@functools.wraps(func)
def wrap(self, *args, **kwargs):
func(self, *args, **kwargs)
return self
return wrap
|
fe603c769d2ca7a3f9bbc31cb37a82c948062825
| 46,720
|
def find_factors(b):
"""Find factors of a number."""
res = []
for i in range(1, b + 1):
if b % i == 0:
print(i)
res.append(i)
return res
|
61a2d8dc3727eed32752ac6dbd58ac74fdff9d67
| 46,721
|
def rename_dict_keys(input_dict, key_sets):
"""Renames the keys in a dictionary
Parameters
----------
input_dict : dict
Dictionary for which to change the keys
key_sets : list
list of tuples of the format `(old_key, new_key)`
Returns
-------
dict
Copy of `input_dict` with old keys subbed for new keys
"""
output_dict = input_dict.copy()
for old_key, new_key in key_sets:
output_dict[new_key] = output_dict.pop(old_key, None)
return output_dict
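# Illustrative (not part of the original source):
#   rename_dict_keys({'a': 1, 'b': 2}, [('a', 'alpha')]) -> {'b': 2, 'alpha': 1}
# Note that a missing old key results in the new key being set to None.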
|
ebfe7eb12c16d8e9ba2d8f9f4c5b1d7a9b0f4716
| 46,722
|
def get_img(item):
"""
Get img data from item.
:param item:
:return:
"""
return item
|
8244f271af81140dc28822f8f59f6c23e9073958
| 46,724
|
def build_caching_info_message(
job_spec, job_id, workflow_workspace, workflow_json, result_path
):
"""Build the caching info message with correct formatting."""
caching_info_message = {
"job_spec": job_spec,
"job_id": job_id,
"workflow_workspace": workflow_workspace,
"workflow_json": workflow_json,
"result_path": result_path,
}
return caching_info_message
|
6573ca89698390ebb1d54e913ba1ba0a35b0566d
| 46,730
|
def import_object(name):
"""
Import an object from a module, by name.
:param name: The object name, in the ``package.module:name`` format.
:return: The imported object
"""
if name.count(':') != 1:
raise ValueError("Invalid object name: {0!r}. "
"Expected format: '<module>:<name>'."
.format(name))
module_name, class_name = name.split(':')
module = __import__(module_name, fromlist=[class_name])
return getattr(module, class_name)
|
7822570779519954f2e06c5451c704fd905eb48a
| 46,738
|
def buildEdgeDict(faces):
"""
Arguments:
faces ([[vIdx, ...], ...]): A face representation
Returns:
        {vIdx: set([vIdx, ...])}: A dictionary keyed by vert index whose
            values are the sets of vertex indices adjacent to it along an edge
"""
edgeDict = {}
for face in faces:
for f in range(len(face)):
ff = edgeDict.setdefault(face[f-1], set())
ff.add(face[f])
ff.add(face[f-2])
return edgeDict
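# Illustrative (not part of the original source): for a single quad face, each vertex maps
# to the set of its two neighbours along the face boundary.
#   buildEdgeDict([[0, 1, 2, 3]])[0] -> {1, 3}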
|
d11c803d954087815362ee3ede910d894b473c1c
| 46,740
|
import inspect
import weakref
import functools
def weakref_cache(func):
"""
(This cache is designed for the functions in the ``introspection.typing.introspection`` module.)
Caches a function's return values based on the first argument. The cached
input values are weakly referenced.
Example::
@weakref_cache
        def demo_func(foo, bar):
            ...
    Here, every call to ``demo_func`` with a new value for ``foo`` would be cached.
The values passed to ``bar`` are completely ignored::
>>> demo_func(int, 'bar') # first call with foo=int, result is cached
<some output>
>>> demo_func(int, None) # second call with foo=int, cached result is returned
<the same output as before>
If an input value can't be hashed or weakly referenced, the result of that call
is not cached.
"""
sig = inspect.signature(func)
cache_param = next(iter(sig.parameters))
cache = weakref.WeakKeyDictionary()
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = sig.bind(*args, **kwargs)
cache_key = bound_args.arguments[cache_param]
try:
return cache[cache_key]
except (KeyError, TypeError):
pass
result = func(*args, **kwargs)
# If the cache_key isn't hashable, we simply won't cache this result
try:
cache[cache_key] = result
except TypeError:
pass
return result
return wrapper
|
b5c4bd2ed00fd7f0d4ebeaf1dd8510665f59ffba
| 46,746
|
from typing import Any
def get_from_dict(dct: dict, key: tuple[str, ...]) -> Any:
"""Get value from dict using a multi-part key"""
data: Any = dct
for part in key:
assert isinstance(data, dict)
data = data[part]
return data
|
64366f80dd896f31561f1ace2b768aa36c8058ad
| 46,748
|
def digit(n, k, base):
"""
>>> digit(1234, 0, 10)
4
>>> digit(1234, 1, 10)
3
>>> digit(1234, 2, 10)
2
>>> digit(1234, 3, 10)
1
"""
return n // base**k % base
|
818c19539298c6ec05691a08ee1425ea800c5850
| 46,749
|
def majority(samples, ignore_none=True):
"""
Find the most frequent element in a list.
Arguments:
samples (list): Input list. Its elements must be hashable.
ignore_none (bool): If None is a valid value.
Returns:
object: The most frequent element in samples. Returns none if the input list is empty.
"""
freq_dict = {}
most_freq_ele = None
highest_freq = 0
for element in samples:
if ignore_none and element is None:
continue
if element not in freq_dict:
freq_dict[element] = 0
freq = freq_dict[element] + 1
freq_dict[element] = freq
if freq > highest_freq:
highest_freq = freq
most_freq_ele = element
return most_freq_ele
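# Illustrative (not part of the original source):
#   majority(['a', 'b', 'a', None, 'b', 'a']) -> 'a'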
|
5929ed5ab7c19a1a77ef8c4d4d7205e6181e53cc
| 46,753
|
import requests
from datetime import datetime
def commit_in_last_year(commits_url: str, headers: dict) -> str:
"""
11. Has there been a commit in the last year?
"""
r = requests.get(commits_url, headers=headers).json()
last_commit_date = r.get("commit").get("author").get("date")
last_commit_date = datetime.strptime(last_commit_date, "%Y-%m-%dT%H:%M:%SZ")
days_since_last_commit = (datetime.utcnow() - last_commit_date).days
if days_since_last_commit > 365:
message = f"[red]No. The last commit was {days_since_last_commit} days ago"
else:
message = f"[green]Yes. The last commit was on {datetime.strftime(last_commit_date, '%m-%d-%Y')} "
message += f"which was {days_since_last_commit} days ago"
return message
|
93af56c1b71dac407fac84a0ea659a27100c87d7
| 46,760
|
from typing import Callable
from typing import Any
import inspect
def ignore_input(inner: Callable[[], Any]) -> Callable:
"""Returns `inner` function ignoring the provided inputs.
>>> ignore_input(lambda: 0)(1)
0
"""
def ignore_and_run(*args, **kwargs):
return inner()
async def ignore_and_run_async(*args, **kwargs):
return await inner()
if inspect.iscoroutinefunction(inner):
return ignore_and_run_async
return ignore_and_run
|
9fd11556c0dcfcd045dc73027eea9dae9f034d40
| 46,762
|
import string
import random
def gen_random_str(length=32):
"""
Generate random string (letters+numbers)
Args:
length: string length (default: 32)
"""
    symbols = string.ascii_letters + string.digits
return ''.join(random.choice(symbols) for i in range(length))
|
9d7244a747c09455de0b7d9c3858022fcecf13af
| 46,768
|
def extract_source_phrase(line):
"""Extract the source phrase from an extract-file line."""
return line.split(b'|||', 1)[0]
|
a1fe16c9bace30ab110920080d1b6eed97803d28
| 46,769
|
import re
def parse_fasta_header(line):
"""
Returns gene_name, [(start, end), ..], strand for a given fasta header line.
>>> parse_fasta_header(">lcl|NC_000913.2_cdsid_NP_417358.2 [gene=xanQ] [protein=xanthine permease] [protein_id=NP_417358.2] [location=3022373..3023773]")
('xanQ', [(3022373, 3023773)], '+')
>>> parse_fasta_header(">lcl|NC_000913.2_cdsid_NP_414616.1 [gene=leuA] [protein=2-isopropylmalate synthase] [protein_id=NP_414616.1] [location=complement(81958..83529)]")
('leuA', [(81958, 83529)], '-')
>>> parse_fasta_header(">lcl|NC_000913.2_cdsid_NP_417367.1 [gene=prfB] [protein=peptide chain release factor RF-2] [protein_id=NP_417367.1] [location=complement(join(3033206..3034228,3034230..3034304))]")
('prfB', [(3033206, 3034228), (3034230, 3034304)], '-')
"""
# Regular expressions to match id and location
    #exp_id = re.compile(r"\[protein_id=([a-zA-Z0-9_\.]+)\]")
    exp_id = re.compile(r"\[gene=([a-zA-Z0-9]+)\]")
    exp_loc = re.compile(r"\[location=([a-zA-Z0-9_\.(),]+)\]")
positions = []
strand = '+'
protein_id = None
m = exp_id.search(line)
if m:
protein_id = m.group(1)
start, end = None, None
m = exp_loc.search(line)
if m:
loc_str = m.group(1)
if loc_str.startswith("complement"):
strand = '-'
loc_str = loc_str[11:-1]
if loc_str.startswith("join"):
loc_str = loc_str[5:-1]
for pair in loc_str.split(","):
start, end = map(int, pair.split(".."))
positions.append((start, end))
else:
start, end = map(int, loc_str.split(".."))
positions.append((start, end))
return protein_id, positions, strand
|
d22648282247b30ef871e85602a4874e33ca4f10
| 46,772
|
def mvw_standard(prices,
weight_bounds=(0.,1.),
rf = 0.,
options = None):
"""
Calculates the mean-variance weights given a DataFrame of returns.
Wraps mean_var_weights with standard covariance calculation method
Args:
* prices (DataFrame): Prices for multiple securities.
* weight_bounds ((low, high)): Weigh limits for optimization.
* rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
* options (dict): options for minimizing, e.g. {'maxiter': 10000 }
Returns:
Series {col_name: weight}
"""
r = prices.to_returns().dropna()
covar = r.cov()
return covar
|
98d0e2bee27984fd2229e39450050ac85e6a3e4f
| 46,778
|
def change_user_password(
self,
username: str,
current_password: str,
new_password: str,
) -> bool:
"""Update an existing user's password
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - user
- POST
- /users/{newUser}/password
:param username: Username for user
:type username: str
:param current_password: Current password for user
:type current_password: str
:param new_password: New password for user, Password must be at
least 8 characters long and contain the following items: upper
case letter, lower case letter, a number, a special character
:type new_password: str
:return: Returns True/False based on successful call
:rtype: bool
"""
data = {
"oldPassword": current_password,
"newPassword": new_password,
}
return self._post(
"/users/{}/password".format(username),
data=data,
expected_status=[204],
return_type="bool",
)
|
01c4d0d6fdb5592db96c44d37e6cb5eccad61803
| 46,779
|
def get_Di(M, A, Ds, S=None):
"""
Obtain the IFOV vector in GCI coordinates.
Parameters
----------
M : numpy.matrix
3x3 nadir-to-GCI rotation matrix M
A : numpy.matrix
3x3 Spacecraft Attitude Matrix
Ds : 1-dimensional array of floats
3-element IFOV vector in sensor coordinates
S : numpy.matrix or None
Optional 3x3 Sensor Alignment Matrix
Returns
-------
Di : 1-dimensional array of floats
3-element IFOV vector in GCI coordinates
"""
if S is None:
return M * A.T * Ds
else:
return M * A.T * S.T * Ds
|
95aa7582eafe12be434fb3377d5d2ddc6a0b3137
| 46,781
|
def check_troposphere(altitude: float=0.0) -> bool:
""" This function checks if the input altitude is in the Troposphere."""
    return -610.0 <= altitude <= 11000.0
|
bc3247a44358e8cf175ab8c06f11eaf7eae77a14
| 46,787
|
def last_dig(a: int, b: int, c: int) -> bool:
"""Determine if last digit of c equals last digit of a * b."""
# Make list of list digits of a,b,c.
last_digits = [digit % 10 for digit in [a, b, c, ]]
# Calculate a*b and find the last digit.
ab_last_digit = (last_digits[0] * last_digits[1]) % 10
# Assign last digit of c to variable.
c_last_digit = last_digits[2]
# Compare ab_last_digit to c_last_digit and return.
return ab_last_digit == c_last_digit
|
b95a3eed84b776b760071b10d1d1557a1fbfcd43
| 46,791
|
import math
def safeArgs(args):
"""Iterate over valid, finite values in an iterable.
Skip any items that are None, NaN, or infinite.
"""
return (arg for arg in args
if arg is not None and not math.isnan(arg) and not math.isinf(arg))
|
d455bcd0cef7e6a47d1e967f17ba0e2dd08c25f4
| 46,793
|
def grabKmer(seq, starti, k=9):
"""Grab the kmer from seq starting at position starti with length k
Return the gapped and non-gapped kmer
If seq[starti] is a gap then the non-gapped kmer is None.
If there are not enough non-gap AA to return after starti then it returns None
Parameters
----------
seq : str
Sequence from which peptide will be grabbed.
starti : int
Starting position of the kmer (zero-based indexing)
k : int
Length of the peptide to return.
Returns
-------
gapped : str
A k-length peptide starting at starti from seq.
nonGapped : str
A k-length peptide starting at starti from seq.
If seq[starti] is a gap then returns None.
If not then all gaps are removed before taking the k-length peptide
(if there aren't k AAs then return is None)"""
if not isinstance(starti, int):
starti = int(starti)
if (starti+k-1) <= (len(seq)-1) and starti >= 0:
tmp = seq[starti:]
full = tmp[:k]
if full[0] == '-':
return None, None
elif '-' in full:
ng = tmp.replace('-', '')
if len(ng) >= k:
ng = ng[:k]
else:
ng = None
else:
ng = full
return full, ng
else:
return None, None
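# Illustrative (hypothetical sequences, not from the original source):
#   grabKmer('AB-CDEFGHIJK', 0) -> ('AB-CDEFGH', 'ABCDEFGHI')
#   grabKmer('A-BCDEFGHIJ', 1)  -> (None, None)   # starting position is a gap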
|
10f2e135c27cf2986512017b2cf165520efae655
| 46,796
|
def _resolve_dotted_attribute(obj, attr):
"""Resolves a dotted attribute name to an object. Raises
an AttributeError if any attribute in the chain starts with a '_'.
"""
for i in attr.split('.'):
if i.startswith('_'):
raise AttributeError(
'attempt to access private attribute "%s"' % i
)
else:
obj = getattr(obj,i)
return obj
|
3463dfea3a617cd132df2d85e8003b1e320576cb
| 46,806
|
def repeat_selection(base_algorithm, sampler, min_success, num_tries):
"""
Repeat a set-returning selection algorithm `num_tries` times,
returning all elements that appear at least `min_success` times.
"""
results = {}
for _ in range(num_tries):
current = base_algorithm(sampler)
for item in current:
results.setdefault(item, 0)
results[item] += 1
final_value = []
for key in results:
if results[key] >= min_success:
final_value.append(key)
return set(final_value)
|
f25c292870ff0a50973f3259f3849cf81609e363
| 46,809
|
from typing import Callable
def compose2(f: Callable, g: Callable) -> Callable:
"""Compose two functions
"""
def h(*args, **kwargs):
return f(g(*args, **kwargs))
return h
|
de7e7da7192cee12bceafa2939810eecedffc72d
| 46,816
|
def _escape_strings(strings):
"""escape to squarebracket and doublequote.
>>> print(_escape_strings("hoge"))
hoge
>>> print(_escape_strings("[hoge"))
\\[hoge
>>> print(_escape_strings("hoge]"))
hoge\\]
>>> print(_escape_strings("[hoge]"))
\\[hoge\\]
>>> print(_escape_strings('[ho"ge]'))
\\[ho\\"ge\\]
"""
target_chars = '[]"`'
ret = []
for string in strings:
if string in target_chars:
string = "\\" + string
ret.append(string)
return "".join(ret)
|
e1a80def54cfe40da9634b5bbe7f157539a864d1
| 46,818
|
def strip_from_end(text, suffix):
"""
Strip a substring from the end of a string
Parameters
----------
text : str
The string to be evaluated
suffix : str
The suffix or substring to remove from the end of the text string
Returns
-------
str
A string with the substring removed if it was found at the end of the
string.
"""
if not text.endswith(suffix):
return text
return text[:len(text)-len(suffix)]
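# Illustrative (not part of the original source):
#   strip_from_end('report.csv', '.csv') -> 'report'
#   strip_from_end('report.csv', '.txt') -> 'report.csv'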
|
01227c75cee0fc153dcebc153dd89cc5ea35c1d4
| 46,819
|
import struct
def _convert_unsigned(data, fmt):
"""Convert data from signed to unsigned in bulk."""
num = len(data)
return struct.unpack(
"{}{}".format(num, fmt.upper()).encode("utf-8"),
struct.pack("{}{}".format(num, fmt).encode("utf-8"), *data)
)
|
b65fa5fb1c7243ff831e95961bcc6528c5c57aae
| 46,821
|
def remove_object(objs, label, none_val=0):
"""Remove object specified by id value"""
    objs[objs == label] = none_val
return True
|
c62df57aebc323f85f318db982cc4de795c30e9a
| 46,822
|