content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def _is_beneath(a, b):
"""Is 'a' beneath 'b'?
Args:
a : string (path)
b : string (path)
Returns:
bool: True if a is beneath (or equal to) b
"""
# Everything is beneath the top ("")
if (b==""):
return True
# If paths are both blank, they are equal
if (b=="" and a==""):
return True
A = a.split(".")
B = b.split(".")
# A is not beneath B if any part is not the same
while (len(A) > 0 and len(B) > 0):
if (A.pop(0) != B.pop(0)):
return False
# A is not beneath B if B is longer
if (len(B) > 0):
return False
return True | b895cf05ae3dbe4b2a2df1ee89d8722f84f3d092 | 114,967 |
def metadata_from_dict(key):
    """
    Should not raise unused-argument message because key is
    used inside comprehension dict
    """
    # NOTE: the comprehension target deliberately shadows the parameter
    # `key` -- the parameter is read once via `key.items()` before the
    # comprehension rebinds the name.  This function exists as a linter
    # test case; do not "fix" the shadowing.
    return {key: str(value) for key, value in key.items()} | 38b0651c7cf7264ce4195119934662b61802dabe | 114,968 |
def hex_to_tcp_state(hex):
    """Translate a TCP state hex code to a human readable name.

    :param hex: TCP status hex code (as a string), e.g. '01' or '0A'
    :return: string containing the human readable state
    :raises KeyError: for an unknown code
    """
    names = (
        'TCP_ESTABLISHED', 'TCP_SYN_SENT', 'TCP_SYN_RECV', 'TCP_FIN_WAIT1',
        'TCP_FIN_WAIT2', 'TCP_TIME_WAIT', 'TCP_CLOSE', 'TCP_CLOSE_WAIT',
        'TCP_LAST_ACK', 'TCP_LISTEN', 'TCP_CLOSING',
    )
    # Codes run '01'..'09', '0A', '0B' -- two hex digits, upper case.
    codes = ['0%X' % value for value in range(1, 12)]
    return dict(zip(codes, names))[hex]
import random
def roll(code: str) -> int:
    """Roll dice described by a classic dice code such as '3d6+2'."""
    # Split the count from the die size / bonus portion.
    count_part, rest = code.split('d')
    sign = 1
    if '+' in rest:
        size_part, bonus_part = rest.split('+')
    elif '-' in rest:
        size_part, bonus_part = rest.split('-')
        sign = -1
    else:
        size_part, bonus_part = rest, '0'
    die_size = int(size_part)
    total = sign * int(bonus_part)
    # Roll each die individually (same randint call order as before).
    for _ in range(int(count_part)):
        total += random.randint(1, die_size)
    return total
def _remove_quotes(value):
"""Removes quotes around a value.
Also strips the whitespace.
>>> _remove_quotes('"hello"')
'hello'
>>> _remove_quotes("'hello'")
'hello'
>>> _remove_quotes("hello")
'hello'
"""
return value.strip(" '\"") | e6b9fe3db777c17dba95df56525bcc405b098037 | 114,978 |
def reverse(text):
    """Return the string *text* in reverse order."""
    # Negative-step slice reverses without an explicit loop.
    return text[::-1]
import re
def read_csv(filename):
    """Read rows from a space- or comma-delimited text file.

    :param filename: path of the file
    :type filename: str
    :return: list of rows, each a list of string fields
    :rtype: List[List[str]]
    :raises ValueError: if the file contains no data rows
    """
    # Normalize commas (with optional surrounding spaces) and runs of
    # whitespace into a single space, then split on whitespace.
    delimiter = re.compile(r'(\s+|(\s*,\s*))')
    rows = []
    with open(filename, encoding='utf-8-sig') as handle:
        for raw_line in handle:
            fields = delimiter.sub(' ', raw_line).split()
            if fields:
                rows.append(fields)
    if not rows:
        raise ValueError('The file is empty')
    return rows
def is_valid_trajectory(traj):
    """ Checks if trajectory format is compatible with Cpptraj """
    # Set membership gives O(1) lookup over the known format names.
    valid_formats = {
        'mdcrd', 'crd', 'cdf', 'netcdf', 'restart', 'ncrestart',
        'restartnc', 'dcd', 'charmm', 'cor', 'pdb', 'mol2', 'trr',
        'gro', 'binpos', 'xtc', 'cif', 'arc', 'sqm', 'sdf', 'conflib',
    }
    return traj in valid_formats
def create_dictionary(all_groups, sub_keys):
    """
    Create nested dictionary - each group within a file and its properties.

    Parameters
    ----------
    all_groups : List, groups within .h5 file.
    sub_keys : List, keys for each group within .h5 file.

    Returns
    -------
    dictionary : Dictionary in format: {Group1: [key, Key ...], Group2: [Key, Key ...]}.
    """
    dictionary = {}
    for position, group in enumerate(all_groups):
        # Copy the keys for this group into a fresh list.
        dictionary[group] = [key for key in sub_keys[position]]
    return dictionary
def reverse_lookup(data):
    """Construct an index reverse lookup map from an iterable."""
    mapping = {}
    for index, datum in enumerate(data):
        mapping[datum] = index
    return mapping
def aneuploidy_code_for_chr_count(chr_count):
    """ Output a letter code based on chromosome copy number (0,1,2,3,4)"""
    # 0/4 -> 'N', 1/3 -> 'U', 2 -> 'B'; any other count raises KeyError.
    codes = dict(zip(range(5), "NUBUN"))
    return codes[chr_count]
def _parse_config(filename):
"""Return a dict with (name, value) items for the given I2P configuration file."""
f = open(filename, 'r')
s = f.read()
f.close()
ans = {}
for line in s.split('\n'):
line = line.strip()
if '#' in line: line = line[:line.find('#')]
pair = line.split('=')
if len(pair) == 2:
ans[pair[0].strip()] = pair[1].strip()
return ans | 27811ee035502d5b53abd2c27e36997d06ef09cb | 114,998 |
import re
def time_str(adatetime):
    """
    Return the format of "at hour:minute [AP]M",
    where the hour doesn't have a leading zero.
    """
    formatted = adatetime.strftime("%I:%M %p")
    # %I is always two digits, so at most one leading zero to drop.
    return formatted[1:] if formatted.startswith('0') else formatted
def scope_name_for_color(foreground, background):
    """Returns a name of Sublime scope that has the specified color."""
    # Colors are encoded as zero-padded 6-digit upper-case hex.
    return "lisp_highlight.{:06X}.{:06X}".format(foreground, background)
def solution(x: int, a: list) -> int:
    """
    Finds the earliest time at which a frog can cross a river.

    :param x: Number of positions a leaf can fall.
    :param a: A list of positions (a[t] is where a leaf lands at time t).
    :returns: The time which a frog can get from position 0 to position X+1
        or -1 if it can never reach it.

    >>> solution(5, [1, 3, 1, 4, 2, 3, 5, 4])
    6
    """
    missing = set(range(1, x + 1))
    for moment, leaf in enumerate(a):
        missing.discard(leaf)
        if not missing:
            # Every position 1..x is now covered.
            return moment
    return -1
import re
def sanitize_artist_name(name):
    """
    Remove parenthetical number disambiguation from artist names,
    as well as trailing asterisks (Discogs-style suffixes).
    """
    # First drop a trailing " (N)" numeric tag, then trailing '*'s.
    without_number = re.sub(r" \(\d+\)$", "", name)
    return re.sub(r"\*+$", "", without_number)
def chunk(data:list, N:int):
    """
    Chunk a list into a list of lists of size N.

    :param data: list to chunk
    :param N: Chunk size
    :return: Chunked list (list of list); the last piece may be shorter.
    """
    return [data[start:start + N] for start in range(0, len(data), N)]
def flag_name(status):
    """
    Determine the name for a flag file of the status indicated.

    :param str status: Name of status for which to create flag file name.
    :return str: Name of flag file corresponding to given status.
    """
    return "{}.flag".format(status)
def rivers_with_station(stations):
    """Return a sorted list of the distinct rivers that have a station.

    Each river appears exactly once.
    """
    return sorted({station.river for station in stations})
def make_pstage(block_class, num_blocks, first_stride, *, in_channels, out_channels, **kwargs):
    """
    Create a list of blocks just like those in a ResNet stage.

    Args:
        block_class (type): a subclass of ResNetBlockBase
        num_blocks (int): number of blocks to build
        first_stride (int): the stride of the first block. The other blocks will have stride=1.
        in_channels (int): input channels of the entire stage.
        out_channels (int): output channels of **every block** in the stage.
        kwargs: other arguments passed to the constructor of every block.

    Returns:
        list[nn.Module]: a list of block module.
    """
    assert "stride" not in kwargs, "Stride of blocks in make_stage cannot be changed."
    # Pre-compute the stride schedule: first block may downsample.
    strides = [first_stride if index == 0 else 1 for index in range(num_blocks)]
    blocks = []
    for stride in strides:
        block = block_class(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            **kwargs,
        )
        blocks.append(block)
        # After the first block the stage operates at out_channels.
        in_channels = out_channels
    return blocks
import six
import base64
def encode_as_bytes(s, encoding='utf-8'):
    """Encode a string using Base64.

    If *s* is a text string, first encode it to *encoding* (UTF-8 by default).

    :param s: bytes or text string to be encoded
    :param encoding: encoding used to encode *s* if it's a text string
    :returns: Base64 encoded byte string (bytes)

    Use encode_as_text() to get the Base64 encoded string as text.
    """
    # six.text_type is simply `str` on Python 3 -- the shim is unneeded.
    if isinstance(s, str):
        s = s.encode(encoding)
    return base64.b64encode(s)
def get_time_string(cube):
    """Return a climatological season string in the format: "year season"."""
    # Read the first point of each auxiliary coordinate.
    season = cube.coord('clim_season').points[0]
    year = int(cube.coord('year').points[0])
    return '{} {}'.format(year, season.upper())
def call_delete_outcome(frauddetector_client, outcome_name: str):
    """
    Call delete_outcome for a given outcome name with the given frauddetector client.

    :param frauddetector_client: boto3 frauddetector client to use to make the call
    :param outcome_name: name of the outcome to delete
    :return: the client response (success is a 200 with no body)
    """
    response = frauddetector_client.delete_outcome(name=outcome_name)
    return response
def queue_left(work):
    """
    Figure out how much work is left in the queue.

    :param work: mapping of name -> array-like with a ``size`` attribute
    :return: total of the ``size`` of every value
    """
    # Generator expression avoids materializing a throwaway list.
    return sum(arr.size for arr in work.values())
def deep_get(obj, keys):
    """
    Walk through a nested object to get a leaf value.

    There are cases where the use of inheritance or polymorphism -- the use
    of allOf or oneOf keywords -- will cause the obj to be a list.  In this
    case the keys will contain one or more strings containing integers.

    :type obj: list or dict
    :type keys: list of strings
    """
    # Iterative descent is equivalent to the tail recursion and avoids
    # recursion depth limits on very deep structures.
    for key in keys:
        if isinstance(obj, list):
            obj = obj[int(key)]
        else:
            obj = obj[key]
    return obj
from functools import reduce
from operator import truediv
def divide_all(n, ds):
    """Divide n by every d in ds.

    >>> divide_all(1024, [2, 4, 8])
    16.0
    >>> divide_all(1024, [2, 4, 0, 8])
    inf
    """
    result = n
    try:
        # Sequential left-to-right division, like reduce(truediv, ds, n).
        for divisor in ds:
            result = result / divisor
    except ZeroDivisionError:
        return float('inf')
    return result
def kelvin_to_celsius(kelvin):
    """Convert kelvin to celsius."""
    offset = 273.15
    return kelvin - offset
def unit_func(x):
    """Identity quantity function for histogrammar objects.

    :param x: value
    :returns: the same value
    """
    result = x
    return result
from datetime import datetime
def getDatetimeFromTimestamp( timestamp ):
    """Convert an Internet Archive timestamp (YYYYMMDDhhmmss) into a datetime object."""
    archive_format = "%Y%m%d%H%M%S"
    return datetime.strptime(timestamp, archive_format)
def typeset_latex_math(variable) -> str:
    """
    Returns `variable` as a string with LaTeX math typesetting.

    Args:
        variable: The python variable to be saved.

    Returns:
        str: String according to LaTeX typesetting.
    """
    # Duck-typed: a `.u` attribute marks a pint-style Quantity.
    # NOTE(review): the ':Lx' / ':L' format specs come from pint and
    # uncertainties respectively -- confirm against installed versions.
    has_unit = hasattr(variable, 'u')
    if has_unit:
        if str(variable.u) == 'dimensionless':
            suffix = None
        else:
            # r'\,' inserts a thin space between number and unit.
            suffix = r'\,' + '{:Lx}'.format(variable.u)
        value = variable.magnitude
    else:
        suffix = None
        value = variable
    # A `.std_dev` attribute marks an uncertainties-style value.
    has_error = hasattr(value, 'std_dev')
    if has_error:
        payload = '{:L}'.format(value)
    else:
        payload = str(value)
    if suffix is not None:
        if has_error:
            # Parenthesize "value +/- error" before attaching the unit.
            payload = '({}){}'.format(payload, suffix)
        else:
            payload += suffix
    return payload | c7638ba3818c44f3f1d25d85ee19f2153e361c57 | 115,042 |
import json
def extract_body(event):
    """Extract and parse the JSON body of the event this lambda handles.

    Arguments:
        event {dict} -- The event as received by this lambda handler
    Returns:
        dict -- JSON of the body, or None if the body cannot be found
    """
    if 'body' not in event:
        return None
    return json.loads(event['body'])
def train_model(model, scaled_train_images, train_labels):
    """
    Train *model* for 5 epochs on scaled_train_images and train_labels.

    Returns the training history, as returned by ``model.fit``.
    """
    return model.fit(scaled_train_images, train_labels, epochs=5)
import copy
def rearange(im):
    """
    Rearrange k-space so the (0, 0) component moves to the image center.

    Swaps diagonally-opposite quadrants; mutates *im* in place and
    returns it.
    """
    half_h, half_w = im.shape[0] // 2, im.shape[1] // 2
    # Snapshot the original so quadrant writes don't clobber reads.
    original = copy.deepcopy(im)
    im[:half_h, :half_w] = original[half_h:, half_w:]
    im[half_h:, half_w:] = original[:half_h, :half_w]
    im[half_h:, :half_w] = original[:half_h, half_w:]
    im[:half_h, half_w:] = original[half_h:, :half_w]
    return im
def get_continuation_tables_headers(
    cols_widths, index_name=None, space=2, max_width=1e100
):
    """
    returns column headers for continuation tables segmented to not exceed max_width

    Parameters
    ----------
    cols_widths : list
        [[col_name, length of longest string], ...]
    index_name : str
        column name of an index. This column included in all sub table headers.
    space : int
        how much white space between columns
    max_width : int
        maximum width

    Returns
    -------
    list of lists, each inner list is the column names for a subtable
    """
    width_map = dict(cols_widths)
    # Width reserved in every subtable for the repeated index column.
    index_width = 0 if index_name is None else width_map[index_name]
    # Every column must individually fit beside the index column.
    for name, width in width_map.items():
        if index_width + width > max_width:
            raise ValueError(
                f"{index_name}={index_width} + {name} width={width} > max_width={max_width}"
            )
    # Fast path: everything fits in a single table.
    # NOTE(review): this sum adds index_width once per column, which
    # overestimates the total width -- confirm this is intentional.
    if sum(v + space + index_width for _, v in cols_widths) < max_width:
        return [[l for l, _ in cols_widths]]
    headers = []
    curr = [index_name] if index_name is not None else []
    cum_sum = index_width
    for name, width in cols_widths:
        if name == index_name:
            continue
        cum_sum += space + width
        if cum_sum > max_width:
            # Current subtable is full; start a new one seeded with the
            # index column (if any) plus the column that overflowed.
            headers.append(curr)
            curr = [index_name, name] if index_name is not None else [name]
            cum_sum = index_width + space + width
            continue
        curr.append(name)
    headers.append(curr)
    return headers | 8b4f4256ecb3ceb55b0e95ba56741b222f0bd81e | 115,053 |
def IOU(A, B):
    """
    Compute the IOU (intersection over union) of two boxes.

    Args:
        box A = [xmin, ymin, xmax, ymax]
        box B = [xmin, ymin, xmax, ymax]
    Returns:
        The IOU value of box A and box B (0 when they do not overlap)
    """
    # NOTE(review): W uses indices 1/3 and H uses 0/2 -- with the
    # documented [xmin, ymin, xmax, ymax] layout the W/H names look
    # swapped, but the final ratio is unaffected because the areas
    # below use the same index convention consistently.
    W = min(A[3], B[3]) - max(A[1], B[1])
    H = min(A[2], B[2]) - max(A[0], B[0])
    if W <= 0 or H <= 0: # no intersection exists
        return 0
    A_area = (A[3] - A[1]) * (A[2] - A[0]) # area of box A
    B_area = (B[3] - B[1]) * (B[2] - B[0]) # area of box B
    cross_area = W * H # area of the intersection
    return cross_area / (A_area + B_area - cross_area) | 2b6a2a537563e15ffcd7bbb6d5adaa2b1c62a251 | 115,058 |
def test_name(request):
"""Returns module_name.function_name for a given test"""
return request.module.__name__ + '/' + request._parent_request._pyfuncitem.name | 0de99d15c6cbaa78115e7508e056e8086a46a196 | 115,060 |
from typing import Union
import pathlib
from pathlib import Path
def strip_ext(filepath: Union[str, pathlib.Path]) -> Union[str, pathlib.Path]:
    """
    Strip extension from a file.

    Parameters
    ----------
    filepath : str or pathlib.Path

    Returns
    -------
    str or pathlib.PosixPath
        `filepath` with suffix removed. type is equivalent to input.
    """
    stripped = Path(filepath).with_suffix('')
    # Preserve the caller's type: Path in -> Path out, str in -> str out.
    if isinstance(filepath, Path):
        return stripped
    return str(stripped)
def unzip(seq):
    """
    The inverse of the builtin ``zip`` function: transposes groups of
    elements into new groups composed of the elements at each index.

    Examples:
        >>> list(unzip([(1, 4, 7), (2, 5, 8), (3, 6, 9)]))
        [(1, 2, 3), (4, 5, 6), (7, 8, 9)]

    Args:
        seq (Iterable): Iterable to unzip.

    Yields:
        tuple: Each transposed group.
    """
    # Unpacking the rows into zip performs the transposition lazily.
    transposed = zip(*seq)
    return transposed
def w_pressure(depth):
    """ Calculate water pressure at different depths in kPa """
    # Hydrostatic term (rho * g * depth, Pa) plus 1 atm surface pressure,
    # converted from Pa to kPa.  Operation order matches the original.
    hydrostatic_pa = 1.03e3 * 9.8 * depth
    total_pa = hydrostatic_pa + 1.01e5
    return total_pa / 1000
def get_time(start_time, end_time):
    """Get elapsed time in minutes and seconds.

    Args:
        start_time (float): starting time
        end_time (float): ending time
    Returns:
        elapsed_mins (int): elapsed whole minutes
        elapsed_secs (int): remaining whole seconds
    """
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds
def reorder_to(l, order):
    """Returns a list, reordered to a specific ordering.

    :param l: the list to reorder. It is not modified.
    :param order: a list containing the new ordering,
        eg [2,1,0] to reverse a list of length 3
    """
    # Pair each element with its ordering key, sort by key, drop the key.
    # Returns an actual list -- the previous `map(...)` result was a lazy
    # iterator on Python 3, contradicting this docstring.
    return [element for _, element in sorted(zip(order, l), key=lambda pair: pair[0])]
def tree(node, string=''):
    """
    Recursive function that walks a Huffman tree and builds the code
    for every leaf character.

    :param node: internal node (exposing .children()) or a leaf str
    :param string: code prefix accumulated so far
    :return: dict mapping each character to its Huffman code
    """
    if type(node) is str:
        # Leaf: the accumulated prefix is this character's code.
        return {node: string}
    left, right = node.children()
    codes = {}
    codes.update(tree(left, string + '0'))
    codes.update(tree(right, string + '1'))
    return codes
def handle_mask(mask, tree):
    """Expand the mask to match the tree structure.

    :param mask: boolean mask (a single bool or a per-node sequence)
    :param tree: sized tree structure
    :return: boolean mask, one entry per tree node
    """
    # A non-scalar mask is assumed to already match the tree.
    if not isinstance(mask, bool):
        return mask
    return [mask] * len(tree)
def color_true_green(val):
    """CSS color rule: green when the value equals True, otherwise black.

    Ex: style_df = df.style.applymap(color_true_green)
    style_df  # to display
    """
    # Equality (not identity) check preserves the original behavior,
    # so 1 == True also maps to green.
    if val == True:
        chosen = 'green'
    else:
        chosen = 'black'
    return 'color: {}'.format(chosen)
from pathlib import Path
from typing import Union
from typing import Optional
def get_sample_fp(sample_dir: Path,
                  ckpt_path: Union[Path,str],
                  prefix: Optional[str] = None) -> Path:
    """Returns the file-path to the pickled sample generated from the
    model at the ckpt_path state."""
    ckpt = Path(ckpt_path) if isinstance(ckpt_path, str) else ckpt_path
    lead = f'{prefix}-' if prefix is not None else ''
    return sample_dir / f'{lead}{ckpt.stem}.pkl'
def transform_tags(tags:str):
    """Convert the tags field to a list, split on single spaces."""
    separator = " "
    return tags.split(separator)
def remove_last_occurence(s, to_remove):
    """Remove last occurence of :obj:`to_remove` from :obj:`s`."""
    # rpartition splits on the last occurrence (or yields ('', '', s)
    # when absent), matching rsplit(to_remove, 1) + join.
    before, _, after = s.rpartition(to_remove)
    return before + after
from typing import Callable
def getAction(fileName: str, actionName: str) -> Callable:
    """Load an action from a file inside the shock folder.

    Args:
        fileName (str): file that has the action.
        actionName (str): action that will be extracted from file.

    Returns:
        The requested action callable.
    """
    # __import__("shock.x") returns the top-level `shock` package, so the
    # submodule must be fetched off it by name before the attribute lookup.
    package = __import__("shock." + fileName)
    submodule = getattr(package, fileName)
    return getattr(submodule, actionName)
def zero_times(recognitions):
    """Set times to zero so recognitions can be easily compared in assertions."""
    for item in recognitions:
        item.recognize_seconds = 0
    return recognitions
import math
def rad_to_hms(rad):
    """ convert radians to sexagesimal degrees

    This function converts a positive angle in radians to a sexagesimal
    angle in degrees, minutes, and seconds.
    It works for positive angles (take care of the negative part
    yourself.

    INPUTS:
    rad        I  input angle in radians

    OUTPUTS:   deg min sec
    deg        O  degrees (integer)
    min        O  minutes (integer)
    sec        O  seconds (float)
    """
    # print('in rad_to_hms')
    M_RAD2DEG = 180.0/math.pi
    # Wrap negative angles into [0, 2*pi) despite the "positive only" doc.
    if rad < 0:
        rad = 2.0 * math.pi + rad
    # NOTE(review): the 24/360 factor converts degrees to *hours*, which
    # matches the hms name even though the variable is called 'deg'.
    d = math.fabs(rad) * M_RAD2DEG * 24.0 / 360.0
    # The tiny epsilons (2E-13, 1E-11) push values sitting just below an
    # integer boundary over it before truncation -- presumably to absorb
    # float round-off; confirm the magnitudes suit the expected inputs.
    deg = int(d+2E-13)
    m = (d-deg) * 60.0
    min = int(m+1E-11)
    sec = (m - min) * 60.0
    return deg, min, sec | 9f9f7a8c94a931242dd17b9797c5b08c73097452 | 115,114 |
def create_grid(width, height):
    """ Create a two-dimensional grid of zeros of the specified size. """
    grid = []
    for _row in range(height):
        # A fresh row list per iteration; ints are immutable so `*` is safe.
        grid.append([0] * width)
    return grid
def get_params(layer):
    """
    Return the parameters of a layer, for use when loading weights of one
    model into another.

    :param layer: Keras Layer object
    :returns: list of numpy arrays, one per variable in the layer
    """
    values = []
    for variable in layer.variables:
        values.append(variable.numpy())
    return values
def mapVal2Color(colorInt):
    """
    Maps an integer to a color.

    Args:
        colorInt: the integer value (1-7) of the color to map
    Returns:
        a string with the mapped color name (KeyError if unknown)
    """
    names = ("Green", "Red", "Blue", "Yellow", "White", "Purple", "Orange")
    # Keys run 1..7 in the same order as the tuple above.
    return dict(enumerate(names, start=1))[colorInt]
def unique(listCheck):
    """ Check that all elements of the list are unique.

    Elements are compared as tuples, so unhashable rows (e.g. lists) work.
    https://stackoverflow.com/a/5281641"""
    seen = set()
    for element in listCheck:
        key = tuple(element)
        if key in seen:
            return False
        seen.add(key)
    return True
from typing import Optional
def get_path_parameter(event, param_name: str, default_value: str = "") -> Optional[str]:
    """Returns the value for the specified path parameter or the 'default_value' if not found

    Args:
        event: the AWS Lambda event (usually Application Gateway event)
        param_name: the name of the path parameter
        default_value: the default value for the parameter

    Returns:
        the value in the event object of the specified path parameter
    """
    # Renamed from `map` to avoid shadowing the builtin.
    params = event.get("pathParameters")
    if not params:
        return default_value
    return params.get(param_name, default_value)
def combine_lines(lines):
    """
    Combines the strings contained in a list into a single string.
    """
    # str.join is linear; the previous += loop was quadratic.
    return ''.join(lines)
def estimate_snow_and_rain(pptn, t_min, t_max, t_snow):
    """ Split precipitation grid into snowfall and rainfall components
    according to temperature. Assumes that temperature increases linearly
    from t_min to t_max and then decreases linearly again back to t_min in
    each 24hr period (i.e. a symmetrical triangular temperature profile).
    In this simplistic case, the proportion of the day above t_snow is:

        (t_max - t_snow)/(t_max - t_min)

    Args:
        pptn: Grid of total daily precipitation.
        t_min: Grid of minimum daily temperature.
        t_max: Grid of maximum daily temperature.
        t_snow: Threshold temperature below which precipitation falls as snow
            (float).

    Returns:
        List of arrays: [rain, snow]
    """
    # Calculate fraction of rain. There are three cases to consider:
    #     1. t_max > t_snow and t_min < t_snow. Use equation above.
    #     2. t_max <= t_snow. fr_rn = 0
    #     3. t_min > t_snow. fr_rn = 1
    # Calculate case 1
    # NOTE(review): cells with t_max == t_min divide by zero here; such
    # cells always fall into case 2 or 3 and get overwritten below, but
    # the division may still emit a runtime warning -- confirm acceptable.
    fr_rn = (t_max-t_snow)/(t_max-t_min)
    # Deal with case 2 (whole day at or below the snow threshold)
    fr_rn[t_max<=t_snow] = 0
    # Deal with case 3 (whole day above the snow threshold)
    fr_rn[t_min>t_snow] = 1
    # Calculate rain and snow
    rain = pptn*fr_rn
    snow = pptn*(1-fr_rn)
    return [rain, snow] | d9d96a8aedc4ff51c0f8c73f9407a7380c633381 | 115,127 |
import pathlib
def pathlib_dir(path):
    """Checks if a path is an existing directory.

    Parameters
    ----------
    path: str | pathlib.Path
        File system path to a directory (must exist)

    Returns
    -------
    path: pathlib.Path
        Same path as input coerced to a pathlib.Path object

    Raises
    ------
    ValueError
        If inputted path is not an existing directory
    """
    candidate = pathlib.Path(path)
    if not candidate.is_dir():
        raise ValueError("Specified argument is not a valid directory")
    return candidate
def moore_penrose_psuedoinverse(E):
    """Return the Moore-Penrose pseudoinverse of the given sigma matrix.

    Parameters
    ----------
    E: np.ndarray
        The sigma matrix returned after performing SVD on matrix W

    Returns
    -------
    np.ndarray
        Pseudoinverse: diagonal entries above 1e-6 are reciprocated,
        the rest zeroed, and the result transposed.
    """
    pinv = E.copy()
    diag_len = min(pinv.shape[0], pinv.shape[1])
    for idx in range(diag_len):
        entry = pinv[idx][idx]
        # Reciprocate only non-tiny singular values; the 1e-6 cutoff
        # (rather than an exact != 0 test) guards against blow-up.
        pinv[idx][idx] = (1 / entry) if entry > 1e-6 else 0
    return pinv.transpose()
import binascii
import re
def adapt(payload):
    """
    Convert hex string to its bytes representation. Whitespaces in the
    hex string are removed prior to conversion.

    Args:
        payload (bytes): hex digits, possibly containing whitespace.

    Returns:
        bytes: The payload in its bytes representation form.
    """
    # rb'\s' is a raw bytes pattern: b'\s' relies on an invalid escape
    # sequence that raises a DeprecationWarning on modern Pythons.
    cleaned = re.sub(rb'\s', b'', payload)
    return binascii.unhexlify(cleaned)
import json
def get_dictionary_from_json(filename):
    """ Returns dictionary parsed from a JSON file. """
    # `with` closes the handle even when json.load raises
    # (the original open/load/close leaked the handle on error).
    with open(filename, "r") as f:
        return json.load(f)
def reproject_prec(pre_df, new_crs='epsg:26910'):
    """Reproject to a CRS with units as meters and add area column (needed for pop density variable)

    Args:
        pre_df: GeoDataFrame to reproject.
        new_crs (str): desired reprojected CRS identifier.

    Returns:
        The reprojected GeoDataFrame with an added 'area_m' column.
    """
    # NOTE(review): the {'init': ...} CRS syntax is deprecated in recent
    # pyproj/geopandas -- newer code passes the CRS string directly.
    pre_df = pre_df.to_crs({'init': new_crs})
    # Geometry areas are in square meters once the CRS is metric.
    pre_df['area_m']=pre_df.geometry.area
    return(pre_df) | 7ce9525db18c466c063803ae83f818be00dc4684 | 115,142 |
def get_5d_string( number ):
    """
    Takes an integer < 100000 and returns its 5-character string form.

    If the number is < 10000, zeros are prepended to make the string
    5 characters long.
    """
    # Same per-digit extraction as before, expressed as a comprehension.
    digits = [str(int(number / (10 ** (4 - i))) % 10) for i in range(5)]
    return "".join(digits)
def check_columns(columns):
    """Helper function for columns input validation.

    :param columns: value to check (expected: a non-empty sized iterable)
    :rtype: bool
    """
    # Short-circuit keeps len() from being called on non-iterables.
    is_iterable = hasattr(columns, '__iter__')
    return bool(is_iterable and len(columns))
from pathlib import Path
def absolute_path_for(provided_path: Path, root_path: Path) -> Path:
    """
    Return an absolute path including root_path if provided path is a relative path, else return provided_path

    Parameters
    ----------
    provided_path : Path
        absolute or relative path
    root_path : Path
        absolute path a provided relative path will be added to

    Returns
    -------
    Path
        Absolute path if provided_path is relative and root_path is absolute. Else will return
        the provided_path which is an absolute path not on root_path.
    """
    if not provided_path.is_absolute():
        return Path(root_path, provided_path)
    return provided_path
def is_goal(state):
    """
    Returns True, if the given state is the goal state.

    state: a list representing the state to be checked
    """
    return state == [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
def dpbase(opval):
    """
    Parse and return opcode, sflag, Rn, Rd for a standard
    data-processing instruction word.
    """
    # Field layout: opcode @ bit 21 (4 bits), S flag @ 20 (1 bit),
    # Rn @ 16 (4 bits), Rd @ 12 (4 bits).
    fields = [(opval >> shift) & mask
              for shift, mask in ((21, 0xf), (20, 0x1), (16, 0xf), (12, 0xf))]
    ocode, sflag, Rn, Rd = fields
    return ocode, sflag, Rn, Rd
from typing import Dict
from typing import Any
from typing import List
def form_mapping_using_dictionary(label_bundles: Dict[Any, List[Any]]) -> Dict[Any, Any]:
    """
    Form a mapping from bundles shaped like "parent": ["child1", "child2", ...].

    label_bundles: the bundles; every child maps to its parent and every
    parent maps to itself.

    Returns
    ----------
    A mapping of type `Dict[Any, Any]` usable as `f[x] = y` later on.
    """
    mapping = {}
    for parent, children in label_bundles.items():
        mapping.update({child: parent for child in children})
        mapping[parent] = parent
    return mapping
import math
def mutual_information(co_freq, s_freq, t_freq, total_instances, mitype=None):
    """Compute mutual information on a pair of terms.

    Parameters
    ----------
    co_freq : int
        Co-occurrence frequency of s & t
    s_freq : int
        Occurrence frequency of s
    t_freq : int
        Occurrence frequency of t
    total_instances : int
        Total number of all unique instances of the occurrence factor (for
        example, the total number of all scientific articles in the dataset).
    mitype : str, optional
        Mutual information score type. Possible types 'expected', 'normalized',
        'pmi2', 'pmi3', by default, no normalization is applied (i.e. positive
        pointwise mutual information is computed).
    """
    # Zero co-occurrence short-circuits to 0; log2 would be undefined.
    if co_freq > 0:
        if mitype is not None:
            if mitype == "expected":
                # PMI weighted by the joint probability.
                mi = math.log2(
                    (total_instances * co_freq) / (s_freq * t_freq)
                ) * (co_freq / total_instances)
            elif mitype == "normalized":
                # NPMI: PMI divided by -log2 of the joint probability.
                alpha = - math.log2(co_freq / total_instances)
                mi = (
                    (math.log2(
                        (total_instances * co_freq) / (s_freq * t_freq)) / alpha)
                    if alpha != 0 else 0
                )
            elif mitype == "pmi2":
                mi = math.log2((co_freq ** 2) / (s_freq * t_freq))
            elif mitype == "pmi3":
                mi = math.log2(
                    (co_freq ** 3) / (s_freq * t_freq * total_instances))
            else:
                raise ValueError(
                    "Provided Mutual information score type (mitype) is not "
                    "supported. Provide one value from the following list "
                    "['expected', 'normalized','pmi2', 'pmi3'] ")
        else:
            # Default: plain PMI (clamped to positive below -> PPMI).
            mi = math.log2((total_instances * co_freq) / (s_freq * t_freq))
    else:
        mi = 0
    # Negative scores are clamped to 0 (the "positive" in PPMI).
    return mi if mi > 0 else 0 | c9e0276940850492f3df334ead9b9a8e53cc921d | 115,169 |
def qw(s):
    """
    Split a whitespace-separated string of pass names into a tuple.

    Examples:
        >>> qw("-correlated-propagation -scalarrepl -lowerinvoke")
        ('-correlated-propagation', '-scalarrepl', '-lowerinvoke')

    Args:
        s (str): list of all the possible passes, separated by whitespace.

    Returns:
        A tuple of strings where each element is a pass (used for
        optimization) from s.
    """
    parts = s.split()
    return tuple(parts)
def aggr_constrained_shp(unconstr_gdf, constr_gdf):
    """
    Returns identical aggregation for unconstrained dataframe to match that of contrained dataframe.

    Parameters:
    -----------
    unconstr_gdf : (gpd.GeoDataFrame)
        gdf of admin unit polygons with labels indicating how units will be aggregated BEFORE DISSOLVING (unconstr and constr should have the same number of rows)
    constr_gdf : (gpd.GeoDataFrame)
        gdf polygonised from admin units covering ONLY areas that are 'built'.

    Retruns:
    ---------
    constr_gdf : (gpd.GeoDataFrame)
        Unconstrained geodataframe aggregated to same level (as indicated by the constrained labels) as constrained geodataframe
    """
    constr_gdf = constr_gdf[['geometry']]
    # NOTE(review): the label transfer below assumes both frames have the
    # same length and that constr_gdf carries a default RangeIndex so the
    # pandas index-aligned assignment matches rows positionally -- verify.
    unconstr_gdf = unconstr_gdf.reset_index()
    constr_gdf['labels'] = unconstr_gdf['labels']
    constr_gdf['adm_id'] = constr_gdf['labels']
    constr_gdf = constr_gdf[['adm_id','geometry']]
    # Merge all geometries sharing an adm_id into one (multi)polygon.
    constr_gdf = constr_gdf.dissolve(by='adm_id')
    return constr_gdf | 9f291f47f0569f401529abbc4566631c205d0705 | 115,173 |
def _isA(self, elementClass, category = ''):
"""Return True if this element is an instance of the given subclass.
If a category string is specified, then both subclass and category
matches are required."""
if not isinstance(self, elementClass):
return False
if category and self.getCategory() != category:
return False
return True | f7effac555e56b98cfeb575935818b9c444db04a | 115,174 |
def getTagValue(fluiddb, about, path):
    """
    Get the value of a tag from an object with the given about tag.
    """
    tag = fluiddb.about[about][path]
    return tag.get()
def get_most_popular_annotations(ambiguous_entity, k=2):
    """
    Get the k most popular annotations for ambiguous_entity.

    Popularity is the number of corpus entries recorded per annotation.
    """
    counts = [(key, len(value))
              for key, value in ambiguous_entity.annotated_corpus.items()]
    # Stable in-place sort by descending count, same as sorted(..., reverse=True).
    counts.sort(key=lambda pair: pair[1], reverse=True)
    return [key for key, _ in counts[:k]]
def copy_model_params(model1, model2):
    """
    Copies model parameters from model1 to model2. Both must be the same
    model class.

    :param model1: source model
    :param model2: destination model
    :return: the value returned by ``model2.load_state_dict`` (not model2 itself)
    """
    source_state = model1.state_dict()
    return model2.load_state_dict(source_state)
from typing import Union
def axis_title(title: str, units: Union[str, None]=None) -> str:
    """Create a string for an axis title, with units italicized.

    :param str title: title of axis (will be title-cased)
    :param str units: units of axis (default: None)
    :returns: formatted axis title
    :rtype: str
    """
    formatted = title.title()
    if not units:
        return formatted
    unit_markup = r'$\mathit{{{}}}$'.format(units)
    return '{} ({})'.format(formatted, unit_markup)
def plfrompd(pd, trim=True):
    """
    Flatten a path dictionary into a plain list of paths by walking
    pd[source][dest] for every source/dest pair. When trim is True,
    self loops (source == dest) are skipped.
    """
    return [pd[src][dst]
            for src in pd
            for dst in pd[src]
            if not trim or src != dst]
def digit_arr(n):
    """
    Return the digits of the given number as a list of one-character strings.
    """
    return [char for char in str(n)]
def dag(X):
    """
    Return the complex conjugate transpose (Hermitian adjoint) of the
    numpy array X.
    """
    return X.T.conj()
def asi(tb85v, tb85h):
    """The ASI sea-ice concentration algorithm.

    :param tb85v: 85 GHz vertically polarised brightness temperature
    :param tb85h: 85 GHz horizontally polarised brightness temperature
    :return: ice concentration estimate
    """
    # Polarisation difference drives the retrieval.
    P = tb85v - tb85h
    # NOTE(review): the original tie-point constants (P0=47.0, P1=7.5)
    # were defined but never used; the polynomial coefficients below
    # presumably already encode them -- confirm against the method paper.
    """method coefficients:"""
    d3 = 1.64 / 100000.0
    d2 = -0.0016
    d1 = 0.0192
    d0 = 0.971
    """concentrations calculation:"""
    ct = d3 * P**3.0 + d2 * P**2.0 + d1 * P + d0
    return ct
import six
def safe_filename(filename, extension=None):
    """
    Return *filename* with FAT32-, NTFS- and HFS+-illegal characters removed.

    Byte strings are decoded as UTF-8, so the result is always text
    (the original docstring's claim that the datatype is preserved was
    wrong -- bytes in, str out). An optional *extension* is appended
    (and sanitised) as ".extension" before cleaning.

    >>> safe_filename('spam*?:.txt')
    'spam.txt'
    """
    # six.text_type is str on Python 3; drop the six dependency.
    if not isinstance(filename, str):
        filename = filename.decode('utf8')
    if extension is not None:
        filename = "{}.{}".format(filename, extension)
    # Characters illegal on at least one of FAT32 / NTFS / HFS+.
    unsafe_chars = ':*?"<>|/\\\r\n'
    for c in unsafe_chars:
        filename = filename.replace(c, '')
    return filename
import torch
def sparse_eye(size):
    """
    Return the ``size`` x ``size`` identity matrix as a sparse COO tensor.
    """
    # Diagonal entries: row index == column index for every position.
    indices = torch.arange(0, size).long().unsqueeze(0).expand(2, size)
    values = torch.ones(size)
    # torch.sparse_coo_tensor replaces the deprecated legacy
    # getattr(torch.sparse, <TensorTypeName>) constructor lookup.
    return torch.sparse_coo_tensor(indices, values, (size, size))
def UpdateDisplayName(unused_ref, args, request):
    """Update displayName.

    Args:
      unused_ref: unused.
      args: The argparse namespace.
      request: The request to modify.

    Returns:
      The updated request.
    """
    if args.IsSpecified('clear_display_name'):
        new_name = ''
    elif args.IsSpecified('display_name'):
        new_name = args.display_name
    else:
        # Neither flag given: leave the request untouched.
        return request
    request.group.displayName = new_name
    return request
def _reverse_elastic_sort(current_sort, valid_field_names):
"""
Reverse each item in current_sort, which is an elasticsearch sort order list.
(See https://www.elastic.co/guide/en/elasticsearch/reference/current/sort-search-results.html for format.)
This currently strips all sorting options except for "order". Otherwise we'd need an allowlist or signature on the cursor, since it's user-submitted data.
Example: 'asc' becomes 'desc', 'desc' becomes 'asc', plain 'id' becomes {'id': {'order': 'desc'}}:
>>> assert _reverse_elastic_sort([{'_score': {'order': 'desc'}}, {'date': {'order': 'asc'}}, 'id'], ['_score', 'date', 'id']) == \
[{'_score': {'order': 'asc'}}, {'date': {'order': 'desc'}}, {'id': {'order': 'desc'}}]
valid_field_names makes sure that invalid fields aren't passed in:
>>> assert _reverse_elastic_sort([{'date': {'order': 'asc'}}, {'bad': {'order': 'asc'}}], ['date']) == \
[{'date': {'order': 'desc'}}]
"""
new_sort = []
for item in current_sort:
if isinstance(item, str):
# inflate str-only fields like 'id' to dicts like {'id': {'order': 'asc'}}
item = {item: {"order": "asc"}}
try:
# get 'id' from {'id': {'order': 'asc'}}
field_name = next(iter(item))
# this would only happen if cursor was manually edited
if field_name not in valid_field_names:
continue
# update order
new_order = "desc" if item[field_name]["order"] == "asc" else "asc"
item = {field_name: {"order": new_order}}
except Exception:
raise TypeError("Unrecognized sort order: %s" % item)
new_sort.append(item)
return new_sort | 2a3f2fd2ebbc5dc1dc503e7186efb133c3adb096 | 115,208 |
from typing import List
from typing import Dict
def sub_menu_open(menu_entries: List[Dict[str, str]], active_route: str) -> bool:
    """
    Expose submenu method for jinja templates.

    :param menu_entries: list of menu entry dicts, each optionally
        carrying a 'route' key
    :type menu_entries: List
    :param active_route: current active flask route
    :type active_route: str
    :return: True if some entry's route equals active_route, False otherwise
    :rtype: bool
    """
    return any(
        'route' in entry and entry['route'] == active_route
        for entry in menu_entries
    )
from typing import Any
def ifnone(a: Any, b: Any) -> Any:
    """Return `a` unless it is None, in which case return `b`."""
    return b if a is None else a
import six
def query_builder(info, funcid):
    """Function builder for the query expression.

    Parameters
    ----------
    info: dict
        From the `query_parser()`; must provide 'args' (argument names)
        and 'source' (the expression the function returns).
    funcid: str
        The name for the function being generated.

    Returns
    -------
    func: a python function of the query
    """
    params = ', '.join(info['args'])
    source = 'def {funcid}({params}):\n    return {body}'.format(
        funcid=funcid, params=params, body=info['source'])
    namespace = {}
    # SECURITY: this executes caller-supplied source. `info` must come
    # from a trusted query parser, never from raw user input.
    # Builtin exec replaces six.exec_ (Python 3 only).
    exec(source, namespace)
    return namespace[funcid]
def compute_prev(before, block, idx, after):
    """
    Get the token just before the gap before block[idx], descending into
    nested lists. Falls back to *before* when the gap is at the start.
    """
    if idx == 0:
        return before
    candidate = block[idx - 1]
    if not isinstance(candidate, list):
        return candidate
    if candidate:
        # Non-empty list: its last element is the nearest token.
        return candidate[-1]
    # Empty list: keep scanning further left.
    return compute_prev(before, block, idx - 1, after)
from typing import List
def compose_regexs(regexs: List[str]) -> str:
    """Combine a list of regex patterns into a single pattern.

    The resulting pattern matches any one of the input patterns, joined
    with the alternation operator ``|``.
    """
    combined = '|'.join(regexs)
    return combined
import io
import re
def find_pattern(location, pattern):
    """
    Search the file at `location` for a regex `pattern` and return the
    stripped first capture group of the first match, or None when nothing
    matches. Reads the supplied location as UTF-8 text without importing it.

    Code inspired and heavily modified from:
    https://github.com/pyserial/pyserial/blob/d867871e6aa333014a77498b4ac96fdd1d3bf1d8/setup.py#L34
    SPDX-License-Identifier: BSD-3-Clause
    (C) 2001-2020 Chris Liechti <cliechti@gmx.net>
    """
    with io.open(location, encoding='utf8') as handle:
        text = handle.read()
    found = re.search(pattern, text)
    if found is None:
        return None
    return found.group(1).strip()
def check_mc(mc):
    """
    Returns proper status or error message based on modified count of MongoDB
    query.
    ---
    IN
        mc: result.modified_count (int)
    OUT
        status: message based on modified count (str)
    """
    if mc == 1:
        return " * Sample modified in DB"
    if mc == 0:
        return " * ERROR: No samples modified in DB"
    # Any other count is unexpected for a single-document update.
    return " * ERROR: Modified count of {}".format(mc)
def calc_growth_rate(tube):
    """The growth rate equation from Sultan 96.

    Parameters
    ----------
    tube : (FluxTube)
        flux tube object

    Attributes read from *tube*:
        sig_F     : flux tube integrated Pedersen cond., F region (mho)
        sig_total : flux tube integrated Pedersen cond., total (mho)
        V         : integrated vertical drift / drift at apex altitude (m/s)
        U         : integrated neutral wind perp. B in L direction (m/s)
        g         : gravity at apex altitude (m/s^2)
        nu_ef     : collision frequency (1/s)
        K         : altitude gradient in density (1/m)
        R         : subtracted offset term (presumably a recombination
                    rate -- confirm against Sultan 96)
    """
    conductivity_ratio = tube.sig_F / tube.sig_total
    forcing = tube.V - tube.U + tube.g / tube.nu_ef
    return conductivity_ratio * forcing * tube.K - tube.R
def group(data, num):
    """Split data into consecutive chunks of at most num chars each."""
    chunks = []
    for start in range(0, len(data), num):
        chunks.append(data[start:start + num])
    return chunks
def is_not_transferred(run_id, transfer_log):
    """Return True if run_id does not appear anywhere in the transfer log file."""
    with open(transfer_log, 'r') as log_file:
        contents = log_file.read()
    return run_id not in contents
from typing import Dict
from typing import Union
import math
def convert_nan_to_zero(input: Dict[str, Union[float, int]]) -> Dict:
    """Return a copy of *input* with every NaN value replaced by zero."""
    return {key: (0 if math.isnan(value) else value)
            for key, value in input.items()}
import binascii
def dehexlify(hx):
    """Revert human hexlification.

    Removes the spaces from a hex string and converts it to raw bytes,
    e.g. '61 62 04 63 65' becomes b'ab\\x04ce'.
    """
    compact = hx.replace(' ', '')
    return binascii.unhexlify(compact)
from pynput.mouse import Button
def to_button(value):
    """Convert value to a pynput mouse Button enum member.

    An existing Button is returned unchanged; anything else is matched
    by name, case-insensitively, with surrounding whitespace stripped
    and inner spaces turned into underscores. Raises ValueError for
    names that are not Button members.
    """
    # pylint: disable=C0415
    if isinstance(value, Button):
        return value
    normalized = str(value).lower().strip().replace(" ", "_")
    try:
        return Button[normalized]
    except KeyError as err:
        raise ValueError(f"Unknown mouse button: {value}") from err
import copy
def _merge_list_of_scalars(dst, src):
"""Merge list of scalars (add src first, then remaining unique dst)"""
dst_copy = copy.copy(dst)
src_set = set(src)
dst = copy.copy(src)
for val in dst_copy:
if val not in src_set:
dst.append(val)
return dst | b3350296cab9c712eac2c2003d870d05a16aea17 | 115,245 |
def get(amt):
    """
    Creates a quadratic easing interpolator. The amount of easing (curvature
    of the function) is controlled by ``amt``.

    -1 gives an *in* type interpolator, 1 gives an *out* type interpolator,
    0 just returns the :func:`linear` interpolator. Other values return
    something in between. Values outside *[-1, 1]* are clamped.

    :param float amt:
        The amount of easing for the function. A float in the range *[-1, 1]*.
    :return:
        An interpolating function.
    """
    eased = min(1, max(-1, amt))
    return lambda t: t * (1 + (1 - t) * eased)
def average(series):
    """
    Implements the arithmetic mean of a sequence from scratch, matching
    pandas ``Series.mean`` / ``numpy.mean`` for non-empty numeric input.

    Raises ZeroDivisionError on an empty sequence (pandas would return NaN).

    See numpy documentation for implementation details:
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html
    """
    # Removed the unreachable `pass` that followed the return statement.
    return sum(series) / len(series)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.