content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def strip_server(path):
    """Remove the ``root://<host>[:port]`` prefix from xrdfs output.

    Example input:
        root://polgrid4.in2p3.fr:1094//dpm/in2p3.fr/home/cms/trivcat/store/user/rembserj
    Output:
        /dpm/in2p3.fr/home/cms/trivcat/store/user/rembserj

    Returns None when the input does not match the expected pattern.
    """
    prefix = "root://"
    if not path.startswith(prefix):
        return None
    remainder = path[len(prefix):]
    separator = remainder.find("//")
    if separator == -1:
        return None
    # Keep one of the two slashes so the result is an absolute path.
    return remainder[separator + 1:]
|
91a9bbe327e0c0b165137ed3ff49be490f6373be
| 61,268
|
def rewrite_kwargs(conn_type, kwargs, module_name=None):
    """
    Manipulate connection keywords.
    Modifies keywords based on connection type.
    There is an assumption here that the client has
    already been created and that these keywords are being
    passed into methods for interacting with various services.
    Current modifications:
    - if conn_type is not cloud and module is not 'compute',
      then rewrite 'project' as 'name' (prefixed with 'projects/').
    - if conn_type is cloud and module is 'storage',
      then remove 'project' from dict.
    NOTE(review): the original docstring said the project->name rewrite
    applies when the module IS 'compute', but the condition below applies
    it when module_name != 'compute' -- confirm which is intended.
    :param conn_type: E.g. 'cloud' or 'general'
    :type conn_type: ``str``
    :param kwargs: Dictionary of keywords sent in by user (mutated in place).
    :type kwargs: ``dict``
    :param module_name: Name of specific module that will be loaded.
        Default is None.
    :type module_name: ``str`` or None
    :returns: kwargs with client and module specific changes
    :rtype: ``dict``
    """
    if conn_type != 'cloud' and module_name != 'compute':
        if 'project' in kwargs:
            kwargs['name'] = 'projects/%s' % kwargs.pop('project')
    if conn_type == 'cloud' and module_name == 'storage':
        if 'project' in kwargs:
            del kwargs['project']
    return kwargs
|
b5fc487a50f82e28dc00282f08951c759f19780d
| 61,269
|
def normalize_path(path):
    """Normalize a URL path to the internal canonical form:
    leading slash, no trailing slash; '' and '/' both map to '/'.
    """
    if path in ('', '/'):
        return '/'
    trimmed = path.rstrip('/')
    return trimmed if trimmed.startswith('/') else '/' + trimmed
|
53435e41f240a5f4f8d33f4a9a670eaa0d2c2cab
| 61,272
|
import requests
def get_last_tag(github_config, owner, repo):
    """Return the name of the most recent tag for ``owner/repo``.

    :param github_config: object with ``api_url`` and ``headers`` attributes
    :param owner: repository owner
    :param repo: repository name
    :returns: name of the newest tag, or None if the repo has no tags
        (the original raised an opaque IndexError in that case)
    :raises requests.HTTPError: on a non-2xx API response
    """
    tags_url = '/'.join([github_config.api_url, 'repos', owner, repo, 'tags'])
    tags_response = requests.get(tags_url, headers=github_config.headers)
    tags_response.raise_for_status()
    tags_json = tags_response.json()
    # The GitHub API returns tags newest-first; an empty list means no tags.
    return tags_json[0]['name'] if tags_json else None
|
706758b43e7fe1d0405241ca7c8e4cff71207591
| 61,273
|
def _cubic_spline_point(b_coeff, t):
"""
Internal convenience function for calculating
a k-dimensional point defined by the supplied
Bezier coefficients. Finds the point that
describes the current position along the bezier
segment for k dimensions.
params:
b_coeff => b0...b3: Four k-dimensional Bezier
coefficients each one is a numpy.array
of size k by 1, so
b_coeff is a numpy array of size k by 4
k is the number of dimensions for each
coefficient
t: percentage of time elapsed for this segment
0 <= int <= 1.0
returns:
current position in k dimensions
numpy.array of size 1 by k
"""
return (pow((1-t), 3)*b_coeff[:, 0] +
3*pow((1-t), 2)*t*b_coeff[:, 1] +
3*(1-t)*pow(t, 2)*b_coeff[:, 2] +
pow(t, 3)*b_coeff[:, 3]
)
|
1471743893fbb6874b39ca1fa7634a246b1f2aee
| 61,274
|
from typing import List
def _is_torso_visible_or_labeled(kp: List) -> bool:
"""
True if torso (left hip, right hip, left shoulder,
right shoulder) is visible else False
"""
if len(kp) != 51:
raise ValueError(
"keypoint list doesn't fit the format of "
"COCO human keypoints (17 keypoints)"
)
return (
(kp[17] == 1 or kp[17] == 2)
and (kp[20] == 1 or kp[20] == 2)
and (kp[41] == 1 or kp[41] == 2)
and (kp[38] == 1 or kp[38] == 2)
)
|
06bad20526b74db08daa22ddb6bf63b5394fb26d
| 61,284
|
def StringRotation(s1: str, s2: str) -> bool:
    """Return True when *s2* is a rotation of *s1*.

    A rotation has the same length and appears as a substring of *s1*
    concatenated with itself.

    >>> StringRotation("erbottlewat", "waterbottle")
    True
    >>> StringRotation("abc", "acb")
    False
    >>> StringRotation("", "")
    True
    >>> StringRotation("a", "")
    False
    """
    if len(s1) != len(s2):
        return False
    return s2 in s1 + s1
|
4d6c8bec9e72b92545cfa34804910466771ac158
| 61,291
|
import six
def quote(s, *args):
    """Return a URL-quoted version of *s*, accepting unicode input.

    :param s: string that should be quoted
    :param args: extra positional args forwarded to ``urllib.parse.quote``
        (e.g. the ``safe`` characters that should stay unquoted)
    """
    # ``six.moves.urllib.parse.quote`` is just ``urllib.parse.quote`` on
    # Python 3; the six shim is only needed for long-dead Python 2.
    # Encoding first keeps byte-level behaviour identical to the old code.
    from urllib.parse import quote as _urlquote
    return _urlquote(s.encode('utf8'), *args)
|
14b72b9231a2d53f242136cb002dc2d189eca1aa
| 61,297
|
def DescribeStory(story):
    """Return the first line of *story*'s docstring, or '' when absent."""
    doc = story.__doc__
    return doc.strip().splitlines()[0] if doc else ''
|
1b8dc5f7c2d108c035e7b6838b1608a511c5ba56
| 61,303
|
def margins_to_dict(margins):
    """Convert marginal distribution information into a dictionary.

    Parameters
    ----------
    margins : list of OpenTURNS distributions
        The marginal distributions of the input variables.

    Returns
    -------
    dict
        Maps each marginal's index to a dict describing its type,
        family and parameters.
    """
    result = {}
    for index, marginal in enumerate(margins):
        info = {}
        family = marginal.getName()
        parameters = list(marginal.getParameter())
        if family == 'TruncatedDistribution':
            info['Type'] = 'Truncated'
            # Record the truncation bounds, then describe the wrapped
            # distribution instead.
            info['Truncated Parameters'] = parameters
            inner = marginal.getDistribution()
            family = inner.getName()
            parameters = list(inner.getParameter())
        else:
            info['Type'] = 'Standard'
        info['Marginal Family'] = family
        info['Marginal Parameters'] = parameters
        result[index] = info
    return result
|
0efa7862d7c47744d016b923056b74c265c2316d
| 61,305
|
import math
def repeatedString(s, n):
    """
    Return the number of 'a' characters in the first *n* letters of the
    infinite string formed by repeating *s*.

    :param s: non-empty pattern string
    :param n: number of leading characters to consider
    :returns: count of 'a' in the first n characters
    :raises ZeroDivisionError: if s is empty (as before)
    """
    # Count whole repetitions in bulk, then the leftover prefix via a
    # slice -- replaces the manual character loop and the float-producing
    # math.floor division of the original.
    full_repeats, remainder = divmod(n, len(s))
    return full_repeats * s.count('a') + s[:remainder].count('a')
|
b35bf491ed532e5d70cf273ea3d30af0bb184ff4
| 61,310
|
from typing import Union
def _create_table_row(key: str, value: Union[str, int], highlight: bool = False) -> str:
    """
    Create an HTML table row for the stats panel.

    :param key: label rendered in the header cell
    :param value: value rendered in the data cell
    :param highlight: when True, render the row in red (#f00)
    :return: HTML ``<tr>`` fragment as a string
    """
    template_stats_data = """
    <tr style="border-bottom: 1px solid;">
        <th style="text-align: left">{key}</th>
        <td style="text-align: left">{value}</td>
    </tr>
    """
    # Identical markup, but with a red text color for highlighted rows.
    template_stats_data_red = """
    <tr style="color: #f00; border-bottom: 1px solid;">
        <th style="text-align: left">{key}</th>
        <td style="text-align: left">{value}</td>\
    </tr>
    """
    return (
        template_stats_data_red.format(key=key, value=value)
        if highlight
        else template_stats_data.format(key=key, value=value)
    )
|
362154b67151f9f6c1d996c1e9ee67cbcf8b7ea3
| 61,313
|
import random
def number(minimum=0, maximum=9999):
    """Return a pseudo-random integer drawn uniformly from
    [minimum, maximum], both ends inclusive."""
    low, high = minimum, maximum
    return random.randint(low, high)
|
b3f06823431cfaca549e3dc9544e68fd13171571
| 61,314
|
import torch
def topk_pool(x, k=32):
    """Keep the top-k values along the last axis.

    Args:
        x: tensor of shape (B, F, M, P)
        k: number of entries to keep
    Returns:
        tensor of shape (B, F, M, k)
    """
    top = torch.topk(x, k=k, dim=3)  # values sorted descending
    return torch.gather(x, dim=3, index=top.indices)
|
d3dcce9c94c9c8dbdf839b713718c8ac803bddec
| 61,321
|
import re
def uid_from_context(context):
    """Extract the first run of digits (the post uid) from a link.

    Returns '' when the link contains no digits.
    """
    match = re.search(r"[0-9]+", context)
    return match.group(0) if match else ""
|
471918ef387112094fc450a4bd4b924614e59171
| 61,325
|
from typing import List
from typing import Dict
import json
def get_movie_data(files: List[str]) -> List[Dict]:
    """Parse the given movie JSON files, returning dicts in input order."""
    parsed = []
    for path in files:
        with open(path, encoding="utf-8") as handle:
            parsed.append(json.load(handle))
    return parsed
|
14e27498b4153595d37e87140caba6a455ad6f99
| 61,328
|
def get_lines(row):
    """Build line segments from start/end coordinate columns.

    Output format: [((x1, y1), (x2, y2)), ...]

    :param row: mapping with x1/y1/x2/y2 coordinate sequences
    :return: list of ((x1, y1), (x2, y2)) tuples
    """
    start_points = tuple(zip(row["x1coordinate"], row["y1coordinate"]))
    end_points = tuple(zip(row["x2coordinate"], row["y2coordinate"]))
    return list(zip(start_points, end_points))
|
082f2b595cc7c6835c9ebf6f06f2c23cb7cb8a3e
| 61,329
|
from typing import Dict
def _add_sub_path(arg: Dict, sub_paths: Dict) -> Dict:
"""Add the sub_path field to an argument."""
name = arg['name'].replace('-', '_')
if name in sub_paths:
arg['sub_path'] = sub_paths[name]
return arg
|
3d0125ba8a64ea0b9bdb1a445fe2f3a297d226a4
| 61,330
|
import re
def camel_case_to_snake_case(camel_case):
    """Convert a camel-case string (e.g. SnakeCase) to snake case
    (e.g. snake_case).

    Args:
        camel_case (:obj:`str`): string in camel case
    Returns:
        :obj:`str`: string in snake case
    """
    # First break "Xy" boundaries preceded by any char, then lowercase/digit
    # followed by an uppercase letter; finally fold everything to lowercase.
    step_one = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', camel_case)
    step_two = re.sub('([a-z0-9])([A-Z])', r'\1_\2', step_one)
    return step_two.lower()
|
2d58755d6c3ad97a109a284ff90cd9a8be909a50
| 61,335
|
import torch
def reduce_loss(loss: torch.Tensor, reduction: str = "mean") -> torch.Tensor:
    """
    Reduce *loss* according to *reduction*.

    :param loss: loss tensor of any shape
    :param reduction: "mean", "sum", or "none"
    :returns: scalar tensor for "mean"/"sum"; the input unchanged for "none"
    :raises NotImplementedError: for any other *reduction* value
    """
    if reduction == "mean":
        return loss.mean()
    if reduction == "sum":
        return loss.sum()
    if reduction == "none":
        return loss
    # Previously raised a bare NotImplementedError with no context.
    raise NotImplementedError(f"Unsupported reduction: {reduction!r}")
|
4cfd19ecefeadd1a8208de6e5f848b7164e6c3d1
| 61,343
|
import re
def StripSingleLineComments(
    string: str, start_comment_re: str = "(#|//)",
) -> str:
    """Strip line comments from a string.

    Args:
        string: The string to strip the comments of.
        start_comment_re: Regular expression matching the start of a line
            comment. Defaults to Bash-style '#' and C-style '//'.
    Returns:
        The string with everything from each comment marker to the end of
        its line removed.
    """
    comment_pattern = re.compile(f"{start_comment_re}.*")
    stripped_lines = (comment_pattern.sub("", line) for line in string.split("\n"))
    return "\n".join(stripped_lines)
|
73670417a90d383051f93e5181c52e1d352751a9
| 61,347
|
def deduce_environment_variable(option_strings: list) -> str:
    """Derive the TID_* environment variable name for a parameter.

    The last "--"-prefixed option is taken as canonical, e.g.
    ["-q", "--query_ip", "--query_ipv4"] -> "TID_QUERY_IPV4".

    :param option_strings: list of options for this specific parameter
    :type option_strings: list
    :return: the generated environment variable name
    :rtype: str
    """
    long_options = [option for option in option_strings if option.startswith("--")]
    canonical = long_options[-1]
    return "TID_{}".format(canonical[2:].upper())
|
6a41f97fce32aedfe712683c106e47fc3973f295
| 61,350
|
import requests
def send_request(body, url, token):
    """
    Send a POST request to a SOS (Sensor Observation Service).

    :param body: body of the request, JSON-serializable
    :param token: Authorization token for an existing SOS
    :param url: URL of the endpoint where the SOS can be accessed
    :return: the ``requests.Response`` object (NOT parsed JSON -- call
        ``.json()`` on the result to decode the body)
    :raises requests.HTTPError: if the server responds with a 4xx/5xx status
    """
    # Add headers:
    headers = {'Authorization': str(token), 'Accept': 'application/json'}
    response = requests.post(url, headers=headers, json=body)
    response.raise_for_status()  # raise HTTP errors
    return response
|
ec092d7ebf7f8f2002795465f04804f19f094c90
| 61,351
|
import json
def convert_to_json(data):
    """Parse a JSON-formatted string into the corresponding Python object."""
    return json.loads(data)
|
d5993ea405ec8a59999706e52c887eb05b1336fe
| 61,355
|
import string
import random
def random_string(min_length: int = 1, max_length: int = 150):
    """
    Generate a random string of letters, digits and dashes.

    Args:
        min_length(int): minimum length of the generated string
        max_length(int): maximum length of the generated string
    Returns:
        str: a string of random characters
    """
    alphabet = string.ascii_letters + string.digits + "-"
    size = random.randint(min_length, max_length)
    return "".join(random.choice(alphabet) for _ in range(size))
|
0d52690322da2342e8288f1ca371a9f552314aaf
| 61,356
|
def is_using_reverse_process(input_shape):
    """Report whether the attention output holds two matrices (A_in, A_out)
    or a single one.

    Arguments:
        input_shape {tuple} -- shape of the attention layer output
    Returns:
        True for rank-4 shapes (batch, 2, N, N); False for rank-3
        shapes (batch, N, N).
    Raises:
        ValueError for any other rank.
    """
    rank = len(input_shape)
    if rank == 4:
        # (batch, 2, N, N): A_in and A_out stacked along axis 1.
        return True
    if rank == 3:
        # (batch, N, N): a single attention matrix.
        return False
    raise ValueError(f"Invalid attention shape {input_shape}")
|
885559ec584cb2e0a7dec8bcd0392a510187bc62
| 61,358
|
def nodelabel2line(label: str):
    """Given a node label, return the line number.

    Example:
        s = "METHOD_1.0: static long main()..."
        nodelabel2line(s)
        >>> '1.0'
    """
    try:
        return str(int(label))
    except ValueError:
        # Bare ``except:`` previously swallowed everything (including
        # KeyboardInterrupt); only a non-integer label is expected here.
        # Fall back to parsing labels like "METHOD_1.0: ..." -> "1.0".
        return label.split(":")[0].split("_")[-1]
|
41b87f3a9e7f4a628f818be96fe3767960837f29
| 61,360
|
import time
def getTimeLeft(to, start, position, end):
    """Estimate remaining loop time by linear extrapolation.

    :param to: wall-clock time (time.time()) when the loop started
    :param start: starting loop position
    :param position: current loop position (must differ from start)
    :param end: final loop position
    :return: estimated seconds remaining
    """
    remaining = end - position
    progressed = position - start
    return remaining * (time.time() - to) / progressed
|
acb285a4c6e691a620673754249963d806be3d61
| 61,363
|
def my_subreddits(client, status, limit):
    """Return the subreddits an account is associated with.

    client requires running accounts.user_login() first to start a
    user session.

    :param status: 'subscriber', 'moderator', or 'contributor'
    :param limit: max of 100
    :return: the 'data' payload of the JSON response

    > my_subs = my_subreddits(client, 'contributor', limit=25)
    """
    endpoint = f'http://www.reddit.com/subreddits/mine/{status}.json'
    response = client.get(endpoint, data={'limit': limit})
    return response.json()['data']
|
a6369110d8e84e644697f6536c896318eed1a4ad
| 61,364
|
def merge_parameter_lists(*parameter_definitions):
    """
    Merge multiple lists of parameters into a single collection, keyed by
    (name, in); on duplicate keys the last definition wins.

    Returns a dict-values view of the merged parameter dicts.
    """
    merged = {
        (parameter['name'], parameter['in']): parameter
        for parameter_list in parameter_definitions
        for parameter in parameter_list
    }
    return merged.values()
|
c5dd6521dcd0e3a3961a9d1438b34c6159149c1d
| 61,367
|
def parse_platform(platform):
    """Split a platform string into (OS name, architecture)."""
    assert '-' in platform
    # Platforms can have multiple hyphens, e.g. linux-cos5-64; only the
    # final piece is the bit-width, so split on the LAST hyphen.
    name, _, arch = platform.rpartition("-")
    return (name, arch)
|
7236a13edc29d4c5025461216e957a9d5f1ed451
| 61,369
|
def active_mode_french(raw_table, base_index):
    """Translate the mode value at raw_table[base_index] to a French label."""
    labels = {0: "Antigel", 2: "Nuit", 4: "Jour"}
    return labels.get(raw_table[base_index], "Inconnu")
|
9c3f491b6137ae817b137913002c3f4ae327bc9b
| 61,373
|
def name_is_special(method_name):
    """Return True when the name is dunder-shaped (starts AND ends with '__')."""
    has_prefix = method_name.startswith('__')
    has_suffix = method_name.endswith('__')
    return has_prefix and has_suffix
|
fecb067fa2564167af515522e57031735cdbae83
| 61,374
|
import re
def get_list_or_none(pattern, text):
    """
    Find *pattern* in *text* and split the first match on commas.

    For example a match of 'Slovenia, Croatia, Serbia' becomes
    ['Slovenia', ' Croatia', ' Serbia'] (whitespace is preserved).

    Parameters:
        pattern : regex pattern we are looking for
        text : string in which we search for a match

    Returns
        list of strings, or None if no match is found.
    """
    matches = re.findall(pattern, text)
    if not matches:
        return None
    return matches[0].split(',')
|
2442f8d2b22a897d5b9cc467f1f6745242410e97
| 61,381
|
def commit_to_the_database(p_my_connection):
    """Commit the pending changes on the given DB connection. Returns None."""
    p_my_connection.commit()
|
547bc92c237fcecaa663e301eb784b63d1284efb
| 61,384
|
def _op_is_ok_to_apply(i, n_local_discretes):
"""Returns whether applying a 7-discrete building_block with the ith discrete of the system
being the left-most of the 7, is okay, if there are n_local_discretes local
discretes in total. "Being okay" refers to being able to do the matrix product
without causing padding.
"""
return i + 7 <= n_local_discretes - 7
|
d0fe99ba5df1bb3c0aaaf8f5d5c9baf4b00063c6
| 61,385
|
def _get_discussion_styles(_helper_cfg):
"""This function defines (when present in the configuration) the enabled discussion styles in the environment.
:param _helper_cfg: The configuration parsed from the helper configuration file
:type _helper_cfg: dict
:returns: List of enabled discussion styles in the environment
"""
_discussion_styles = ['blog', 'contest', 'forum', 'idea', 'qanda', 'tkb']
if 'discussion_styles' in _helper_cfg:
if isinstance(_helper_cfg.get('discussion_styles'), list):
_discussion_styles = _helper_cfg.get('discussion_styles')
return _discussion_styles
|
131eb14e15f09ca84ac0b737448e5f0463cf52cd
| 61,387
|
def read_file(filename):
    """
    Read an Advent-of-Code style input file of dots and fold instructions.

    :param filename: input file; coordinate lines look like "6,10" and
        fold lines look like "fold along y=7"
    :return: tuple ``(paper, folds)`` where ``paper`` is a 2-D list of
        0/1 values (1 marks a dot, indexed [y][x]) sized to the largest
        coordinates seen, and ``folds`` is a list of [axis, value] pairs.
        (The original docstring said "list of dots" -- the dot list is
        converted to the grid before returning.)
    """
    dots = []
    folds = []
    max_x = 0
    max_y = 0
    with open(filename, 'r', encoding='UTF-8') as file:
        for line in file:
            if ',' in line:
                # Coordinate line: "x,y"
                x, y = line.strip().split(',')
                x = int(x)
                y = int(y)
                dots.append([x, y])
                max_x = max(max_x, x)
                max_y = max(max_y, y)
            if "=" in line:
                # Fold line: "fold along axis=value"
                _, _, statement = line.split(" ")
                axis, value = statement.split("=")
                folds.append([axis, int(value.strip())])
    # Grid dimensions are inclusive of the largest coordinate.
    max_x += 1
    max_y += 1
    paper = [[0] * max_x for _ in range(max_y)]
    for x, y in dots:
        paper[y][x] = 1
    return paper, folds
|
0670f3bdcd34a166d24a0742b1f9e13f22a5e005
| 61,388
|
def reverse_remap_to_dict(data: list):
    """
    Combine a list of dictionaries into one dict that maps each key to the
    list of its values across the input.

    Parameters
    ----------
    data (list): e.g. ``[{'a': 1}, {'a': 2}]`` -> ``{'a': [1, 2]}``

    Returns
    -------
    dict: key -> list of values; {} for empty input.
        Keys are taken from the FIRST record; a later record introducing a
        new key raises KeyError (unchanged from the original behaviour).
    """
    # The original used the confusing ``data[-0]`` (== data[0]) and crashed
    # with IndexError on an empty list.
    if not data:
        return {}
    items = {key: [] for key in data[0]}
    for record in data:
        for key, value in record.items():
            items[key].append(value)
    return items
|
33eb999fbe2ba3a350b53d5394b82cd311738e2f
| 61,389
|
def listify(items):
    """Wrap each element of *items* in its own single-element list.

    Example:
        Input: [a, b, c]
        Output: [[a], [b], [c]]
    This is needed for tabulate to print rows [a], [b], and [c].

    Args:
        * items: A list to listify.
    Returns:
        A list of one-element lists.
    """
    return [[element] for element in items]
|
07fd8597d9fe2524b74810fcc85d60b4f05017f3
| 61,393
|
def abbrev_prompt(msg: str, *options: str) -> str:
    """
    Prompt the user to input one of several options, which can be entered as
    either a whole word or the first letter of a word. All input is handled
    case-insensitively. Returns the complete word corresponding to the input,
    lowercased.
    For example, ``abbrev_prompt("Delete assets?", "yes", "no", "list")``
    prompts the user with the message ``Delete assets? ([y]es/[n]o/[l]ist): ``
    and accepts as input ``y`, ``yes``, ``n``, ``no``, ``l``, and ``list``.

    :raises ValueError: if two options (or their first letters) collide
    """
    # Maps every accepted input form (full word and first letter) to the
    # canonical lowercased option.
    options_map = {}
    optstrs = []
    for opt in options:
        opt = opt.lower()
        if opt in options_map:
            raise ValueError(f"Repeated option: {opt}")
        elif opt[0] in options_map:
            raise ValueError(f"Repeated abbreviated option: {opt[0]}")
        options_map[opt] = opt
        options_map[opt[0]] = opt
        optstrs.append(f"[{opt[0]}]{opt[1:]}")
    msg += " (" + "/".join(optstrs) + "): "
    # Re-prompt until the user types a recognized word or abbreviation.
    while True:
        answer = input(msg).lower()
        if answer in options_map:
            return options_map[answer]
|
bf3702f9254a8fe1f5879ecff987a9fb3d9ce81c
| 61,394
|
def validate(data, required_fields):
    """Return True when every field in *required_fields* is present in *data*."""
    return all(field in data for field in required_fields)
|
bd53351007bd1de3934b0a5245437c3889091573
| 61,397
|
def next_pos_from_state(state):
    """Compute (without setting) the puck's next position.

    Advances the current position by speed * delta_t on each axis.

    Returns:
        dict: {'x': ..., 'y': ...} coordinates
    """
    dt = state['delta_t']
    return {
        'x': state['puck_pos']['x'] + state['puck_speed']['x'] * dt,
        'y': state['puck_pos']['y'] + state['puck_speed']['y'] * dt,
    }
|
61eb3fef17706be5f15a41757580d12fada1f810
| 61,400
|
from pathlib import Path
def _private_key_verified(key_path):
"""
Verify private key exists
Arguments:
key_path (str):
Path to private key
Return:
boolean
"""
private_key = Path(key_path).expanduser()
return private_key.exists()
|
373bd69257b0612f26dc9f96c5c6355dbb9c9dca
| 61,404
|
def split_3d_array_into_channels(arr):
    """Split a 3D array into a list of 2D channel arrays.

    e.g. an array of shape (100, 200, 6) yields 6 channels of shape
    (100, 200) each.

    Args:
        arr: a 3D numpy array.
    Returns:
        list of 2D numpy arrays (views into *arr*).
    """
    channel_count = arr.shape[-1]
    return [arr[..., channel] for channel in range(channel_count)]
|
9c41ec61c41f386cbfecdeea355a2ddede083e55
| 61,409
|
def negative_grad(orig, grad):
    """Gradient rule for negation: propagate [-grad].

    *orig* is unused but kept for the gradient-registry call signature.
    """
    negated = -grad
    return [negated]
|
6ba15b92ae8f78d020a1df44c572b631efe9e6db
| 61,411
|
def _ParseAppServers(app_servers):
"""Parse the app servers for name and project id.
Args:
app_servers: list|str|, a list of strings defining the Google Cloud Project
IDs by friendly name.
Returns:
A dictionary with the friendly name as the key and the Google Cloud Project
ID as the value.
"""
return dict(server.split('=', 1) for server in app_servers)
|
7bd3bf9c62e7dd7737995a04d968ea1cec8c4005
| 61,413
|
def caseless_uniq(un_uniqed_files):
    """
    Given a list, return (unique_strings, duplicates).

    The comparison is caseless, so 'Test' and 'test' are duplicates; the
    first spelling encountered is kept as the unique one. Input order is
    preserved in both output lists.
    """
    seen = set()          # lowercased names already seen; O(1) membership
    unique_files = []     # guaranteed case-insensitively unique
    filtered = []         # any duplicates from the input
    for a_file in un_uniqed_files:
        folded = a_file.lower()
        if folded in seen:
            filtered.append(a_file)
        else:
            # Original used a list for membership tests -> O(n^2) overall.
            seen.add(folded)
            unique_files.append(a_file)
    return (unique_files, filtered)
|
6bbf93c5993ffd6c600847ecbcd0998820224c0a
| 61,414
|
def _string_to_float(string_list):
"""
Converts a list of string values to float values.
:param string_list:
:return:
"""
return map(float, string_list)
|
ffc2dcdb6319712e9b576e7ec9dbfa3a0eaaef53
| 61,417
|
def apply_scale_offset(scale, offset, value):
    """Apply offset then scale: returns (value + offset) / scale."""
    shifted = value + offset
    return shifted / scale
|
50850cb1a95dddfdcf127cc3993c9ac3ff9e748a
| 61,421
|
def force_zero_resource_area_constraint_rule(backend_model, node, tech):
    """
    Constrain resource_area to zero when energy_cap_max is zero
    (no energy_cap is possible, so no resource_area either).
    """
    constraint = backend_model.resource_area[node, tech] == 0
    return constraint
|
b7f97931f9eb21d7bc81248c109c95a21640b7a6
| 61,422
|
def float_to_str(source: float) -> str:
    """Return the string representation of *source*."""
    return f"{source}"
|
25044a627765c9a1742c5bc176e3811f4eef14ed
| 61,423
|
import math
def round_repeats(repeats: int, depth_multiplier: int) -> int:
    """Scale *repeats* by *depth_multiplier*, rounding up to an int.

    Determines how many times to repeat a block.
    """
    scaled = depth_multiplier * repeats
    return int(math.ceil(scaled))
|
160145c60d71de99905721d85ce85bbfb390cb59
| 61,424
|
import string
import click
def validate_bigip_name(ctx, param, value):
    """
    Validate an object name for a BIG-IP device. BIG-IP (mostly) rejects
    names with special characters, and slashes can cause path traversal,
    so only letters, digits and '._-' are allowed.
    """
    if not value:
        return None
    allowed = set(string.ascii_letters + string.digits + "._-")
    if any(character not in allowed for character in value):
        raise click.BadParameter("The requested object name is invalid")
    return value
|
46bffbb79d24ccd3b26bbf2a888bfedf7ac60264
| 61,427
|
def getJob(config, args):
    """Return the configured job whose name matches the command sent to
    Skelebot, or None when nothing matches (last match wins on duplicates)."""
    matches = [cfg_job for cfg_job in config.jobs if cfg_job.name == args.job]
    return matches[-1] if matches else None
|
08036ea0cc574a41580a7e4418ef2487b6b07f00
| 61,432
|
def edx_rtd_url(slug):
    """Build the ReadTheDocs URL for an edX book that doesn't branch per release."""
    return "https://edx.readthedocs.io/projects/{}/en/latest/".format(slug)
|
ebac4f30156d8bd2711a9262026d548b7faddbfe
| 61,433
|
def report_writer(md):
    """
    Build the text report for an experiment from its metadata.
    ----------
    md : dict
        Contains meta data from experiment file
    Returns
    -------
    message : string
        The text output for the report
    """
    sample = md["sample_meta_data"]
    exp = md["experiment_meta_data"]
    intro_line = (f"Report for {sample['sample_name']} experiment conducted "
                  f"on {sample['sample_date']}.\n\n")
    imp_line = (f"A {exp['impedance_mode']} measurement was made with a "
                f"{exp['pertubation_voltage']}mV pertubation voltage at "
                f"{exp['measurement_voltage']}V vs. {exp['vs']}.\n\n")
    range_line = (f"Experiment conducted from {exp['starting_frequency']}Hz "
                  f"to {exp['ending_frequency']}Hz with "
                  f"{exp['points_per_interval']} points {exp['interval_group']} "
                  f"using {exp['spacing']} spacing.\n\n")
    surface_line = (f"Sample has a surface area of "
                    f"{sample['sample_surface_area']}cm^2.")
    return intro_line + imp_line + range_line + surface_line
|
16d67de3ca6f858aeea1f1a652ab4946a6c60cc0
| 61,436
|
def flipMultiDict(eDict, fDict):
    """Make a dict keyed off the values of two dicts.

    Args:
        eDict, fDict: Two dictionaries whose values are iterables of
            hashable items.
    Returns:
        A dictionary of the form (frozenset(eValues), frozenset(fValues))
        -> list of keys mapping to those value sets.
    """
    inv = {}
    # ``dict.viewkeys()`` is Python 2 only and raises AttributeError on
    # Python 3, where ``keys()`` already returns a set-like view.
    all_keys = eDict.keys() | fDict.keys()
    for k in all_keys:
        e_val = frozenset(eDict.get(k, ()))
        f_val = frozenset(fDict.get(k, ()))
        inv.setdefault((e_val, f_val), []).append(k)
    return inv
|
bbe766d64861753cba44fb29737fa59d4eb38fb2
| 61,443
|
def cssname(value):
    """Replace every space with a dash so *value* is a valid CSS id."""
    return "-".join(value.split(" "))
|
d374d84482d062fde387a967f7d790985e87033c
| 61,445
|
import hashlib
def cat2hash(category):
    """
    Hash a category string so it can be used as a dict key, for example.

    :arg str category: a category string
    :returns: the hex MD5 digest of the UTF-8 encoded string
        (used as a stable identifier, not for security)
    """
    digest = hashlib.md5(category.encode('utf-8'))
    return digest.hexdigest()
|
66dd6c34f59e65ad82ae812edc577ec28b430f15
| 61,446
|
def example(a: int, b: int) -> int:
    """
    Return the sum of a and b, except when one or both are 0, in which
    case return 42.

    :param a: The first operand
    :param b: The second operand
    :returns: The conditional sum

    >>> example(1, 2)
    3
    >>> example(0, 4)
    42
    """
    if a == 0 or b == 0:
        return 42
    return a + b
|
aac4b49706753f40390277d9c19de08323e96799
| 61,449
|
def _read_csv_lines(path):
"""
Opens CSV file `path` and returns list of rows.
Pass output of this function to `csv.DictReader` for reading data.
"""
csv_file = open(path, "r")
csv_lines_raw = csv_file.readlines()
csv_lines_clean = [line for line in csv_lines_raw if len(line.strip()) > 0]
return csv_lines_clean
|
ab147d1e8e9fa7e4c41b96bed4c2a9373fb39c1c
| 61,450
|
def fix_dims_for_output(forecast_dataset):
    """Reshape the dimensions of a single forecast's dataset so that
    forecasts can be concatenated easily."""
    stacked = forecast_dataset.stack(
        {"forecast_label": ["forecast_year", "forecast_monthday"]}
    )
    expanded = stacked.expand_dims("forecast_time")
    return expanded.drop("forecast_label").squeeze("forecast_label")
|
0754937845ef3a31e0d96b5812903e3b10a8656d
| 61,453
|
import pwd
def system_user_exists(username):
    """Return True when *username* is a known system account (via pwd)."""
    try:
        pwd.getpwnam(username)
    except KeyError:
        return False
    else:
        return True
|
229c02197a15eece3c6118d73384be4e754372da
| 61,455
|
import io
def compact_decode(b: bytes) -> int:
    """Decode a Bitcoin-style compact int from the start of *b*.

    A marker byte below 0xfd is the value itself; 0xfd/0xfe/0xff mean the
    value follows as a 2/4/8-byte little-endian integer.
    """
    marker = b[0]
    if marker < 0xfd:
        return marker
    width = 2 ** (marker - 0xfc)
    return int.from_bytes(b[1:1 + width], 'little')
|
21cc236e8660077e74feaea7381a6f1798abf986
| 61,456
|
import zipfile
import tempfile
def training_model_archive(filepath):
    """
    Return a readable zip archive file object for a training model.

    If *filepath* already is a zip archive it is opened directly in binary
    mode; otherwise the file is wrapped in a new deflate-compressed
    temporary zip archive.

    :param filepath: path to the model file or existing zip archive
    :return: an open, readable file object positioned at the start; the
        caller must close it (closing the temporary file also deletes it)
    """
    if zipfile.is_zipfile(filepath):
        return open(filepath, 'rb')
    tmp_file = tempfile.NamedTemporaryFile(suffix='.zip')
    with zipfile.ZipFile(tmp_file.name, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:
        # NOTE(review): write() stores the entry under the full *filepath*
        # as its arcname -- confirm consumers expect that layout.
        new_zip.write(filepath)
    tmp_file.seek(0)
    return tmp_file
|
4d8dd1d6166c50d8c368cb34599126ab7d838333
| 61,459
|
def isblank(s):
    """Return True for strings that are empty or all whitespace;
    False otherwise (including any non-string input)."""
    return isinstance(s, str) and (not s or s.isspace())
|
1b61202a679fcf89bbbc7f7d1409b4715c824afa
| 61,461
|
def durationBucket(durationStr):
    """
    Return the category label for a call duration given in seconds
    (as a numeric string).
    """
    duration = int(float(durationStr))
    # (exclusive upper bound in seconds, label) pairs, ascending.
    buckets = (
        (60, "0 to 1 min"),
        (120, "1 min to 2 min"),
        (180, "2 min to 3 min"),
        (300, "3 min to 5 min"),
        (600, "5 min to 10 min"),
    )
    for upper_bound, label in buckets:
        if duration < upper_bound:
            return label
    return "over 10 min"
|
cc4d091303333d1f9a634cc47f4a5b508943c783
| 61,467
|
import random
def point1(parent1, parent2):
    """Basic single-point crossover for two list-like parents.

    Picks a random cut in [1, len(parent1) - 1] and swaps the tails.
    Returns [] when parent1 has fewer than two genes.
    """
    if len(parent1) < 2:
        return []
    genes_a, genes_b = list(parent1), list(parent2)
    cut = random.randint(1, len(genes_a) - 1)
    child_one = genes_a[:cut] + genes_b[cut:]
    child_two = genes_b[:cut] + genes_a[cut:]
    return [child_one, child_two]
|
455bcaddd42810762b0a805e069a02a55b48f144
| 61,473
|
def det(r1, r2, r3):
    """Determinant of the 3x3 matrix whose rows are r1, r2, r3.

    Args:
        r1: First row
        r2: Second row
        r3: Third row
    Returns:
        Determinant of [r1, r2, r3] via the rule of Sarrus. Functionally
        equivalent to jnp.linalg.det(jnp.array([r1, r2, r3])) but jits
        ~10x faster for large batches. The exact left-to-right summation
        order is preserved for bit-identical float results.
    """
    # Three "down-right" diagonal products, then the three "up-right" ones.
    return (r1[0] * r2[1] * r3[2]
            + r1[1] * r2[2] * r3[0]
            + r1[2] * r2[0] * r3[1]
            - r1[2] * r2[1] * r3[0]
            - r1[0] * r2[2] * r3[1]
            - r1[1] * r2[0] * r3[2])
|
9f445bbe9b0d60242096f649ca5868ed40ff2589
| 61,475
|
def _get_lim(q, qlim):
"""Gets the limits for a coordinate q."""
if qlim == 'range':
return min(q), max(q)
else:
return qlim
|
fe8bc53f110653403b0bd9726973ccde8ccb1e37
| 61,482
|
import tempfile
def create_tmp_f(content=None, mode="w"):
    """Create a temporary file that persists after closing.

    Parameters
    ----------
    mode : str, optional
        Write-mode [default: 'w']
    content : str, optional
        The content to write to the file.
        If `None` (or empty), leave the file empty.

    Returns
    -------
    str
        The path to the created file (caller is responsible for removal).
    """
    # ``with`` closes the handle even if write() raises; the original
    # leaked the descriptor on that error path.
    with tempfile.NamedTemporaryFile(mode=mode, delete=False) as f_out:
        if content:
            f_out.write(content)
    return f_out.name
|
128b855840648f8d795a4d1504d2dc12e291b336
| 61,484
|
def find_node(nodes, attribute, value):
    """Search among nodes by attribute.

    Args:
        nodes: list of nodes
        attribute: attribute name
        value: attribute value
    Returns:
        First matched node
    Raises:
        IndexError if unable to find the node
    """
    matches = [node for node in nodes if node.getAttribute(attribute) == value]
    return matches[0]
|
88b0d9e938112a9130af2952e664d05e92577382
| 61,487
|
def get_mixins_attrs(mixins):
    """
    Merge the attributes from all mixin classes into a single dict.

    Dunder attributes are excluded, except __unicode__ and __str__.
    Returns {} when *mixins* is empty or falsy.
    """
    if not mixins:
        return {}
    # Let Python's MRO resolve conflicts by synthesizing one combined class.
    combined = type('Mixin', tuple(mixins), {})
    kept_dunders = ('__unicode__', '__str__')
    return {
        name: getattr(combined, name)
        for name in dir(combined)
        if not name.startswith('__') or name in kept_dunders
    }
|
a207d4b65bf3f9a1399eccc0b3af32c336c9b0aa
| 61,488
|
def auth_headers(auth_token):
    """Build the HTTP headers for authenticated HipChat API requests.

    Kwargs:
        auth_token: valid API token (must not be None).
    Returns a dict suitable for the ``headers`` argument of requests.post.
    """
    assert auth_token is not None, (
        u"No valid HipChat authentication token found.")
    headers = {
        'Authorization': f'Bearer {auth_token}',
        'Host': 'api.hipchat.com',
        'Content-Type': 'application/json',
    }
    return headers
|
3f4598bd0b4d1d6021a53de5088e0dc854c16fe2
| 61,490
|
def fix_metadata(ds):
    """
    Fix known issues (from errata) with the metadata, in place.

    Returns the same dataset object with corrected branch-time attributes
    for the affected GFDL-CM4 experiments.
    """
    source = ds.attrs["source_id"]
    experiment = ds.attrs["experiment_id"]
    if source == "GFDL-CM4":
        # https://errata.es-doc.org/static/view.html?uid=2f6b5963-f87e-b2df-a5b0-2f12b6b68d32
        if experiment in ("1pctCO2", "abrupt-4xCO2", "historical"):
            ds.attrs["branch_time_in_parent"] = 91250
        # https://errata.es-doc.org/static/view.html?uid=61fb170e-91bb-4c64-8f1d-6f5e342ee421
        elif experiment in ("ssp245", "ssp585"):
            ds.attrs["branch_time_in_child"] = 60225
    return ds
|
9723436e90bf4b3a9c4955869a6751e74d97968d
| 61,492
|
def classname(obj):
    """
    Return the name of ``obj``'s class as a string.
    """
    return type(obj).__name__
|
636dbaf5d40fca66c3e0b5dbba6188e9b4830f4d
| 61,496
|
def get_database(conn, name):
    """Safely fetch an ArangoDB database, creating it when missing.

    Returns an instance of an Arango database. If the database does not
    exist a new one will be created.

    :type conn: Connection
    :type name: str
    :rtype: Database
    """
    # NOTE: an explicit `is False` comparison mirrors the driver's return
    # contract; any other value is treated as "database exists".
    exists = conn.hasDatabase(name)
    if exists is False:
        return conn.createDatabase(name)
    return conn[name]
|
a0910d5ef15162a273cd217d48e9289fb04e0228
| 61,500
|
def is_between(a, b, c):
    """
    Determine if point b lies on the line segment from point a to point c.

    :param a: One end of our line, as an (x, y) pair
    :param b: Point we are checking
    :param c: Other end of our line, as an (x, y) pair
    :return: Boolean: True if b is on the segment formed from a -> c, else False
    """
    # Vectors from a to b and from a to c.
    ab = (b[0] - a[0], b[1] - a[1])
    ac = (c[0] - a[0], c[1] - a[1])
    # b must be collinear with a and c: the cross product of ac and ab
    # is zero exactly when the three points lie on one line. (The original
    # implementation skipped this check, so off-line points such as (0, 5)
    # were reported as "between" (0, 0) and (10, 0).)
    cross = ac[0] * ab[1] - ac[1] * ab[0]
    if cross != 0:
        return False
    # The projection of b onto a->c must fall within the segment: the dot
    # product is compared against |ac|^2 (not |ab|^2 as before).
    dot = ab[0] * ac[0] + ab[1] * ac[1]
    if dot < 0:
        return False
    if dot > ac[0] * ac[0] + ac[1] * ac[1]:
        return False
    return True
|
b2a70ecaada50b664d5c830a4adca6d033244a80
| 61,505
|
import time
def datetime_to_ms(dt):
    """
    Convert a naive datetime object to milliseconds since the epoch.

    The datetime should be the local time you would expect to see on the
    client side; the SMC performs the timestamp conversion based on the
    query timezone.

    :return: value representing the datetime in milliseconds
    :rtype: int
    """
    seconds = time.mktime(dt.timetuple())
    return int(seconds * 1000)
|
af66e06b94f7e94414edc33a203f3c71b335a4b3
| 61,506
|
def relativeSize(bbox1, bbox2):
    """
    Calculate the relative size of bbox1 to bbox2.

    :return: (sx, sy) where bbox1_width = sx * bbox2_width, etc.
    """
    width_ratio = (bbox1[2] - bbox1[0]) / (bbox2[2] - bbox2[0])
    height_ratio = (bbox1[3] - bbox1[1]) / (bbox2[3] - bbox2[1])
    return (width_ratio, height_ratio)
|
e5477ad9921628470a78161ec46ed6f48531e144
| 61,509
|
def backend_name(setup_backend_pars):
    """The name of the currently tested backend (first element of the fixture value)."""
    name = setup_backend_pars[0]
    return name
|
c892c4719bc7c9ac94f7e52579829d5cd1003329
| 61,514
|
def number_of_words(input):
    """Return the number of whitespace-separated words in a string."""
    words = input.split()
    return len(words)
|
8b6ee07c7ab81fe68cbc79efbfdd519a3fa9ffbe
| 61,515
|
def MinimalBpseqParser(lines):
    """Separate header and content (residue) lines.

    lines -- a list of lines or anything that behaves like that.

    The standard bpseq header (from the CRW website) is recognized; any
    line that contains a colon is also accepted as header. Non-header
    lines that split into exactly three fields are residue lines
    (sequence and structure description). Anything else is ignored.
    """
    result = {'HEADER': [], 'SEQ_STRUCT': []}
    header_prefixes = ('Filename', 'Organism', 'Accession', 'Citation')
    for line in lines:
        if line.startswith(header_prefixes) or ":" in line:
            result['HEADER'].append(line.strip())
        elif len(line.split()) == 3:
            result['SEQ_STRUCT'].append(line.strip())
        # anything else is unknown and dropped
    return result
|
e877150348e43341e764f16b2e2774c29df9afc5
| 61,516
|
import torch
def im2col(img, win_len, stride=1):
    """
    Unfold sliding-window patches of a feature map into per-pixel rows.

    INPUTS:
    - img: a b*c*h*w feature tensor.
    - win_len: side length of the square sliding window (kernel size).
    - stride: step between neighboring window positions.
    OUTPUT:
    - a b*c*L*(win_len^2) tensor, where L is the number of window
      positions; each length-(win_len^2) row is one flattened patch.
    """
    batch, channels = img.size(0), img.size(1)
    # unfold -> b * (c*win_len*win_len) * L
    cols = torch.nn.functional.unfold(img, win_len, padding=0, stride=stride)
    cols = cols.view(batch, channels, win_len * win_len, -1)
    # move the patch dimension last: b * c * L * (win_len^2)
    return cols.permute(0, 1, 3, 2)
|
302798cc735439c18c1cbd92d09a38962f123753
| 61,518
|
def compute_loss(target, output, criterion, weights, args, model, data):
    """
    Compute the loss via the given criterion.

    :param target: target labels
    :param output: predicted labels
    :param criterion: loss criterion, invoked as ``criterion(output, target)``
    :param weights: the weight per task / label (currently unused)
    :param args: run arguments (currently unused)
    :param model: the model (currently unused)
    :param data: the input batch (currently unused)
    :return: the computed loss
    """
    return criterion(output, target)
|
51e2d662c456901514cb82bad2a14d8f146279e1
| 61,523
|
def isSameNode(node1, node2):
    """Compare two nodes, returning True if they are the same node.

    Prefers the DOM level 3 ``Node.isSameNode`` method when the first node
    provides one; falls back to a plain identity test otherwise.
    """
    if not hasattr(node1, 'isSameNode'):
        return node1 is node2
    return node1.isSameNode(node2)
|
a3c1021811580e259c9005bc40a843f272b5036d
| 61,525
|
def count(dependencies):
    """
    Count the number of dependencies in a list.

    Entries whose stripped text starts with ``#`` are comments and ignored.
    """
    return sum(1 for entry in dependencies if not entry.strip().startswith("#"))
|
614913f63842eacd6bd858501d68d3862ac13cb1
| 61,526
|
from google.cloud.ndb import context as context_module
def get_batch(batch_cls, options=None):
    """Gets a data structure for storing batched calls to Datastore Lookup.
    The batch data structure is stored in the current context. If there is
    not already a batch started, a new structure is created and an idle
    callback is added to the current event loop which will eventually perform
    the batch look up.
    Args:
        batch_cls (type): Class representing the kind of operation being
            batched.
        options (_options.ReadOptions): The options for the request. Calls with
            different options will be placed in different batches.
    Returns:
        batch_cls: An instance of the batch class.
    """
    # prevent circular import in Python 2.7
    context = context_module.get_context()
    # Batches live on the context, keyed first by operation class, then by
    # a normalized options key: context.batches[batch_cls][options_key].
    batches = context.batches.get(batch_cls)
    if batches is None:
        context.batches[batch_cls] = batches = {}
    if options is not None:
        # Normalize options into a hashable, order-independent key (drop
        # None values) so equivalent option sets share the same batch.
        options_key = tuple(
            sorted(
                (
                    (key, value)
                    for key, value in options.items()
                    if value is not None
                )
            )
        )
    else:
        options_key = ()
    # Reuse the current batch when one exists and still has capacity.
    batch = batches.get(options_key)
    if batch is not None and not batch.full():
        return batch
    def idler(batch):
        # Bind the specific batch into the idle callback: the callback only
        # unregisters/runs the batch if it is still the one stored under
        # options_key (a full batch may have been replaced in the meantime).
        def idle():
            if batches.get(options_key) is batch:
                del batches[options_key]
            batch.idle_callback()
        return idle
    # Start a fresh batch and schedule it to run when the loop goes idle.
    batches[options_key] = batch = batch_cls(options)
    context.eventloop.add_idle(idler(batch))
    return batch
|
e54893bbe62149c1e918027365c6d2e9660d806c
| 61,528
|
import sympy
def rref(A):
    """Compute the reduced row echelon form of the matrix A.

    Returns a tuple of two elements: the reduced row echelon form, and a
    list of indices of the pivot columns.
    """
    return sympy.Matrix(A).rref()
|
2d796f8c48e2a88d87358b03158623ff04c64a48
| 61,530
|
def format_obj_keys(obj, formatter):
    """
    Take a dictionary with string keys and recursively convert
    all keys from one form to another using the formatting function.

    The dictionary may contain lists as values, and any nested
    dictionaries within those lists will also be converted.

    :param object obj: The object to convert
    :param function formatter: The formatting function
        for keys, which takes and returns a string
    :returns: A new object with keys converted
    :rtype: object

    :Example:
    ::
        >>> obj = {
        ...     'dict-list': [
        ...         {'one-key': 123, 'two-key': 456},
        ...         {'threeKey': 789, 'four-key': 456},
        ...     ],
        ...     'some-other-key': 'some-unconverted-value'
        ... }
        >>> format_obj_keys(obj, lambda s: s.upper())
        {
            'DICT-LIST': [
                {'ONE-KEY': 123, 'TWO-KEY': 456},
                {'FOUR-KEY': 456, 'THREE-KEY': 789}
            ],
            'SOME-OTHER-KEY': 'some-unconverted-value'
        }
    """
    # isinstance (rather than the previous `type(obj) == ...` comparison)
    # also handles dict/list subclasses such as OrderedDict or defaultdict.
    if isinstance(obj, list):
        return [format_obj_keys(item, formatter) for item in obj]
    elif isinstance(obj, dict):
        return {formatter(key): format_obj_keys(value, formatter)
                for key, value in obj.items()}
    else:
        return obj
|
9d4e5ea2692e3e65e2b12c2134c775ef16e0a6d7
| 61,532
|
def gsetting_to_R(G, oldmezz=False):
    """Convert an analog gain setting into the corresponding effective
    resistance set in the switchable resistor.

    Parameters
    ----------
    G : Integer [0, 15]
        Mezzanine gain setting
    oldmezz : Boolean, default=False
        For use with the Rev2 mezzanines (Not part of any PB2 or SPT3G
        allocation)

    Returns
    -------
    float
        Effective resistance of the switchable resistive network for the
        gain stage.
    """
    if G not in range(16):
        raise Exception("Mezzanine Gain settings are integers in set [0, 15]")
    if oldmezz:
        # Rev2 mezzanines: linear 15-ohm-per-step ladder down from 325 ohm.
        return 100 + (15 - G) * 15.
    # Effective resistance per gain setting for the current mezzanines.
    resistances = (300.0000, 212.0000, 174.2857, 153.3333, 140.0000,
                   130.7692, 124.0000, 118.8235, 114.7368, 111.4286,
                   108.6957, 106.4000, 104.4444, 102.7586, 101.2903,
                   100.0000)
    return resistances[G]
|
c8ab1083cc69404d1140ac7ac26ff8746728b7a1
| 61,534
|
def list_startswith(l, prefix):
    """Like str.startswith, but for lists: True if ``l`` begins with ``prefix``."""
    prefix_length = len(prefix)
    return l[:prefix_length] == prefix
|
aef517c04a0eb12886a029ad8ea8247c3767cfbf
| 61,535
|
def create_category(session, displayname, file_extensions,
                    return_type=None, **kwargs):
    """
    Create a new file lifecycle category with its file extensions.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type displayname: str
    :param displayname: The category 'category_name' value as returned by
        get_all_categories, e.g. 'Video Files'. Required.

    :type file_extensions: str
    :param file_extensions: The category file extensions 'extension' value
        as returned by get_all_categories, e.g. 'avi,mpg,mpeg'. Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    path = '/api/flc/create_category.json'
    body_values = {
        'displayname': displayname,
        'file_extensions': file_extensions,
    }
    return session.post_api(path=path, body=body_values,
                            return_type=return_type, **kwargs)
|
b6626ac4d7b8c611d564f5cd5773bfff6b3cd7f9
| 61,537
|
def init_similar_images(paths_files_source):
    """Return a dict of empty lists keyed by the source image paths."""
    return {source_path: [] for source_path in paths_files_source}
|
117d22436b83915b38661b63df63aa1998057aa4
| 61,539
|
def get_dependencies(requirements_file_path):
    """Read and return the raw text of a requirements.txt file."""
    with open(requirements_file_path, mode='r', encoding='utf-8') as stream:
        contents = stream.read()
    return contents
|
3abb2447c86948e68038ce9735c25b1742bbcb55
| 61,545
|
def convert_value(var):
    """Convert the metric value from string into the matching Python type.

    Supported ``var['type']`` values are "number" (-> float), "boolean"
    (-> bool, true iff the value is the string "true") and "string"
    (returned as-is). Unknown types print a warning and yield None.
    """
    kind = var["type"]
    if kind == "number":
        return float(var["value"])
    if kind == "boolean":
        return var["value"] == "true"
    if kind == "string":
        return var["value"]
    print("can't convert unknown type {} for var {}".format(kind, var["name"]))
    return None
|
52dc9ebe2f66de7638dc8f5ab8261c1e2f76e199
| 61,546
|
import functools
def wraps_py2(wrapped, *args):
    """Wrap a function and guarantee the __wrapped__ attribute is set.

    In Python 2, functools.wraps does not add the __wrapped__ attribute,
    which makes it impossible to retrieve the signature of the wrapped
    method; this decorator factory sets it explicitly.
    """
    def decorator(wrapper):
        wrapper = functools.wraps(wrapped, *args)(wrapper)
        wrapper.__wrapped__ = wrapped
        return wrapper
    return decorator
|
e382827fc21bf7f839a8a18fc91a2c8064ac3b35
| 61,547
|
def text_questmark(text_content):
"""
处理文本中的问号
:param text_content:处理对象文本
:return: 是否含有问号(1:有,0:无),问号数量
"""
en_questmark_nums = text_content.count("?")
cn_questmark_nums = text_content.count("?")
if(en_questmark_nums + cn_questmark_nums > 0):
return 1,en_questmark_nums + cn_questmark_nums
else:
return 0,0
|
9145595672317e7d5cdaadc8c1d94e686dfa94ba
| 61,550
|
from typing import Dict
from datetime import datetime
def transform_metadata(record) -> Dict:
    """Format the metadata record we got from the database to adhere to the response schema.

    Moves ``accessType`` under an ``info`` sub-dict and renders the
    create/update timestamps as ``%Y-%m-%dT%H:%M:%SZ`` strings when they
    are datetime objects.
    """
    response = dict(record)
    response["info"] = {"accessType": response.pop("accessType")}
    # Both timestamps are checked against the response copy; the original
    # code probed the raw record for updateDateTime (inconsistently with
    # createDateTime) while indexing the copy.
    for key in ("createDateTime", "updateDateTime"):
        if key in response and isinstance(response[key], datetime):
            response[key] = response.pop(key).strftime("%Y-%m-%dT%H:%M:%SZ")
    return response
|
4555946b9202a2d174eea96379c22d09e19846a1
| 61,552
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.