content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_height(prms):
    """Return the height of the shape described by *prms*.

    prms is an ObjParams(shape, r, fi, args, color)-like object; the call
    is delegated to the shape's own get_height with the stored args.
    """
    shape, shape_args = prms.shape, prms.args
    return shape.get_height(shape_args)
def create_extension_client_fixture(mgmt_client, get_extension_client_class):
    """ Test fixture: Create Extension Client (Factory)"""
    def _func(**kwargs):
        # pull factory-level options out of the caller's kwargs
        component = kwargs.pop('component', 'as3')
        version = kwargs.pop('version', None)
        use_latest_metadata = kwargs.pop('use_latest_metadata', False)
        # rebuild the kwargs actually forwarded to the client constructor
        client_kwargs = {}
        if use_latest_metadata:
            client_kwargs['use_latest_metadata'] = use_latest_metadata
        if version is not None:
            client_kwargs['version'] = version
        # resolve the concrete client class for this component and build it
        client_class = get_extension_client_class(component=component)
        return client_class(mgmt_client, **client_kwargs)
    return _func
def find_range(index, window, max):
    """ find the left and right endpoints of a window in an array
    :param index: index window is to be centered at
    :param window: the length of the window
    :param max: the size of the array
    :return: left and right endpoints of the window
    """
    half = int(window / 2)
    if half <= index <= max - half:
        # window fits fully around the index
        return (index - half, index + half)
    if index > max - half:
        # clamp to the right edge of the array
        return (max - window, max)
    # clamp to the left edge of the array
    return (0, window)
def find_crop_size(crop_array, size = 15):
    """Scan the rotated crop_array (column-major) for 255-valued pixels.

    Records the positions of the first and last such pixel and widens the
    row (y) coordinates by ``size`` pixels, clamped to the array bounds.
    Returns (first_x, first_y, last_x, last_y).
    """
    nrows = len(crop_array)
    ncols = len(crop_array[0])
    first = None
    last = (nrows, ncols)
    for col in range(ncols):
        for row in range(nrows):
            if crop_array[row][col] != 255:
                continue
            if first is None:
                # remember only the very first hit here
                first = (row, col)
            else:
                # every later hit pushes the last coordinate forward
                last = (row, col)
    if first is None:
        first = (0, 0)
    first_y, first_x = first
    last_y, last_x = last
    # widen vertically by `size`, staying inside the image
    first_y = max(first_y - size, 0)
    last_y = min(last_y + size, nrows)
    return first_x, first_y, last_x, last_y
def validate_comma_separated_list(setting, value, option_parser,
                                  config_parser=None, config_section=None):
    """Check/normalize list arguments (split at "," and strip whitespace).
    """
    # Command-line options with action="append" already arrive as a list;
    # plain str/unicode values are wrapped first.
    if not isinstance(value, list):
        value = [value]
    # Called once per appended option: split only the newest entry on
    # commas, drop empty pieces, and extend the list with the parts.
    tail = value.pop()
    stripped = (piece.strip(u' \t\n') for piece in tail.split(u','))
    value.extend(part for part in stripped if part)
    return value
import os
def GenerateUploadDict(base_local_path, base_remote_path, pkgs):
    """Build a dictionary of local remote file key pairs to upload.
    Args:
      base_local_path: The base path to the files on the local hard drive.
      base_remote_path: The base path to the remote paths.
      pkgs: The packages to upload.
    Returns:
      Returns a dictionary of local_path/remote_path pairs
    """
    remote_base = base_remote_path.rstrip('/')
    upload_files = {}
    for pkg in pkgs:
        tarball = pkg['CPV'] + '.tbz2'
        local_path = os.path.join(base_local_path, tarball)
        # every advertised package must already exist locally
        assert os.path.exists(local_path)
        upload_files[local_path] = '%s/%s' % (remote_base, tarball)
    return upload_files
def MSD(Z, Y):
    """Compute the mean square distortion (MSD) between Z and Y.

    Both inputs are torch.Tensors of shape (n_samples, n_features); the
    result is the total squared difference divided by the number of
    samples (returned as a 0-dim tensor).
    """
    n_samples = Z.shape[0]
    diff = Z - Y
    return (diff * diff).sum() / n_samples
def get_product_metadata(product_file):
    """
    Metadata appended for labels xr.DataArray

    :param product_file: path (or identifier) of the product file.
    :return: dict with a single ``product_file`` entry.
    """
    # the dict(...) wrapper around a dict literal was redundant
    return {"product_file": product_file}
def static_vars(**kwargs):
    """Add static variables to a method.
    To add the variable :py:obj:`counter` to :py:meth:`foo` :
    .. code-block:: python
        @static_vars(counter=0)
        def foo():
            foo.counter += 1  # foo.counter is incremented on every call to foo
    """
    def decorate(func):
        # attach every keyword as an attribute on the function object
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate
import torch
def avg_disp(y_pred, y_true):
    """ Average displacement error. """
    # y_true carries the target positions together with a per-step mask
    targets, masks = y_true
    seq_lengths = masks.sum(1)
    batch_size = len(seq_lengths)
    # masked L2 distance per time step
    step_dist = masks * torch.sqrt(((targets - y_pred) ** 2).sum(2))
    # mean over valid steps, then mean over the batch
    per_seq = step_dist.sum(1) / seq_lengths
    return (per_seq.sum() / batch_size).item()
import random
def rollDice(inputArray):
    """Simulate rolling dice. Accepts array size of three. Returns a int value representing the total."""
    # inputArray = [number of dice, sides per die, flat modifier]
    count = int(inputArray[0])
    sides = int(inputArray[1])
    modifier = int(inputArray[2])
    rolls = [random.randint(1, sides) for _ in range(count)]
    print("Dice rolls: [%s]" % ','.join(map(str, rolls)))
    return sum(rolls) + modifier
import os
import logging
def generate_summaries(run_folder, output_folder):
    """
    Creates text summary output from a MiSeq run using InterOp software
    :param run_folder: path to MiSeq run folder
    :param output_folder: path to output folder
    :return: list of paths to each text file produced
    """
    # run folder name is reused as the prefix of every output filename
    miseq_run = os.path.basename(run_folder)
    interop_programs = [
        'index-summary', # txt
        'imaging_table', # csv
        'summary' # txt
    ]
    summary_files = []
    for program in interop_programs:
        logging.info('Creating text output with {}...'.format(program))
        # NOTE(review): only index-summary gets a .txt extension; 'summary'
        # is annotated "txt" above but written as .csv here -- confirm intended.
        if program == 'index-summary':
            output_filename = os.path.join(output_folder, miseq_run + '_' + program + '.txt')
        else:
            output_filename = os.path.join(output_folder, miseq_run + '_' + program + '.csv')
        # each InterOp program prints to stdout; redirect into the file.
        # NOTE(review): os.system with interpolated paths is shell-injection
        # prone for untrusted paths; subprocess.run would be safer.
        cmd = program
        cmd += " " + run_folder
        cmd += " > {}".format(output_filename)
        os.system(cmd)
        summary_files.append(output_filename)
    return summary_files
def count(items: list) -> list:
    """
    Generate a list of counts for each item in the input list. Outputs with highest counted item at index 0
    Args:
        items (list): A list of objects to be sorted
    Returns:
        list: A list of [item, count] pairs, one per unique item, sorted by
        count in descending order.
    """
    from collections import Counter
    # lists are unhashable; fall back to tuples so they can be counted
    if isinstance(items[0], list):
        items = [tuple(x) for x in items]
    # Counter is a single O(n) pass; the original list.count() comprehension
    # over set(items) was O(n * unique).
    uniqs = [[item, n] for item, n in Counter(items).items()]
    uniqs.sort(key=lambda pair: pair[1], reverse=True)
    return uniqs
import json
def to_json(dict_):
    """
    serialize dict to json
    :param dict_: dict_
    :returns: JSON string representation
    """
    serialized = json.dumps(dict_)
    return serialized
def announcement_channel(will):
    """Used for retrieving the current announcement channel.

    Falls back to (and persists) 'announcements' when no channel has been
    saved yet.
    """
    # load once instead of twice: the original called will.load() both in
    # the condition and again in the body.
    channel = will.load('announcement_channel')
    if not channel:
        channel = 'announcements'
        will.save('announcement_channel', channel)
    return channel
def find_in_reference_list(reference_list, source_name):
    """Check if it is already in reference list.

    Note: truthiness of the stored value, not mere key presence, decides
    the answer (matching the original .get() check).
    """
    return bool(reference_list.get(source_name))
def null_count(df):
    """
    Return the per-column null counts of the DataFrame as a Series.
    (Sum the result again for the grand total.)
    """
    return df.isnull().sum()
import subprocess
def build_ms():
    """Invoke build command on Windows.

    Runs msbuild on libMultiMarkdownShared.vcxproj in the Release
    configuration and returns the process exit status (0 on success).
    """
    return subprocess.call(['msbuild', 'libMultiMarkdownShared.vcxproj',
                            '/p:Configuration=Release'])
def remove_blank_lines(lines):
    """Get all non-blank lines out of a list of lines"""
    stripped = (raw.strip() for raw in lines)
    return [line for line in stripped if line]
import re
def parse_game_times(bs_times_table, team):
    """Extract game times for *team* from a BeautifulSoup schedule table.

    Hard coded TZ info ('+0800')... and oh god fix this blregghhh.

    :param bs_times_table: BeautifulSoup element containing <td> cells.
    :param team: team name to match inside the cells.
    :return: list of '<date> <time> +0800' strings for non-bye rows.
    """
    tds = bs_times_table.find_all('td', text=re.compile(team))
    td_family = [list(x.parent.stripped_strings) for x in tds]
    result = []
    for row in td_family:
        # skip bye weeks and rows too short to carry a kickoff time;
        # row[1] is presumably the date cell, row[5] the time -- confirm.
        if 'bye' not in row and len(row) > 5:
            if 'AM' in row[5] or 'PM' in row[5]:
                result.append('{} {} +0800'.format(row[1], row[5]))
    return result
import textwrap
def test_include_inherit_context(env, render):
    """
    The !include tag passes given ``context`` to downstream renderers.
    """
    # Write a template file that calls a function expected to come from
    # the rendering context supplied below.
    env.write(
        "file.sls",
        textwrap.dedent(
            """
            #!jinja|text
            {{ fuubar() }}
            """
        ).strip(),
    )
    # Top-level template includes the file; the supplied context must
    # reach the included template's jinja pass.
    template = """
    key: !include file.sls
    """
    def fuubar():
        return "fuubar"
    assert render(template, default="yamlet", context={"fuubar": fuubar}) == {
        "key": "fuubar"
    }
def removeOutliers(data_dict,listOfOutliers):
    """
    Parameters:
        data_dict= The data_dict provided by Udacity.
        listOfOutliers = Python List of outliers (key names)
            to remove from the data_dict.
    Output:
        Updated data_dict where the outliers have been removed.
    """
    # dict.pop with a default never raises, so the original
    # try/except ValueError was dead code and has been removed.
    for outlier in listOfOutliers:
        data_dict.pop(outlier, 0)
    return data_dict
def merge_graphs(main_graph, addition_graph):
    """Merges an ''addition_graph'' into the ''main_graph''.
    Returns a tuple of dictionaries, mapping old node ids and edge ids to new ids.
    """
    # copy nodes first so edges can be rewired through the id mapping
    node_mapping = {
        node['id']: main_graph.new_node()
        for node in addition_graph.get_all_node_objects()
    }
    edge_mapping = {}
    for edge in addition_graph.get_all_edge_objects():
        end_a, end_b = edge['vertices']
        edge_mapping[edge['id']] = main_graph.new_edge(
            node_mapping[end_a], node_mapping[end_b])
    return node_mapping, edge_mapping
def perovskite_order_param(Rs_order_param=None):
    """ Define an order parameter for peroskite.
    """
    if Rs_order_param is None:
        # default: multiplicity 1, L = 0..2, both parities
        Rs_order_param = [(1, L, p) for L in range(3) for p in (1, -1)]
    return [(3, 0, 1)] + Rs_order_param
def RemoveRowsWithHighNans(dataframe):
    """
    param1: pandas.DataFrame
    return: pandas.DataFrame
    Function delete rows containing more than 50% NaN Values
    """
    keep_percent = 50.0
    # a row survives only if it has at least this many non-NaN values
    min_non_nan = int((keep_percent / 100) * dataframe.shape[1] + 1)
    return dataframe.dropna(axis=0, thresh=min_non_nan)
import json
def mock_prom(self, metric):
    """Fake Prometheus query response.
    Args:
        metric (dict): Input metric query (intentionally ignored: every
            query gets the same canned payload).
    Returns:
        str: Fake response serialized as JSON.
    """
    data = {
        'data': {
            'result': [{
                'values': list(range(5)),  # was a redundant comprehension
                'value': [0, 1]
            }]
        }
    }
    return json.dumps(data)
from typing import List
def smallest(number: int) -> List[int]:
    """Finds smallest number.
    Examples:
        >>> assert smallest(261235) == [126235, 2, 0]
        >>> assert smallest(209917) == [29917, 0, 1]
        >>> assert smallest(285365) == [238565, 3, 1]
        >>> assert smallest(269045) == [26945, 3, 0]
    """
    # Work on the decimal digits of the input number.
    numbers: List[int] = list(map(int, str(number)))
    # Smallest digit excluding the leading one...
    min_: int = min(numbers[1:])
    # ...but index() searches from position 0, so a leading digit equal to
    # min_ is found first. NOTE(review): presumably intended -- verify.
    current_index: int = numbers.index(min_)
    numbers.pop(current_index)
    # Re-insert the digit at the front, or at position 1 when it would
    # not beat the current leading digit.
    if min_ < numbers[0]:
        numbers.insert(0, min_)
    else:
        numbers.insert(1, min_)
    min_index: int = numbers.index(min_)
    # NOTE(review): this membership test compares the *index* against digit
    # *values* (`current_index in numbers` is True when some digit equals
    # the index). Looks suspicious; confirm against the doctests above.
    if current_index in numbers:
        if current_index < numbers.index(current_index):
            current_index, min_index = min_index, current_index
    return [int("".join(map(str, numbers))), current_index, min_index]
def format_time(time_units):
    """ Returns a formated time value (hh:mm:ss:ms). """
    # only the elements from index 4 onward make up the displayed value
    tail = [str(time_units[i]) for i in range(4, len(time_units))]
    return ":".join(tail)
def first(xs, fn=lambda _: True, default=None):
    """Return the first element from iterable that satisfies predicate `fn`,
    or `default` if no such element exists.
    Args:
        xs (Iterable[Any]): collection
        fn (Callable[[Any],bool]): predicate
        default (Any): default
    Returns:
        Any
    """
    for item in xs:
        if fn(item):
            return item
    return default
import subprocess
def gnupg_agent_socket_path() -> str:
    """
    :return: the agent socket to mount ("" when gnupg is not installed)
    """
    cmd = ["gpgconf", "--list-dir", "agent-extra-socket"]
    try:
        proc = subprocess.run(cmd, stdout=subprocess.PIPE)
    except FileNotFoundError:
        # gnupg is not installed
        return ""
    return proc.stdout.decode("utf-8").strip()
def _completeEdits(filteredTokens, fullSentence):
"""Given a set of tokens (along with their edit path), generates
a complete list of edit paths, adding the edit 'p' in case the
token is not in the filtered tokens.
"""
allTokens = fullSentence.split(" ")
edits = []
k = 0
for t in allTokens:
if k < len(filteredTokens) and t == filteredTokens[k].token:
edits.append(filteredTokens[k].edit)
k += 1
else:
edits.append('p')
result = "".join(edits)
return result | c197742809ca67e717e59b2d16df3c81b4327794 | 44,802 |
from typing import Sequence
from typing import List
def bubble_sort_rec(seq: Sequence) -> List:
    """
    Sort a sequence with the recursive bubble sort algorithm.
    Parameters
    ----------
    seq : Sequence
    Returns
    -------
    List
    """
    def _pass(data: List, upto: int):
        """One bubble pass over data[:upto], then recurse on the prefix."""
        if upto <= 1:
            return
        for i in range(upto - 1):
            if data[i] > data[i + 1]:
                data[i], data[i + 1] = data[i + 1], data[i]
        _pass(data, upto - 1)
    result: List = list(seq)
    _pass(result, len(result))
    return result
def PerpProduct2D(a,b):
    """Computes the the 2D perpendicular product of sequences a and b.
    The convention is a perp b.
    The product is:
        positive if b is to the left of a
        negative if b is to the right of a
        zero if b is colinear with a
    left right defined as shortest angle (< 180)
    """
    ax, ay = a[0], a[1]
    bx, by = b[0], b[1]
    return ax * by - ay * bx
def no_intervening_genes(feat,b_feat,bed):
    """retunrs true is there are no intervening genes between feat and b_feat
    NOTE feat < b_feat... sort before hand"""
    # feat[0]: sequence id, feat[1]/feat[2]: start/end, feat[4]: strand --
    # presumably; confirm against the bed feature layout used by callers.
    if feat[0] == b_feat[0] and feat[4] == b_feat[4]:
        # overlapping/adjacent features: nothing can lie between them
        if feat[2] >= b_feat[1]: return True
        ### want to skip non merged feats for now
        feats = bed.get_features_in_region(feat[0],feat[2]+1, b_feat[1])
        strands = [f["strand"] for f in feats]
        # only features on the same strand count as intervening
        if feat[4] not in strands: return True
        elif len(feats) > 0: return False
        else: return True
    else: return False
def PreprocessImage(img):
    """
    Preprocess for imput image.
    :param img: the image (array-like of 8-bit pixel values).
    :return: the normalized image with values scaled into [0, 1].
    """
    # map 0..255 pixel values to floats in [0, 1]
    imgOut = img / 255.
    return imgOut
import os
def dummy_pubkey():
    """Public key fixture: $PUBLIC_KEY from the environment, or "pub" when unset."""
    return os.getenv("PUBLIC_KEY", "pub")
def try_key(d, key, val):
    """Return d[key] if the key exists, else the default `val`.

    d: dict
    key: str
    val: object
        the default value if the key does not exist in d
    """
    # one dict lookup instead of the original membership test + getitem
    return d.get(key, val)
def index_to_rowref(index):
    """Convert a 0-based index to a 1-based row-number string.

    Params:
        index(int):
    Returns:
        str:
    """
    row_number = index + 1
    return str(row_number)
def numpy_reflectance(cosines, ref_idxs):
    """
    Calculate the reflectance using Schlick's approximation.
    I have no idea whats going on in here. Stolen from:
    https://raytracing.github.io/books/RayTracingInOneWeekend.html#dielectrics/schlickapproximation
    Args:
        cosines (numpy.ndarray): Cosine of ... some angle :(. 1D array
            of floats.
        ref_idxs (numpy.ndarray): A way of describing the refractive
            indecies of the materials on either side of the boundary
            between them. 1D array of floats
    Returns:
        numpy.ndarray: A reflectance angle? 1D array of floads
    """
    # Schlick: R(theta) = R0 + (1 - R0) * (1 - cos(theta))^5
    base = ((1.0 - ref_idxs) / (1.0 + ref_idxs)) ** 2
    return base + (1.0 - base) * (1.0 - cosines) ** 5
def is_subset(l1: list, l2: list) -> bool:
    """
    Test if l2 is a subset of l1, i.e. all elements of l2 are contained in l1 and return True if it the case, False
    otherwise.
    :param l1: main list
    :param l2: list whose elements are to be checked (if they're in l1 or not)
    :return: True if l2 is a subset of l1, False otherwise.
    """
    # BUG FIX: the original returned set(l1).issubset(set(l2)), i.e. tested
    # l1 <= l2 -- the inverse of what the docstring promises.
    return set(l2).issubset(set(l1))
import pprint
def format_display(opt, num=1, symbol=" "):
    """Convert dictionary to format string.
    Args:
        opt (dict): configuration to be displayed
        num (int): number of indent
        symbol (str): character repeated to build the indent prefix
    """
    prefix = symbol * num
    pretty = pprint.pformat(opt)
    # prepend the prefix to the first line and to every following line
    return prefix + pretty.replace("\n", "\n" + prefix)
def _looks_like_numpy_function(func_name, numpy_module_name, node):
"""
Return True if the current node correspond to the function inside
the numpy module in parameters
:param node: the current node
:type node: FunctionDef
:param func_name: name of the function
:type func_name: str
:param numpy_module_name: name of the numpy module
:type numpy_module_name: str
:return: True if the current node correspond to the function looked for
:rtype: bool
"""
return node.name == func_name and node.parent.name == numpy_module_name | e343f39d2388ce31f2d749d50b1c8f4e47475780 | 44,816 |
from typing import Callable
import functools
def to(data_type) -> Callable:
    """
    Apply a data type to returned data from a function.
    Args:
        data_type: The data type to apply. Eg: list, int etc.
    Returns:
        Decorator that applies the data type on returned data
    """
    def decorator(func) -> Callable:
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            raw = func(*args, **kwargs)
            return data_type(raw)
        return wrapper
    return decorator
import random
import string
import hashlib
def generate_response(wallet_address, contract_address, tokenId):
    """Unique message generator.
    Args:
        wallet_address (string): The address of the requesters wallet.
        contract_address (string): The address of the raindrops smart contract.
        tokenId (string): The tokenId of the raindrops ticket.
    Returns:
        dict: JSON-serializable response containing the request id and the
        SHA-256 hash of the concatenated inputs.
    """
    import secrets
    # Parameter that controls length of random str
    str_length = 32
    # Use `secrets` rather than `random`: this request id acts as a nonce
    # in a signed-message flow, so it should be unpredictable.
    alphabet = string.ascii_uppercase + string.digits
    request_id = ''.join(secrets.choice(alphabet) for _ in range(str_length))
    # Hash the data together with the random sequence appended
    encode_str = str(wallet_address + contract_address + tokenId + request_id)
    hex_dig = hashlib.sha256(encode_str.encode('UTF-8')).hexdigest()
    # Form JSON response
    response_data = {
        'wallet_address': wallet_address,
        'contract_address': contract_address,
        'tokenId': tokenId,
        'random_str': request_id,
        'message': hex_dig,
    }
    return response_data
import random
def create_id():
    """This function shuffles a list of IDs, and returns them to main"""
    voter_ids = list(range(1, 21))  # 20 voter ids are declared
    random.shuffle(voter_ids)
    return voter_ids
import sys
def bits2bytes(__n):
    """bits2bytes(n, /) -> int
    Return the number of bytes necessary to store n bits.
    """
    # Python 2 also accepts `long`; Python 3 only `int`
    integer_types = (int, long) if sys.version_info[0] == 2 else int
    if not isinstance(__n, integer_types):
        raise TypeError("integer expected")
    if __n < 0:
        raise ValueError("non-negative integer expected")
    # ceiling division by 8
    return -(-__n // 8)
def list_nodes(base):
    """Utility function that lists Senlin nodes.

    :param base: object exposing a ``client`` with a ``list_objs`` method.
    :return: the ``body`` payload of the ``nodes`` listing response.
    """
    res = base.client.list_objs('nodes')
    return res['body']
def guess_mime_type(content, deftype):
    """Guess the MIME type of a block of text.

    :param content: content we're finding the type of
    :param deftype: default MIME type returned when no prefix matches
    :return: the matched cloud-init MIME type, or ``deftype``
    """
    # Prefix markers recognized by cloudinit
    prefix_to_mimetype = {
        '#include': 'text/x-include-url',
        '#!': 'text/x-shellscript',
        '#cloud-config': 'text/cloud-config',
        '#upstart-job': 'text/upstart-job',
        '#part-handler': 'text/part-handler',
        '#cloud-boothook': 'text/cloud-boothook',
    }
    # first matching prefix wins (same insertion order as the original)
    for prefix, mimetype in prefix_to_mimetype.items():
        if content.startswith(prefix):
            return mimetype
    return deftype
def Extract_Deltas_from_Eigenvalues(l, Q, R):
    """ Maps eigenvalues to delta values. """
    scaled = (2.0 / R) * l * Q
    return 2 - scaled
def GetCost(term):
    """Calculate the cost of a term.
    Quota is charged based on how complex the rules are rather than simply
    limiting the number of rules.
    A firewall rule tuple is the unique combination of IP range, protocol, and
    port defined as a matching condition in a firewall rule. And the cost of a
    firewall rule tuple is the total number of elements within it.
    Args:
      term: A Term object.
    Returns:
      int: The cost of the term.
    """
    protocol_count = len(term.protocol) or 1
    port_count = len(term.destination_port) or 1
    # destination addresses take precedence; fall back to source addresses
    address_list = term.destination_address or term.source_address
    address_count = len(address_list) or 1
    return address_count * protocol_count * port_count
import os
def get_path(file):
    """ Get the path of the input file """
    resolved = os.path.realpath(file)
    return resolved
from typing import Tuple
def convert_tuple_to_prayer_dict(tuple_data: Tuple) -> dict:
    """Convert tuple record from DB to prayer dictionary.
    :param tuple_data: Prayer data from DB
    :return: Dict in thr format -> {Title
                                    Name
                                    Prayer}
    """
    title, name, text = tuple_data[0], tuple_data[1], tuple_data[2]
    return {"Title": title, "Name": name, "Prayer": text}
def is_num(val):
    """Return True if val is number, i.e. int or float.

    Note: bool is a subclass of int, so booleans also return True
    (same as the original behavior).
    """
    # single isinstance with a tuple replaces the chained checks
    return isinstance(val, (int, float))
def ArgumentCountException(function: str, args: int, count: int) -> bool:
    """
    Checks if the correct number of arguments were given to a specific command.
    :param function: The display name given to the function making the call.
    :param args: The number of arguments passed into the command.
    :param count: The expected number of arguments for this command.
    :return: Returns a boolean as a result of the count lookup.
    """
    if args == count:
        return False
    # report the mismatch using this function's own name as the tag
    name = ArgumentCountException.__name__
    print(f'{function} {name}: Incorrect Number of Arguments. Expected {count}, got {args}.')
    return True
def getTier0Regex():
    """
    _getTier0Regex_
    Define the Tier0 states and matching regex out of the object
    so it can be fetched without instancing the plugin.
    This are the uncompiled regular expressions in the correct
    order of the states
    """
    # Maps each Tier0 workflow type to an ordered list of
    # (state name, [task-path regexes]) pairs; a task path matching one of
    # the patterns indicates the workflow has reached that state.
    regexDict = {'Repack': [('Merge', [r'^/Repack_Run[0-9]+_Stream[\w]+/Repack$']),
                            ('Processing Done', [r'^/Repack_Run[0-9]+_Stream[\w]+/Repack/RepackMerge[\w]+$'])],
                 'PromptReco': [('AlcaSkim', [r'^/PromptReco_Run[0-9]+_[\w]+/Reco$']),
                                ('Merge', [r'^/PromptReco_Run[0-9]+_[\w]+/Reco/AlcaSkim$']),
                                ('Harvesting', [r'^/PromptReco_Run[0-9]+_[\w]+/Reco/AlcaSkim/AlcaSkimMerge[\w]+$',
                                                r'^/PromptReco_Run[0-9]+_[\w]+/Reco/RecoMerge[\w]+$']),
                                ('Processing Done',
                                 [r'^/PromptReco_Run[0-9]+_[\w]+/Reco/RecoMerge[\w]+/RecoMerge[\w]+DQMHarvest[\w]+$'])
                                ],
                 'Express': [('Merge', [r'^/Express_Run[0-9]+_Stream[\w]+/Express$']),
                             ('Harvesting', [r'^/Express_Run[0-9]+_Stream[\w]+/Express/ExpressMerge[\w]+$',
                                             r'^/Express_Run[0-9]+_Stream[\w]+/Express/ExpressAlcaSkim[\w]+$']),
                             ('Processing Done', [
                                 r'^/Express_Run[0-9]+_Stream[\w]+/Express/ExpressMerge[\w]+/ExpressMerge[\w]+DQMHarvest[\w]+$',
                                 r'^/Express_Run[0-9]+_Stream[\w]+/Express/ExpressAlcaSkim[\w]+/ExpressAlcaSkim[\w]+AlcaHarvest[\w]+$'])
                             ]}
    return regexDict
import torch
def _softplus(x):
"""Implements the softplus function."""
return torch.nn.functional.softplus(x, beta=1, threshold=10000) | 65f83d32b949eccbf8ba5e4b2471852ffa5b9c6b | 44,832 |
def _filter_distance(barcodes, candidate, min_dist, distance):
"""Test whether {candidate} can be added to {barcodes} based on the minimum
distance between {candidate} and all barcodes in {barcodes}.
:arg list barcodes: List of barcodes.
:arg str candidate: Candidate barcode.
:arg int min_dist: Minimum distance between the barcodes.
:arg function distance: Distance function.
:returns bool: True if the barcode is clean, False otherwise.
"""
for i in barcodes:
if distance(i, candidate) < min_dist:
return False
return True | d61643d1bffe21a713466b2e7bd44891b2149a33 | 44,833 |
import numbers
def combine_slices(slice1, slice2):
    """
    Given two slices that can be applied to a 1-d array, find the resulting
    slice that corresponds to the combination of both slices. We assume that
    slice2 can be an integer, but slice1 cannot.
    """
    # Only unit-step slices are supported; other steps are ambiguous here.
    if isinstance(slice1, slice) and slice1.step is not None:
        raise ValueError('Only slices with steps of 1 are supported')
    if isinstance(slice2, slice) and slice2.step is not None:
        raise ValueError('Only slices with steps of 1 are supported')
    # Integer second index: offset it by slice1's start (the combination
    # then selects a single element).
    if isinstance(slice2, numbers.Integral):
        if slice1.start is None:
            return slice2
        else:
            return slice2 + slice1.start
    # slice1 starts at the beginning: only its stop can constrain slice2.
    if slice1.start is None:
        if slice1.stop is None:
            return slice2
        else:
            if slice2.stop is None:
                return slice(slice2.start, slice1.stop)
            else:
                # both stops given: the tighter (smaller) one wins
                return slice(slice2.start, min(slice1.stop, slice2.stop))
    else:
        # slice1 has an offset: shift slice2's endpoints by it.
        if slice2.start is None:
            start = slice1.start
        else:
            start = slice1.start + slice2.start
        if slice2.stop is None:
            stop = slice1.stop
        else:
            # NOTE(review): slice1.start is non-None on this branch, so the
            # first arm below looks unreachable -- confirm.
            if slice1.start is None:
                stop = slice2.stop
            else:
                stop = slice2.stop + slice1.start
            # never extend past slice1's own stop
            if slice1.stop is not None:
                stop = min(slice1.stop, stop)
        return slice(start, stop)
def add_more(*args):
    """
    Arbitrary Arguments
    >>> add_more(1, 2, 3)
    6
    """
    total = 0
    for value in args:
        total += value
    return total
def record_read_in_group(read_calls, my_call, my_phred, my_umi, read_name):
    """
    Add call data from read to read_calls dictionary of name -> call.

    Returns True when read_name was already present (i.e. this is the
    overlapping mate of a pair), False when a new entry was created.
    """
    my_call_data = [my_call, my_phred, my_umi] #, my_start
    # do we already have a call from this read pair?
    if read_name in read_calls:
        # both reads of a pair carry the same UMI by construction
        assert my_umi == read_calls[read_name][2] #should always match!
        # #always take lowest start (or overwrite if None, although in that case we're probably None too)
        # if read_calls[read_name][3] is None or my_call_data[3] > read_calls[read_name][3]:
        #     my_call_data[3] = read_calls[read_name][3]
        # resolve overlap by taking higher-quality call
        if read_calls[read_name][1] < my_phred:
            read_calls[read_name] = my_call_data
        # handle case where we have the same quality for two different calls (get suspicious)
        elif read_calls[read_name][1] == my_phred and read_calls[read_name][0] != my_call:
            read_calls[read_name][0] = 'N'
        return True
    else:
        read_calls[read_name] = my_call_data
        return False
def read_list(filename):
    """ read a list of strings from file, one per line """
    # use a context manager so the file handle is always closed
    # (the original opened the file and never closed it)
    with open(filename, 'r') as fd:
        return [line.strip() for line in fd]
def create_fan_string(fan_id):
    """
    Function which accepts a fan_id arg and returns sql string
    param:
        fan_id = '1234'
    returns:
        " AND youtube_fan_id = 1234"
    """
    return " AND youtube_fan_id = {}".format(fan_id)
import random
def unif_minus_one(N, m):
    """Sample uniformly from 0, ..., N-1, minus m.
    """
    # shift a uniform draw from [m+1, m+N] back into [0, N-1]; the value m
    # itself can never be produced (assuming 0 <= m < N).
    draw = random.randint(m + 1, m + N)
    return draw % N
def _CredentialFrom(messages, credential_data):
"""Translate a dict of credential data into a message object.
Args:
messages: The API message to use.
credential_data: A dict containing credential data.
Returns:
An Credential message object derived from credential_data.
"""
basic_auth = messages.BasicAuth(
password=credential_data['basicAuth']['password'],
user=credential_data['basicAuth']['user'])
return messages.Credential(basicAuth=basic_auth) | c96acdf28dab1ba60acc0c5b1bee73585c20106f | 44,843 |
import os
def getppid(space):
    """ getppid() -> ppid
    Return the parent's process id, wrapped by the given object space.
    """
    ppid = os.getppid()
    return space.wrap(ppid)
def int_lin(y1, y2, x1, x2, score):
    """Interpolates score in a linear function.
    """
    # linear map sending x1 -> y1 and x2 -> y2
    return y1 + (score - x1) * (y2 - y1) / (x2 - x1)
def display(
    function=None, *, boolean=None, ordering=None, description=None, empty_value=None
):
    """
    Conveniently add attributes to a display function::
        @admin.display(
            boolean=True,
            ordering='-publish_date',
            description='Is Published?',
        )
        def is_published(self, obj):
            return obj.publish_date is not None
    This is equivalent to setting some attributes (with the original, longer
    names) on the function directly::
        def is_published(self, obj):
            return obj.publish_date is not None
        is_published.boolean = True
        is_published.admin_order_field = '-publish_date'
        is_published.short_description = 'Is Published?'
    """
    def decorator(func):
        # boolean displays have no "empty" representation, so the two
        # options cannot be combined
        if boolean is not None and empty_value is not None:
            raise ValueError(
                "The boolean and empty_value arguments to the @display "
                "decorator are mutually exclusive."
            )
        attr_map = {
            'boolean': boolean,
            'admin_order_field': ordering,
            'short_description': description,
            'empty_value_display': empty_value,
        }
        for attr, value in attr_map.items():
            if value is not None:
                setattr(func, attr, value)
        return func
    # support both bare @display and parametrized @display(...) usage
    return decorator if function is None else decorator(function)
def getMode(mode):
    """
    Process EAST mode code and returns reveal class
    Classes are chosen by closest resemblance
    :param mode: EAST mode code
    :return: Reveal class
    """
    mode_to_class = {
        "incremental_allume": "fragment highlight-red",
        "incremental_ombre": "fragment highlight-blue",
        "pliage": "fragment grow",
        "accordeon": "fragment current-visible",
    }
    # unknown modes fall back to the plain reveal fragment class
    return mode_to_class.get(mode, "fragment")
import numpy
def ctype(a_type):
    """
    Takes a numpy.dtype or any type that can be converted to a numpy.dtype
    and returns its equivalent ctype.
    Args:
        a_type(type): the type to find an equivalent ctype to.
    Returns:
        (ctype): the ctype equivalent to the dtype provided.
    Examples:
        >>> ctype(float)
        <class 'ctypes.c_double'>
        >>> ctype(numpy.float32)
        <class 'ctypes.c_float'>
        >>> ctype(int)
        <class 'ctypes.c_long'>
    """
    # build a 0-d array of the requested dtype and ask ctypeslib for the
    # matching ctypes scalar, then return its class
    sample = numpy.array(0, dtype=a_type)
    return type(numpy.ctypeslib.as_ctypes(sample))
from typing import List
def check_input(map_item_to_fraction: List[dict]) -> bool:
"""
### Examples of proper input
>>> check_input([{'x':0.5, 'y':0.5, 'z':0.5},{'x':0.5, 'y':0.5, 'z':0.5}])
True
>>> check_input([{'x':0.4, 'y':0, 'z':0.5},{'x':0.6, 'y':1, 'z':0.5}])
True
### Checks for values that are not in the range of 0 to 1
>>> check_input([{'x':0.5, 'y':0.5, 'z':1.9},{'x':0.5, 'y':0.5, 'z':0.5}]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
Exception: The values of the fractional allocation of items are not between 0 and 1
>>> check_input([{'x':0.5, 'y':0.5, 'z':1},{'x':0.5, 'y':0.5, 'z':-0.1}]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
Exception: The values of the fractional allocation of items are not between 0 and 1
### Checks for items whose sum of parts is greater than 1
>>> check_input([{'x':0.7, 'y':0.5, 'z':0.5},{'x':0.9, 'y':0.5, 'z':0.5}]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
Exception: There is an item whose sum of parts is greater than 1
### Checks for items that has not been assigned to any agent
>>> check_input([{'x':0, 'y':0.5, 'z':0.5},{'x':0, 'y':0.5, 'z':0.5}]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
Exception: There is an item that has not been assigned to any agent
"""
sum_value_list = [0] * len(map_item_to_fraction[0]) # Help array
for i in range(len(map_item_to_fraction)):
for v, j in zip(map_item_to_fraction[i].values(), range(len(sum_value_list))):
sum_value_list[j] += v
if v > 1 or v < 0:
raise Exception("The values of the fractional allocation of items are not between 0 and 1")
for k in range(len(sum_value_list)):
if sum_value_list[k] > 1:
raise Exception("There is an item whose sum of parts is greater than 1")
if sum_value_list[k] == 0:
raise Exception("There is an item that has not been assigned to any agent")
return True | 662ac812c5d8a276c3e87054f1456a7e72c1057d | 44,852 |
def refresh_vm(context, vm):
""" Refresh the given virtual machine """
vapp = vm.getVirtualAppliance()
return vapp.getVirtualMachine(vm.getId()) | b473595fcd6c8ce4d94447be22032a8fc5d068af | 44,853 |
import argparse
def create_parser():
"""Create parser object"""
parser = argparse.ArgumentParser(
description=(
'Create an asciidoc file based on '
'https://github.com/metno/py-mmd-tools/tree/master/py_mmd_tools/mmd_elements.yaml.'
)
)
parser.add_argument('-o', '--output_file', help='Output file.')
return parser | e3cea7dbaa504b52f33f0370ad2417c15dbefca2 | 44,854 |
def check_host(host):
"""
Returns SMTP host name and port
"""
if 'gmail' in host:
return 'smtp.gmail.com', 587
elif 'yahoo' in host:
return 'smtp.mail.yahoo.com', 465
elif 'hotmail' in host or 'outlook' in host:
return 'smtp.live.com', 25 | e828039a23d788b534a9df42d97dfd4aa782ec01 | 44,855 |
def in_memory_connection_info():
""" get mock db connection string """
return ':memory:' | e52687fb29118df4c4f8b7c2bb072359fc782ca1 | 44,857 |
def do_intersect(bb1, bb2):
"""
Helper function that returns True if two bounding boxes overlap.
"""
if bb1[0] + bb1[2] < bb2[0] or bb2[0] + bb2[2] < bb1[0]:
return False
if bb1[1] + bb1[3] < bb2[1] or bb2[1] + bb2[3] < bb1[1]:
return False
return True | 12b2797254f0bdfebcc646019e261bdc21474602 | 44,858 |
def single_number_3(nums):
"""
Find single number in given array
:param nums: given array
:type nums: list[int]
:return: single number
:rtype: int
"""
def twos_comp(val, bits):
"""
Compute the 2's compliment of int value val
e.g. -4 ==> 11100 == -(10000) + 01100
:param val: value
:type val: int
:param bits: number of bits
:type bits: int
:return: 2's compliment of int value
:rtype: int
"""
return -(val & (1 << (bits - 1))) | (val & ((1 << (bits - 1)) - 1))
result = 0
# iterate through every bit
for i in range(32):
# find sum of set bits at ith position in all array elements
bit_i_sum = 0
for num in nums:
bit_i_sum += ((num >> i) & 1)
# the bits with sum not multiple of 3, are the bits of element with
# single occurrence.
result |= ((bit_i_sum % 3) << i)
# since the integer representation can be infinity in Python 3
# we need convert the value of result into 2's compliment
return twos_comp(result, 32) | 0564c235a00afd423da652d45b3e95cb5146b33c | 44,859 |
import torch
def class_reduce(num: torch.Tensor,
denom: torch.Tensor,
weights: torch.Tensor,
class_reduction: str = 'none') -> torch.Tensor:
"""
Function used to reduce classification metrics of the form `num / denom * weights`.
For example for calculating standard accuracy the num would be number of
true positives per class, denom would be the support per class, and weights
would be a tensor of 1s
Args:
num: numerator tensor
decom: denominator tensor
weights: weights for each class
class_reduction: reduction method for multiclass problems
- ``'micro'``: calculate metrics globally (default)
- ``'macro'``: calculate metrics for each label, and find their unweighted mean.
- ``'weighted'``: calculate metrics for each label, and find their weighted mean.
- ``'none'``: returns calculated metric per class
"""
valid_reduction = ('micro', 'macro', 'weighted', 'none')
if class_reduction == 'micro':
return torch.sum(num) / torch.sum(denom)
# For the rest we need to take care of instances where the denom can be 0
# for some classes which will produce nans for that class
fraction = num / denom
fraction[fraction != fraction] = 0
if class_reduction == 'macro':
return torch.mean(fraction)
elif class_reduction == 'weighted':
return torch.sum(fraction * (weights / torch.sum(weights)))
elif class_reduction == 'none':
return fraction
raise ValueError(f'Reduction parameter {class_reduction} unknown.'
f' Choose between one of these: {valid_reduction}') | d790cee3cf30f20a0eaeedaa15a6099dbe4b2508 | 44,860 |
def get_unique_patterns(query_result):
"""
Sorts all node names,
then returns a set of tuples with only unique node names.
:param query_result: Neo4j query outcome (list of dictionaries)
:return:
"""
all_motifs = [[y['name'] for y in x['p'] if type(y) == dict] for x in query_result]
if 'q' in query_result[0]:
for i in range(len(all_motifs)):
all_motifs[i].extend([y['name'] for y in query_result[i]['q'] if type(y) == dict])
all_motifs[i].extend([y['name'] for y in query_result[i]['r'] if type(y) == dict])
for y in all_motifs:
y.sort()
# the duplicated first node is variable,
# need to use set() to filter this
# otherwise each pattern with
# a different duplicate node is recognized as unique
all_motifs = [set(x) for x in all_motifs]
all_motifs = set(map(tuple, all_motifs))
return all_motifs | 1e30259e9aaedf8d2668955231cd2ceffa1d3caf | 44,861 |
import sys
import http.client as hc
def _apply_cn_keys_patch():
    """
    Monkey-patch ``http.client.parse_headers`` to work around a Python 3.5
    bug where multi-byte characters in headers cause some headers to be
    truncated.
    https://github.com/aliyun/aliyun-log-python-sdk/issues/79
    """
    # Only Python 3.5's parse_headers is affected; leave other versions alone.
    if sys.version_info[:2] == (3, 5):
        old_parse = hc.parse_headers
        def parse_header(*args, **kwargs):
            # parse_headers(fp, ...) reads raw header lines from fp.readline.
            fp = args[0]
            old_readline = fp.readline
            def new_readline(*args, **kwargs):
                ret = old_readline(*args, **kwargs)
                # Blank out the value of the multi-byte x-log-query-info
                # header so the buggy parser cannot truncate what follows.
                if ret.lower().startswith(b'x-log-query-info'):
                    return b'x-log-query-info: \r\n'
                return ret
            fp.readline = new_readline
            ret = old_parse(*args, **kwargs)
            return ret
        # Install the wrapper globally for this process.
        hc.parse_headers = parse_header | e92763510030528b221f257e51e41cb06815d3d3 | 44,862 |
import time
from pathlib import Path
def _get_anonymous_file_name(ds, target_root, file_type, patient_dict):
"""
input: ds, target_root, patient_dict, file_type
output: final full_file_path
"""
# get metadata
try:
AccessionNumber = ds.AccessionNumber # Acc number
except:
AccessionNumber = 'UnknownAccNum'
try:
Modality = ds.Modality # modality
except:
Modality = 'UnknownModality'
try:
PatientID = ds.PatientID # patient id
except:
PatientID = 'UnknownID'
try:
SeriesNumber = ds.SeriesNumber # series number
except:
SeriesNumber = 'Ser'
try:
InstanceNumber = ds.InstanceNumber
except:
InstanceNumber = 'Ins'
# if new patient -> write patient ID
if PatientID not in patient_dict:
patient_dict['last_pt_num']+=1
patient_dict[PatientID] = {}
patient_dict[PatientID]['patient_num'] = patient_dict['last_pt_num']
patient_dict[PatientID]['unknown_file'] = 0
patient_dict[PatientID]['last_study_num'] = 0
if AccessionNumber not in patient_dict[PatientID]:
patient_dict[PatientID]['last_study_num']+=1
patient_dict[PatientID][AccessionNumber] = patient_dict[PatientID]['last_study_num']
# if new patient -> write patient ID
if PatientID not in patient_dict:
patient_dict['last_pt_num']+=1
patient_dict[PatientID] = {}
patient_dict[PatientID]['patient_num'] = patient_dict['last_pt_num']
patient_dict[PatientID]['unknown_file'] = 0
patient_dict[PatientID]['last_study_num'] = 0
if AccessionNumber not in patient_dict[PatientID]:
patient_dict[PatientID]['last_study_num']+=1
patient_dict[PatientID][AccessionNumber] = patient_dict[PatientID]['last_study_num']
# if any unknown components
is_unknown_file = AccessionNumber=='UnknownAccNum' or \
Modality == 'UnknownModality' or \
PatientID == 'UnknownID' or \
SeriesNumber == 'Ser' or \
InstanceNumber == 'Ins'
# patient folder and study folder
patient_folder_name = f"Patient_{patient_dict[PatientID]['patient_num']}"
study_folder_name = f"{patient_dict[PatientID][AccessionNumber]}_{Modality}"
# file name. if any unknown components -> use unknown file count
if is_unknown_file:
patient_dict[PatientID]['unknown_file']+=1
file_name = f"img_{patient_dict[PatientID]['unknown_file']}.{file_type}"
else:
file_name = f"{SeriesNumber}_{InstanceNumber}.{file_type}"
# date
today_str = time.strftime('%Y%m%d')
full_file_path = target_root / Path(today_str) / Path(patient_folder_name) / Path(study_folder_name) / Path(file_name)
return full_file_path | 4fa24f58574858be3e80a3964d193a99a33a32fa | 44,863 |
def make_readline_completer(engine):
"""Return a readline completer function for the specified engine."""
commands = engine.list_commands()
def completer(text, state):
matches = [s for s in commands if s.startswith(text)]
try:
return matches[state] + " "
except IndexError:
return None
return completer | 43a9e2483c9bb47975a3d4c1a5786b974300243c | 44,864 |
def normalize_execute_kwargs(kwargs):
"""Replace alias names in keyword arguments for graphql()"""
if "root" in kwargs and "root_value" not in kwargs:
kwargs["root_value"] = kwargs.pop("root")
if "context" in kwargs and "context_value" not in kwargs:
kwargs["context_value"] = kwargs.pop("context")
if "variables" in kwargs and "variable_values" not in kwargs:
kwargs["variable_values"] = kwargs.pop("variables")
if "operation" in kwargs and "operation_name" not in kwargs:
kwargs["operation_name"] = kwargs.pop("operation")
return kwargs | ff10b2f52f3d19da9353a9bbca338e92dca75500 | 44,865 |
import importlib
def _vekt(self):
    """Column vectors (Spaltenvektoren) of this matrix, as Vektor objects."""
    vekt = []
    # Lazy import — presumably avoids a circular dependency between the
    # matrix and vektor modules at import time; verify against the package.
    Vektor = importlib.import_module('agla.lib.objekte.vektor').Vektor
    for j in range(self.shape[1]):
        # Column j is gathered entry by entry via self[i, j] indexing.
        vekt.append(Vektor([self[i, j] for i in range(self.shape[0])]))
    return vekt | ed2a377991facf9eacddb9edcebe14b00167fa27 | 44,866 |
def is_app_version_ok(version):
"""
Native apps should always send the app_version when they send a request to the API.
This function returns True if the version is compatible, false otherwise.
"""
if version=="1.0":
return True
return False | 54da1fcbfa3a6bda8a1fb3438f9360a1555cf292 | 44,867 |
def repr_opcode(opcode):
"""Returns a textual representation for the given opcode.
@type opcode: int
@rtype: str
"""
opmap = {1: "open", 2: "refresh", 3: "update", 4: "notify", 5: "status", 6: "delete"}
return opmap.get(opcode, "unknown (%d)" % opcode) | 31346162db7439ddc33192b63de74081ec04febc | 44,869 |
def to_char(num):
"""
Converts class index to char
:param num: class index
:return: corresponding char
"""
if num < 10:
return str(num)
elif num < 36:
return chr(num + 55)
else:
return chr(num + 61) | 7867c09f0faec11b4ba9dce0a97123affb2cdc26 | 44,871 |
def pip_to_rez_package_name(dist_name):
"""Convert a distribution name to a rez compatible name.
The rez package name can't be simply set to the dist name, because some
pip packages have hyphen in the name. In rez this is not a valid package
name (it would be interpreted as the start of the version).
Example: my-pkg-1.2 is 'my', version 'pkg-1.2'.
Args:
dist_name (str): Distribution name to convert.
Returns:
str: Rez-compatible package name.
"""
return dist_name.replace("-", "_") | 6a817d5dc11b072d3b44abb56c29e853c3722873 | 44,872 |
def check_parse_errors(options, args):
"""Do validations on command line options, returning error messages, if any."""
if not options.language:
return "language parameter not informed."
elif not args:
return "base path not informed."
else:
return None | cefa9608bc37b551d8ca3f93a196ff072f67b451 | 44,873 |
def strip_characters(text: str, *args: str) -> str:
"""Remove specified characters from given text."""
for char in args:
text = text.replace(char, "")
return text | a44788dcba864aa97b8165c6668008f692a2a45b | 44,874 |
import re
import html
def format_type(expr):
"""
Format type expression for popup
Set span 'type' for types and 'tyvar' for type variables
"""
if not expr:
return expr
tyname = re.search(r'([a-zA-Z]\w*)|(->|=>|::|\u2192|\u21d2|\u2237)', expr)
if tyname:
tyexpr = expr[tyname.start():tyname.end()]
expr_class = ''
if tyname.group(1):
expr_class = 'type' if tyexpr[0].isupper() else 'tyvar'
elif tyname.group(2):
expr_class = 'operator'
decorated = '<span class="{0}">{1}</span>'.format(expr_class, html.escape(tyexpr, quote=False))
return html.escape(expr[0:tyname.start()], quote=False) + decorated + format_type(expr[tyname.end():])
else:
return html.escape(expr, quote=False) | 967fbfffe751cccb51a0bb7384180d2813efaafa | 44,875 |
def create_fabric(cfmclient, data, fabric_type=None):
"""
Create :A Composable Fabric.
"""
path = 'fabrics'
params = {'type': fabric_type} if fabric_type else None
return cfmclient.post(path, params, data).json().get('result') | 55151c7027c80104a1f87f634eaca48a78bf477d | 44,876 |
def uescapejs(value):
"""
Hex encodes unicode characters for use in JavaScript unicode strings, with surrounding quotes.
We benefit from the fact that for the BSP, javascript and python have the same escape sequences.
"""
data = repr(value)
return data.lstrip("u") | 6a6265345a745d09c5ef99002709430c0894205a | 44,877 |
import os
def port():
"""Port of agent under test."""
return os.environ.get("AGENT_PORT", 3000) | 6223cdb31c1b4cdacc76727b29f9111dbd2ccc48 | 44,878 |
def wrong_task(param):
    """
    Deliberately divide by zero — always raises (a test helper for
    exercising failure paths). Never divide by 0, until you really
    want an exception.
    """
    # Intentional: raises ZeroDivisionError for numeric `param`.
    return param / 0 | e08c722bad7af431e15fdea6c3c1c5ded7b1dbb6 | 44,879 |
def null_guess(atomic, params={}):
"""Default guess type for testing, returns empty array"""
return [] | 2e2b9cec0759833f635b4f639f41c14e1c19a30e | 44,880 |
def reduce_func(nodes):
"""Collect messages and update node representations.
Parameters
----------
nodes : NodeBatch
A batch of nodes.
Returns
-------
dict mapping 'hv_new' to Float32 tensor of shape (V, K * T)
Updated node representations. V for the number of nodes, K for the number of
radial filters and T for the number of features to use
(types of atomic number in the paper).
"""
return {'hv_new': nodes.mailbox['m'].sum(1)} | 647f0bda0ea6a9b77972ce571f2bd868de0a032a | 44,881 |
import math
def error_ee_split(X, Wp, Wn, lam, memory=2, device=None):
    """Elastic embedding loss function deployed on GPU.
    It splits X, Wp, Wn into row blocks and sums the per-block loss values
    to limit peak memory use.
    :param X: sample-coordinates matrix
    :type X: torch.FloatTensor
    :param Wp: attractive weights.
    :type Wp: torch.FloatTensor
    :param Wn: repulsive weights.
    :type Wn: torch.FloatTensor
    :param lam: trade-off factor of elastic embedding function.
    :param memory: memory(GB) allocated to compute error.
    :type memory: int
    :param device: device chosen to operate.
        If None, X's own device is used.
    :type device: torch.device
    :returns: elastic embedding loss value.
    """
    device = X.device if device is None else device
    X = X.to(device)
    N = X.shape[0]
    # Row-block size so one (B x N) distance chunk plus its kernel fits in
    # `memory` GB: two N-column float64 buffers per row -> 2 * N * 8 bytes.
    B = math.floor((memory * 1024 ** 3) / (2 * N * 8))
    error = 0
    i1 = 0
    i2 = min(N, B)
    X2 = X ** 2
    x2 = X2.sum(dim=1, keepdim=True)
    while i1 < N:
        # Pairwise squared distances for rows [i1, i2): |a|^2 - 2*a.b + |b|^2.
        sqd = X2[i1: i2, :].sum(dim=1, keepdim=True) - \
            2 * X[i1: i2, :] @ X.t() + x2.t()
        ker = (-sqd).exp()
        # Attractive term (Wp-weighted distances) plus lam-scaled repulsive
        # term (Wn-weighted Gaussian kernel); weights are moved per block.
        error += Wp[i1: i2, :].to(device).view(-1).dot(sqd.view(-1)) + \
            lam * Wn[i1: i2, :].to(device).view(-1).dot(ker.view(-1))
        i1 = i1 + B
        i2 = min(N, i1 + B)
    return error | b59fc2abb510e17e87adce4cb4e7e8fcff9be7e7 | 44,883 |
import torch
def hsv_to_rgb(input_hsv_tensor):
    """
    Differentiable HSV to RGB conversion function.
    Hue is assumed to be normalized to [0, 1] for one full turn —
    TODO confirm against callers.
    :param input_hsv_tensor: Batch of HSV images [batch_size, 3, height, width]
    :return: Batch of RGB images [batch_size, 3, height, width]
    """
    assert len(input_hsv_tensor.shape) == 4 and input_hsv_tensor.shape[1] == 3
    # Channel 0 = hue, 1 = saturation, 2 = value.
    hues = input_hsv_tensor[:, 0, :, :]
    sats = input_hsv_tensor[:, 1, :, :]
    vals = input_hsv_tensor[:, 2, :, :]
    # Standard HSV intermediates: c = chroma, x = second-largest component,
    # m = offset added back to every channel at the end.
    c = sats * vals
    x = c * (1 - torch.abs((hues * 6.0) % 2.0 - 1.0))
    m = vals - c
    # For each channel the six hue sextants are handled in order; once a
    # pixel's sextant has been assigned, its filter_hues entry is bumped by
    # +10 so the later (cumulative) masks skip it.
    # Compute R
    r_hat = torch.zeros_like(hues)
    filter_hues = hues.clone()
    r_hat[filter_hues < 1.0 / 6.0] = c[filter_hues < 1.0 / 6.0]
    filter_hues[filter_hues < 1.0 / 6.0] += 10.0
    r_hat[filter_hues < 2.0 / 6.0] = x[filter_hues < 2.0 / 6.0]
    filter_hues[filter_hues < 2.0 / 6.0] += 10.0
    r_hat[filter_hues < 3.0 / 6.0] = 0
    filter_hues[filter_hues < 3.0 / 6.0] += 10.0
    r_hat[filter_hues < 4.0 / 6.0] = 0
    filter_hues[filter_hues < 4.0 / 6.0] += 10.0
    r_hat[filter_hues < 5.0 / 6.0] = x[filter_hues < 5.0 / 6.0]
    filter_hues[filter_hues < 5.0 / 6.0] += 10.0
    r_hat[filter_hues <= 6.0 / 6.0] = c[filter_hues <= 6.0 / 6.0]
    filter_hues[filter_hues <= 6.0 / 6.0] += 10.0
    # Compute G
    g_hat = torch.zeros_like(hues)
    filter_hues = hues.clone()
    g_hat[filter_hues < 1.0 / 6.0] = x[filter_hues < 1.0 / 6.0]
    filter_hues[filter_hues < 1.0 / 6.0] += 10.0
    g_hat[filter_hues < 2.0 / 6.0] = c[filter_hues < 2.0 / 6.0]
    filter_hues[filter_hues < 2.0 / 6.0] += 10.0
    g_hat[filter_hues < 3.0 / 6.0] = c[filter_hues < 3.0 / 6.0]
    filter_hues[filter_hues < 3.0 / 6.0] += 10.0
    g_hat[filter_hues < 4.0 / 6.0] = x[filter_hues < 4.0 / 6.0]
    filter_hues[filter_hues < 4.0 / 6.0] += 10.0
    g_hat[filter_hues < 5.0 / 6.0] = 0
    filter_hues[filter_hues < 5.0 / 6.0] += 10.0
    g_hat[filter_hues <= 6.0 / 6.0] = 0
    filter_hues[filter_hues <= 6.0 / 6.0] += 10.0
    # Compute B
    b_hat = torch.zeros_like(hues)
    filter_hues = hues.clone()
    b_hat[filter_hues < 1.0 / 6.0] = 0
    filter_hues[filter_hues < 1.0 / 6.0] += 10.0
    b_hat[filter_hues < 2.0 / 6.0] = 0
    filter_hues[filter_hues < 2.0 / 6.0] += 10.0
    b_hat[filter_hues < 3.0 / 6.0] = x[filter_hues < 3.0 / 6.0]
    filter_hues[filter_hues < 3.0 / 6.0] += 10.0
    b_hat[filter_hues < 4.0 / 6.0] = c[filter_hues < 4.0 / 6.0]
    filter_hues[filter_hues < 4.0 / 6.0] += 10.0
    b_hat[filter_hues < 5.0 / 6.0] = c[filter_hues < 5.0 / 6.0]
    filter_hues[filter_hues < 5.0 / 6.0] += 10.0
    b_hat[filter_hues <= 6.0 / 6.0] = x[filter_hues <= 6.0 / 6.0]
    filter_hues[filter_hues <= 6.0 / 6.0] += 10.0
    # Add the value offset back and restore the channel dimension.
    r = (r_hat + m).view(input_hsv_tensor.shape[0], 1, input_hsv_tensor.shape[2],
                         input_hsv_tensor.shape[3])
    g = (g_hat + m).view(input_hsv_tensor.shape[0], 1, input_hsv_tensor.shape[2],
                         input_hsv_tensor.shape[3])
    b = (b_hat + m).view(input_hsv_tensor.shape[0], 1, input_hsv_tensor.shape[2],
                         input_hsv_tensor.shape[3])
    rgb = torch.cat([r, g, b], dim=1)
    return rgb | 68a802433cf42d4f2d67931db7fe4a82ddb270a8 | 44,885 |
def get_profile_info(org_vm, inst):
"""
Get the org, name, and version from the profile instance and
return them as a tuple.
"""
org = org_vm.tovalues(inst['RegisteredOrganization'])
name = inst['RegisteredName']
vers = inst['RegisteredVersion']
return org, name, vers | 1206c4aec45b33bf6f865c80a0e354125d42c554 | 44,886 |
import pandas
import numpy
def readData(fname):
"""
read in a csv file that is of the right form - rotate it (so each column is a signal)
:param fname:
:return:
"""
pd = pandas.read_csv(fname)
return [numpy.array(pd[colname]) for colname in pd.columns[1:]] | 50622908038c2a01f5f5c7c68ec5d47e34f03a66 | 44,888 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.