content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import hmac
import hashlib
def _hmac_sha256(key, msg):
    """Return the HMAC-SHA256 digest of *msg* keyed with *key*.

    :param key: secret key bytes for the HMAC
    :param msg: message bytes to authenticate
    :returns: 32-byte digest as bytes
    """
    mac = hmac.new(key, msg, digestmod=hashlib.sha256)
    return mac.digest()
from typing import Dict
from typing import List
def convert_spin_to_list(spins: Dict) -> List:
    """Flatten the ssoCard spin dictionary into a list of spin entries.

    Each entry dict gains an ``id_`` key holding its original index key.
    Note: the entry dicts are mutated in place.

    Parameters
    ----------
    spins : dict
        The dictionary located at parameters.physical.spin in the ssoCard.

    Returns
    -------
    list
        One dictionary per entry in parameters.physical.spin, with the
        index layer removed.
    """
    entries = []
    for index, entry in spins.items():
        entry["id_"] = index
        entries.append(entry)
    return entries
import os
def get_file_class(path: str) -> str:
    """
    Return the type of the file as a human readable string:
    "Link", "File", "Directory" or "Unknown".
    """
    checks = (
        (os.path.islink, "Link"),
        (os.path.isfile, "File"),
        (os.path.isdir, "Directory"),
    )
    for predicate, label in checks:
        if predicate(path):
            return label
    return "Unknown"
import re
def get_protein_sequence(entry):
    """Extract the amino-acid sequence from an entry.

    This method assumes that the sequence section (introduced by ``SQ``)
    is ALWAYS at the end of the entry; whitespace inside the sequence
    lines is removed.
    """
    parts = re.split('SQ [ ]+', entry)
    sequence_lines = parts[1].split('\n')[1:]
    return ''.join(sequence_lines).replace(' ', '')
from typing import Dict
from typing import Any
def shim_store_v0(v0: Dict[str, Any], master_address: str) -> Dict[str, Any]:
    """
    Wrap a v0 store dict in the v1 schema.

    The v1 schema is just a bit more nesting: the v0 payload goes under
    ``masters[master_address]`` so one store can describe multiple masters.
    """
    return {
        "version": 1,
        "masters": {master_address: v0},
    }
def _get_scripts_resource(pe):
    """Return the PYTHONSCRIPT resource entry of *pe*, or None if absent."""
    for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries:
        if entry.name and entry.name.string == b"PYTHONSCRIPT":
            # First sub-entry of the first directory level holds the script.
            return entry.directory.entries[0].directory.entries[0]
    return None
import logging
def viosupgrade_query(module):
    """
    Query to get the status of the upgrade for each target.

    runs: viosupgrade -q { [-n hostname | -f filename] }
    The caller must ensure either the filename or the target list is set.

    args:
        module      the Ansible module
    module.param used:
        target_file_name    (optional) filename with targets info
        targets             (required if not target_file_name)
    return:
        ret     the number of errors
    """
    ret = 0
    if module.param['target_file_name']:
        cmd = '/usr/sbin/viosupgrade -q -f {}'\
              .format(module.param['target_file_name'])
        (rc, stdout, stderr) = module.run_command(cmd)
        logging.info("[STDOUT] {}".format(stdout))
        if rc == 0:
            logging.info("[STDERR] {}".format(stderr))
        else:
            # BUG FIX: the format string has two placeholders but the
            # original passed only stderr, raising IndexError on every
            # failure.  Log the command and its stderr.
            logging.error("command {} failed: {}".format(cmd, stderr))
            ret = 1
    else:
        for target in module.param['targets']:
            cmd = '/usr/sbin/viosupgrade -q -n {}'.format(target)
            (rc, stdout, stderr) = module.run_command(cmd)
            logging.info("[STDOUT] {}".format(stdout))
            if rc == 0:
                logging.info("[STDERR] {}".format(stderr))
            else:
                # Same placeholder fix as above.
                logging.error("command {} failed: {}".format(cmd, stderr))
                ret += 1
    return ret
def messages_get(body, connection):
    """Fetch every stored message, newest first, as a JSON-ready dict.

    Each row is re-keyed to its API field names, the read flag becomes a
    bool and the timestamp is rendered as ISO 8601.  The response maps the
    row position (as a string) to the message dict, plus ``error: 200``.
    """
    del body  # unused; kept for the stored-join calling convention
    FIELD_MAP = {"id": "messageId", "name": "name", "read_flag": "read",
                 "errorlevel": "errorLevel", "time_raised": "timestamp",
                 "message": "message"}
    cursor = connection.cursor()
    # Sorting in SQL is faster than doing it in python.
    cursor.execute("SELECT * FROM messages ORDER BY time_raised desc;")
    response = {}
    for position, row in enumerate(cursor.fetchall()):
        entry = {api_name: row[column] for column, api_name in FIELD_MAP.items()}
        # The DB stores the flag as a single byte; b'\x00' means unread.
        entry["read"] = entry["read"] != b'\x00'
        # ISO 8601 keeps timestamps unambiguous for API consumers.
        entry["timestamp"] = entry["timestamp"].strftime("%Y-%m-%dT%H:%M:%SZ")
        response[str(position)] = entry
    response["error"] = 200
    return response
import os
def get_filename_root(directoryPath):
    """ Guess the prefix of texture filenames in *directoryPath*.

    Finds a file whose name contains "albedo" (case-insensitive) and
    returns everything before that token, e.g. ``wood_albedo.png`` ->
    ``wood_``.  Returns None (after printing a message) when no albedo
    file exists.
    """
    # BUG FIX: next() without a default raised StopIteration when no file
    # matched, so the None check below was unreachable.
    albedo_path = next(
        (f for f in os.listdir(directoryPath) if "albedo" in f.lower()), None)
    if albedo_path is None:
        print('Could not find an albedo file in directory %s. Aborting.' % directoryPath)
        return None
    # ...then chop that full filename, to guess the prefix
    albedo_idx = albedo_path.lower().find("albedo")
    return albedo_path[:albedo_idx]
from typing import List
from typing import Any
from typing import Tuple
def pad(x: List[List[Any]],
        pad: int) -> Tuple[List[List[int]], List[List[bool]]]:
    """Right-pad the rows of a 2d list to a common length, with a mask.

    Args:
        x (List[List[Any]]): 2d list whose rows may have different lengths.
        pad (Any): value used to fill short rows.

    Returns:
        Tuple[List[List[int]], List[List[bool]]]: the padded 2d list and a
        boolean mask that is True exactly at positions holding original
        (non-padding) values.
    """
    width = max(len(row) for row in x)
    padded, mask = [], []
    for row in x:
        fill = width - len(row)
        padded.append(row + [pad] * fill)
        mask.append([True] * len(row) + [False] * fill)
    return padded, mask
def get_last_slackblocks_message_text(messages) -> str:
    """
    Return the text content of the most recent message.

    Assumes the last message was constructed using a single SlackBlocks
    SectionBlock with at least one attachment.

    :raises KeyError: likely because the last message was not a SlackBlocks message
    :raises IndexError: if there are no messages in `messages`
    """
    last_message = messages[-1]
    first_block = last_message["attachments"][0]["blocks"][0]
    return first_block["text"]["text"]
import uuid
def gen_uid():
    """
    Generate a CM UID: the first 16 hex characters of a random UUID4.
    """
    uid = uuid.uuid4().hex[:16]
    return {'return': 0, 'uid': uid}
from typing import Dict
def config_get_policy(config: Dict[str, str]):
    """Return the effective SELinux policy.

    Checks if SELinux is enabled (enforcing or permissive) and if so
    returns the SELINUXTYPE value; otherwise `None` is returned.
    """
    mode = config.get('SELINUX', 'disabled')
    if mode in ('enforcing', 'permissive'):
        return config.get('SELINUXTYPE', None)
    return None
def areDisplayOutputsCloned(outputs):
    """Return True when every display output points at the same position.

    Consecutive outputs are compared on their 'x'/'y' coordinates; lists
    of zero or one output count as cloned.

    :outputs: array
    :returns: boolean
    """
    for previous, current in zip(outputs, outputs[1:]):
        if current['x'] != previous['x'] or current['y'] != previous['y']:
            return False
    return True
def _gr_call_ ( graph , x , spline = None , opts = '' ) :
    """ Use graph as function
    >>> graph = ...
    >>> y = graph ( 0.2 )
    """
    # Delegates to graph.Eval; `spline` and `opts` are forwarded unchanged.
    # NOTE(review): presumably `graph` is a ROOT TGraph and `spline` a
    # TSpline used for smooth interpolation -- confirm at the call sites.
    ## if not spline : spline = ROOT.MakeNullPointer(ROOT.TSpline)
    return graph.Eval ( float( x ) , spline , opts )
def ebitda(gross_profit, sg_a):
    """
    Computes EBITDA (earnings before interest, tax, depreciation and
    amortization) as gross profit minus SG&A.

    Parameters
    ----------
    gross_profit : int or float
        Gross profit for the period
    sg_a : int or float
        Selling, general and administrative cost

    Returns
    -------
    int or float
        EBITDA
    """
    earnings = gross_profit - sg_a
    return earnings
def verctor_add(a, b):
    """
    Element-wise sum of two vectors.

    a: list
    b: list
    return: list whose i-th element is a[i] + b[i]
    """
    return [value + b[i] for i, value in enumerate(a)]
def king(r, rc, rt, sigma_0, alpha=2):
    """
    King surface-density profile.

    See http://iopscience.iop.org/1538-3881/139/6/2097/fulltext/

    Parameters
    ----------
    r: float
        radius
    rc: float
        core radius
    rt: float
        truncation radius
    sigma_0: float
        central density
    alpha: float, optional
        profile exponent (default 2)
    """
    def profile(x):
        return 1 / (1 + (x / rc) ** 2) ** (1. / alpha)

    normalisation = (1 - profile(rt)) ** -alpha
    truncated = (profile(r) - profile(rt)) ** alpha
    return sigma_0 * normalisation * truncated
from pathlib import Path
import glob
import re
def increment_path(path, exist_ok=False):
    """ Automatically increment path, i.e. runs/exp --> runs/exp2, runs/exp3 etc.

    Args:
        path (str or pathlib.Path): f"{model_dir}/{args.name}".
        exist_ok (bool): reuse the existing path as-is when True.

    Returns:
        str: `path` unchanged when it does not exist (or exist_ok is True),
        otherwise `path` with the next free numeric suffix appended.
    """
    path = Path(path)
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    dirs = glob.glob(f"{path}*")
    # BUG FIX: re.escape() -- the stem may contain regex metacharacters
    # (e.g. "v1.0"); the original interpolated it raw via `%`, corrupting
    # the pattern.
    matches = [re.search(rf"{re.escape(path.stem)}(\d+)", d) for d in dirs]
    indices = [int(m.groups()[0]) for m in matches if m]
    n = max(indices) + 1 if indices else 2
    return f"{path}{n}"
def calculate_production_time(form_data, blueprint):
    """ Returns the production time for the given blueprint. """
    """
    The following data is taken into account while calculation:
    1. Players industry skill level
    2. Players hardwirings
    3. Installation slot production time modifier
    4. Blueprint Production efficiency
    """
    # implant modifiers. (type_id, modifier)
    IMPLANT_MODIFIER = {
        0: 0.00,  # no hardwiring
        27170: 0.01,  # Zainou 'Beancounter' Industry BX-801
        27167: 0.02,  # Zainou 'Beancounter' Industry BX-802
        27171: 0.04  # Zainou 'Beancounter' Industry BX-804
    }
    # calculate production time modifier: each industry skill level shaves
    # 4% off; the implant and slot modifiers stack multiplicatively.
    # NOTE(review): assumes form_data supplies 'hardwiring',
    # 'skill_industry', 'slot_production_time_modifier' and
    # 'blueprint_production_efficiency' with numeric-compatible values --
    # confirm against the form definition.
    implant_modifier = IMPLANT_MODIFIER[int(form_data['hardwiring'])]
    slot_productivity_modifier = form_data['slot_production_time_modifier']
    production_time_modifier = (1 - (0.04 * float(form_data['skill_industry']))) * (1 - implant_modifier) * slot_productivity_modifier
    base_production_time = blueprint.production_time
    production_time = base_production_time * production_time_modifier
    blueprint_pe = form_data['blueprint_production_efficiency']
    # Positive PE reduces time asymptotically (PE / (1 + PE)); negative PE
    # applies the unresearched-blueprint penalty branch.
    if blueprint_pe >= 0:
        production_time *= (1 - (float(blueprint.productivity_modifier) / base_production_time) * (blueprint_pe / (1.00 + blueprint_pe)))
    else:
        production_time *= (1 - (float(blueprint.productivity_modifier) / base_production_time) * (blueprint_pe - 1))
    return production_time
def mk_rule_key(rule):
    """ Convert a rule tuple to a hyphen-separated string.
    """
    parts = (rule.source_institution,
             rule.destination_institution,
             rule.subject_area,
             rule.group_number)
    return '{}-{}-{}-{}'.format(*parts)
import json
def getSuffixes():
    """ get the suffixes from tibetan-spellchecker

    Loads, prints and returns the parsed contents of
    ``tibetan-spellchecker/syllables/suffixes.json``.
    """
    # `with` guarantees the file handle is closed; the original leaked it.
    with open('tibetan-spellchecker/syllables/suffixes.json') as fp:
        suffixesData = json.load(fp)
    print(suffixesData)
    return suffixesData
import re
def __replace_all_envs_in_str(content, env):
    """
    Substitute ${NAME} placeholders in *content* with values from *env*.

    Docker does not allow to replace volume names or service names, so we
    do it by hand; placeholders without a matching env key are left as-is.
    """
    for placeholder in re.findall(r'\$\{[^\}]*?\}', content):
        key = placeholder.replace("${", "").replace("}", "")
        if key in env.keys():
            content = content.replace(placeholder, env[key])
    return content
def is_mt_str(my_str) -> bool:
    """Check to see if the given input is an empty string.

    Returns True only when `my_str` is a `str` instance equal to "".

    :param my_str: String to check if it is empty
    :type my_str: str
    :return: Boolean indicating if the input is an empty string
    :rtype: bool
    """
    return isinstance(my_str, str) and my_str == ""
import pickle
def get_original_config(dirname: str) -> dict:
    """
    Get original model config used with hydra

    Args:
        dirname (str): model directory

    Returns:
        dict: config dict
    """
    # `with` closes the handle; the original left the pickle file open.
    with open(f"{dirname}/tb/config.pkl", "rb") as fp:
        return pickle.load(fp)
def to_bytestring(data):
    """
    Convert data to a (utf-8 encoded) byte-string if it isn't a byte-string
    already.
    """
    # Python-3-only equivalent of the former `six` shim:
    # six.binary_type is bytes and six.text_type is str.
    if isinstance(data, bytes):
        return data
    return str(data).encode('utf-8')
import numpy
def get_savings(quantities, net_costs, target_ppu):
    """
    For a given target price-per-unit calculate how much would be saved by
    each practice if they had achieved that price-per-unit.

    Negative savings (practices already performing better than the target)
    are clipped to zero.
    """
    savings = net_costs - quantities * target_ppu
    numpy.clip(savings, a_min=0, a_max=None, out=savings)
    return savings
def get_verbose_field_name(instance, field_name):
    """
    Returns the title-cased verbose_name for a field.

    Usage:
        {% get_verbose_field_name test_instance "name" %}

    Thanks to pankaj28843,
    https://stackoverflow.com/questions/14496978/fields-verbose-name-in-templates#14498938
    """
    field = instance._meta.get_field(field_name)
    return field.verbose_name.title()
from typing import Any
from typing import Union
def remove_duplicates_in_list_of_dict(o: dict[Any, Union[Any, list]]) -> dict:
    """
    Remove duplicates in values of `o` of type `list`, keeping the first
    occurrence order; non-list values are copied through untouched.
    """
    deduped = {}
    for key, value in o.items():
        if isinstance(value, list):
            value = list(dict.fromkeys(value))
        deduped[key] = value
    return deduped
import argparse
def cliparse():
    """sets up and parses the cli arguments"""
    parser = argparse.ArgumentParser(
        description="Setup stalker database in "
                    "rethinkdb for stalkerd")
    parser.add_argument("--host", default="localhost",
                        help="hostname to connect to rethinkdb on")
    parser.add_argument("--port", type=int, default=28015,
                        help="port to connect to rethinkdb on")
    parser.add_argument("--db", default="stalker",
                        help="name of stalker database")
    parser.add_argument("--drop", action="store_true",
                        help="drop existing tables")
    parser.add_argument("--auth-key", action="store_true",
                        help="Prompt for auth-key")
    return parser.parse_args()
def point_to_node(gfa_, node_id):
    """Check if the given node_id point to a node in the gfa graph.
    """
    node = gfa_.nodes(identifier=node_id)
    return node != None
import copy
def filter_dict(target_dict, keys_to_filter):
    """Filters key(s) from top level of a dict

    Args:
        target_dict (dict): the dictionary to filter
        keys_to_filter (list, tuple, str): set of keys to filter

    Returns:
        A filtered deep copy of target_dict; missing keys are ignored.
    """
    assert isinstance(target_dict, dict)
    filtered = copy.deepcopy(target_dict)
    # Normalise the key spec: accept a single string, a tuple, a list or None.
    if isinstance(keys_to_filter, str):
        keys = [keys_to_filter]
    elif isinstance(keys_to_filter, tuple):
        keys = list(keys_to_filter)
    elif keys_to_filter is None:
        keys = []
    else:
        keys = keys_to_filter
    for key in keys:
        filtered.pop(key, None)
    return filtered
def greater_than_or_equal_to(x, y) -> bool:
    """Return True when actual value `x` meets or exceeds requirement `y`."""
    meets_requirement = x >= y
    return meets_requirement
def create_pinhole_camera(height, width):
    """
    Creates a pinhole camera according to height and width, assuming the
    principal point is in the center of the image.

    Returns (f, cx, cy) with the focal length set to max(cx, cy).
    """
    principal_x = (width - 1) / 2
    principal_y = (height - 1) / 2
    focal = max(principal_x, principal_y)
    return focal, principal_x, principal_y
from typing import List
import pathlib
def get_folder_paths(data_dir_path: str, species: List[str]) -> List[pathlib.Path]:
    """ Get folder paths that match the species

    When "all" is requested, every folder is returned; otherwise folders
    whose names start with any requested species prefix are collected.
    """
    data_dir = pathlib.Path(data_dir_path)
    if "all" in species:
        return sorted(data_dir.glob(f"*/"))
    matched = []
    for prefix in species:
        matched.extend(data_dir.glob(f"{prefix}*/"))
    return sorted(matched)
def table(fn):
    """Generate a 12-bit lookup table.

    Renders fn(n) for every n in [0, 4096) as a hex constant, eight
    entries per tab-indented line.
    """
    entries = []
    for n in range(0, 2**12):
        if n == 0:
            pre = "\t"
        elif n % 8 == 0:
            pre = "\n\t"
        else:
            pre = " "
        entries.append("{}0x{:04X}U,".format(pre, fn(n)))
    return "".join(entries)
from typing import List
def _replace_pw_references(alias_cmd: str, pw_args: List[str]) -> str:
    """Replace all occurrences of pw@ with the path to the pw script (argv[0]) plus all pw options"""
    pw_invocation = " ".join(pw_args) + " "
    return alias_cmd.replace("pw@", pw_invocation)
def single_state_time_spent(state_dict, state):
    """
    Given a ticket's state dictionary, returns how much time it spent
    in the given `state`.

    Assumes state_dict has the key `state` present.

    NOTE: used for Measurement 2 (Average Time Spent in Scrum Team
    Backlog): a rolling average over open/recently-closed tickets of the
    time spent in "Awaiting Prioritization", tracked over 14/30/60-day
    windows until a preferred interval is chosen.
    """
    return state_dict[state]
from typing import Union
def clamp(value: Union[float, int], lower: Union[float, int], upper: Union[float, int]) -> Union[float, int]:
    """
    Clamp a number into the closed interval [lower, upper].

    Same as min(max(value, lower), upper).

    Args:
        value (Union[float, int]): Value to clamp
        lower (Union[float, int]): min value
        upper (Union[float, int]): max value

    Returns:
        Union[float, int]: clamped value
    """
    if value < lower:
        return lower
    if value > upper:
        return upper
    return value
def _scale8_video_LEAVING_R1_DIRTY(i, scale):
    """Internal Use Only

    8-bit "video" scaling: (i * scale) >> 8, bumped by one whenever scale
    is non-zero so a non-zero input never scales all the way to zero.
    """
    if i == 0:
        return 0
    bump = 1 if scale != 0 else 0
    return ((i * scale) >> 8) + bump
import pathlib
def read_file(file_name='README.md', encoding='utf-8'):
    """
    Read a local text file and return its contents.

    :param file_name: file path (default "README.md")
    :param encoding: file encoding, utf-8 by default
    :return: file contents as str
    """
    # read_text opens and closes the handle; the original leaked an open file.
    return pathlib.Path(file_name).read_text(encoding=encoding)
def create(name, playbook, cluster_id, server_ids, hints, client):
    """Create new playbook configuration.

    Cluster and server ids are stringified before being handed to the
    client API.
    """
    cluster = str(cluster_id)
    servers = [str(server_id) for server_id in server_ids]
    return client.create_playbook_configuration(
        name, cluster, playbook, servers, hints
    )
import json
def file_open_json(file):
    """Reads json file from the location input

    :param file: path of the json file
    :type file: String
    :return: contents of json
    :rtype: dictionary(dict)
    :raise Exception: Throws exception if unable to load file
    """
    try:
        with open(file) as f:
            return json.load(f)
    except Exception as e:
        print(e)
        # Chain the original error so the root cause is not lost.
        raise Exception("Could not open the json file") from e
def decorate_class(patcher):
    """Generic way to decorate all methods in a class. Accepts the decorator
    function as its argument and returns a class decorator that applies it
    to every callable attribute. Note that I am using the callable function,
    which wasn't included in python 3.0 and 3.1."""
    def decorate(cls):
        # Snapshot the items so setattr cannot disturb the iteration.
        for attr_name, attr_value in list(cls.__dict__.items()):
            if callable(attr_value):
                setattr(cls, attr_name, patcher(attr_value))
        return cls
    return decorate
def read_block(fi):
    """Read and returns one block of non-empty lines from an input stream
    (stripped lines are returned in a list)."""
    block = []
    for raw in fi:
        stripped = raw.strip()
        # A blank line ends the block -- but only once it has content.
        if not stripped and block:
            break
        block.append(stripped)
    return block
def __compare_table_headers(table_1, table_2):
    """
    Step 2_2: Comparing table header (first row) values on each table

    Returns True only when every header cell of table_1 matches the
    corresponding cell of table_2.
    """
    col_num = len(table_1.rows[0].cells)
    # BUG FIX: the original iterated range(0, col_num - 1), silently
    # skipping the last column (and comparing nothing for 1-column tables).
    for i in range(col_num):
        if table_1.rows[0].cells[i] != table_2.rows[0].cells[i]:
            return False
    return True
def details_route(method, uri, is_list=False):
    """A decorator for Endpoint classes to define a custom URI.

    Extends the endpoint's details route. For example, suppose the following
    method was defined on an endpoint with the base uri as `foo`:
    ::
        @details_route('GET', 'bar')
        def bar_callback(self, pk):
            return response
    This will generate a new endpoint /foo/:pk/bar that routes to the
    `bar_callback` method.
    """
    def mark_as_route(func):
        # Tag the callback; the routing layer reads these attributes.
        func.is_route = True
        func.method = method
        func.uri = uri
        func.is_list = is_list
        return func
    return mark_as_route
from typing import List
def delete_duplicates(A: List[int]) -> List[int]:
    """
    Return the elements of A with duplicates removed, keeping the first
    occurrence order.

    Time Complexity: O(n) -- the original scanned `result` per element
    (`i not in result`), making it O(n^2); a seen-set gives O(1) lookups.
    :param A: List[int]
    :return: List[int]
    """
    seen = set()
    result = []
    for value in A:
        if value not in seen:
            seen.add(value)
            result.append(value)
    return result
import os
def readMarkers(map):
    """
    Read markers from local file

    Returns a list of [lat, lon, name, type] entries, or None when no
    marker file exists for `map`.  The first line of the file is treated
    as a header and skipped; parsing stops at the first blank line or EOF.
    """
    mapfile = 'plugins/mapmark/mapdata/markers/%s.dat' % map
    if not os.path.isfile(mapfile):
        return None
    markerlist = []
    # `with` closes the descriptor even on malformed lines; the original
    # leaked the handle when split()/float() raised mid-file.
    with open(mapfile, 'r') as fd:
        fd.readline()  # skip the header line
        while True:
            line = fd.readline().strip()
            if not line:
                break
            lat, lon, name, marker_type = line.split(';')
            markerlist.append([float(lat), float(lon), name, marker_type])
    return markerlist
import subprocess
def get_kernel_version():
    """ Return a str containing the kernel version.

    Parses the third whitespace-separated field of /proc/version,
    e.g. "Linux version 5.15.0-...".
    """
    # check_output with an argument list avoids spawning a shell (the
    # original used shell=True) and raises CalledProcessError on failure
    # instead of silently returning empty output.
    output = subprocess.check_output(["cat", "/proc/version"])
    return output.decode("utf-8").split()[2]
import torch
def is_supported_instance(module):
    """Internal auxiliary function

    True when `module` is one of the layer types this helper knows about.
    """
    SUPPORTED_TYPES = (
        torch.nn.Conv2d, torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU,
        torch.nn.LeakyReLU, torch.nn.ReLU6, torch.nn.Linear,
        torch.nn.MaxPool2d, torch.nn.AvgPool2d, torch.nn.BatchNorm2d,
    )
    return isinstance(module, SUPPORTED_TYPES)
import sympy
def handle_gcd_lcm(f, args):
    """
    Return the result of gcd() or lcm(), as UnevaluatedExpr

    f: str - name of function ("gcd" or "lcm")
    args: List[Expr] - list of function arguments
    """
    rationalised = tuple(sympy.nsimplify(arg) for arg in args)
    # gcd() and lcm() don't support evaluate=False, so wrap the result.
    func = getattr(sympy, f)
    return sympy.UnevaluatedExpr(func(rationalised))
def check_geojson_is_polygon(geojson):
    """Checking geojson is polygon

    Returns False when any feature carries a geometry whose type is not
    Polygon/MultiPolygon; features without a geometry are ignored.
    """
    allowed = ("Polygon", "MultiPolygon")
    return all(
        not feature['geometry'] or feature['geometry']['type'] in allowed
        for feature in geojson['features']
    )
def extract_from(bibtex_db):
    """Get indentifer(s) from bibtex data

    Returns a list of adsabs queries, preferring (in order) a 19-char
    bibcode ID, a DOI, an arXiv eprint, and finally a title/year query.
    Entries whose ID is present but not 19 chars long are skipped, as in
    the original flow.
    """
    result = []
    for entry in bibtex_db.entries:
        if "ID" in entry:
            if len(entry["ID"]) == 19:
                result.append(f'identifier:{entry["ID"]}')
        elif "doi" in entry:
            result.append(f'identifier:{entry["doi"]}')
        elif "eprint" in entry:
            result.append(f'identifier:{entry["eprint"]}')
        else:  # Fallback
            # BUG FIX: str.replace returns a new string; the original
            # discarded the result, so "}{" pairs were never stripped.
            t = entry["title"].replace("}{", "")
            result.append(f'title:"{t}" year:{entry["year"]}')
    return result
import os
def HQfilterReads(path, Q):
    """ passes the commands of NanoFilt for generating a fastq file only with better quality reads

    Requires the entry of the full path of the fastq reads file and the
    minimum quality to filter (Q).  Returns the path of the filtered
    output file (gzip-compressed).
    """
    Output_file = path.split(".")[0] + "_HQonly.fastq.gz"
    commands = "gunzip -c " + path + " | NanoFilt -q " + str(Q) + " | gzip > " + Output_file
    print("\n ...filtering reads with quality > Q", str(Q), " \n ")
    # BUG FIX: the original called os.system(commands) twice, running the
    # whole filtering pipeline a second time just to read its status.
    exist_status = os.system(commands)
    if exist_status != 0:
        print('Fail to run NanoFilt tool commands for HQ reads filtering \n please ensure that the tool is installed and run again the pipeline')
        # BUG FIX: exit with a failure code; exit(0) signalled success.
        exit(1)
    return Output_file
import argparse
import sys
def parse_arguments():
    """Parse arguments passed to script

    Builds the argparse parser (the usage text embeds the script name from
    sys.argv[0]) and returns the populated argparse.Namespace.  Either
    --user or --queue is expected by callers, though argparse itself does
    not enforce that here.
    """
    parser = argparse.ArgumentParser(description=
        "Check the set of jobs on the scheduler queue and \
        \npresent the data in a clean, readable format. \
        \n**Requires either '--user' or '--queue' be specified. \
        \nUsage: {0} [-u user -q queue] \n".format(sys.argv[0]),
        formatter_class = argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-u", "--user", type=str, required=False,
                        help="view specific user data", action="store")
    parser.add_argument("-q", "--queue", type=str, required=False,
                        help="view queue with substring 'QUEUE'", action="store")
    parser.add_argument("-s", "--state", type=str, required=False,
                        help="view jobs in specific state",
                        action="store")
    parser.add_argument("-n", "--name", type=str, required=False,
                        help="view jobs with substring 'NAME'",
                        action="store")
    # "brief" is a string flag ("t"/"f"), not a boolean switch.
    parser.add_argument("-b", "--brief", type=str, required=False,
                        default="f",
                        help="view summary of submitted jobs [t,f]. default: f",
                        action="store")
    return parser
def get_sent_id(corpus_id,
                doc_id,
                naf_sent_id,
                fill_width=8):
    """
    Build a fixed-width NLTK sentence id by concatenating the zero-padded
    corpus, document and sentence ids.

    :param corpus_id: corpus identifier
    :param doc_id: document identifier
    :param naf_sent_id: NAF sentence identifier
    :param fill_width: zero-pad width per component (default 8)
    :return: concatenated id string
    """
    return ''.join(str(part).zfill(fill_width)
                   for part in (corpus_id, doc_id, naf_sent_id))
def get_author_name_and_id(soup):
    """
    Get author name and id.

    Examples
    --------
    >>> a_tag = '<a class="topic__author-link" href="/author_id">author_name</a>'
    >>> soup = make_soup(a_tag)
    >>> get_author_name_and_id(soup)
    ('author_name', 'author_id')
    """
    link = soup.select('a.topic__author-link')[0]
    name = link.text.strip()
    author_id = link.get('href').strip('/')
    return name, author_id
def function_namespace(f):
    """
    Attempts to returns unique namespace for function
    """
    # `im_func`/`im_class` only exist on Python 2 bound methods; on
    # Python 3 the plain module.name branch is always taken.
    if hasattr(f, 'im_func'):
        return f'{f.__module__}.{f.im_class.__name__}.{f.__name__}'
    return f'{f.__module__}.{f.__name__}'
def levenshtein(word_1, word_2):
    """
    Compute the Levenshtein (edit) distance between two strings.

    arg :
        word_1, word_2 --> the strings to compare
    return :
        integer -- minimal number of insertions, deletions and
        substitutions turning word_1 into word_2
    """
    if len(word_1) == 0:
        return len(word_2)
    if len(word_2) == 0:
        return len(word_1)
    # Two-row dynamic programming: `previous` holds the distances for the
    # prefix of word_1 ending at the prior character.
    previous = list(range(len(word_2) + 1))
    current = [0] * (len(word_2) + 1)
    for i, ch_1 in enumerate(word_1):
        current[0] = i + 1
        for j, ch_2 in enumerate(word_2):
            substitution_cost = 0 if ch_1 == ch_2 else 1
            current[j + 1] = min(current[j] + 1,          # insertion
                                 previous[j + 1] + 1,     # deletion
                                 previous[j] + substitution_cost)
        previous[:] = current
    return current[len(word_2)]
def getBackend(x):
    """
    This function determines the backend of a given variable.

    Returns one of 'numpy', 'arrayfire', 'torch', 'scalar', 'list',
    'tuple', an Operator's own backend, None for None, or the raw type
    as a fallback.
    """
    class_repr = str(x.__class__)
    if 'numpy' in class_repr:
        return 'numpy'
    if 'arrayfire' in class_repr:
        return 'arrayfire'
    if 'torch' in class_repr:
        return 'torch'
    if class_repr in ("<class 'complex'>", "<class 'float'>", "<class 'int'>"):
        # This is a hack for now, but numpy will treat scalars as if they
        # were numpy types.
        return 'scalar'
    if 'Operator' in class_repr:
        return x.backend
    if type(x) is list:
        return 'list'
    if type(x) is tuple:
        return 'tuple'
    if x is None:
        return None
    return type(x)
def paginator(context, adjacent_pages=3):
    """
    To be used in conjunction with the object_list generic view.
    Adds pagination context variables for use in displaying first, adjacent and
    last page links in addition to those created by the object_list generic
    view.

    :param context: template context; must carry 'page_obj', 'paginator'
        and 'request' entries as produced by Django pagination.
    :param adjacent_pages: number of page links shown each side of the
        current page.
    """
    page = context['page_obj']
    paginator = context['paginator']
    # Window of page numbers centred on the current page.
    startPage = max(page.number - adjacent_pages, 1)
    # Collapse a near-start window to 1 so no "... 2 3" gap is shown.
    if startPage <= 3: startPage = 1
    endPage = page.number + adjacent_pages + 1
    if endPage >= paginator.num_pages - 1: endPage = paginator.num_pages + 1
    page_numbers = [n for n in range(startPage, endPage) \
                    if n > 0 and n <= paginator.num_pages]
    # NOTE(review): `dict` shadows the builtin; left unchanged here.
    dict = {
        'request': context['request'],
        'is_paginated': paginator.num_pages > 0,
        'page_obj': page,
        'paginator': paginator,
        'results': paginator.per_page,
        'page_numbers': page_numbers,
        'show_first': 1 not in page_numbers,
        'show_last': paginator.num_pages not in page_numbers,
        'first': 1,
        'last': paginator.num_pages,
        'has_next': page.has_next(),
        'has_previous': page.has_previous(),
    }
    # next/previous keys only exist when the corresponding page does.
    if page.has_next():
        dict['next'] = page.next_page_number()
    if page.has_previous():
        dict['previous'] = page.previous_page_number()
    return dict
import re
def _find_sgRNA(guide_sequence, target_sequence, strand):
    """
    Find start, stop position of a sgRNA in a target sequence.

    Parameters:
    -----------
    guide_sequence
    target_sequence
    strand : "+" or "-"

    Returns:
    --------
    start, stop, strand
    """
    # re.search returns None when the guide is absent, so a missing guide
    # raises AttributeError on .span().
    guide_span = re.search(guide_sequence, target_sequence).span()
    start, stop = guide_span[0], guide_span[1]
    if strand == "-":
        # NOTE(review): for the "-" strand this yields start > stop
        # (coordinates counted back from the 3' end) -- confirm that this
        # reversed convention is what callers expect.
        start, stop = (
            len(target_sequence) - guide_span[0],
            len(target_sequence) - guide_span[1],
        )
    return start, stop, strand
import struct
def getheader(filename, gtype):
    """Read header data from Gadget data file 'filename' with Gadget file
    type 'gtype'. Returns a dictionary with loaded values and filename."""
    DESC = '=I4sII'  # struct formatting string (16-byte type-2 block descriptor)
    HEAD = '=I6I6dddii6iiiddddii6ii60xI'  # struct formatting string (264-byte header block)
    keys = ('Npart', 'Massarr', 'Time', 'Redshift', 'FlagSfr', 'FlagFeedback', 'Nall', 'FlagCooling', 'NumFiles', 'BoxSize', 'Omega0', 'OmegaLambda', 'HubbleParam', 'FlagAge', 'FlagMetals', 'NallHW', 'flag_entr_ics', 'filename')
    f = open(filename, 'rb')
    if gtype == 2:
        # Type-2 files carry a 16-byte descriptor block before the header.
        f.seek(16)  # If you want to use the data: desc = struct.unpack(DESC,f.read(16))
    # [1:-1] drops the record-length words bracketing the header block.
    raw = struct.unpack(HEAD,f.read(264))[1:-1]
    # Regroup the flat tuple to match `keys`: 6-vectors for the per-type
    # counts/masses, scalars in between, filename appended last.
    values = (raw[:6], raw[6:12]) + raw[12:16] + (raw[16:22],) + raw[22:30] + (raw[30:36], raw[36], filename)
    header = dict(zip(keys, values))
    f.close()
    return header
def gradient_add(grad_1, grad_2, param, verbose=0):
    """
    Sum two gradients, treating None as "no gradient".

    :param grad_1: (TensorFlow Tensor) The first gradient
    :param grad_2: (TensorFlow Tensor) The second gradient
    :param param: (TensorFlow parameters) The trainable parameters
    :param verbose: (int) verbosity level
    :return: (TensorFlow Tensor) the sum of the gradients, or None when
        both inputs are None
    """
    if verbose > 1:
        print([grad_1, grad_2, param.name])
    if grad_1 is None:
        return grad_2
    if grad_2 is None:
        return grad_1
    return grad_1 + grad_2
def ROC_curve_compute(ROC, compute_area=False):
    """Compute POD/POFD pairs (and optionally the area) for a ROC object.

    Parameters
    ----------
    ROC : dict
        A ROC curve object created with ROC_curve_init.
    compute_area : bool
        If True, also compute the area under the ROC curve.

    Returns
    -------
    out : tuple
        (POFD_vals, POD_vals) for the probability thresholds stored in
        the ROC object; when compute_area is True, the area under the
        curve is appended as a third element.
    """
    n_thr = len(ROC["prob_thrs"])
    POD_vals = [
        1.0 * ROC["hits"][i] / (ROC["hits"][i] + ROC["misses"][i])
        for i in range(n_thr)
    ]
    POFD_vals = [
        1.0 * ROC["false_alarms"][i] / (ROC["corr_neg"][i] + ROC["false_alarms"][i])
        for i in range(n_thr)
    ]
    if not compute_area:
        return POFD_vals, POD_vals
    # Trapezoidal integration over the ROC polyline, including the
    # implicit (1, 1) and (0, 0) end points.
    area = (1.0 - POFD_vals[0]) * (1.0 + POD_vals[0]) / 2.0
    for i in range(n_thr - 1):
        area += (POFD_vals[i] - POFD_vals[i + 1]) * (POD_vals[i + 1] + POD_vals[i]) / 2.0
    area += POFD_vals[-1] * POD_vals[-1] / 2.0
    return POFD_vals, POD_vals, area
def _normalize_item(item, shape):
"""
>>> def print_item(item):
... print('[' + ', '.join([(f'{i.start if i.start is not None else ""}:{i.stop if i.stop is not None else ""}' if isinstance(i, slice) else str(i)) for i in item]) + ']')
>>> print_item((0, slice(None), slice(1), slice(5, 10)))
[0, :, :1, 5:10]
>>> print_item(_normalize_item((), (1, 2, 3)))
[:, :, :]
>>> print_item(_normalize_item((0,), (1, 2, 3)))
[0, :, :]
>>> print_item(_normalize_item(0, (1, 2, 3)))
[0, :, :]
>>> print_item(_normalize_item(slice(None), (1, 2, 3)))
[:, :, :]
>>> print_item(_normalize_item(slice(10), (1, 2, 3)))
[:10, :, :]
>>> print_item(_normalize_item(..., (1, 2, 3)))
[:, :, :]
>>> print_item(_normalize_item((0, ...), (1, 2, 3)))
[0, :, :]
>>> print_item(_normalize_item((..., 0), (1, 2, 3)))
[:, :, 0]
>>> print_item(_normalize_item((1, 2, 3, ...), (1, 2, 3)))
[1, 2, 3]
>>> print_item(_normalize_item((..., 1, 2, 3, ...), (1, 2, 3)))
Traceback (most recent call last):
...
IndexError: an index can only have a single ellipsis ('...')
>>> print_item(_normalize_item((0, 1, 2, 3, ...), (1, 2, 3)))
Traceback (most recent call last):
...
IndexError: too many indices for array: array is 3-dimensional, but 4 were indexed
"""
if isinstance(item, list):
raise NotImplementedError()
if not isinstance(item, tuple):
item = (item,)
if ... in item:
item = list(item)
idx = item.index(...)
item[idx:idx + 1] = [slice(None)] * (len(shape) - len(item) + 1)
else:
item = list(item) + [slice(None)] * (len(shape) - len(item))
if ... in item:
raise IndexError('an index can only have a single ellipsis (\'...\')')
if len(item) > len(shape):
raise IndexError(
f'too many indices for array: array is {len(shape)}-dimensional, '
f'but {len(item)} were indexed'
)
return item | ddbf34783268ddc4bca59cb3af397ee0a6c1e083 | 44,035 |
import os
import subprocess
import sys
def check_rDNA_copy_number(ref, output, logger):
    """Count 16S rRNA annotations in a reference genome.

    Runs barrnap on *ref*, writes the resulting GFF under
    output/barrnap_reference/, and returns how many 16S features were
    annotated — callers use this to verify the reference carries
    multiple rDNA copies.
    """
    out_dir = os.path.join(output, "barrnap_reference")
    os.makedirs(out_dir, exist_ok=True)
    barroutput = os.path.join(out_dir, os.path.basename(ref) + ".gff")
    cmd = "barrnap {ref} > {barroutput}".format(**locals())
    subprocess.run(cmd,
                   shell=sys.platform != "win32",
                   stdout=subprocess.PIPE,
                   stderr=subprocess.PIPE,
                   check=True)
    rrn_num = 0
    with open(barroutput, "r") as rrn:
        for rawline in rrn:
            fields = rawline.strip().split('\t')
            if fields[0].startswith("##"):
                continue  # GFF pragma/comment line
            if fields[8].startswith("Name=16S"):
                rrn_num += 1
    return rrn_num
def _remove_model_weights(model: dict, to_delete=None) -> dict:
"""
Removes certain weights of a given model. The weights to remove are given by the to_delete argument.
If there is also a bias term, that is deleted as well.
Args:
model: Loaded detectron2 model
to_delete (list): Names of the weights to delete from the model, by default:
['roi_heads.box_predictor.cls_score',
'roi_heads.box_predictor.bbox_pred']
"""
assert isinstance(model, dict)
assert 'model' in model
# print("Removing model weights with to_delete = None\n It is recommended to specify the to_delete weights directly, or use remove_model_weights_fsdet etc")
# to_delete default values written here in order for default args to be immutable.
if to_delete is None:
# Heads in the bbox predictor:
to_delete = ['roi_heads.box_predictor.cls_score',
'roi_heads.box_predictor.bbox_pred']
for param_name in to_delete:
del model['model'][param_name + '.weight']
if param_name + '.bias' in model['model']:
del model['model'][param_name + '.bias']
return model | 70f88910db2fae52893869fc7acda8161b5df61e | 44,037 |
def overlaps(df, idx):
    """
    Check if the note at the given index in the given dataframe overlaps
    any other note in the dataframe.

    Parameters
    ----------
    df : pd.DataFrame
        The DataFrame to check for overlaps.
    idx : int
        The index of the note within df that might overlap.

    Returns
    -------
    overlap : boolean
        True if the note overlaps some other note. False otherwise.
    """
    note = df.loc[idx]
    # Only notes with the same pitch and track can collide; drop the note itself.
    candidates = df.loc[
        (df["pitch"] == note.pitch)
        & (df["track"] == note.track)
        & (df.index != idx)
    ]
    starts_before_end = note.onset < candidates["onset"] + candidates["dur"]
    ends_after_start = note.onset + note.dur > candidates["onset"]
    return any(starts_before_end & ends_after_start)
from typing import List
from typing import Tuple
def remove_pads(y_true: List[str], y_pred: List[str]) -> Tuple[List[str], List[str]]:
    """
    Drop "<pad>" positions from parallel tag sequences.

    Returns copies of the gold and predicted tag lists with every
    position where y_true holds "<pad>" removed from both lists.
    """
    new_y_true = []
    new_y_pred = []
    for i, gold_tag in enumerate(y_true):
        if gold_tag == "<pad>":
            continue
        new_y_true.append(gold_tag)
        new_y_pred.append(y_pred[i])
    return new_y_true, new_y_pred
def chop(x, y, ymax=None, ymin=None, xmin=None, xmax=None):
    """Restrict paired arrays to a window.

    Keeps the (x, y) pairs with x in (xmin, xmax) and y in (ymin, ymax);
    bounds are exclusive and applied in the order xmax, xmin, ymax, ymin.
    x and y must support boolean-mask indexing (e.g. numpy arrays) and
    have equal length.

    Bug fix: bounds are now compared against None, so a limit of 0
    (previously falsy and silently ignored) is honoured.
    """
    if xmax is not None:
        y = y[x < xmax]
        x = x[x < xmax]
    if xmin is not None:
        y = y[x > xmin]
        x = x[x > xmin]
    if ymax is not None:
        x = x[y < ymax]
        y = y[y < ymax]
    if ymin is not None:
        x = x[y > ymin]
        y = y[y > ymin]
    return x, y
def splinter_headless():
    """Fixture value: run the Splinter-driven browser headless, which
    keeps the test suite fast."""
    return True
from typing import OrderedDict
def provide_trigger_dict():
    """Provide an ordered mapping from trigger names to single-byte values."""
    # (name, value) pairs in presentation order; comments note the
    # experiment phase each trigger marks.
    spec = (
        # Experiment boundaries: crop the meaningful EEG between these,
        # keeping margin before/after so filtering artifacts stay outside.
        ('trig_begin_experiment', 1),
        ('trig_end_experiment', 2),
        # New trial started
        ('trig_new_trl', 3),
        # New sample within a trial (fixation stim)
        ('trig_sample_onset', 4),
        # Choices inquired during sampling
        ('trig_left_choice', 5),
        ('trig_right_choice', 6),
        ('trig_final_choice', 7),
        # Outcome display during sampling
        ('trig_mask_out_l', 8),
        ('trig_show_out_l', 9),
        ('trig_mask_out_r', 10),
        ('trig_show_out_r', 11),
        # Final-choice phase start and onset (fixation stim)
        ('trig_new_final_choice', 12),
        ('trig_final_choice_onset', 13),
        # Actions inquired during CHOICE
        ('trig_left_final_choice', 14),
        ('trig_right_final_choice', 15),
        # Outcome display during CHOICE
        ('trig_mask_final_out_l', 16),
        ('trig_show_final_out_l', 17),
        ('trig_mask_final_out_r', 18),
        ('trig_show_final_out_r', 19),
        # ERROR: trial reset — ignore earlier markers within the trial
        ('trig_error', 20),
        # Forced stop after the sampling limit was reached
        ('trig_forced_stop', 21),
        # Final choice attempted before taking at least one sample
        ('trig_premature_stop', 22),
        # Block feedback display
        ('trig_block_feedback', 23),
    )
    return OrderedDict((name, bytes([value])) for name, value in spec)
import requests
def request_url(url, headers):
    """Fetch a page with requests; return its body on HTTP 200, else False."""
    response = requests.get(url, headers=headers)
    return response.content if response.status_code == 200 else False
def corr(result, result2, result3):
    """
    Measure agreement between the three regression methods.

    For each category (Reb, Ast, Pts) the squared deviations of the three
    predictions from their mean are summed, and the three sums are
    averaged. Smaller values mean better agreement.

    Parameters
    ----------
    result: pandas dataframe
        linear regression results
    result2: pandas dataframe
        the neural network results
    result3: pandas dataframe
        the random forest regression results

    Returns
    ----------
    correlation: pandas.core.series.Series
        the agreement score for the given player
    """
    def _spread(col):
        # Sum of squared deviations from the mean of the three predictions.
        mean = (result[col] + result2[col] + result3[col]) / 3
        return ((mean - result[col]) ** 2
                + (mean - result2[col]) ** 2
                + (mean - result3[col]) ** 2)

    return (_spread('Reb') + _spread('Ast') + _spread('Pts')) / 3
from typing import Union
import builtins
import importlib
def _load_exception_class(import_specifier: str) -> Union[Exception, None]:
"""Load an exception class to be used for filtering Sentry events.
This function takes a string representation of an exception class to be filtered out
of sending to Sentry and returns an uninitialized instance of the class so that it
can be used as the argument to an `isinstance` method call.
:param import_specifier: A string containing the full import path for an exception
class. ex. 'ValueError' or 'requests.exceptions.HTTPError'
:type import_specifier: str
:returns: An uninitialized reference to the exception type to be used in
`isinstance` comparisons.
:rtype: Exception
"""
namespaced_class = import_specifier.rsplit(".", 1)
if len(namespaced_class) == 1:
return builtins.__dict__.get(namespaced_class[0]) # noqa: WPS609
exception_module = importlib.import_module(namespaced_class[0])
return exception_module.__dict__.get(namespaced_class[1]) | a1add93a1f637caad974593536e00063eca14f34 | 44,045 |
def has_missing_keys(minimum_keys, actual_keys, name):
    """Return an error message if any expected key is absent, else None.

    Unexpected extra keys are deliberately not reported.
    """
    missing = minimum_keys - frozenset(actual_keys)
    if not missing:
        return None
    return 'Unexpected %s%s; did you make a typo?' % (
        name, ' missing: %s' % sorted(missing))
def split_id(url_id):
    """
    Decode the key/value pairs embedded in an ID URL.

    The path alternates key/value segments between the first and last
    components, e.g. 'x/a/1/b/2/y' -> {'a': '1', 'b': '2'}.
    """
    parts = url_id.split("/")
    keys = parts[1:-1:2]
    values = parts[2::2]
    return dict(zip(keys, values))
def yaml_time_serializer(dumper, data):
    """
    Represent a datetime.time value as a plain string scalar so PyYAML
    can serialize it when YAML is the output format.
    """
    text = str(data)
    return dumper.represent_scalar('tag:yaml.org,2002:str', text)
def year_range(y_start, y_end):
    """Format a year range like '1990–2000' (en dash); either end optional."""
    if y_end:
        return (y_start or '') + '–' + y_end
    return y_start or ''
def same_keys(a, b):
    """Determine if the dicts a and b have the same keys in them.

    Dict view equality compares the key sets directly, replacing the two
    manual membership loops of the original with the idiomatic one-liner.
    """
    return a.keys() == b.keys()
def insert_fixed_parameters(parameters_dict, fixed_parameters):
    """Merge the fixed parameters into parameters_dict (in place) and return it.

    A fixed_parameters of None means "nothing to insert".
    """
    if fixed_parameters is not None:
        parameters_dict.update(fixed_parameters)
    return parameters_dict
def line(tam=43):
    """Return a horizontal separator made of `tam` hyphen characters."""
    return tam * '-'
from pathlib import Path
def pyyaml_path_constructor(loader, node):
    """PyYAML constructor turning a Path-tagged scalar into a pathlib.Path."""
    scalar = loader.construct_scalar(node)
    return Path(scalar)
def filter_state(hdf_file_content, link_id, state='State0'):
    """Get the time series of a state for a link.

    Parameters
    ----------
    hdf_file_content : np.array
        Data in the h5 file as a (structured) numpy array.
    link_id : int
        Link to filter on.
    state : str
        Column to retrieve, e.g. 'State0' (default), 'State1', ...

    Returns
    ------
    time : np.array
        Array of timesteps for the link.
    state : np.array
        State time series for the link.
    """
    mask = hdf_file_content['LinkID'] == link_id
    return hdf_file_content['Time'][mask], hdf_file_content[state][mask]
def _get_metric_prefix(power: int, default: str = "") -> str:
"""Return the metric prefix for the power.
Args:
power (int): The power whose metric prefix will be returned.
default (str): The default value to return if an exact match is
not found.
Returns:
str: The metric prefix.
"""
metric_prefix = {
24: "Y",
21: "Z",
18: "E",
15: "P",
12: "T",
9: "G",
6: "M",
3: "k",
-3: "m",
-6: "μ",
-9: "n",
-12: "p",
-15: "f",
-18: "a",
-21: "z",
-24: "y",
}
return metric_prefix.get(power, default) | b35f5ff3691eafe87274a685d41f9c57161df1fb | 44,060 |
def get_average_att_network(networks, select='accuracy'):
    """Average an attribute across a population of networks.

    Args:
        networks (list): List of networks.
        select (str): 'accuracy', 'params' or 'flops'; any other value
            contributes nothing and yields 0.

    Returns:
        float: The population average of the selected attribute.
    """
    valid = ('accuracy', 'params', 'flops')
    total = sum(getattr(net, select) for net in networks) if select in valid else 0
    return total / len(networks)
def parse_size(size):
    """
    Parse a size spec such as '800x600-fit' into ([800, 600], 'fit').

    The method suffix must be 'fit' or 'thumb' and both dimensions must
    be positive integers. The error-message strings target developers,
    so they are deliberately untranslated.
    """
    bad_format = 'Size must be specified as 000x000-method such as 800x600-fit.'
    pieces = size.split('-')
    if len(pieces) != 2:
        raise AttributeError(bad_format)
    dimensions, method = pieces
    if method not in ('fit', 'thumb'):
        raise AttributeError(
            'The method must either be "fit" or "thumb", not "%s".' % method)
    try:
        size_ints = [int(part) for part in dimensions.split('x')]
    except ValueError:
        raise AttributeError(bad_format)
    if len(size_ints) != 2:
        raise AttributeError(bad_format)
    if min(size_ints) <= 0:
        raise AttributeError(
            'Height and width for size must both be greater than 0.')
    return size_ints, method
def split_writable_text(encoder, text, encoding):
    """Split off as many characters from the beginning of text as are
    writable with "encoding". Returns a 2-tuple (writable, rest):
    (None, text) when no encoding is given, (text, None) when every
    character is writable.
    """
    if not encoding:
        return None, text
    cut = next(
        (idx for idx, char in enumerate(text)
         if not encoder.can_encode(encoding, char)),
        None,
    )
    if cut is None:
        return text, None
    return text[:cut], text[cut:]
def rxn_chg_mult(rxn_muls, rxn_chgs):
    """Evaluate the TS charge and its low-/high-spin multiplicities from
    the multiplicities and charges of the reactants and products.
    """
    rct_muls, prd_muls = rxn_muls
    if len(rct_muls) == 1 and len(prd_muls) == 1:
        # Unimolecular on both sides: TS spin is the larger of the two.
        ts_mul_low = ts_mul_high = max(rct_muls[0], prd_muls[0])
    else:
        # Total spin per side via S = (mult - 1)/2; the TS multiplicity
        # can range between the low- and high-spin couplings.
        rct_spin = sum((mul - 1.) / 2. for mul in rct_muls)
        prd_spin = sum((mul - 1.) / 2. for mul in prd_muls)
        ts_mul_low = int(round(2 * min(rct_spin, prd_spin) + 1))
        ts_mul_high = int(round(2 * max(rct_spin, prd_spin) + 1))
    # Charge carried into the TS is the total reactant charge.
    ts_chg = sum(rxn_chgs[0])
    return ts_chg, ts_mul_low, ts_mul_high
def get_all_lib_paths(project):
    """Collect library search paths for a project, using forward slashes."""
    # Currently only the project path itself; the Arduino core-src and
    # variant paths were here once and may be reinstated (see VCS history).
    raw_paths = [project.get_path()]
    return [path.replace('\\', '/') for path in raw_paths]
from typing import OrderedDict
def backwards_state_dict(state_dict):
    """
    Rename layers in an older checkpoint's state dict for backwards
    compatibility with the current model layout.

    Parameters
    ----------
    state_dict : dict
        Model state dict with pretrained weights.

    Returns
    -------
    state_dict : dict
        Updated state dict with modernized layer names.
    """
    # Old prefix -> new prefix renames (applied after the 'model.' wrap).
    renames = (('model.model', 'model'),
               ('pose_network', 'pose_net'),
               ('disp_network', 'depth_net'))
    updated = OrderedDict()
    for name, weight in state_dict.items():
        name = '{}.{}'.format('model', name)
        # Older disp networks wrapped conv3 in a single-element Sequential.
        if 'disp_network' in name:
            name = name.replace('conv3.0.weight', 'conv3.weight')
            name = name.replace('conv3.0.bias', 'conv3.bias')
        for old, new in renames:
            name = name.replace('{}.'.format(old), '{}.'.format(new))
        updated[name] = weight
    return updated
def are_attributes_valid(attributes):
    """ Determine if the attributes provided are a dict or not.

    Uses isinstance rather than an exact type check so dict subclasses
    (e.g. OrderedDict, defaultdict) are accepted as well — a
    backward-compatible generalization of the original `type(...) is dict`.

    Args:
        attributes: User attributes which need to be validated.

    Returns:
        Boolean depending upon whether attributes are in valid format or not.
    """
    return isinstance(attributes, dict)
def chi_squared(text, standard, key_length):
    """
    Score `text` against a standard letter distribution.

    Each position contributes (observed - expected)^2 / expected for its
    letter, where observed is the letter's total count in the text and
    expected is standard[letter] * len(text). Counting every letter once
    with Counter and weighting by the count replaces the original
    per-position `text.count` calls, turning O(n^2) into O(n) with the
    same result (up to float summation order).

    @param text is the text you are analyzing
    @param standard is the dictionary of letter : frequency
    @param key_length is the length of the key (kept for interface
        compatibility; not used in the computation)
    @returns a Chi-Squared-style value representing how close the text
        is to the standard distribution
    """
    from collections import Counter
    text_length = len(text)
    total = 0.0
    for letter, count in Counter(text).items():
        expected = standard[letter] * text_length
        deviation = (count - expected) ** 2 / expected
        total += count * deviation  # one term per occurrence, as before
    return total
import shutil
def desk_per_Win(path):
    """
    path: the disk that you want to check, example: path = 'C:'
    return: the percentage of free space on that disk, rounded to an int
    """
    usage = shutil.disk_usage(path)
    # usage is (total, used, free); named attributes avoid magic indexes.
    return round(usage.free / usage.total * 100)
def find_first_non_none(positions):
    """Return (index, value) of the first non-None entry, or None if absent.

    Designed for pysam, which reports mismatched/softmasked bases as None
    in the list returned by get_reference_positions.

    Parameters
    ----------
    positions: list of int
        Positions as returned by pysam.fetch.get_reference_positions.

    Return
    ------
    tuple or None
        (index, position) of the first non-None element; None when every
        element is None (or the list is empty).
    """
    return next(
        ((idx, pos) for idx, pos in enumerate(positions) if pos is not None),
        None,
    )
def visit_url(context, path):
    """Recursively visit the JSON response at *path*, driven by the
    retrieve/collection callbacks carried in *context*."""
    root = dict(context)  # shallow copy; the caller's context stays untouched
    retrievers = root["retrieve_funcs"]
    data = retrievers["retrieve_data"](context, path)
    meta = retrievers["retrieve_meta"](context, path)
    root["run"] = {
        "data": data,
        "meta": meta,
        # Collection/hierarchy of slow data, same container type as meta.
        "coll": type(meta)(),
        "tot_fast": 0,
        "tot_slow": 0,
    }
    # Dispatch on the (stringified) metadata container type.
    handler = root["collection_funcs"][str(type(meta))]
    handler(root, [], data, meta, root["run"]["coll"])
    return root
def remove_bad_chars(input_string, bad_chars):
    """
    Replace every bad character in input_string with a space.

    :param input_string: string from which the bad characters are to be removed
    :param bad_chars: iterable of single-character strings to replace
    :return: string with each bad character replaced by a space

    Bug fix: str.translate keys its table by Unicode code point, so the
    original character-keyed dict never matched anything and the string
    came back unchanged. str.maketrans builds the ordinal-keyed table.
    """
    translation_map = str.maketrans({c: ' ' for c in bad_chars})
    return input_string.translate(translation_map)
def normalize_to_str(value):
    """
    Encode str values to UTF-8; pass everything else through unchanged.

    NOTE(review): despite the name, on Python 3 this returns *bytes* for
    str input (Python 2 unicode->str legacy) — confirm callers expect that.
    """
    is_text = hasattr(value, "encode") and isinstance(value, str)
    return value.encode("utf-8") if is_text else value
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.