content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import platform
def is_win_platform():
    """Return True when the current platform is Windows-like (incl. Cygwin)."""
    system_name = platform.system().lower()
    return system_name in ("windows", "cygwin", "cygwin_nt-10.0")
|
c1e0eb0f57f69349c70470e6da13e687c1d72126
| 13,801
|
def get_full_name(participant):
    """Return "First Last" built from a participant record's fields."""
    fields = participant['fields']
    first = fields['First Name'].strip()
    last = fields['Last Name'].strip()
    return first + ' ' + last
|
4292ea595d13e8093f6d221c40634e8fe74b8e91
| 13,802
|
import re
def join_list_item_to_content(snts):
    """
    Detect itemized-list markers (e.g. "1.") in a list of report sentences
    and merge each marker with the sentence that follows it, so the item
    number and its content occupy the same list element.

    :param snts: list of report sentences (modified in place).
    :return: list of sentences where each list-item number and its content
        share one element.
    """
    def _is_bare_marker(snt):
        # A bare marker is exactly two characters: one digit plus a period.
        return len(snt) == 2 and '.' in snt and any(ch.isdigit() for ch in snt)

    if any(_is_bare_marker(snt) for snt in snts):
        # Indices of sentences ending in "<digit>."; years ("2023."),
        # BI-RADS scores and time-like endings (":30.", "/12.") are excluded.
        ind_list_items = [
            i for i, ln in enumerate(snts)
            if re.search(r'.*\d{1}\.+$', ln)
            and not re.search(r'BI-?RADS', ln)
            and not re.search(r'.*\d{4}\.+$', ln)
            and not re.search(r'.*(:|/)\d{2}\.+$', ln)
        ]
        merged = 0
        for item in ind_list_items:
            # Each merge shortens the list by one, so shift later indices.
            start = item - merged
            snts[start:start + 2] = [' '.join(snts[start:start + 2])]
            merged += 1
    return snts
|
4cc33b7e9ab65c18b9dc0d587838bd5d56e9ffb6
| 13,803
|
def find_new_values(data, values, key):
    """Identify any new label/description values which could be added to an item.
    @param data: the contents of the painting item
    @type data: dict
    @param values: the output of either make_labels or make_descriptions
    @type values: dict
    @param key: the type of values being processed (labels or descriptions)
    @type key: string
    @return lang-value pairs for new information
    @rtype dict
    """
    new_values = {}
    # dict.items() — the original used dict.iteritems(), removed in Python 3.
    for lang, value in values.items():
        if lang not in data.get(key):
            new_values[lang] = value['value']
    return new_values
|
db56c07aedb38458be8aa0fc6bc4b5f4b49b4f4d
| 13,804
|
import re
def _remove_handles_from_text(text: str) -> str:
"""
removes userhandles. ex: @JohnDoe
"""
return re.sub(r'(@[\w]*)', '', text)
|
c8f507d58d32ee3c1f38301255cd68fc2e36c666
| 13,805
|
import typing
from typing import OrderedDict
def order_by_dependencies(dependency_mapper: typing.Dict[str, typing.Set[str]]) \
        -> typing.Tuple[typing.List[str], typing.List[str]]:
    """
    Order keys so that every key appears after the keys it depends on.

    Keys caught in a dependency cycle cannot be ordered; they are appended
    at the end of the ordering and also returned separately.

    Note: ``dependency_mapper``'s parent sets are mutated (resolved parents
    are removed from them).

    :return: (ordered keys, keys that were part of a cycle)
    """
    resolved = OrderedDict()
    for _ in range(1000):  # hard cap; normally exits much earlier
        all_parent_sets_empty = True
        for node, parents in list(dependency_mapper.items()):
            if parents:
                # Some dependency still unresolved — keep iterating.
                all_parent_sets_empty = False
                continue
            resolved[node] = 1
            # Node is satisfied: drop it from every other parent set.
            for other_parents in dependency_mapper.values():
                other_parents.discard(node)
        if all_parent_sets_empty:
            break
    # Whatever never resolved belongs to a cycle; collect it for callers.
    cycled_keys = list()
    for node in dependency_mapper:
        if node not in resolved:
            resolved[node] = 1
            cycled_keys.append(node)
    return list(resolved), cycled_keys
|
23bc887549a722612180b6720adaf4925e886752
| 13,806
|
def getattritem(o, a):
    """
    Resolve a dotted path ``a`` of attributes/items against object ``o``.

    For each path segment, an attribute is preferred (when the name appears
    in ``dir(o)``); otherwise the segment is used as an item key. Example:
    ``getattritem(o, 'one.two')`` yields ``o.one.two`` or ``o['one']['two']``.

    :param o: Object
    :param a: Attribute or item path, possibly containing ``.``
    :return: Resolved value
    """
    for part in a.split('.'):
        if part in dir(o):
            o = getattr(o, part)
        else:
            o = o[part]
    return o
|
7b928b2405691dcb5fac26b7a3d7ebfcfa642f6d
| 13,807
|
def wgan_generator_loss(gen_noise, gen_net, disc_net):
    """
    Generator loss for a Wasserstein GAN (identical for WGAN-GP).

    Inputs:
        gen_noise (PyTorch Tensor): noise tensor, resampled in place
        gen_net (PyTorch Module): generates images from noise
        disc_net (PyTorch Module): critic scoring real vs. fake images
    Outputs:
        loss (PyTorch scalar): generator loss
    """
    # Resample the noise in place from a standard normal.
    gen_noise.data.normal_()
    fake_batch = gen_net(gen_noise)
    critic_scores = disc_net(fake_batch)
    # The generator maximizes the critic score, i.e. minimizes its negation.
    return -critic_scores.mean()
|
090de59ebc8e009b19e79047f132014f747972e7
| 13,809
|
def getDeploymentSections(deploymentInfos, nodesInformation):
    """
    Combine deployments to the same nodes into lists in order to save on connect/disconnect time
    :param deploymentInfos: deployment infos
    :type: :class:`~storm.thunder.configuration.DeploymentInfos`
    :param nodesInformation: nodes information
    :type: :class:`~storm.thunder.NodesInfoMap`
    :returns: deployment sections
    :rtype: [([:class:`~storm.thunder.BaseDeployment`], [:class:`~BaseNodeInfo`])]
    """
    deploymentSections = []
    currentNodes = []
    currentSection = []
    for deploymentInfo in deploymentInfos.deployments:
        # A deployment either names explicit target nodes or applies to all known nodes.
        if deploymentInfo.nodes:
            deploymentNodes = nodesInformation.getNodesByNames(deploymentInfo.nodes)
        else:
            deploymentNodes = nodesInformation.nodes.values()
        # Compare target sets by node name: consecutive deployments with the
        # same targets are grouped into a single section.
        currentNodeNames = set([node.name for node in currentNodes])
        nodeNames = set([node.name for node in deploymentNodes])
        if currentNodeNames == nodeNames:
            currentSection.append(deploymentInfo.deployment)
        else:
            # Target set changed: close the previous section (if any) and start anew.
            if currentSection:
                deploymentSections.append((currentSection, currentNodes))
            currentNodes = deploymentNodes
            currentSection = [deploymentInfo.deployment]
    # Flush the trailing section.
    if currentSection:
        deploymentSections.append((currentSection, currentNodes))
    return deploymentSections
|
3630f5ca0d7237fd2bdc0ce80026922eae20718f
| 13,813
|
def find_val_percent(minval, maxval, x):
    """Return the percentage position of x within the range (minval, maxval).

    minval -- The low number of the range.
    maxval -- The high number of the range.
    x -- A value expected to lie between minval and maxval.

    Out-of-range x prints a warning but still yields the extrapolated
    percentage (the computation is not clamped).
    """
    if not minval < x < maxval:
        print("\n" + " ERROR: X must be between minval and maxval.")
        # Raw string: "\/" is not a valid Python escape sequence.
        print(r" Defaulting to 50 percent because why not Zoidberg. (\/)ಠ,,,ಠ(\/)" + "\n")
    return (x - minval) / (maxval - minval) * 100
|
13661bb2b6b230fa212ddd3ceb96c5b362d52f19
| 13,814
|
def name(who):
    """Return the name of player WHO, for player numbered 0 or 1."""
    return {0: 'Player 0', 1: 'Player 1'}.get(who, 'An unknown player')
|
a553b64c7a03760e974b5ddeac170105dd5b8edd
| 13,815
|
def mean(nums):
    """
    Compute the arithmetic mean of a list of numbers.

    :param nums: numbers to be averaged (must be non-empty)
    :type nums: list
    :return: average of nums
    :rtype: float or int
    :raises ZeroDivisionError: if nums is empty (same as prior behavior)
    """
    # Built-in sum() replaces the manual accumulation loop.
    return sum(nums) / len(nums)
|
d3ea7af8792f4fdd503d5762b5c0e54765ce2d99
| 13,816
|
def process_data(dr):
    """Return the train/test splits from a data reader unchanged.

    (Original note, translated: padding words to a uniform length would
    increase training time; this thin wrapper exists only to keep the same
    interface used in earlier chapters.)

    :param dr: data reader exposing XTrain/XTest/YTrain/YTest
    :return: (XTrain, XTest, YTrain, YTest)
    """
    return dr.XTrain, dr.XTest, dr.YTrain, dr.YTest
|
cc05605d77e25ed7363da3de9407d1f51b7dbf4b
| 13,817
|
def threshold1(x, col, thresh):
    """Classify rows of ``x`` by whether column ``col`` is <= ``thresh``."""
    column = x[:, col]
    return column <= thresh
|
5814ab93fe41e2e92f4648bfee1f8033ff0864b6
| 13,818
|
import torch
def compute_rank(predictions, targets):
    """Compute the rank (between 1 and n) of the true target in ordered predictions
    Example:
        >>> import torch
        >>> compute_rank(torch.tensor([[.1, .7, 0., 0., .2, 0., 0.],
        ...                            [.1, .7, 0., 0., .2, 0., 0.],
        ...                            [.7, .2, .1, 0., 0., 0., 0.]]),
        ...              torch.tensor([4, 1, 3]))
        tensor([2, 1, 5])
    Args:
        predictions (torch.Tensor): [n_pred, n_node]
        targets (torch.Tensor): [n_pred]

    NOTE(review): the final true division produces a float tensor (ties give
    .5 ranks), whereas the doctest above shows integer values — confirm the
    expected dtype.
    """
    n_pred = predictions.shape[0]
    # Index each row at its target column to get the target's score.
    range_ = torch.arange(n_pred, device=predictions.device, dtype=torch.long)
    proba_targets = predictions[range_, targets]
    # Upper bound: 1 + number of strictly greater scores.
    target_rank_upper = (proba_targets.unsqueeze(1) < predictions).long().sum(dim=1) + 1
    # Lower bound: number of greater-or-equal scores (includes the target itself).
    target_rank_lower = (proba_targets.unsqueeze(1) <= predictions).long().sum(dim=1)
    # break ties evenly by taking the mean rank
    target_rank = (target_rank_upper + target_rank_lower) / 2
    return target_rank
|
0aed5b14ef9b0f318239e98aa02d0ee5ed9aa758
| 13,819
|
def udfize_lambda_string(code: str):
    """Wrap ``code`` into the source text of a one-argument lambda."""
    return f"lambda input: ({code})"
|
c519c8ec408b00f033878b6083d4a922f32d2ac9
| 13,820
|
import json
def load_scalabel_frames( scalabel_frames_path ):
    """
    Loads Scalabel frames from a file. Handles both raw sequences of Scalabel frames
    as well as labels exported from Scalabel.ai's application.
    Raises ValueError if the data read isn't of a known type.
    Takes 1 argument:
      scalabel_frames_path - Path to serialized Scalabel frames.
    Returns 1 value:
      scalabel_frames - A list of Scalabel frames.
    """
    with open( scalabel_frames_path, "r" ) as scalabel_frames_fp:
        scalabel_frames = json.load( scalabel_frames_fp )
    # handle the case where we have exported labels from Scalabel.ai itself vs
    # a list of frames.  isinstance() replaces the non-idiomatic type() == ...
    if isinstance( scalabel_frames, dict ):
        if "frames" in scalabel_frames:
            return scalabel_frames["frames"]
    elif isinstance( scalabel_frames, list ):
        return scalabel_frames
    raise ValueError( "Unknown structure read from '{:s}'.".format(
        scalabel_frames_path ) )
|
7e5467d0f184dba1e3efc724391931ed4053a683
| 13,821
|
def ovs_version_str(host):
    """ Retrieve OVS version and return it as a string

    NOTE(review): ``host`` looks like a testinfra-style host fixture (it has
    sudo()/exists()/run()) — confirm.
    """
    mask_cmd = None
    ovs_ver_cmd = "ovs-vsctl get Open_vSwitch . ovs_version"
    with host.sudo():
        if not host.exists("ovs-vsctl"):
            raise Exception("Unable to find ovs-vsctl in PATH")
        mask_cmd = host.run(ovs_ver_cmd)
    # Fail loudly if the command produced no usable output.
    if not mask_cmd or mask_cmd.failed or not mask_cmd.stdout:
        raise Exception("Failed to get OVS version with command '{cmd}'"
                        .format(cmd=ovs_ver_cmd))
    # ovs-vsctl prints the value quoted; strip the quotes and trailing newline.
    return mask_cmd.stdout.strip('"\n')
|
607ffbb2ab1099e86254a90d7ce36d4a9ae260ed
| 13,822
|
def maximum_displacement(initial_velocity):
    """Peak displacement of the probe for a given initial velocity.

    Derived from s = 0.5 u + u t + 0.5 a t^2 evaluated at t = u with a = -1;
    the extra (0.5 u) term reflects that displacement is updated before the
    velocity in the probe's motion rules."""
    u = initial_velocity
    return int(0.5 * u * (1 + u))
|
c4b71ad83d787cae9ad9abe7d044b0e2d66b9cc0
| 13,823
|
from typing import List
def csv_rows(s: str) -> List[List[str]]:
    """Split newline-separated text into rows of comma-separated cells."""
    lines = s.split('\n')
    return [line.split(',') for line in lines]
|
7a6ea8c0f69801cfb1c0369c238e050502813b63
| 13,824
|
def GetMatchURL(soup):
    """Return the URL of the ongoing (live) match from a parsed page."""
    anchor = soup.find("a", attrs={"class": "match-status-3"})
    return anchor["href"]
|
bae3658c46976c7d43234e063d78eeea027d3095
| 13,825
|
import calendar
def create_disjunctive_constraints(solver, flat_vars):
    """
    Create constraints that forbid multiple events from taking place at the same time.
    Returns a list of `SequenceVar`s, one for each day. These are then used in the first
    phase of the solver.
    """
    # Bucket event variables by weekday index v.day (0..4, i.e. Mon-Fri).
    events_for_day = [[] for _ in range(5)]
    for v in flat_vars:
        events_for_day[v.day].append(v)
    sequences_for_day = []
    for day_num, day in enumerate(events_for_day):
        if not day:
            # For empty arrays, OR-tools complains:
            # "operations_research::Solver::MakeMax() was called with an empty list of variables."
            continue
        # One disjunctive (no-overlap) constraint per day, labeled "Mon"/"Tue"/...
        disj = solver.DisjunctiveConstraint(day, calendar.day_abbr[day_num])
        solver.Add(disj)
        sequences_for_day.append(disj.SequenceVar())
    return sequences_for_day
|
f7f8592ac00c8cac9808bb80d425ff1c1cf10b9e
| 13,827
|
import os
import ntpath
def get_file_info(file_path : str) -> dict:
    """Split a file path into its components.

    Returns a dictionary with:
        'directory': the directory path where the file resides
        'file_name': the file name including its extension
        'base_file_name': the file name without extension (example.zip -> example)
        'extention': the extension including the leading dot (e.g. '.zip')

    NOTE(review): an earlier docstring also promised 'flag_name'/'flag_path'
    keys, which this function does not return — confirm they are obsolete.

    Args:
        file_path (str): path to a file
    Returns:
        dict: file info in dictionary format
    """
    directory = os.path.dirname(file_path)
    # ntpath also handles Windows-style separators on POSIX hosts.
    file_name = ntpath.basename(file_path)
    base_file_name, extention = os.path.splitext(file_name)
    return {'directory':directory,
            'base_file_name':base_file_name,
            'file_name':file_name,
            'extention':extention}
|
e46992217ee3869fae117a530fd020d644d9b1c4
| 13,828
|
def get_body(request):
    """
    Look up the body of one publication in the request's feed and store it
    on the context under 'body'; return the updated context.
    """
    context = request['context']
    ordinal = request['entities']['ordinal'][0]['value']
    entry = context['feed'][ordinal]
    context['body'] = entry['summary_detail']['value']
    return context
|
53080b1727f0230a30b9fc4435d455a06e927be3
| 13,829
|
def get_default_snapshot_id(project):
    """ Generate a snapshot id from data on the given project.
    :param project: project object
    :return: (snapshot id, default branch name) tuple, or (None, None) when
        the project has no default branch
    """
    def snapshot_id_from_branch_name(name):
        # e.g. "feature/x" -> "feature:x:HEAD"
        return "{}:HEAD".format(name.replace('/', ':'))
    default_branch = project.git.get_default_branch()
    if default_branch is None:
        return None, None
    snapshot_id = snapshot_id_from_branch_name(default_branch)
    return snapshot_id, default_branch
|
2c542678036541372fa675e61b4f5a969cd5ab0f
| 13,830
|
import random
def filter_shuffle(seq):
    """
    Basic shuffle filter.

    :param seq: sequence to be shuffled
    :return: a shuffled copy as a list, or ``seq`` unchanged when it is not
        iterable (best-effort fallback, preserved from the original)
    """
    try:
        result = list(seq)
    except TypeError:
        # Narrowed from a bare except: only non-iterable input falls through.
        return seq
    random.shuffle(result)
    return result
|
3f2dce2133ba32d8c24d038afaecfa14d37cbd4e
| 13,831
|
def get_package_list_from_file(path):
    r"""
    Create a list of packages to install from a provided .txt file
    Parameters
    __________
    path: Filepath to the text file (.txt) containing the list of packages to install.
    Returns
    ______
    List of filepaths to packages to install.
    Notes
    _____
    .txt file should provide the full filepath to packages to install and be newline (\n) delimited.
    """
    # Verify we have a text file
    if not path.endswith('.txt'):
        # "\\n" keeps the literal two-character sequence in the message
        # (the original embedded a real newline mid-sentence).
        raise RuntimeError("Package List must be a newline(\\n) delimited text file.")
    # read lines of the file, stripping trailing whitespace and a trailing "/"
    with open(path, 'r') as f:
        pkg_list = [line.rstrip().rstrip("/") for line in f]
    # Verify that we are not given an empty list
    if not pkg_list:
        raise RuntimeError("No packages found to be installed. "
                           "Please provide a file with a minimum of 1 package location.")
    return pkg_list
|
91ef3e634e98afd116d2be9c803620f672acd950
| 13,832
|
def parse_jcamp_line(line,f):
    """
    Parse a single JCAMP-DX line
    Extract the Bruker parameter name and value from a line from a JCAMP-DX
    file. This may entail reading additional lines from the fileobj f if the
    parameter value extends over multiple lines.

    Returns (key, value) where value may be a str, bool, int, float, or a
    list of ints/floats for array parameters.
    """
    # extract key= text from line (the first three characters, presumably
    # the "##$" prefix, are skipped)
    key = line[3:line.index("=")]
    text = line[line.index("=")+1:].lstrip()
    if "<" in text:  # string value delimited by <...>
        while ">" not in text:  # grab additional text until ">" in string
            text = text+"\n"+f.readline().rstrip()
        value = text.replace("<","").replace(">","")
    elif "(" in text:  # array; header like "(0..N)" gives the element count
        num = int(line[line.index("..")+2:line.index(")")])+1
        value = []
        rline = line[line.index(")")+1:]
        # extract value from remainder of line
        for t in rline.split():
            # a "." or "e" marks a float token; everything else parses as int
            if "." in t or "e" in t:
                value.append(float(t))
            else:
                value.append(int(t))
        # parse additional lines as necessary until all num elements are read
        while len(value) < num:
            nline = f.readline().rstrip()
            for t in nline.split():
                if "." in t or "e" in t:
                    value.append(float(t))
                else:
                    value.append(int(t))
    elif text == "yes":
        value = True
    elif text == "no":
        value = False
    else:  # simple scalar value
        if "." in text or "e" in text:
            value = float(text)
        else:
            value = int(text)
    return key,value
|
84061c3f4bc42a62e308d5f93877e5c55d85efc1
| 13,833
|
def unwrapText(text):
    """Unwrap text to display in message boxes.

    Removes hard newlines (plus any indentation that followed them) and
    collapses runs of spaces. Use \\r in the source text where a real
    newline is wanted in the output."""
    # Removes newlines
    text = text.replace("\n", "")
    # Remove leading whitespace
    text = text.lstrip()
    # Collapse runs of spaces; the original looped a fixed 10 times, which
    # missed runs longer than ~2**10 spaces.
    while "  " in text:
        text = text.replace("  ", " ")
    # Convert \r markers into real newlines
    text = text.replace("\r", "\n")
    # Remove spaces after newlines
    text = text.replace("\n ", "\n")
    return text
|
f37aafa3a003b3b06b8e9e1a03c833d05c5c0deb
| 13,834
|
import argparse
def get_argparser():
    """Build the command-line parser: two positional args, input and expected."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        dest="input",
        help="evaluation file generated by evaluate_stacks_results.py",
    )
    parser.add_argument(
        dest="expected",
        help="yaml file with expected values",
    )
    return parser
|
59816f23ae0ae834a78363ccac870484b4c154dc
| 13,835
|
def getSubNode(prgNode, NodeName):
    """ Find a sub-node of a program node by its display name.
    Arguments:
        prgNode {ua PrgNode} -- program node to scan
        NodeName {str} -- display name of the sub-node to find
    Returns:
        ua Node -- the matching child, or None when no child matches
    """
    matches = (child for child in prgNode.get_children()
               if child.get_display_name().Text == NodeName)
    return next(matches, None)
|
2da431eff566d7e2c76d4ca4646e15f762c00d4d
| 13,837
|
import subprocess
def git_diff_pre_commit(fname):
    """Return the staged (--cached) git diff for ``fname`` as a list of lines."""
    cmd = ["git", "diff", "--cached", fname]
    return subprocess.check_output(cmd).splitlines()
|
f067b9bdcfdc0010f3840751722e1c5308a62592
| 13,839
|
def stations_by_river(stations):
    """Map each river name to the list of *names* of its stations.

    Note: values are station name strings (taken from ``n.name``), not the
    station objects themselves.
    """
    rivers = {}
    for station in stations:
        rivers.setdefault(station.river, []).append(station.name)
    return rivers
|
1e1023cdad87a3fdd5921d08448a4a2e9ceb311c
| 13,840
|
def doTest(n):
    """Runs a test. returns score.

    Interactively quizzes the user on the cubes of 1..15 via input().

    NOTE(review): the parameter ``n`` is unused — confirm whether it was
    meant to control the number of questions.
    """
    score = 0
    l = list(range(1,16))
    for i in l:
        # input() yields a string, so compare against str(i**3).
        if input("what is {} to the power of 3? ".format(i)) == str(i**3):
            score += 1
            print("Correct.")
        else:
            print("Wrong, the correct answer is {}".format(i**3))
    return score
|
83f32bec718e7459218b8863e229d5ecbd479d2c
| 13,841
|
import numpy
def best_dice(l_a, l_b):
    """
    Best-Dice score: for every mask in ``l_a`` take its highest Dice overlap
    with any mask in ``l_b``, then average over ``l_a``.

    :param l_a: list of binary instance masks (numpy arrays)
    :param l_b: list of binary instance masks (numpy arrays)
    :return: best Dice estimate (0 when ``l_a`` is empty)
    """
    if 0 == len(l_a):
        return 0
    total = 0
    for mask_a in l_a:
        best = 0
        for mask_b in l_b:
            # Dice = 2|A ∩ B| / (|A| + |B|) for binary masks.
            dice = 2 * float(numpy.sum(mask_a * mask_b)) / \
                float(numpy.sum(mask_a) + numpy.sum(mask_b))
            best = max(best, dice)
        total += best
    return total / len(l_a)
|
d00c668fe8c97fa2a78a62ac94492c665dc4a311
| 13,843
|
def preorder(root):
    """Iterative preorder (node, left, right) traversal of a binary tree."""
    visited = []
    pending = [root]
    while pending:
        current = pending.pop()
        if not current:
            continue
        visited.append(current.val)
        # Push right first so the left subtree is visited first.
        pending.append(current.right)
        pending.append(current.left)
    return visited
|
e322df77a973f30b0745b36540a0f66b2ce29e6d
| 13,844
|
import subprocess
def resolve_name(name):
    """Return the v4/v6 IP addresses for a given name, using the local resolver."""
    cmd = ['dig', '+short', name, 'A', name, 'AAAA']
    output = subprocess.check_output(cmd)
    return output.decode().splitlines()
|
2f63d2949bf6e66afa979b00fcb0c1b5a6ea2aed
| 13,846
|
def joinfields(words, sep = ' '):
    """joinfields(list [,sep]) -> string
    Return a string composed of the words in list, with
    intervening occurences of sep. The default separator is a
    single space.
    (joinfields and join are synonymous)
    """
    # str.join is linear; the original manual concatenation was quadratic.
    return sep.join(words)
|
0941a43811c992417a24ea59c77b4172117d3510
| 13,847
|
import socket
import fcntl
import struct
def get_ip_address(ifname):
    """
    Get the ip address from the specified interface.
    >>> get_ip_address('eth0')
    '192.168.0.7'
    @type ifname: string
    @param ifname: The interface name. Typical names are C{'eth0'},
        C{'wlan0'}, etc.
    @rtype: string
    @return: The IP address of the specified interface.

    Linux-specific: uses the SIOCGIFADDR ioctl on an AF_INET datagram socket.
    NOTE(review): under Python 3, struct.pack('256s', ...) requires bytes, so
    ifname may need .encode() — confirm the targeted Python version.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Interface name is packed into a 256-byte buffer (truncated to 15 chars);
    # bytes 20:24 of the returned struct hold the IPv4 address.
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        struct.pack('256s', ifname[:15]))[20:24])
|
731575a6b642884d7aa74a950ed109fca1e2086a
| 13,849
|
from typing import Counter
def percentile(data: list, p=0.5):
    """
    Return the smallest element whose cumulative frequency exceeds ``p`` of
    the data.

    :param data: origin list
    :param p: frequency percentile, strictly between 0 and 1
    :return: the element at frequency percentile p, or None for empty data
    :raises ValueError: if p is not in (0, 1)
    """
    if not 0 < p < 1:
        # Raise instead of assert so validation survives ``python -O``.
        raise ValueError("p must be strictly between 0 and 1")
    boundary = len(data) * p
    accumulation = 0
    # Walk distinct values in ascending order, accumulating frequencies.
    for value, count in sorted(Counter(data).items()):
        accumulation += count
        if accumulation > boundary:
            return value
    return None
|
ac0a3a4705579c1b6a5165b91e6dcad65afcd1f4
| 13,851
|
def _get_path_from_parent(self, parent):
    """
    Return a list of PathInfos containing the path from the parent
    model to the current model, or an empty list if parent is not a
    parent of the current model.
    """
    # Delegate when the object already provides this method natively.
    if hasattr(self, 'get_path_from_parent'):
        return self.get_path_from_parent(parent)
    # Same model: no path to traverse.
    if self.model is parent:
        return []
    model = self.concrete_model
    # Get a reversed base chain including both the current and parent
    # models.
    chain = model._meta.get_base_chain(parent) or []
    chain.reverse()
    chain.append(model)
    # Construct a list of the PathInfos between models in chain.
    path = []
    for i, ancestor in enumerate(chain[:-1]):
        child = chain[i + 1]
        # Each ancestor->child hop contributes the reverse path of the
        # ancestor link between them.
        link = child._meta.get_ancestor_link(ancestor)
        path.extend(link.get_reverse_path_info())
    return path
|
8f213fcbe3612790d4922d53e0e2a4465b098fe6
| 13,852
|
def strdigit_normalize(digit):
    """Normalizes input to format '0x'. Example: '9' -> '09'"""
    assert type(digit) is str, 'Invalid input. Must be a string.'
    value = int(digit)
    assert value >= 0, 'Invalid input. Must be string representing a positive number.'
    # Single-digit values gain a leading zero; longer inputs pass through untouched.
    return '0' + str(value) if value < 10 else digit
|
41b119b4b8b19f978bf4445fc81273f7e62af59a
| 13,853
|
import requests
def request_url(url):
    """
    Function to send https or http request to this url and return code result.
    Parameters
    ----------
    url : string
        This variable contain url must be checked (scheme-less host/path)
    Returns
    -------
    status_code : int or str
        Code result — ``response.status_code`` (int) on success, or the
        string "404" on total failure.
        NOTE(review): mixing int and str return types looks unintended —
        confirm what callers expect.
    """
    try:
        # Try the secure scheme first.
        url = "https://" + format(url)
        response = requests.head(url, allow_redirects=True, timeout=10)
    except:
        try:
            # NOTE(review): url was already rebound to "https://..." above, so
            # this fallback builds "http://https://..." — confirm intended.
            url = "http://" + format(url)
            response = requests.head(url, allow_redirects=True, timeout=10)
        except:
            return "404"
            # print("Request failed")
    # Falsy status codes (never produced by requests in practice) map to "404".
    if response.status_code:
        return response.status_code
    else:
        return "404"
|
884d20d33f02110b31e47a29077cb4e093e93536
| 13,854
|
import os
def get_geant_installation():
    """
    By checking low energy data from the G4 installation, return the path to
    the G4 setup, typically in /opt/Geant/vXXXX.

    :returns: formatted string
    :rtype: str
    :raises RuntimeError: when G4LEDATA is unset or no 'share' ancestor exists
    """
    if "G4LEDATA" not in os.environ:
        raise RuntimeError("No G4LEDATA found, aborting")
    path = os.environ["G4LEDATA"]
    # Walk up the directory tree until the 'share' component (or the root).
    while True:
        path, tail = os.path.split(path)
        if tail in ("share", ""):
            break
    if path == "":
        raise RuntimeError("No proper G4 installation found")
    return path
|
b3d64d41c519c8d8538348024dc442a6d2cdfa12
| 13,855
|
def pad(array, n):
    """
    ISO/IEC 9797-1 padding method 1: extend ``array`` with zero bytes up to
    a multiple of the block size ``n``.

    Fixes the original, which appended a full extra block of ``n`` zeros to
    inputs that were already block-aligned (its computed ``pads`` remainder
    was never used).

    :param array: list of byte values
    :param n: block size
    :return: padded copy of ``array``
    """
    remainder = len(array) % n
    if remainder == 0:
        # Already aligned: no padding needed (method 1 allows zero pad bytes).
        return array + []
    return array + [0] * (n - remainder)
|
cc128428a8c446c1437b5b87d563d67bb2d11f48
| 13,858
|
import hashlib
def hash_file(pathname):
    """Return a byte string that is the SHA-256 hash of the file at the given pathname."""
    h = hashlib.sha256()
    with open(pathname, 'rb') as ifile:
        # Stream in fixed-size chunks so large files are not loaded whole.
        for chunk in iter(lambda: ifile.read(65536), b''):
            h.update(chunk)
    return h.digest()
|
bdd82aa57abacee91a4631d401af35f0274eb804
| 13,859
|
import importlib
def convert(string, convert_type):
    """Execute convert.

    Dynamically loads the ``converter.<convert_type>`` module and delegates
    the conversion of ``string`` to its ``convert`` function.
    """
    converter_module = importlib.import_module("converter." + convert_type)
    return converter_module.convert(string)
|
b0d89172b813df3f009b904339fe6af672917419
| 13,860
|
def get_resized_size(org_h, org_w, long_size=513):
    """Scale (org_h, org_w) so the longer side equals ``long_size``, keeping
    the aspect ratio; the shorter side is truncated to int."""
    if org_h > org_w:
        return long_size, int(1.0 * long_size * org_w / org_h)
    return int(1.0 * long_size * org_h / org_w), long_size
|
699b91a1fbdb1e5228872d3ea5812dcc29cfbaa5
| 13,863
|
import random
def is_prime(number, test_count):
    """
    Miller-Rabin probabilistic primality test: decide, through TEST_COUNT
    random rounds, whether NUMBER is (very probably) prime.

    Fixes the original decomposition loop, whose condition tested
    ``r % 2 == 1`` instead of ``== 0``; that left the exponent undivided and
    the witness loop empty, silently degrading the whole test to a plain
    Fermat test (which Carmichael numbers such as 561 can fool).
    """
    if number == 2 or number == 3:
        return True
    if number <= 1 or number % 2 == 0:
        return False
    # Write number - 1 as 2**s * d with d odd.
    s = 0
    d = number - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for _ in range(test_count):
        a = random.randrange(2, number - 1)
        x = pow(a, d, number)
        if x == 1 or x == number - 1:
            continue  # a is not a witness for this round
        for _ in range(s - 1):
            x = (x * x) % number
            if x == number - 1:
                break
        else:
            # No square reached -1: a witnesses compositeness.
            return False
    return True
|
49b0149dad5f053bbf813845a10267766784c775
| 13,864
|
def filter_entry(line, min_obs, min_indiv):
    """
    Check the samples and filter
    returns a bit flag with
    0 - pass
    1 - too-few observations
    2 - too-few individuals
    (3 - both conditions failed)

    NOTE(review): the tab-split layout with a ':'-separated column 8 looks
    like a VCF-style record — confirm the expected input format.
    """
    data = line.strip().split('\t')
    # Column 8 holds the sub-field layout; locate the "PAD" sub-field index.
    fmt = data[8].split(':')
    dp_idx = fmt.index("PAD")
    obs = 0
    n_indivs = 0
    for i in data[9:]:
        # "." marks a missing sample entry.
        if i == ".":
            continue
        n_indivs += 1
        #This 1 indexing is a problem...
        obs += int(i.split(':')[dp_idx].split(',')[1])
    ret = 0
    if obs < min_obs:
        ret += 1
    if n_indivs < min_indiv:
        ret += 2
    return ret
|
5e8e98b256c8f07aa106dbf7a33df21a797d4bab
| 13,868
|
import math
def data_to_sorted_xy(data, logx):
    """
    Return a list of (x, y) pairs with distinct x values and sorted by x value.
    Enter: data: a list of (x, y) or [x, y] values.
           logx: True to return (log10(x), y) for each entry.
    Exit:  data: the sorted list with unique x values.
    """
    if not logx:
        # Fast path: inputs of at most 4 points that are already strictly
        # ascending are returned untouched (checked pairwise, no sort).
        if (len(data) <= 1 or (
                data[0][0] < data[1][0] and (len(data) <= 2 or (
                    data[1][0] < data[2][0] and (len(data) <= 3 or (
                        data[2][0] < data[3][0] and len(data) == 4)))))):
            return data
        # dict() deduplicates x values, keeping the last y for each x.
        return sorted(dict(data).items())
    # Log-x variant: same dedup/sort, keyed by log10(x).
    return sorted({math.log10(x): y for x, y in data}.items())
|
7e84a3f684dc9a82bf5fd48256e5f5c18a5eedb6
| 13,870
|
def is_catalog_record_owner(catalog_record, user_id):
    """
    Does user_id own catalog_record?

    Ownership means a truthy user_id matching the record's
    'metadata_provider_user' field.
    :param catalog_record:
    :param user_id:
    :return: bool
    """
    return bool(user_id) and catalog_record.get('metadata_provider_user') == user_id
|
bb5e649b4cfd38ee17f3ab83199b4736b374d312
| 13,871
|
import requests
def get_transmem_from_uniprot(uniprot_id):
    """
    Retains transmembrane regions from Uniprot (first and last residue each).
    This function requires internet access.
    Parameters
    ----------
    uniprot_id : str
        The UNIPROT ID of the protein.
    Returns
    -------
    tm : list
        List of all transmembrane regions, represented as tuples with first and last residue ID.
    """
    url = 'https://www.uniprot.org/uniprot/'+uniprot_id+'.txt'
    r = requests.get(url, allow_redirects=True)
    c = r.content
    tm = []
    for line in c.splitlines():
        # Flat-file feature lines for transmembrane regions start with "FT TRANSMEM".
        if line.startswith(b'FT TRANSMEM'):
            # Crude bytes->str cleanup: str(line) yields "b'FT TRANSMEM ...'",
            # so the prefix and quotes are stripped before splitting on dots.
            # NOTE(review): fragile — line.decode() would be cleaner; confirm.
            l = str(line)
            l = l.replace('b\'FT TRANSMEM ','')
            l = l.replace('\'','')
            s = l.split('.')
            tm.append((int(s[0]),int(s[-1])))
    # Side effect: prints each region as "first last".
    for tmi in tm: print(*tmi)
    return tm
|
3fb8b5061d3ce051d108784a0b9e8d83c875feb3
| 13,872
|
def groupfinder(userid, request):
    """
    Authorization callback returning the groups of the current user.

    Per-user group lookup was deliberately disabled (see the commented-out
    code in history): everybody is treated as an editor.
    """
    return ['group:editors']
|
f23bab45b4170a2897bbd34a1068564b53e10a67
| 13,873
|
import random
def init(size):
    """Creates a randomly ordered dataset: a shuffled accessor list plus random data."""
    # Reseed from system entropy / time.
    random.seed(None)
    # Shuffled accessor over 0..size-1.
    order = list(range(size))
    random.shuffle(order)
    # Random payload, one value per accessor slot.
    data = [random.random() for _ in order]
    return (order, data)
|
975ab66f4e759973d55a0c519609b6df7086d747
| 13,874
|
def format_permissions(permission_bound_field):
    """
    Given a bound field with a queryset of Permission objects, constructs a
    list of dictionaries for 'objects':
        'objects': [
            {
                'object': name_of_some_content_object,
                'add': (add_permission_for_object, checked_str)
                'change': (change_permission_for_object, checked_str)
                'delete': (delete_permission_for_object, checked_str)
            },
        ]
    and a list of other permissions:
        'others': [
            (any_non_add_change_delete_permission, checked_str),
        ]
    and returns a table template formatted with this list.
    """
    permissions = permission_bound_field.field._queryset
    # get a distinct list of the content types that these permissions relate to
    content_type_ids = set(permissions.values_list('content_type_id', flat=True))
    # Permission ids that should render pre-checked, from the form's initial data.
    initial = permission_bound_field.form.initial.get('permissions', [])
    object_perms = []
    other_perms = []
    for content_type_id in content_type_ids:
        content_perms = permissions.filter(content_type_id=content_type_id)
        content_perms_dict = {}
        for perm in content_perms:
            checked = 'checked="checked"' if perm.id in initial else ''
            # identify the three main categories of permission, and assign to
            # the relevant dict key, else bung in the 'other_perms' list
            if perm.codename.split('_')[0] in ['add', 'change', 'delete']:
                content_perms_dict['object'] = perm.content_type.name
                content_perms_dict[perm.codename.split('_')[0]] = (perm, checked)
            else:
                other_perms.append((perm, checked))
        if content_perms_dict:
            object_perms.append(content_perms_dict)
    return {
        'object_perms': object_perms,
        'other_perms': other_perms,
    }
|
6b4b6f3488af1c5b47d8ae23e0e87d53ac1a925f
| 13,875
|
def Unzip(iterable, container=None):
    """
    iterable >> Unzip(container=None)
    Transpose ``iterable``, like ``izip(*iterable)``; returns iterators when
    container is None
    >>> [(1, 2, 3), (4, 5, 6)] >> Unzip(tuple) >> Collect()
    [(1, 4), (2, 5), (3, 6)]
    :param iterable iterable: Any iterable, e.g. list, range, ...
    :param container container: If not none, unzipped results are collected
       in the provided container, eg. list, tuple, set
    :return: Unzip iterable.
    :rtype: iterator over iterators
    """
    columns = zip(*iterable)
    if container:
        return map(container, columns)
    return columns
|
86a05b353f5fa27aa17282c79e0441166c856f17
| 13,876
|
def v(n):
    """Compute the v value of the lhs matrix: n^2 * (n + 1) / 2."""
    return (n * n * (n + 1)) / 2
|
897f905b04812711841f06599565343d65af013e
| 13,877
|
def greedy_set_cover(universe, subsets, costs):
    """Approximate greedy algorithm for set-covering. Can be used on large
    inputs - though not an optimal solution.
    Args:
        universe (iterable): universe of elements (list or set, per the
            original docstring; coerced to a set internally)
        subsets (dict): subsets of U - {S1: elements, S2: elements, ...};
            element collections may be sets or lists
        costs (dict): costs of each subset in S - {S1: cost, S2: cost, ...}
    Returns:
        list of chosen subset keys, or None when the subsets cannot cover
        the universe.
    """
    # Coerce once so list-typed inputs (as documented) work; the original
    # compared a set against whatever was passed and OR-ed lists into a set.
    universe = set(universe)
    all_elements = set(e for s in subsets for e in subsets[s])
    # elements don't cover universe -> invalid input for set cover
    if all_elements != universe:
        return None
    # track elements of universe covered
    covered = set()
    cover_sets = []
    while covered != universe:
        min_cost_elem_ratio = float("inf")
        min_set = None
        # find set with minimum cost : elements_added ratio
        for s, members in subsets.items():
            new_elements = len(set(members) - covered)
            # skip sets adding nothing new (avoids division by zero)
            if new_elements != 0:
                cost_elem_ratio = costs[s] / new_elements
                if cost_elem_ratio < min_cost_elem_ratio:
                    min_cost_elem_ratio = cost_elem_ratio
                    min_set = s
        cover_sets.append(min_set)
        covered |= set(subsets[min_set])
    return cover_sets
|
aef8aa0b345d5ba01db841bdfb6485661d245d5d
| 13,882
|
def bounding_rect(mask, pad=0):
    """Returns (t, b, l, r) boundaries so that all nonzero pixels in mask
    have locations (i, j) with t <= i < b and l <= j < r, expanded by
    ``pad`` and clipped to the mask shape."""
    nonzero = mask.nonzero()
    if len(nonzero[0]) == 0:
        # No nonzero pixels: fall back to the full extent.
        return (0, mask.shape[0], 0, mask.shape[1])
    bounds = [(max(0, idx.min() - pad), min(size, idx.max() + 1 + pad))
              for idx, size in zip(nonzero, mask.shape)]
    (t, b), (l, r) = bounds
    return (t, b, l, r)
|
850db378abb0a8e1675e0937b66dfb4061ced50b
| 13,885
|
from os.path import isfile
def isFile(path: str):
    """Tell whether ``path`` refers to an existing regular file."""
    return isfile(path)
|
44808693af570ebe18526830c96276522e5ad4ff
| 13,886
|
import csv
def get_return_fields(filepath):
    """Extract the returnable fields for results from the ENA filter
    description file, as {result id: [returnable fields]}.

    filepath: path to a ';'-delimited csv describing filters; the
    "Returnable fields" column is itself a ", "-separated list.
    """
    returnable_fields = {}
    with open(filepath, "r") as handle:
        for row in csv.DictReader(handle, delimiter=';'):
            # setdefault keeps the first occurrence of a duplicated result id.
            fields = row["Returnable fields"].split(", ")
            returnable_fields.setdefault(row["Result"], fields)
    return returnable_fields
|
d70efe68de8cbd100b66cee58baf6ca542cb81a8
| 13,887
|
def generic_sum(algorithm):
    """Build a checksum function for the given hash constructor.

    :param algorithm: hashlib algorithm to use (e.g. hashlib.sha256)
    :return: function mapping an open binary file-like object to its hex digest
    """
    def hash_sum(fileobj):
        """Digest ``fileobj`` in chunks that are multiples of block_size.

        :param fileobj: open file(-like) object (e.g. a BytesIO buffer);
            it is rewound with seek(0) before and after reading.
        """
        fileobj.seek(0)
        digest = algorithm()
        chunk_size = 4096 * digest.block_size
        while True:
            chunk = fileobj.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
        fileobj.seek(0)
        return digest.hexdigest()
    return hash_sum
|
5f3f3e00b9a1878d416491a06453cfeed4e0884c
| 13,888
|
from typing import Any
def encode_voltage(value: Any) -> bytes:
    """Encode a voltage (in volts) as a signed big-endian 2-byte payload.

    The wire format stores tenths of a volt. ``round`` is used instead of
    ``int`` truncation: ``int(3.7 * 10)`` yields 36 because of binary
    float representation, while ``round`` gives the intended 37.

    :param value: voltage in volts (anything ``float()``-compatible)
    :return: 2 bytes, big-endian, signed, holding tenths of a volt
    :raises OverflowError: if the scaled value does not fit in 2 bytes
    """
    tenths = round(float(value) * 10)
    return tenths.to_bytes(length=2, byteorder="big", signed=True)
|
f970783eb7d84b61d9db8c230a6a5622b571635d
| 13,890
|
def merge_dict(x,y):
    """
    Merge dict *x* into dict *y* in place and return *y*.

    Keys present in both dicts are combined as strings joined by ";"
    (existing value first); keys only in *x* are copied over unchanged.
    """
    for key, value in x.items():
        if key in y:
            y[key] = "{};{}".format(y[key], value)
        else:
            y[key] = value
    return y
|
2251af1fe611ba7c2bf9978094ad0f649bd4d4d7
| 13,891
|
import random
def roll_damage(dice_stacks, modifiers, critical=False):
    """
    Roll total damage for a collection of dice stacks.

    :param dice_stacks: iterable of stacks; each stack exposes ``amount``
        (number of dice) and ``dice.sides`` (faces per die)
    :param modifiers: flat total added to the dice result
    :param critical: if True, every stack's dice are rolled twice
    :return: total damage to apply

    Fix: the previous implementation doubled ``dice_stack.amount`` in
    place on a critical, so repeated critical calls kept doubling the
    caller's stacks. The input is no longer mutated.
    """
    multiplier = 2 if critical else 1
    total = 0
    for stack in dice_stacks:
        for _ in range(stack.amount * multiplier):
            total += random.randint(1, stack.dice.sides)
    return total + modifiers
|
2627f1de0fe0754a4bfc802378ea1950b2b078a2
| 13,892
|
def get_audio_bitrate(bitrate):
    """
    Render an audio bitrate as a human-readable kilobit string.

    Arguments:
    :param bitrate: integer -- audio bitrate in bits per second
    :return: string such as ``"128.0 kbps"``
    """
    kilobits = bitrate / 1000
    return "{} kbps".format(kilobits)
|
847d74e08e8f75b24be1fc144fb3896f5e141daf
| 13,893
|
def _mesh_to_mat_cards(mesh, divs, frac_type):
"""Prepares the material cards for mesh_to_geom."""
mat_cards = ""
idx = mesh.iter_structured_idx('xyz')
for i in idx:
mesh.mats[i].metadata['mat_number'] = int(i + 1)
mat_cards += mesh.mats[i].mcnp(frac_type=frac_type)
return mat_cards
|
2d3cbd8d4a371244938ce1b9aa1e2532801e705d
| 13,894
|
def output_prefix(prefix):
    """Decorator factory: tag a function with an ``output_prefix`` attribute.

    :param str prefix: the prefix to add (include trailing whitespace
        yourself if desired; Sopel does not assume it should add anything)

    The prefix is added to text sent through:

    * :meth:`bot.say <sopel.bot.SopelWrapper.say>`
    * :meth:`bot.notice <sopel.bot.SopelWrapper.notice>`
    """
    def decorator(function):
        setattr(function, 'output_prefix', prefix)
        return function
    return decorator
|
8f7e333b3fd40725b3d9681cfdf432d01cb8d24e
| 13,896
|
import collections
def load_manifest(manifest_path):
    """Extract sample information from a manifest file.

    Each non-blank line must contain three whitespace-separated fields:
    sample id, status ("case" or "control"), and a file path.

    :param manifest_path: path to the manifest file
    :return: list of ``Sample(id, status, path)`` named tuples
    :raises ValueError: on a malformed line or an unknown status
        (ValueError subclasses the generic Exception raised previously,
        so existing ``except Exception`` callers still work)
    """
    # pylint: disable=I0011,C0103
    Sample = collections.namedtuple("Sample", "id status path")
    samples = []
    with open(manifest_path, "r") as manifest_file:
        for line_number, line in enumerate(manifest_file, start=1):
            fields = line.split()
            if not fields:
                continue  # tolerate blank lines instead of crashing
            if len(fields) != 3:
                raise ValueError(
                    "Line {} must have exactly 3 fields; got {!r}".format(
                        line_number, line))
            sample_id, status, path = fields
            if status not in ("case", "control"):
                message = (
                    'Sample status must be either "case" or "control";'
                    ' instead got "{}"'
                )
                raise ValueError(message.format(status))
            samples.append(Sample(id=sample_id, status=status, path=path))
    return samples
|
21e15fa8de75d963f8d35c4b5f939d3fcee45c99
| 13,898
|
def _get_ascii_token(token):
"""Removes non-ASCII characters in the token."""
chars = []
for char in token:
# Try to encode the character with ASCII encoding. If there is an encoding
# error, it's not an ASCII character and can be skipped.
try:
char.encode('ascii')
except UnicodeEncodeError:
continue
chars.append(char)
return ''.join(chars)
|
e77f9fe9d59cbcee8cd1893e56a988b70a15542a
| 13,899
|
import json
def list_response():
    """JSON:API list pytest fixture.

    Loads and returns the parsed example list response. The file handle
    is closed deterministically via a context manager (the previous
    version leaked the open handle).
    """
    with open('tests/responses/example_list.json') as response_file:
        return json.load(response_file)
|
f4e815f452ae3e0eb806bbd816c429ad0a63c413
| 13,900
|
def _get_all_osc(centers, osc_low, osc_high):
"""Returns all the oscillations in a specified frequency band.
Parameters
----------
centers : 1d array
Vector of oscillation centers.
osc_low : int
Lower bound for frequency range.
osc_high : int
Upper bound for frequency range.
Returns
-------
osc_cens : 1d array
Osc centers in specified frequency band.
"""
# Get inds of desired oscs and pull out from input data
osc_inds = (centers >= osc_low) & (centers <= osc_high)
osc_cens = centers[osc_inds]
return osc_cens
|
9199283080bd0111d8ca3cb74f4c0865de162027
| 13,903
|
def keras_decay(step, decay=0.0001):
    """Keras-style inverse-time learning-rate decay: 1 / (1 + decay * step)."""
    denominator = 1.0 + decay * step
    return 1.0 / denominator
|
b922e999ff678127535988eaf04b35e2be2a9117
| 13,904
|
def PublicAPI(obj):
    """Mark a class or method as part of RLlib's public API.

    Public APIs are exposed to end users of RLlib and can be expected to
    remain stable across releases. Anything inheriting from a
    ``@PublicAPI`` base class is public as well (e.g., all trainer
    classes, since ``Trainer`` carries the annotation), and so are all
    trainer configurations.

    This decorator is documentation-only: it returns *obj* unchanged.
    """
    return obj
|
62f5567fe67e6a7048ae9c6fc11ae58bcec49fb5
| 13,905
|
def get_dtd_menu_texts(has_dtd):
    """Return the [label, help] texts for the DTD menu entry.

    Usage in gui.<xx>.py::

        def adjust_dtd_nemu(self):
            label, helpstr = shared.get_dtd_menu_texts(self.dtdmenu.has_dtd)
    """
    action = {True: 'Remove', False: 'Add'}[has_dtd]
    label = '{} &DTD'.format(action)
    helptext = '{} the Document Type Declaration'.format(action)
    return [label, helptext]
|
359a91ec3b0bfe5cc28f1b3cf0ec44e9f34bd210
| 13,907
|
from collections import OrderedDict
def load_section(cp, section, ordered=True):
    """
    Return the key/value pairs of one section of a configparser instance.

    :param cp: the configparser instance.
    :param section: the name of the INI section.
    :param ordered: if True, return a <collections.OrderedDict>; else a <dict>.
    :return: a dict containing the specified section's keys and values.

    Note: the import was switched from ``typing.OrderedDict`` (an
    annotation-only alias, deprecated for runtime instantiation) to the
    real runtime class ``collections.OrderedDict``.
    """
    items = cp.items(section=section)
    if ordered:
        return OrderedDict(items)
    return dict(items)
|
9b819efb75082138eb9e13405ac256908112c744
| 13,908
|
def match_task(question: str, keywords: list):
    """Match question words against *keywords*.

    Returns the first word of *question* (split on spaces, in order)
    that appears in *keywords*, or an empty string when nothing matches.
    """
    for candidate in question.split(" "):
        if candidate in keywords:
            return candidate
    return ""
|
52b5f3188cd4d3faffcebf66572c3d3a40a43de8
| 13,909
|
import json
def open_json(path):
    """Load and return the db from a JSON file previously saved at *path*.

    The file is opened via a context manager so the handle is always
    closed (the previous version leaked the open file descriptor).

    :raises OSError: if the file cannot be opened.
    :raises json.JSONDecodeError: if the contents are not valid JSON.
    """
    with open(path, 'r') as fd:
        return json.load(fd)
|
c0c0c4857b4582091a145a71767bc0168808593a
| 13,911
|
import torch
def prep_fft_channel(x):
    """Rotate the complex (real/imag) pair from the channel axis to the last axis.

    The channel axis of size 2*C is split into (C, 2), and the size-2
    real/imag axis is moved to the end:
    ``(..., 2C, H, W) -> (..., C, H, W, 2)``.
    """
    split_shape = x.shape[:-3] + (x.shape[-3] // 2, 2) + x.shape[-2:]
    # movedim(-3, -1) is exactly the original permute(*leading, -2, -1, -3).
    return torch.movedim(x.reshape(split_shape), -3, -1)
|
ad6be1660239a87b60cf6be838caf37988ef19ca
| 13,912
|
def consolidate_events(ev_list):
    """
    Consolidates the busy times of multiple calenders so there is no overlap in busy events

    :param ev_list: list of event dicts with 'start' and 'end' keys,
        presumably pre-sorted by start time -- TODO confirm with callers.
    :return: list of non-overlapping {'start', 'end'} dicts.

    NOTE(review): the input list is mutated in place (an overlapped
    event's 'end' is extended), and an empty ``ev_list`` raises
    IndexError on the final append -- verify callers never pass one.
    """
    consolidated = []
    for i in range(len(ev_list)-1):
        ev = ev_list[i]
        # NOTE(review): 'next' shadows the builtin of the same name.
        next = ev_list[i+1]
        #If the next busy event starts before the current event ends and ends before the current event does.
        if (ev['end'] > next['start'] and ev['end'] > next['end']):
            consolidated.append({'start': ev['start'], 'end': ev['end']})
            # Stretch the contained event so the next iteration sees the
            # merged extent.
            ev_list[i+1]['end'] = ev['end']
        #If the next busy event starts before the event ends
        elif ev['end'] > next['start']:
            # Partial overlap: truncate the current event at the point
            # where the next one begins.
            consolidated.append({'start': ev['start'], 'end': next['start']})
        else:
            consolidated.append({'start': ev['start'], 'end': ev['end']})
    #Adds the final event
    consolidated.append(ev_list[len(ev_list)-1])
    return consolidated
|
612015c2200eb29c3f2303807e449da465354927
| 13,913
|
def convert_resolv_conf(nameservers, searchdomains):
    """Build resolv.conf-style configuration lines.

    :param nameservers: iterable of DNS server addresses (may be falsy)
    :param searchdomains: iterable of search domains (may be falsy)
    :return: list with up to two newline-terminated strings
        ("DNS=..." and "Domains=..."); each value is followed by a
        single space, matching the historical output format exactly.
    """
    lines = []
    if nameservers:
        dns_line = "DNS=" + "".join("%s " % ns for ns in nameservers) + "\n"
        lines.append(dns_line)
    if searchdomains:
        domain_line = "Domains=" + "".join("%s " % sd for sd in searchdomains) + "\n"
        lines.append(domain_line)
    return lines
|
47175fb2dddac151a94b99b1c51942a3e5ca66a1
| 13,915
|
def password_okay_by_char_count(pwd_row):
"""
Process list of rows from a file, where each row contains pwd policy and pwd.
Pwd is only valid if the indicated character is found between x and y times (inclusive) in the pwd.
E.g. 5-7 z: qhcgzzz
This pwd is invalid, since z is only found 3 times, but minimum is 5.
"""
# Each input row looks like "5-7 z: qhcgzzz"
# Convert each row to a list that looks like ['5-7 z', 'qhcgzzz']
pwd_policy_and_pwd = [item.strip() for item in pwd_row.split(":")]
#print(pwd_policy_and_pwd)
pwd = pwd_policy_and_pwd[1]
char_counts, _, char_match = pwd_policy_and_pwd[0].partition(" ")
min_chars, _, max_chars = char_counts.partition("-")
actual_char_count = pwd.count(char_match)
if (actual_char_count < int(min_chars)) or (actual_char_count > int(max_chars)):
return False
return True
|
4571d1d6e47aef1c31257365cd0db4240db93d6c
| 13,917
|
import itertools
def tee():
    """Duplicate the sequence 'ABC' by making two independent iterators."""
    first, second = itertools.tee('ABC', 2)
    return [''.join(first), ''.join(second)]
|
33ce3884589c14b789243eb50d74b1f19e942356
| 13,918
|
def _getSubjectivityFromScore( polarity_score ):
"""
Accepts the subjectivity score and returns the label
0.00 to 0.10 - Very Objective
0.10 to 0.45 - Objective
0.45 to 0.55 - Neutral
0.55 to 0.90 - Subjective
0.90 to 1.00 - Very Subjective
"""
status = "unknown"
if ( 0.00 <= polarity_score <= 0.10 ):
return "Very Objective"
elif( 0.10 < polarity_score < 0.45 ):
return "Objective"
elif( 0.45 <= polarity_score <= 0.55 ):
return "Neutral"
elif( 0.55 < polarity_score < 0.90 ):
return "Subjective"
elif( 0.90 <= polarity_score <= 1.00 ):
return "Very Subjective"
return status
|
16e126032fea92d0eac2d4e6b35c9b6666196ad1
| 13,919
|
import base64
def extract_basic_auth(auth_header):
    """
    Extract the username and password from a Basic auth header.

    :param auth_header: content of the Authorization HTTP header
    :return: (username, password) tuple, or (None, None) when the header
        is not a well-formed Basic credential

    Fixes: RFC 7617 allows the password itself to contain ":", so the
    decoded credential is now split only on the first colon (previously
    every colon split the string, silently truncating such passwords).
    Invalid base64 now yields (None, None) instead of raising.
    """
    parts = auth_header.split(" ")
    if parts[0] != "Basic" or len(parts) < 2:
        return None, None
    try:
        decoded = base64.b64decode(parts[1])
    except ValueError:  # binascii.Error is a ValueError subclass
        return None, None
    username, separator, password = decoded.partition(b":")
    if not separator:
        return None, None
    return username.decode(), password.decode()
|
8f3830bc78b0e9fb6182f130e38acea4bd189c86
| 13,920
|
def xor(b1, b2):
    """Return the bytewise XOR of two equal-length bytes objects."""
    assert len(b1) == len(b2)
    return bytes(a ^ b for a, b in zip(b1, b2))
|
3376df85b52cea276417e29ae81c80208dc28b86
| 13,921
|
def setOfWordsToVecTor(vocabularyList, smsWords):
    """Count how often each vocabulary word occurs in an SMS.

    :param vocabularyList: list of vocabulary words; on duplicate words
        the first occurrence's position receives the counts (matching
        the previous ``list.index`` behaviour)
    :param smsWords: the words of the SMS message
    :return: list of occurrence counts, parallel to ``vocabularyList``

    Performance fix: the previous version did an O(n) ``in`` test plus
    an O(n) ``.index`` lookup per SMS word; a one-time position map
    makes each lookup O(1).
    """
    first_position = {}
    for position, word in enumerate(vocabularyList):
        first_position.setdefault(word, position)
    vocabMarked = [0] * len(vocabularyList)
    for smsWord in smsWords:
        position = first_position.get(smsWord)
        if position is not None:
            vocabMarked[position] += 1
    return vocabMarked
|
f14d8fed8a59deb44fc14161de8ae1eaf3ee8b00
| 13,922
|
import re
def is_valid_file(filename):
    r"""
    Validate that *filename* has a .obj or .asm extension.

    :param filename: str
    :return: (is_valid, extension) tuple; extension is the last three
        characters of the filename regardless of validity

    Fix: the original pattern ``^.+\.?(obj|asm)$`` made the dot
    optional, so names like ``"fileobj"`` passed validation; the dot is
    now required before the extension.
    """
    return re.match(r'^.+\.(obj|asm)$', filename) is not None, filename[-3:]
|
6f0032693f0097f85ec09e802a2ab870095a232e
| 13,923
|
import re
def clean_description(text):
    """Repair common mis-encodings introduced by the original datasource.

    Replaces the mojibake sequence for an apostrophe, converts ``<br>``
    tags (any spacing, any case, optional trailing slash) to newlines,
    and drops stray closing ``</br>`` tags.
    """
    apostrophe_fixed = text.replace("\u00c2\u00bf", "'")
    with_newlines = re.sub(r"<\s*br\s*/?>", "\n", apostrophe_fixed,
                           flags=re.IGNORECASE)
    return re.sub(r"</\s*br>", "", with_newlines, flags=re.IGNORECASE)
|
6778c32058137c7af1d99bf52baaa0889a559e95
| 13,924
|
def LinearTransformation(translation, rotation, x, y):
    """
    Apply an affine transformation (rotation then translation) to 2-D points.

    INPUTS
    translation : length-2 translation vector
    rotation : 2x2 rotation matrix
    x : x-coordinates
    y : y-coordinates

    OUTPUTS
    x : transformed x coordinates
    y : transformed y coordinates

    If x and y carry ``.unit``/``.value`` attributes (astropy-style
    quantities, presumably -- TODO confirm with callers), the unit is
    stripped for the matrix product and re-applied afterwards.

    Fix: the bare ``except:`` (which also swallowed KeyboardInterrupt
    and genuine bugs) is narrowed to AttributeError, the only exception
    the attribute access legitimately raises for plain arrays.
    """
    try:
        # Quantity-like inputs: operate on the bare values, keep the unit.
        unit, x, y = x.unit, x.value, y.value
    except AttributeError:
        # Plain arrays/sequences: multiply by a neutral unit of 1.
        unit = 1
    x, y = (translation + rotation.dot([x, y]).T * unit).T
    return x, y
|
e674f215c46ecc6f46da0c1e7074210583f59088
| 13,925
|
def get_safe(dic, *keys):
    """
    Safely walk a chain of nested dictionaries.

    :param dict dic: the outermost dictionary
    :param str keys: successive keys to descend through
    :return: the value at the end of the chain, or None if any key is
        missing along the way (no keys returns *dic* itself)
    """
    missing = object()
    current = dic
    for key in keys:
        current = current.get(key, missing)
        if current is missing:
            return None
    return current
|
17f810ba995314088f93152179e988df436adb47
| 13,926
|
import random
import string
def random_string(length=1, unicode=False):
    """
    Return a random string of *length* characters.

    With ``unicode=False`` characters are drawn from ASCII letters and
    digits; with ``unicode=True`` each character is taken from one of
    two 256-codepoint non-ASCII ranges (starting at U+0300 or U+2000).
    """
    if unicode:
        def pick_char():
            base = random.choice((0x300, 0x2000))
            return chr(base + random.randint(0, 0xff))
    else:
        alphabet = string.ascii_lowercase + string.ascii_uppercase + string.digits
        def pick_char():
            return random.choice(alphabet)
    return ''.join(pick_char() for _ in range(length))
|
45760b3e3c42e484d51abf76f9eb2ae0c8132fd1
| 13,928
|
import os
import subprocess
def is_mysql_running():
    """
    Return True if a mysqld process is running, False otherwise.

    Uses ``pgrep`` so mysql is detected as a process even when it is not
    daemonized; pgrep's stdout (the matching PID) is discarded.
    """
    with open(os.devnull, 'w') as os_devnull:  # lint-amnesty, pylint: disable=bad-option-value
        exit_code = subprocess.call("pgrep mysqld", stdout=os_devnull, shell=True)
    return exit_code == 0
|
56aa092863f9de700952c423901326660bd29383
| 13,929
|
import math
def probability(ec, en, t):
    """
    Simulated-annealing acceptance probability.

    :param ec: current energy
    :param en: next energy
    :param t: temperature ratio
    :return: exp((ec - en) / t)
    """
    energy_delta = ec - en
    return math.exp(energy_delta / t)
|
c055081cd93473ecf4abeab1c8b5cc36fb38f0a4
| 13,930
|
def getGamesListUrl(user_name, page, block_size=100):
    """Build the lichess URL for downloading one page of a user's games list."""
    return ("https://en.lichess.org/api/user/{}/games?page={}&nb={}"
            .format(user_name, page, block_size))
|
314bc8771628944dde1e52299697362482afd9b1
| 13,932
|
def add_ext(name, ext, seq=-1):
    """add_ext constructs file names with a name, sequence number, and
    extension.

    Args:
        name (string) - the filename
        ext (string) - the file extension, with or without a leading '.'
        seq (int) - the number in a sequence with other files
                    (Default -1) means that it is a standalone file

    Returns:
        string such as ``"name.ext"`` or ``"name2.ext"``.

    Fix: an empty ``ext`` previously raised IndexError on ``ext[0]``;
    it is now treated as "no extension".
    """
    # Prepend the period only when the extension lacks one.
    if ext and not ext.startswith('.'):
        ext = '.' + ext
    if seq != -1:
        ext = str(seq) + ext
    return name + ext
|
650eee088ae58d182c49f35f37e5b8deac57fa1d
| 13,933
|
def get_coding_annotation_fasta(seq_record):
    """
    Build a FASTA string covering every CDS feature of a sequence record.

    :param seq_record: A Biopython sequence record object.
    :return: A single FASTA-format string with one entry per CDS feature
        that carries a ``protein_id`` qualifier (features without one are
        skipped). Each header holds the protein id, gene and product
        names, the locus tag, and the feature location; the body is the
        translated protein sequence.
    """
    fasta = []
    features = seq_record.features # Each sequence has a list (called features) that stores seqFeature objects.
    for feature in features: # For each feature on the sequence
        if feature.type == "CDS": # CDS means coding sequence (These are the only feature we're interested in)
            feat_qualifiers = feature.qualifiers # Each feature contains a dictionary called qualifiers which contains
            # data about the sequence feature (for example the translation)
            start = int(feature.location.start) # Type-casting to int strips fuzzy < > characters.
            end = int(feature.location.end)
            # Normalize the strand to '+', '-', or '?' for the header.
            strand = feature.location.strand
            if strand is None:
                strand = "?"
            elif int(strand) < 0:
                strand = "-"
            elif int(strand) > 0:
                strand = "+"
            else:
                strand = "?"
            location = "[" + str(start) + ":" + str(end) + "](" + strand + ")"
            # Gets the required qualifiers. Uses featQualifiers.get to return the qualifiers or a default value if the qualifiers
            # is not found. Calls strip to remove unwanted brackets and ' from qualifiers before storing it as a string.
            protein_id = str(feat_qualifiers.get('protein_id', 'no_protein_id')).strip('\'[]')
            if protein_id == 'no_protein_id':
                continue # Skips the iteration if protein has no id.
            protein_locus = str(feat_qualifiers.get('locus_tag', 'no_locus_tag')).strip('\'[]')
            gene = str(feat_qualifiers.get('gene', 'no_gene_name')).strip('\'[]')
            product = str(feat_qualifiers.get('product', 'no_product_name')).strip('\'[]')
            translated_protein = str(feat_qualifiers.get('translation', 'no_translation')).strip('\'[]')
            fasta_part_one = ">" + protein_id + " " + gene + "-" + product + " (Locus: " + protein_locus + ")"
            fasta_part_two = " (Location: " + location + ")" + "\n" + translated_protein + "\n"
            fasta.append(fasta_part_one + fasta_part_two)
    fasta_string = "".join(fasta)
    return fasta_string
|
4fa24279ebb89ea7c61eeae6614c1fa309ffad87
| 13,936
|
from typing import Union
def bars_to_atmospheres(bar: float, unit: str) -> Union[float, str]:
    """
    Convert a pressure from bar to standard atmospheres.

    One standard atmosphere is exactly 1.01325 bar. Any unit other than
    "bar" yields the string 'Invalid unit'.
    Wikipedia reference: https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
    Wikipedia reference: https://en.wikipedia.org/wiki/Bar_(unit)

    >>> bars_to_atmospheres(36, "bar")
    35.529237601776465
    >>> bars_to_atmospheres("57.6", "bar")
    56.84678016284234
    >>> bars_to_atmospheres(0, "bar")
    0.0
    >>> bars_to_atmospheres(35, "Pa")
    'Invalid unit'
    >>> bars_to_atmospheres("barrs", "bar")
    Traceback (most recent call last):
        ...
    ValueError: could not convert string to float: 'barrs'
    """
    if unit != "bar":
        return "Invalid unit"
    return float(bar) / 1.01325
|
d460021395af77acda01296710f145e9b52d8594
| 13,937
|
def get_ast(token):
    """
    Recursively convert a token and its descendants into plain dicts
    (each entry in ``token.children`` becomes a dict in a list).

    Returns:
        a dictionary of the token's attributes, with a 'type' entry
        holding the token's class name.
    """
    # 'type' is inserted first: Python 3.6+ dicts preserve insertion
    # order ([1]), which makes the tree format resemble MDAST ([2]).
    #
    # [1]: https://docs.python.org/3/whatsnew/3.6.html
    # [2]: https://github.com/syntax-tree/mdast
    node = {'type': token.__class__.__name__}
    node.update(token.__dict__)
    if 'header' in node:
        node['header'] = get_ast(node['header'])
    if 'children' in node:
        node['children'] = [get_ast(kid) for kid in node['children']]
    return node
|
3074b6a64061dd5cfb2209d803ab060203dfec06
| 13,938
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.