content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_tuple(string, n, as_int = False):
    """
    Split a comma-separated string into an n-tuple.

    Extra elements are ignored, and a single element is replicated n times.
    @param string The string
    @param n The number of elements
    @param as_int If true, it will cast to int
    @raise IndexError if the element count is neither 1 nor >= n
    """
    parts = string.split(",")
    if len(parts) >= n:
        # Bug fix: keep exactly n elements (was t[:n - 1], which dropped one
        # element whenever the string had more than n values).
        t = tuple(parts[:n])
    elif len(parts) == 1:
        t = tuple(parts * n)
    else:
        raise IndexError("Invalid number of values")
    if as_int:
        return tuple(int(i) for i in t)
    return t
|
59b339c561bccc428f4e2882c369a7dcda86aaff
| 10,948
|
def ParseDupHit(text):#{{{
    """
    Parse duplication hits of the form "1-575(nTM=2) 2-571(nTM=2)".

    Returns a list of [begin, end, numTM] triples, one per hit.
    """
    hits = []
    for token in text.split():
        span_part, ntm_part = token.rstrip(')').split('(nTM=')
        begin, end = (int(v) for v in span_part.split('-'))
        hits.append([begin, end, int(ntm_part)])
    return hits
|
d488076fcaf581376730bb404090d16c99137baa
| 10,949
|
def common_chain_res(model1, model2):
    """
    Given a pair of Bio.PDB.Chain objects, returns the common residues and their respective CA atoms.
    Returns a tuple with two lists: The first one contains the list of atoms corresponding
    to the first chain. The second list contains the list of atoms of the second chain, both with the
    same length.
    """
    residues1 = list(model1)
    residues2 = list(model2)
    # Pair residues positionally; keep only pairs whose names agree.
    matching_pairs = [
        (r1, r2)
        for r1, r2 in zip(residues1, residues2)
        if r1.get_resname() == r2.get_resname()
    ]

    def _backbone_atoms(residues):
        # CA for proteins, C1' for nucleic acids.
        return [
            atom
            for residue in residues
            for atom in residue
            if atom.id == "CA" or atom.id == "C1'"
        ]

    chain1_atoms = _backbone_atoms(r1 for r1, _ in matching_pairs)
    chain2_atoms = _backbone_atoms(r2 for _, r2 in matching_pairs)
    return (chain1_atoms, chain2_atoms)
|
8809e23d23237310ef530fb0c4d4509c93553a2a
| 10,951
|
def _filter_out(message: str) -> bool:
"""Return True when message should be ignored.
Args:
message (str): message to analyze
Returns:
bool: True when message should be ignored, False otherwise
"""
for authorized_function in ("input", "print", "pprint"):
if f"Found wrong function call: {authorized_function}" in message:
return True
return False
|
461c0f36aed22d80384202093bc1cfc40b88242c
| 10,952
|
import re
def byteform_to_num(byte_format):
    """Converts a string expressing a size of a file in bytes into the
    corresponding number of bytes. Accepts commas and decimal points in nums.
    Allows 'b', 'kb', 'mb', 'gb', and variants like 'bytes', but not 'tb', 'zb', etc.
    Note that format_bytes is a lossy function (it doesn't retain all sigfigs
    by default), so byteform_to_num(format_bytes(x)) does not always equal x.
    """
    # Raw string fixes the invalid escape sequences (\d, \., \s) in the
    # original non-raw pattern.
    num_str, suf = re.findall(r"([\d,\.]+)\s*([a-z]*)", byte_format, re.I)[0]
    num = float(num_str.replace(",", ""))
    suf = suf.lower()
    if suf == "" or suf[0] == "b":
        return num
    multipliers = {"kb": 1e3, "mb": 1e6, "gb": 1e9}
    if suf[:2] in multipliers:
        return num * multipliers[suf[:2]]
    raise ValueError(f"byteform_to_num couldn't recognize quantifier '{suf}'")
|
300ede4ef120b9e3a8db85effcb6611dd9299953
| 10,953
|
def test_identifier(recipe):
"""Test recipe identifier for proper construction.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
name = recipe["Input"].get("NAME")
if name:
# The identifier may not have spaces.
name = name.replace(" ", "")
description = ("Recipe identifier follows convention. "
"('com.github.novaksam.jss.%s')" % name)
result = False
identifier = recipe.get("Identifier")
if identifier and name:
if (str(identifier).startswith("com.github.novaksam.jss.") and
str(identifier).rsplit(".", 1)[1].startswith(name)):
result = True
return (result, description)
|
800b7a9d07ef7094d32a17033da519c9edb5039b
| 10,955
|
import six
def get_probs_for_labels(labels, prediction_results):
    """ Given ML Workbench prediction results, get probs of each label for each instance.
    The prediction results are like:
      [
        {'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
        ...
      ]
    Each instance is ordered by prob, but sometimes probs are needed for a fixed
    order of labels; e.g. given labels = ['daisy', 'rose', 'sunflower'] the first
    row above becomes [0.8, 0.1, 0.0].
    Note that the sum of each instance may not always be 1: if the model's top_n is
    non-zero and less than the number of labels, results may not contain probs for
    all labels.
    Args:
      labels: a list of labels specifying the order of the labels.
      prediction_results: a pandas DataFrame containing prediction results, usually returned
          by get_prediction_results() call.
    Returns:
      A list of list of probs for each class.
    """
    probs = []
    if 'probability' in prediction_results:
        # top-n mode: columns are "predicted, predicted_2, ..., probability, probability_2, ..."
        for _, row in prediction_results.iterrows():
            probs_one = [0.0] * len(labels)
            # Fix: the original compared k == 'predict' (a key that never occurs,
            # the column is 'predicted') and relied on the suffix slice happening
            # to produce the right name; map the suffix explicitly instead.
            # Also replaces six.iteritems with the native Series.items().
            for col, value in row.items():
                if value in labels and col.startswith('predicted'):
                    # 'predicted' -> 'probability', 'predicted_2' -> 'probability_2', ...
                    prob_col = 'probability' + col[len('predicted'):]
                    probs_one[labels.index(value)] = row[prob_col]
            probs.append(probs_one)
    else:
        # top-n == 0: columns are "predicted, class_name1, class_name2, ..."
        for _, row in prediction_results.iterrows():
            probs_one = [0.0] * len(labels)
            for col, value in row.items():
                if col in labels:
                    probs_one[labels.index(col)] = value
            probs.append(probs_one)
    return probs
|
f18580a5aba09df56fbb9b45b8ca5753eeba0d62
| 10,956
|
import math
def convert_local_to_global_vector(v: list, yaw: float):
    """
    Rotate a vector from the vehicle coordinate system into the global one.

    :param v: [x, y] vector in vehicle coordinates
    :param yaw: vehicle yaw in degrees
    :return: [x, y] vector in global coordinates
    """
    yaw_rad = math.radians(yaw)
    cos_yaw = math.cos(yaw_rad)
    sin_yaw = math.sin(yaw_rad)
    return [cos_yaw * v[0] - sin_yaw * v[1],
            sin_yaw * v[0] + cos_yaw * v[1]]
|
62f96a22c85f22125165e387bfeea76d78e5c519
| 10,958
|
def humanReadable(size_in_bytes):
    """Return *size_in_bytes* formatted with human-readable units.

    Non-numeric input (including None) is treated as 0.  Sizes beyond the
    largest threshold are clamped to the TB unit instead of returning None.
    """
    try:
        size_in_bytes = int(size_in_bytes)
    except (ValueError, TypeError):
        # Fix: also catch TypeError so None (or other non-castables) is
        # treated as 0 instead of crashing.
        size_in_bytes = 0
    units = [(" KB", 10**6), (" MB", 10**9), (" GB", 10**12), (" TB", 10**15)]
    for suffix, limit in units:
        if size_in_bytes <= limit:
            return str(round(size_in_bytes/float(limit/2**10), 1)) + suffix
    # Fix: sizes above the final threshold previously fell off the loop and
    # returned None; clamp them to the largest unit instead.
    suffix, limit = units[-1]
    return str(round(size_in_bytes/float(limit/2**10), 1)) + suffix
|
29dd9e46d535943e83cd65c34f4135af898f0bbc
| 10,961
|
import os
def get_cwd():
    """Return the current working directory.

    Kept as a thin wrapper so callers can patch it in tests.
    """
    return os.getcwd()
|
601ca5210e08c28cc307c1c86414b30ce72bbb50
| 10,965
|
def _agent_has_email_address(agent_obj):
"""Check if agent has email.
Arguments:
agent_obj (list/dict): The specified field from the research dataset. If publisher then dict else list
Returns:
bool: True if has emails, False if not.
"""
if agent_obj:
if isinstance(agent_obj, list) and len(agent_obj) > 0:
return 'email' in agent_obj[0]
elif isinstance(agent_obj, dict):
return 'email' in agent_obj
return False
|
a662a9a874e607d90db644c93515632606ae83fc
| 10,966
|
def list_acquisition_dates(dc, product):
    """Get a list of acquisition dates for a given product"""
    dataset = dc.load(product, dask_chunks={})
    if dataset:
        return dataset.time.values.astype('M8[ms]').tolist()
    # empty / missing product yields no dates
    return []
|
97012b354d835ad94e7037f9c68fcfa72d143930
| 10,967
|
import torch
def coord_map(shape, device, start=-1, end=1):
    """
    Given a 2d shape tuple (m, n), return a (2, m, n) tensor holding x and y
    coordinate maps, each ranging linearly from ``start`` to ``end``.
    """
    m, n = shape
    xs = torch.linspace(start, end, steps=n).to(device)
    ys = torch.linspace(start, end, steps=m).to(device)
    # broadcast the 1-D rows/columns into full (m, n) maps
    x_map = xs.expand(m, n).unsqueeze(0)
    y_map = ys.unsqueeze(1).expand(m, n).unsqueeze(0)
    return torch.cat((x_map, y_map), dim=0)
|
056dddd9442cef58dca7cc6cc30fd6b8cca9bc85
| 10,968
|
from typing import Tuple
from typing import Dict
def criar_base_quarentena(dados: dict) -> Tuple[Dict[str, int], Dict[str, int]]:
    """Create the bookkeeping structure for KubeKepam's containment of
    successive adaptations.

    Args:
        dados (dict): mapping whose first value is a dict keyed by service name.
    Returns:
        Two dicts (adaptation status, time since adaptation), each mapping
        every service name to 0, e.g.
        {'currencyservice': 0, 'emailservice': 0}
    """
    first_namespace = dados[next(iter(dados))]
    adaptation_status = {service: 0 for service in first_namespace}
    time_after_adaptation = {service: 0 for service in first_namespace}
    return adaptation_status, time_after_adaptation
|
7e4124092760575edb963606f88da7c38dfe0455
| 10,969
|
def heart_speed(speed='half'):
    """Set the low-health warning beep interval.
    Keyword Arguments:
        speed {str} -- Chose the speed at which the low health warning beeps.
            Options are 'off', 'double', 'normal', 'half', and 'quarter'. (default: {'half'})
    Returns:
        list -- a list of dictionaries indicating which ROM address offsets to write and what to write to them
    """
    interval_bytes = {
        'off': 0,
        'double': 16,
        'normal': 32,
        'half': 64,
        'quarter': 128,
    }
    # None falls back to the 'normal' interval
    chosen = 'normal' if speed is None else speed
    return [{'1572915': [interval_bytes[chosen]]}]
|
62bab25f6221e1260048fac3126280cd4791a9b3
| 10,970
|
def upsample_kernel_size_solver(
    in_size, out_size, stride=1, padding=0, output_padding=0, dilation=1,
):
    """
    Returns kernel size needed to upsample a tensor of some input size to
    a desired output size.
    Solves for the kernel size in the output-shape equation from the
    "Shape" section of the pytorch ConvTranspose1d docs:
    https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose1d.html
    """
    numerator = out_size - 1 - output_padding - (in_size - 1) * stride + 2 * padding
    kernel = int(numerator / dilation + 1)
    return (kernel,)
|
58bcf58c3781b195801e1239b9fa688d19c89288
| 10,971
|
def get_key_from_dict (dictionary, key, group, required=True):
    """
    Grab a value from dictionary
    :param dictionary: Dictionary to utilize
    :param key: Key to fetch
    :param group: Group this key belongs to, used for error reporting purposes
    :param required: Boolean indicating whether this key is necessary for a valid manifest
    :return Value if found
    """
    if key in dictionary:
        return dictionary[key]
    if not required:
        return None
    raise KeyError ("Failed to generate manifest: {0} missing {1}".format (group, key))
|
81245795296c2fe6d21bb84f0cccfa197a966b24
| 10,972
|
import codecs
import re
def get_bookmarks_tree(bookmarks_filename):
    """Get bookmarks tree from a TEXT-format file.

    Each bookmark line looks like ``+ "Title" | page``, where the number of
    leading ``+`` characters is the nesting level (level 1 is top-level).
    Lines starting with ``//`` set a page-number offset; anything else is
    ignored.  Returns a nested list of ``(title, zero_based_page, children)``
    tuples, e.g.::

        [(u'Foreword', 0, []),
         (u'Chapter 1', 1, [(u'1.1 Python', 1, [])])]
    """
    # bookmarks tree
    tree = []
    # the latest node seen at each level, keyed by level; the value is the
    # node's children list, so level 0 maps to the tree root itself
    latest_nodes = {0: tree}
    offset = 0
    prev_level = 0
    # Fix: the original iterated an anonymous codecs.open() handle and never
    # closed it; a context manager guarantees the file is closed.
    with codecs.open(bookmarks_filename, 'r', encoding='utf-8') as fin:
        for line in fin:
            line = line.strip()
            if line.startswith('//'):
                # "//N" adjusts the page offset for all following bookmarks
                try:
                    offset = int(line[2:])
                except ValueError:
                    pass
                continue
            res = re.match(r'(\+*)\s*?"([^"]+)"\s*\|\s*(\d+)', line)
            if res:
                pluses, title, page_num = res.groups()
                cur_level = len(pluses)  # plus count stands for level
                cur_node = (title, int(page_num) - 1 + offset, [])
                # a node may only nest at most one level deeper than its predecessor
                if not (0 < cur_level <= prev_level + 1):
                    raise Exception('plus (+) count is invalid here: %s' % line)
                # append the current node into its parent node's children
                latest_nodes[cur_level - 1].append(cur_node)
                latest_nodes[cur_level] = cur_node[2]
                prev_level = cur_level
    return tree
|
a1cd349be3570900ae48ffc5e6ae217c8655cc42
| 10,974
|
import jinja2
def render_template(authors, configuration):
    """
    Render an authors template in ``adoc``, ``html``, ``md``, ``rst``,
    or ``txt`` format.

    Parameters
    ----------
    authors : list
        The authors to include in the rendered template.
    configuration : dict
        Configuration settings relevant to the rendered template
        (``kind``, ``heading``, ``opening``, and ``closing``).

    Returns
    -------
    str
        The rendered template.
    """
    environment = jinja2.Environment(
        loader=jinja2.PackageLoader("authors", "templates"),
        lstrip_blocks=True,
        trim_blocks=True,
    )
    template_name = "template.{}".format(configuration["kind"])
    template = environment.get_template(template_name)
    return template.render(authors=authors, **configuration)
|
879693831529fb1786a04df3df1173601adebd63
| 10,975
|
import re
def ranks_unclassified(desc):
    """
    Returns number of unclassified ranks in description
    :param desc:
    :return:
    """
    pattern = re.compile(".+(_[X]+)$")
    # an unclassified rank ends with "_X", "_XX", ...
    return sum(1 for rank in desc.split(";") if pattern.match(rank))
|
3a016315019b03e0456e7315afd5b3fa303e265a
| 10,976
|
def expand_output(out):
    """Plotly callback outputs for `update_simulator` function"""
    keys = ("alert", "mrsim", "children", "mrsim_config", "processor")
    expanded = []
    for key in keys:
        expanded.extend(out[key])
    return expanded
|
b1d1135c8fb16d2245f882a29c622e1da42f501a
| 10,977
|
def drop_table_if_exists():
    """ Removes all tables on app start so as to start working with no data """
    # drop in dependency order: referencing tables before referenced ones
    tables = ["votes", "rsvp", "comments", "meetups", "questions", "users"]
    return [""" DROP TABLE IF EXISTS {} """.format(table) for table in tables]
|
d3e79992c88becd5f398162938bd22990e238803
| 10,978
|
import os
def create_tempdir(path):
    """Create (if needed) and return a "temp" folder under *path*.

    :param path: parent directory path
    :return: path of the temp directory
    """
    # os.path.join replaces the original's manual "/" concatenation and the
    # redundant str() wrapper.
    temp_path = os.path.join(path, "temp")
    if not os.path.exists(temp_path):
        os.makedirs(temp_path)
    return temp_path
|
ad3e8d483d841f73d02885eaea826ea71cf15b8d
| 10,979
|
def get_body_part_colour(shot):
    """
    Decide the colour of a plot element based on the shooter's body part.
    """
    colours = {
        'Right Foot': 'orange',
        'Left Foot': 'red',
        'Head': 'magenta',
    }
    # anything else (body part "Other") renders cyan
    return colours.get(shot['shot']['body_part']['name'], 'cyan')
|
89a6187840ee2f830f2c07579a555fc1944ea086
| 10,980
|
import os
import base64
def transfer_file_to_str(file_path):
    """Transfer image to a string
    :param file_path: file path
    :return: msg_dict including file name and file str encoded by base64 package
    """
    with open(file_path, 'rb') as handle:
        # read raw bytes, base64-encode, then decode to an ASCII string
        encoded = base64.b64encode(handle.read()).decode('ascii')
    return {
        'file_name': os.path.basename(file_path),
        'file_str': encoded,
    }
|
c68021e13b9be162f36a621e92bc6078d2f10de2
| 10,982
|
import random
def generate_key(num_digit):
    """Generate a random numeric OTP with exactly *num_digit* digits."""
    lower = 10 ** (num_digit - 1)
    upper = 10 ** num_digit - 1
    return random.randint(lower, upper)
|
132716dd643af309a8e7e90a72fbed60e47bd255
| 10,983
|
def check_3(sigs):
    """
    Repository managed by both SIG and Private.
    """
    print("Repository managed by both SIG and Private.")
    supervisors = {}
    for sig in sigs:
        sig_name = sig["name"]
        for repo in sig["repositories"]:
            # Gitee requires strict case-sensitive naming for direct access,
            # so repo names are NOT lower-cased here.
            supervisors.setdefault(repo, set()).add(sig_name)
    print("There're " + str(len(supervisors)) + " repositories in total.")
    co_managed = 0
    private_only = 0
    for owners in supervisors.values():
        if "Private" in owners:
            if len(owners) == 1:
                private_only += 1
            else:
                co_managed += 1
    print("There're " + str(co_managed) + " repositories co-managed by Private")
    print("There're " + str(private_only) + " repositories managed by Private only")
    return supervisors
|
908737c960ce54bb8c724a792977e23b36775174
| 10,987
|
def _HasOption(_, option):
"""Validate the option exists in the config file.
Args:
option: string, the config option to check.
Returns:
bool, True if test is not in the option name.
"""
return 'test' not in option
|
5c8304b8e4abe91ec8e3e55f14201a1a3d7f5c57
| 10,988
|
def get_parameters_nodes(input_nodes):
    """Find operations containing the parameters of the model.
    Args:
        input_nodes (:obj:`list` of :obj:`Node`): the input operations of the
            model.
    Returns:
        parameters (:obj:`list` of :obj:`Node`): the operations containing
            the parameters of the model.
    """
    return [node for node in input_nodes if node.is_trainable]
|
a4185512a577521f0ed3d7cc9b098800fda58974
| 10,990
|
def get_method(interface, method):
    """
    Look up a method on a CST Studio interface by name.

    Parameters
    ----------
    interface : interface of CST Studio
    method : string
        Name of the method you want.

    Returns
    -------
    instance : function
        Bound instance of the method.
    """
    return getattr(interface, method)
|
bd0a8322e2a47f8c0b760894617c9ed2429fcb02
| 10,992
|
def read_file_header( filename ):
    """
    A header is:
      1. Any number of blank lines before the header
      2. Any number of KEY=value pairs (anything else is ignored)
      3. One or more blank lines stops the header
    Returns a tuple (format type, version integer, header dict, hdr lines),
    where the format type and version integer may be None if the header key
    "FILE_VERSION" was not found.  The header lines is the number of lines
    of header data in the file.
    """
    cnt = 0
    hdr = {}
    # Fix: use a context manager so the file is always closed; the original
    # only closed it when `filename` was a str.
    with open( filename, 'r' ) as fp:
        for line in fp:
            line = line.strip()
            if line[:5] == 'TEST:':
                break
            elif line:
                cnt += 1
                kv = line.split('=', 1)
                if len(kv) == 2 and kv[0].strip():
                    key, val = kv[0].strip(), kv[1].strip()
                    # the literal string "None" decodes to the None object
                    hdr[key] = None if val == 'None' else val
            elif cnt > 0:
                # a blank line after at least one header line ends the header
                break
    vers = hdr.get( 'FILE_VERSION', None )
    if vers:
        # split a trailing run of digits off the version string
        i = len(vers) - 1
        while i >= 0 and vers[i] in '0123456789':
            i -= 1
        fmt = vers[:i + 1]
        digits = vers[i + 1:]
        num = int(digits) if digits else 0
        return fmt, num, hdr, cnt
    return None, None, hdr, cnt
|
54bec5d22db1d04ba081786b287abe678c71e487
| 10,993
|
def get_pip_command_packages(command):
    """Return packages included in a pip command."""
    # tokens[0:2] are "pip <subcommand>"; the rest are package names
    tokens = command.split()
    return tokens[2:]
|
63757643862b590f27eed927b2037ad45eaf4792
| 10,994
|
def gini(k_proportions):
    """
    Gini impurity function. Determines the impurity of a dataset
    given the proportions of its classes:
        H = sum pk * (1 - pk) over all k classes.
    :param k_proportions: array containing proportions of different classes. Proportions sum to 1.
    :return: the impurity of the dataset.
    """
    per_class = k_proportions * (1 - k_proportions)
    return per_class.sum()
|
db0e74b29166603ed2bda3aa5fa9614ba9206b67
| 10,996
|
from pathlib import Path
def fixture_project_dir() -> Path:
    """Return path to the Python project directory.
    :return: a path
    """
    # two levels above this module (parents[1] == parent.parent)
    return Path(__file__).parents[1]
|
08ef4059aa5fa4b125928e9843b83f2befff8576
| 10,997
|
def return_element_from_list(i, l):
    """
    Returns an element from the list
    @param: i is an integer corresponding to the index of the element in the list
    @param: l is a list of elements
    return:
        element of the list if 0 <= i <= len(l) - 1
        None otherwise
    """
    if 0 <= i < len(l):
        return l[i]
    return None
|
7d57263d67fe85c13f34428c23cdaf9ae7671855
| 10,998
|
def integer_identical_to_index(nums):
    """
    Binary search for an index i with nums[i] == i.
    :param nums: sorted array
    :return: the number that equals its index
    :raises Exception: when no such number exists
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] == mid:
            return mid
        if nums[mid] < mid:
            lo = mid + 1
        else:
            hi = mid - 1
    raise Exception('NO SUCH NUMBER')
|
8f289bdf268839659d68565a69e61eb36a717b40
| 10,999
|
def map_nested(f, itr):
    """apply a function to every element in a 2D nested array"""
    return [list(map(f, row)) for row in itr]
|
6546781478ef0f1b53a257c3571a56dd77942014
| 11,001
|
def process_template_file(self, request, template_file, variable_encoding = None):
    """
    Processes the given template file, using the given
    variable encoding.
    :type request: Request
    :param request: The request to be used in the template
    file processing.
    :type template_file: Template
    :param template_file: The template file to be processed.
    :type variable_encoding: String
    :param variable_encoding: The encoding to be used to encode the variables
    in the template file processing.
    :rtype: String
    :return: The processed template file.
    """
    template_file.set_variable_encoding(variable_encoding)
    # names of the composite operations the template engine supports
    method_names = (
        "process_stylesheet_link",
        "process_javascript_include",
        "process_ifacl",
        "process_ifaclp",
        "process_ifnotacl",
        "process_request_time",
    )
    # build the (name, method) pairs and attach them so they may be used
    # for "extra" composite operations
    process_methods_list = [
        (name, self.get_process_method(request, name)) for name in method_names
    ]
    template_file.attach_process_methods(process_methods_list)
    # run the template engine and return the resulting string
    return template_file.process()
|
036f28e44008f97498cbd5dcc756ee795f1c6d30
| 11,003
|
import re
def is_indvar(expr):
    """
    An individual variable must be a single lowercase character other than 'e', 't', 'n', 's',
    followed by zero or more digits.
    @param expr: C{str}
    @return: C{boolean} True if expr is of the correct form
    """
    assert isinstance(expr, str), "%s is not a string" % expr
    # Fix: return a real boolean as the docstring promises; re.match returns
    # a Match object or None, which leaked an implementation detail.
    return re.match(r'^[a-df-mo-ru-z]\d*$', expr) is not None
|
c00e62199263214596a0b9519868ffdeb86e9580
| 11,004
|
import sys
def compare( value1, value2, epsilonFactor = 0 ) :
    """
    Fuzzily compare two floats (or float-convertible objects).

    Two values are considered equal when the magnitude of their relative
    difference, ( value1 - value2 ) / max( abs( value1 ), abs( value2 ) ),
    is at most epsilonFactor * sys.float_info.epsilon.  With the default
    epsilonFactor = 0 only exactly-equal values compare equal.

    :param value1: value to compare to value2 (anything float() accepts)
    :param value2: value to compare to value1 (anything float() accepts)
    :param epsilonFactor: scale factor applied to sys.float_info.epsilon
    :returns: 1 if value1 is deemed greater than value2, 0 if equal and -1 otherwise
    :rtype: `int`
    """
    v1 = float(value1)
    v2 = float(value2)
    delta = v1 - v2
    if delta == 0:
        return 0
    largest = max(abs(v1), abs(v2))
    if abs(delta) <= float(epsilonFactor) * sys.float_info.epsilon * largest:
        return 0
    return -1 if delta < 0. else 1
|
2cb563f4785280c6827d321d98406dbb3c17cc43
| 11,005
|
def uniq_count(data):
    """
    Count number of unique elements in the data.
    Args:
        data (list): values.
    Returns the number of unique elements in the data.
    """
    return len(set(data))
|
d61eff27aed7d788fa6cc80eb25661f0ebae7dfd
| 11,006
|
def try_float(x):
    """
    Convert *x* to float, returning None on failure.

    :param x: value to convert (may be None)
    :return: float(x), or None when x is None or not convertible
    """
    if x is None:
        return None
    try:
        # Bug fix: the original recursively called try_float(x) here,
        # recursing to the interpreter limit instead of converting.
        return float(x)
    except (TypeError, ValueError):
        return None
|
905b1cdedc086a2f6f6572bb720b9d04ef1aa2b6
| 11,007
|
import pickle
def load_dt(filename):
    """
    Load a previously saved decision tree.

    :param filename: file name
    :return: python dict
    """
    # 'rb': pickle data is binary.  The context manager fixes the original's
    # leaked file handle (open() without a matching close()).
    with open(filename, 'rb') as fr:
        return pickle.load(fr)
|
c61772d6c8606e45ef323bd8dd30cb0c9e6ebf35
| 11,008
|
import argparse
def parse_arguments():
    """
    Parses the command-line arguments passed to the assembler.
    """
    parser = argparse.ArgumentParser(
        description="File listing and editing utility. See README.md for more "
                    "information, and LICENSE for terms of use."
    )
    parser.add_argument(
        "host_filename", help="the host file to process (DSK, CAS, VDK, etc)"
    )
    # boolean switches, declared data-driven to avoid repetition
    switches = (
        ("--append", "append to host file if it already exists"),
        ("--list", "list all of the files on the specified host file"),
        ("--to_bin", "extracts all the files from the host file, and saves them as BIN files"),
        ("--to_cas", "extracts all the files from the host file, and saves them as CAS files"),
    )
    for flag, help_text in switches:
        parser.add_argument(flag, action="store_true", help=help_text)
    parser.add_argument(
        "--files", nargs="+", type=str, help="list of file names to extract"
    )
    return parser.parse_args()
|
264a9b2c9a608d3a63704881d449d82f6a556565
| 11,009
|
from typing import MutableSequence
from typing import Any
def swap(seq: MutableSequence[Any], frst_idx: int, snd_idx: int) -> MutableSequence[Any]:
    """Swaps two elements in the `seq`."""
    # skip the swap when the values are already equal
    if seq[frst_idx] != seq[snd_idx]:
        seq[frst_idx], seq[snd_idx] = seq[snd_idx], seq[frst_idx]
    return seq
|
1d4c3260e57f827293d849b490bd35c2ae9f9698
| 11,012
|
def parse_problems(lines):
    """ Given a list of lines, parses them and returns a list of problems. """
    problems = []
    cursor = 0
    while cursor < len(lines):
        # each problem starts with a "height width" line followed by
        # `height` grid lines
        height, width = (int(tok) for tok in lines[cursor].split(" "))
        body = lines[cursor + 1:cursor + height + 1]
        problems.append((width, height, body))
        cursor += height + 1
    return problems
|
883a6a7cfaa8104c171a6e166fcbb8f4403f4c01
| 11,013
|
import argparse
import sys
def get_args():
    """ Parse the command-line arguments for the head program. """
    parser = argparse.ArgumentParser(
        description='Head program',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-n', '--num',
                        help='Num. of lines',
                        metavar='lines',
                        type=int,
                        default=10)
    # one or more readable input files, defaulting to stdin
    parser.add_argument('file',
                        help='Input file(s)',
                        nargs='+',
                        metavar='FILE',
                        type=argparse.FileType('rt'),
                        default=[sys.stdin])
    return parser.parse_args()
|
9b00939d776baa70fbe0f26df698258b1dbca4a1
| 11,014
|
def read_input(fpath):
    """
    Read a specified file, and return a list of the file's contents
    separated by new lines.
    Args:
        fpath (str): Path of the file to read.
    Returns:
        list
    """
    with open(fpath, 'r') as handle:
        contents = handle.read()
    return contents.splitlines()
|
ac10f24d7b5769ee85b3f13f3fde278d56ef1741
| 11,015
|
def safe_get(dictionary, key, default_value, can_return_none=True):
    """
    Safely perform a dictionary get,
    returning the default value if the key is not found.
    :param dict dictionary: the dictionary
    :param string key: the key
    :param variant default_value: the default value to be returned
    :param bool can_return_none: if ``True``, the function can return ``None``;
                                 otherwise, return ``default_value`` even if the
                                 dictionary lookup succeeded
    :rtype: variant
    """
    try:
        value = dictionary[key]
    except (KeyError, TypeError):
        # KeyError: key not present; TypeError: dictionary is None
        return default_value
    if value is None and not can_return_none:
        return default_value
    return value
|
eb53ad7a17db3f2c66c8b16b20ae2aac9b1e34e3
| 11,016
|
def parse_grid(grid):
    """
    converts a grid like
        K F A
        L G B
        M H C
        N I D
        O J E
    to "ABCDEFGHIJKLMNO" — columns read right-to-left, each top-to-bottom.
    """
    cells = [line.strip().split(" ") for line in grid]
    letters = []
    for col in (2, 1, 0):
        for row in range(5):
            letters.append(cells[row][col])
    return "".join(letters)
|
f1cb825e3d20edd2db92fee4104204e9bcb1f54a
| 11,018
|
from typing import List
from typing import Tuple
from typing import Optional
import os
def _read_existing_krb5_conf_file_lines(locations: List[str]) -> Tuple[Optional[str], List[str]]:
""" Read in existing krb5 configuration file lines and return them """
for loc in locations:
if os.path.isfile(loc):
with open(loc) as fp:
krb5_lines = fp.readlines()
# be careful of empty files
if krb5_lines:
return loc, krb5_lines
return None, []
|
3238c0cf35e699e02b4267fd80039ef9d4c26335
| 11,020
|
def predicate_contains_hello(x):
    """Predicate True when 'hello' is in value."""
    contains = 'hello' in x
    return contains
|
884ef0a9a925865d5bd9093d52f1c248e498aa80
| 11,021
|
def get_tile_url(xtile, ytile, zoom):
    """
    Return a URL for a tile given some OSM tile co-ordinates
    """
    # OSM tile paths are /zoom/x/y.png
    tile_path = "%d/%d/%d.png" % (zoom, xtile, ytile)
    return "http://tile.openstreetmap.org/" + tile_path
|
20af9f5b4065d96c285e6c4e5c64123f0343b659
| 11,022
|
def split_into_words_by_char_count(s, chunk_size, max_from_end=None):
    """
    Split a string into an array of strings each of length at most chunk_size.
    Try to split on whitespace if possible (possibly resulting in chunks of size
    less than chunk_size), except if it would make the last word longer
    than max_from_end in which case just split it when we exceed the chunk size.
    :param s: the string to split
    :param chunk_size: maximum chunk length
    :param max_from_end: longest trailing word fragment worth preserving;
        defaults to chunk_size / 10
    :return: list of chunks (a single empty string for empty input)
    """
    if max_from_end is None:
        max_from_end = chunk_size / 10
    if len(s) == 0:
        return [""]
    chunks = []
    chunk_start = 0
    while chunk_start < len(s):
        chunk = s[chunk_start:chunk_start + chunk_size]
        boundary = chunk_start + chunk_size
        # Bug fix: guard `boundary >= len(s)` before indexing s[boundary];
        # the original raised IndexError when a full-size chunk ended
        # exactly at the end of the string.
        at_natural_break = (len(chunk) < chunk_size
                            or chunk[-1].isspace()
                            or boundary >= len(s)
                            or s[boundary].isspace())
        if at_natural_break:
            chunks.append(chunk)
            chunk_start += len(chunk)
        else:
            subchunks = chunk.rsplit(None, 1)
            if len(subchunks) == 1 or len(subchunks[1]) > max_from_end:
                # no whitespace to split on, or the tail word is too long
                chunks.append(chunk)
                chunk_start += len(chunk)
            else:
                chunks.append(subchunks[0])
                chunk_start += len(subchunks[0])
    return chunks
|
ae3391536bd6e92f0c8ae9ce5cce76865485f689
| 11,023
|
def set_arguments(self, _id):
    """
    Build and store the argument list describing the CWL workflow URL.
    """
    cwl_wf_url = {
        "name": self.cwl_static_keys[0],
        "description": self.cwl_static_keys[1],
        "help": self.cwl_static_keys[1] + " for " + _id,
        "type": self.cwl_static_keys[2],
        "value": self.cwl_wf,
        "required": True,
    }
    # a single argument for now; kept as a list for the caller's benefit
    self.arguments = [cwl_wf_url]
    return self.arguments
|
7965d86438a43a44f40784805a09c4320233b2be
| 11,024
|
import json
def json_to_dict(json_file_path):
    """
    Convert a .json file to a Python dictionary.
    Parameters
    ----------
    json_file_path: str
        Path of the JSON file
    Returns
    -------
    dictionary: dict
        The original JSON file as a Python dictionary
    """
    # Fix: json.load()'s `encoding` kwarg was removed in Python 3.9 (it was
    # ignored since 3.1); declare the encoding on the file handle instead.
    with open(json_file_path, "r", encoding="utf8") as json_data:
        return json.load(json_data)
|
d8ef47ab43c3477212795e795690bad4081e0321
| 11,025
|
def format_bases(bases):
    """
    Generate HTML that colours the bases in a string.
    Args:
        bases: A string containing a genetic sequence.
    Returns:
        An HTML string.
    """
    return ''.join(
        '<span class="base-{}">{}</span>'.format(base, base) for base in bases
    )
|
a94f34a53178ceb5dd1640eaa8897a128c7c4d67
| 11,026
|
def get_services(services):
    """Join the service names into one comma-separated string.

    :param services: List of services.
    :return: comma-separated list.
    """
    separator = ', '
    return separator.join(services)
|
6c965dff4c85f772b41d10e12170547f156ee86e
| 11,028
|
def check_for_default_value_for_missing_params(missing_params, method_params):
    """Find rule-missing params that carry a default value on the method.

    :param missing_params: Params missing from Rule
    :param method_params: Params defined on the method, each a dict that may
        contain a ``defaultValue`` key, e.g.
        ``{'label': ..., 'name': ..., 'fieldType': ..., 'defaultValue': 123}``
    :return: set of param names missing from the rule but having a
        non-None default value, e.g. ``{'action_parameter'}``
    """
    if not method_params:
        return set()
    return {
        param['name']
        for param in method_params
        if param['name'] in missing_params
        and param.get('defaultValue') is not None
    }
|
bad583976d4c88af93540c9d64f7b0711fd24e12
| 11,029
|
import random
import string
def mock_tweet():
    """Generate some random tweet text (70-140 ASCII letters)."""
    length = random.randint(70, 140)
    letters = (random.choice(string.ascii_letters) for _ in range(length))
    return ''.join(letters)
|
1a8f9937408acaaaf2534be0721a8e3203525eb3
| 11,030
|
def date_comparison(input_series, output_series):
    """Compare the start/end dates of the input and output series and
    return the date range that completes the output series.

    :param input_series: dict with ``data`` holding ``first``/``last`` dates
    :param output_series: dict with the same layout
    :return: dict with ``first``/``last`` bounds to (re)insert
    """
    in_data = input_series['data']
    out_data = output_series['data']
    # A mismatching start means the output series is not left aligned:
    # something happened and the whole series must be re-inserted.
    # Otherwise only the missing tail (from the output's last date) is needed.
    if in_data['first'] != out_data['first']:
        start = in_data['first']
    else:
        start = out_data['last']
    return {'first': start, 'last': in_data['last']}
|
5910c2e3552d84a15e0b03bfb1dd97cf0cec0d66
| 11,031
|
def longitud_palabra(palabra, n):
    """Check whether a word has a given length.

    Receives a word and a positive integer ``n``; returns ``True`` if the
    word is exactly ``n`` characters long, ``False`` otherwise.
    """
    return n == len(palabra)
|
33065b4302cee42d3921d7b75344c12c43d54cd6
| 11,032
|
import math
def ph(concentration):
    """Return the pH from the hydronium ion concentration.

    pH is defined as the negative base-10 logarithm of [H3O+].
    The original used ``math.log`` (natural log), which gave e.g.
    ph(1e-7) == 16.12 instead of 7; ``math.log10`` is the correct base.

    :param concentration: hydronium ion concentration in mol/L (must be > 0)
    :return: the pH as a float
    """
    return -math.log10(concentration)
|
46e59ed147006ac4cc3aaefea5b10c015a9e18b9
| 11,033
|
def hash_file(upload_context):
    """
    Function run by HashFileCommand to calculate a file hash.

    :param upload_context: PathData: contains path to a local file to hash
    :return HashData: result of hash (alg + value)
    """
    # Delegate straight to the PathData carried in the context's params.
    return upload_context.params.get_hash()
|
3819e9617e5726cf4178a8382eb22b0ec8bd7da0
| 11,034
|
import struct
import ctypes
def encipher(v, k):
    """
    TEA coder encrypt 64 bits value, by 128 bits key,
    QQ uses 16 round TEA.
    http://www.ftp.cl.cam.ac.uk/ftp/papers/djw-rmn/djw-rmn-tea.html .
    >>> c = encipher('abcdefgh', 'aaaabbbbccccdddd')
    >>> b2a_hex(c)
    'a557272c538d3e96'
    """
    # NOTE(review): on Python 3 struct.unpack requires bytes-like input, so
    # v and k presumably must be bytes of length >= 8 and >= 16 respectively;
    # the str-based doctest above looks Python 2 era -- confirm.
    n=16 #qq use 16
    delta = 0x9e3779b9  # TEA magic constant (floor(2^32 / golden ratio))
    k = struct.unpack('!LLLL', k[0:16])  # key as four big-endian 32-bit words
    # Plaintext block as two 32-bit halves; ctypes.c_uint32 provides
    # C-style modulo-2^32 wrap-around on the in-place additions below.
    y, z = map(ctypes.c_uint32, struct.unpack('!LL', v[0:8]))
    s = ctypes.c_uint32(0)  # running multiple of delta (the round "sum")
    for i in range(n):
        s.value += delta
        # Standard TEA round; note Python precedence: + binds tighter than ^,
        # so each term is (shifted value + key word) XOR'd together.
        y.value += (z.value << 4) + k[0] ^ z.value+ s.value ^ (z.value >> 5) + k[1]
        z.value += (y.value << 4) + k[2] ^ y.value+ s.value ^ (y.value >> 5) + k[3]
    r = struct.pack('!LL', y.value, z.value)  # ciphertext block, big-endian
    return r
|
57d01a534f645f2464d16e90201c2ed1d37c77b9
| 11,035
|
def get_attr_of_pset(_id, ifc_file):
    """Get all property-set attributes of an IFC instance by its id.

    :param _id: id of the instance inside ``ifc_file``
    :param ifc_file: indexable IFC model (``ifc_file[_id]`` yields an entity)
    :return: dict mapping attribute names to values; always contains the
        entity's GlobalId under the key ``"IfcGlobalId"`` (added in the
        ``finally`` block, so it is present even on the error path)
    """
    dict_psets = {}
    try:
        defined_by_type = [x.RelatingType for x in ifc_file[_id].IsDefinedBy if x.is_a("IfcRelDefinesByType")]
        defined_by_properties = [x.RelatingPropertyDefinition for x in ifc_file[_id].IsDefinedBy if
                                 x.is_a("IfcRelDefinesByProperties")]
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any entity lookup/attribute failure still
        # records a placeholder keyed by the GlobalId.
        dict_psets.update({ifc_file[_id].GlobalId: "No Attributes found"})
    else:
        for x in defined_by_type:
            if x.HasPropertySets:
                for y in x.HasPropertySets:
                    for z in y.HasProperties:
                        dict_psets.update({z.Name: z.NominalValue.wrappedValue})
        for x in defined_by_properties:
            if x.is_a("IfcPropertySet"):
                for y in x.HasProperties:
                    if y.is_a("IfcPropertySingleValue"):
                        dict_psets.update({y.Name: y.NominalValue.wrappedValue})
                    # this could be useful for multilayered walls in Allplan
                    if y.is_a("IfcComplexProperty"):
                        for z in y.HasProperties:
                            dict_psets.update({z.Name: z.NominalValue.wrappedValue})
            if x.is_a("IfcElementQuantity"):
                for y in x.Quantities:
                    dict_psets.update({y[0]: y[3]})
    finally:
        dict_psets.update({"IfcGlobalId": ifc_file[_id].GlobalId})
    return dict_psets
|
6de5772ee0b86894c0986a631c20042751e0eb3e
| 11,036
|
def rebuild_paral_results(results):
    """Rebuild the correct way to store the results.

    Unzips a list of ``(score, best_pars_info, time)`` triples into three
    parallel lists.
    """
    scores = [entry[0] for entry in results]
    best_pars_info = [entry[1] for entry in results]
    times = [entry[2] for entry in results]
    return scores, best_pars_info, times
|
ae7a1ab996c49053332702b44482bc1b440aeee1
| 11,038
|
def create_turkey_season(source_df, target_df, feature_name):
    """Derive a ``season`` column from a month-abbreviation column.

    Winter: December - February
    Spring: March - May
    Summer: June - August
    Autumn: September - November
    """
    seasons = {
        "winter": ("dec", "jan", "feb"),
        "spring": ("mar", "apr", "may"),
        "summer": ("jun", "jul", "aug"),
        "autumn": ("sep", "oct", "nov"),
    }
    # Invert to the month -> season lookup the original spelled out by hand.
    month_to_season_map = {
        month: season for season, months in seasons.items() for month in months
    }
    target_df.loc[:, "season"] = source_df.loc[:, feature_name].map(month_to_season_map)
    return target_df
|
65f996d80554a35fff6341ca500a0c09c64186bd
| 11,041
|
def value_is_float_not_int(value):
    """Return if value is a float and not an int"""
    # this is klugy and only needed to display deprecation warnings
    try:
        int(value)
    except TypeError:
        # non-numeric, non-string input (e.g. None)
        return False
    except ValueError:
        # not parseable as int -- see whether it parses as float
        try:
            float(value)
        except ValueError:
            return False
        return True
    # int() succeeded, so it is representable as an int
    return False
|
f08f55dfc5e8b4eefef102f7fe097e4285d9dc7c
| 11,044
|
def policy_settings(request):
    """TLS termination policy settings (parametrized fixture indirection)."""
    fixture_name = request.param
    return request.getfixturevalue(fixture_name)
|
3a65251ea7866e25c02bf8354cc7cc9bbb5552e8
| 11,045
|
def process_lower(cont):
    """Return the value converted to lowercase."""
    lowered = cont.lower()
    return lowered
|
f863852b0aff952bce080e20360d6fcc571acc21
| 11,046
|
def get_word_counter(word_vocab):
    """
    Convert a list of (word, frequency) tuples to a dictionary.
    Key is word and value is its frequency.

    Args:
        word_vocab: A list of tuples of words and their frequencies.
    Returns: A dictionary with word as the key and its frequency as the value.
    """
    # dict() over (key, value) pairs is equivalent to the comprehension
    # (including "last duplicate wins" semantics).
    return dict(word_vocab)
|
8e76652c721d9ca175f79d9bb0acfccfb90da647
| 11,051
|
import torch
def format_metric(val):
    """Format a tensor/number as a string with 4 decimal digits."""
    if isinstance(val, torch.Tensor):
        val = val.detach().data
    return f'{val:.4f}'
|
337e266bca4ff0433e2c69864b1a493976d12c44
| 11,052
|
import pickle
def load_ontology(ontology_file):
    """Load an ontology from a pickle file.

    :param ontology_file: path to the pickle file
    :return: the unpickled ontology object

    .. warning:: ``pickle.load`` can execute arbitrary code; only load
        trusted files.
    """
    # Context manager closes the handle deterministically; the original
    # left the file object open until garbage collection.
    with open(ontology_file, 'rb') as handle:
        return pickle.load(handle)
|
8e3e9c9017bee76795a98435a68d3f01fa0d40e9
| 11,053
|
import argparse
def check_arg(argv=None):
    """Parse the CLI call for git-sync.

    :param argv: optional list of argument strings; ``None`` (the default)
        keeps the original behaviour of reading ``sys.argv[1:]``. Added
        (backward-compatibly) so the parser can be driven programmatically.
    :return: argparse.Namespace with remote, branch, username, token, dst
    """
    parse = argparse.ArgumentParser(
        prog="git-sync",
        usage="%(prog)s [-h|--help] ",
        description="keep sync remote git repo with a local repo",
        epilog="",
        allow_abbrev=False,
    )
    parse.add_argument(
        "-r", "--remote", type=str, help="Remote git repo to sync", required=True
    )
    parse.add_argument("-b", "--branch", type=str, help="Branch to sync", required=True)
    parse.add_argument(
        "-u",
        "--username",
        type=str,
        help="Username to authenticate with remote git repo",
        required=True,
    )
    parse.add_argument(
        "-t",
        "--token",
        type=str,
        help="Token to authenticate with remote git repo",
        required=True,
    )
    parse.add_argument(
        "-d",
        "--dst",
        type=str,
        help="Directory where local repo will reside",
        required=True,
    )
    # parse_args(None) falls back to sys.argv[1:], matching the old behaviour.
    return parse.parse_args(argv)
|
5277583ee6c191af6d32b2bc466005ede7b4aea2
| 11,054
|
import re
def get_csrf_token(res):
    """Extract the CSRF token from a response's header pairs."""
    token_re = re.compile('token=(.+?);')
    for header in res.headers:
        match = token_re.search(header[1])
        if match:
            return match.group(1)
    raise RuntimeError('Could not find CSRF token in response headers: ' + str(res.headers))
|
2c2670a6909ed87d60b44a5cf1cbaeffdc4fc653
| 11,055
|
from pathlib import Path
import os
def path_to_db_path(path: Path):
    """Converts a pathlib.Path to a DBFS path (forward slashes, extension stripped)."""
    normalized = str(path).replace("\\", "/")
    root, _ext = os.path.splitext(normalized)
    return root
|
023703da2652f64de39491a64252a692d7aef886
| 11,056
|
def create_to_idx_dict(source):
    """ Creates a dictionary of item-specific indices from an iterable.

    Each distinct item maps to the index of its first occurrence among
    the distinct items (insertion order).
    """
    # dict.fromkeys preserves first-seen order while dropping duplicates.
    distinct = dict.fromkeys(source)
    return {item: position for position, item in enumerate(distinct)}
|
0c8de2455b4fa78b4c29b75879c87f1ee2a2de40
| 11,059
|
def homepage():
    """List all available api routes.

    Returns one HTML string (Flask-style index handler) describing the
    endpoints served by the app; the f-strings contain no placeholders.
    """
    return (
        f"Welcome to Hawaii - Climate Page<br/>"
        f"<br/>"
        f"This site has data from 01-01-2010 to 08-23-2017<br/>"
        f"<br/>"
        f"Available Pages:<br/>"
        f"<br/>"
        f"<br/>"
        f"  Station Information<br/>"
        f"  /api/v1.0/stations<br/>"
        f"<br/>"
        f"  Percipitation Information<br/>"
        f"  /api/v1.0/percipitation<br/>"
        f"<br/>"
        f"  Temperature Observations<br/>"
        f"  /api/v1.0/tobs<br/>"
        f"<br/>"
        f"  Start Date information - complete url is '/api/v1.0//yyyy-mm-dd'<br/>"
        f"  /api/v1.0/start<br/>"
        f"<br/>"
        f"  Start and End Date information - complete url is '/api/v1.0/yyyy-mm-dd/yyyy-mm-dd'<br/>"
        f"  /api/v1.0/start/end"
    )
|
5c781d229d475ab4253b21c8f5002a020928fae9
| 11,060
|
def chomp_empty(seq):
    """Return slice of sequence seq without trailing empty tuples."""
    end = len(seq)
    while end and seq[end - 1] == ():
        end -= 1
    return seq[:end]
|
1c6f5f58bb2e73d44b2638d796b1b0a38bba414c
| 11,061
|
import os
def builtin_name(path):
    """Return the builtin function named in the path, or None."""
    candidate = os.path.basename(path).strip()
    is_builtin = candidate.startswith('<') and candidate.endswith('>')
    return candidate if is_builtin else None
|
6dc701e193eb721957b538dc5bfae311ab46d4d8
| 11,062
|
def _delta(x, y):
"""Computes |y|/|x|."""
return max(float(len(y))/float(len(x)), 1.0)
|
22a55950406daeb3e7653d9a0a232d52a7bd76e4
| 11,064
|
def get_token_time(token_index, sentence, duration):
    """
    Linearly interpolate to guess the time a token was uttered.
    """
    # max(1, ...) guards against division by zero on an empty sentence.
    denominator = max(1, len(sentence))
    return (token_index + 1) / denominator * duration
|
d227e782083ae6265e1bc4e286269c421e809985
| 11,065
|
def forward_method_kwargs(**kwargs) -> dict:
    """Return all the keyword-arguments of a method, excluding the 'self' argument"""
    forwarded = {}
    for name, val in kwargs.items():
        if name == 'kwargs':
            # Flatten a nested kwargs dict into the result.
            forwarded.update(val)
        elif name != 'self' and not name.startswith('_'):
            forwarded[name] = val
    return forwarded
|
571ad0c61f33e608ce253c16f452e488257cbf31
| 11,067
|
def _get_keywords_with_score(extracted_lemmas, lemma_to_word):
"""Get words of `extracted_lemmas` and its scores, words contains in `lemma_to_word`.
Parameters
----------
extracted_lemmas : list of (float, str)
Given lemmas with scores
lemma_to_word : dict
Lemmas and corresponding words.
Returns
-------
dict
Keywords as keys and its scores as values.
"""
keywords = {}
for score, lemma in extracted_lemmas:
keyword_list = lemma_to_word[lemma]
for keyword in keyword_list:
keywords[keyword] = score
return keywords
|
385a54878e508a56ab772fec31f4ff9838048492
| 11,068
|
import requests
def export_highlights_to_readwise(readwise_token, highlight_objects):
    """Submit highlights to Readwise via their API and return the API response.

    :param readwise_token: token used for accessing the user's Readwise
        highlights through the Readwise API.
    :param highlight_objects: list of JSON-serialisable dicts containing the
        highlight details.
    :return: the ``requests.Response`` -- returned on failure too, so callers
        can inspect the status code themselves.
    """
    print('Uploading highlights to Readwise...')
    api_response = requests.post(
        url='https://readwise.io/api/v2/highlights/',
        headers={'Authorization': 'Token ' + readwise_token},
        json={
            'highlights': highlight_objects
        }
    )
    # Both branches of the original returned the response; keep a single
    # return and only report the error on non-200 status.
    if api_response.status_code != 200:
        print("Error: Upload of highlights to Readwise failed with status code " +
              str(api_response.status_code) + '. Please check that the provided Readwise token is correct.')
    return api_response
|
c6b2928aa488fb62d40470e441232ed67d61a020
| 11,070
|
def h(p1, p2):
    """Manhattan-distance heuristic between two (x, y) points."""
    dx = abs(p1[0] - p2[0])
    dy = abs(p1[1] - p2[1])
    return dx + dy
|
0d7522a3f92ae851bfa9b1a316a1fa97baccf98f
| 11,072
|
import torch
def pdist(X, Y):
    """Computes all pairwise *squared* Euclidean distances.

    Note: no square root is taken -- the result is ||x - y||^2 expanded as
    ||x||^2 - 2 x.y + ||y||^2 (the original docstring said "distances").

    Parameters
    ----------
    X : torch.tensor
        shape [n, d]
    Y : torch.tensor
        shape [m, d]

    Returns
    -------
    torch.tensor
        shape [n, m] of all pairwise squared distances
    """
    X_norm2 = (X ** 2).sum(1)
    Y_norm2 = (Y ** 2).sum(1)
    # Broadcasting replaces the original explicit outer products with
    # ones-vectors; the numeric result is identical.
    return X_norm2.unsqueeze(1) - 2 * (X @ Y.T) + Y_norm2.unsqueeze(0)
|
10fce38b390211999344bd1b0daf0a4484aee2a8
| 11,073
|
def detab(self, elem="", lab="", v1="", v2="", v3="", v4="", v5="", v6="",
          **kwargs):
    """Modifies element table results in the database.

    APDL Command: DETAB

    Parameters
    ----------
    elem
        Element for which results are to be modified (ALL for all selected
        elements [ESEL]; P enables graphical picking in the GUI; a component
        name may also be substituted).
    lab
        Label identifying results, as defined with the ETABLE command
        (issue ETABLE,STAT to display labels and values).
    v1
        Value assigned to this element table result in the database
        (zero assigns zero; blank leaves the value unchanged).
    v2, v3, v4, . . . , v6
        Additional values (if any) assigned to consecutive element table
        columns.

    Notes
    -----
    For example, DETAB,35,ABC,1000,2000,1000 assigns 1000, 2000, and 1000
    to the first three table columns starting with label ABC for element 35.
    Use PRETAB to list current results. After ETABLE,Lab,ERASE the remaining
    columns are not compressed, so blank values must be allocated for erased
    entries. Data are stored in the solution coordinate system but displayed
    in the results coordinate system [RSYS].
    """
    fields = (elem, lab, v1, v2, v3, v4, v5, v6)
    command = "DETAB," + ",".join(str(field) for field in fields)
    return self.run(command, **kwargs)
|
01f13f7e971c2c24e291712a7e634838e2e2ac5a
| 11,074
|
def convert(s):
    """
    Convert a probability string to a float number.

    Accepts a plain decimal ("0.25") or a fraction ("1/4").

    :param s: probability string.
    :return: a float probability.
    """
    try:
        return float(s)
    except ValueError:
        numerator, denominator = s.split('/')
        return float(numerator) / float(denominator)
|
84333f21edfdcb2f3c917f23c23b9d399c5f6e56
| 11,075
|
def git_log_file(file_name):
    """Build a shell command printing the last commit date of a file.

    The file path must be given relative to the git repository root.
    """
    return f"git log -1 {file_name} |grep Date"
|
e7d0481d4ee7e1a68492df07ed9c35e95a87ab5c
| 11,076
|
def count_cigar_ops(cigar):
    """Count M/I/D bases of a CIGAR string with a single manual scan.

    (For curious people: regexes are very slow for parsing CIGAR strings.)

    cigar: Unicode
    Returns ``(num_m, num_i, num_d, total_len)``; '=' and 'X' count toward
    M, any other op code is ignored, and ``total_len`` is the sum of the
    three counters.
    """
    num_m = num_i = num_d = 0
    start = 0  # start of the digit run for the current op
    for pos, ch in enumerate(cigar):
        if ch <= '9':
            continue
        # Every op char must be preceded by at least one digit.
        assert start < pos
        count = int(cigar[start:pos])
        start = pos + 1
        if ch == 'D':
            num_d += count
        elif ch == 'I':
            num_i += count
        elif ch in 'M=X':
            num_m += count
    # No dangling digits after the last op.
    assert start == len(cigar)
    return num_m, num_i, num_d, num_m + num_i + num_d
|
7efacce3a9b13249718ae140e79a5800b8682ff5
| 11,078
|
def get_dwtype_floor_area(dwtype_floorarea_by, dwtype_floorarea_ey, sim_param):
    """Calculate the floor area per dwelling type for every simulated year.

    A linear change between the base-year and end-year values is assumed.

    Parameters
    ----------
    dwtype_floorarea_by : dict
        Floor area per dwelling type in the base year
    dwtype_floorarea_ey : dict
        Floor area per dwelling type in the end year
    sim_param : dict
        Simulation parameters ('sim_period', 'base_yr', 'sim_period_yrs')

    Returns
    -------
    dict
        ``{year: {dwtype: floor_area}}``

    Example
    -------
    out = {year: {'dwtype': 0.3}}
    """
    base_yr = sim_param['base_yr']
    yrs_total = sim_param['sim_period_yrs']
    dwtype_floor_area = {}
    for curr_yr in sim_param['sim_period']:
        if curr_yr == base_yr:
            # The base year reuses the input dict (same object, as before).
            dwtype_floor_area[curr_yr] = dwtype_floorarea_by
            continue
        elapsed = curr_yr - base_yr
        dwtype_floor_area[curr_yr] = {
            dwtype: area_by
            + (dwtype_floorarea_ey[dwtype] - area_by) / yrs_total * elapsed
            for dwtype, area_by in dwtype_floorarea_by.items()
        }
    return dwtype_floor_area
|
3b87386bbf5bf051aec5d43ebd47f3359885c8b4
| 11,079
|
def look_and_say(n):
    """Returns the nth term of the "look and say" sequence: beginning with
    the term 1, each subsequent term visually describes the digits of the
    previous one.

    The first few terms are:
        1
        11
        21
        1211
        111221

    Ex: the fourth term is 1211, since the third term consists of one 2
    and one 1.
    """
    assert n, "There is no zeroth term."
    # Iterative build instead of the recursive formulation.
    term = '1'
    for _ in range(n - 1):
        pieces = []
        i = 0
        while i < len(term):
            # Find the end of the current run of equal digits.
            j = i
            while j + 1 < len(term) and term[j] == term[j + 1]:
                j += 1
            pieces.append(str(j - i + 1) + term[i])
            i = j + 1
        term = ''.join(pieces)
    return int(term)
|
5cb341d0bce8bc363b2b734e9da66a07b7da2434
| 11,081
|
def compound_alternative_forms(forms, sec_pos, forms_ind_or_con, forms_imp):
    """Merge alternative verb forms into ``forms`` and convert leaf lists to sets.

    :param forms: accumulated forms, ``{pos: {number: {person: [...]}}}``,
        or a falsy value when nothing has been collected yet
    :param sec_pos: part-of-speech key under which indicative/conjunctive
        forms are stored when ``forms`` is freshly created
    :param forms_ind_or_con: indicative/conjunctive forms keyed by
        ``[number][person]``, or the string ``'modal'`` as a sentinel
        (leaves are then returned as lists, not sets)
    :param forms_imp: imperative forms, the ``'modal'`` sentinel, or falsy
    :return: ``forms`` with each leaf list replaced by a set, unless a
        ``'modal'`` sentinel short-circuits the conversion
    """
    if not forms:
        # Nothing collected yet: seed the structure from the new forms.
        if forms_imp:
            forms = {sec_pos: forms_ind_or_con, 'imp': forms_imp}
        else:
            forms = {sec_pos: forms_ind_or_con}
    else:
        # Merge: prepend the matching new forms (if any) to each leaf list.
        for pos in forms:
            for number in forms[pos]:
                for person in forms[pos][number]:
                    old = forms[pos][number][person]
                    try:
                        new = forms_ind_or_con[number][person]
                    except KeyError:
                        new = []
                    new.extend(old)
                    forms[pos][number][person] = new
    # 'modal' sentinel: skip the list-to-set conversion entirely.
    if forms_ind_or_con == 'modal':
        return forms
    if forms_imp == 'modal':
        forms_imp = None
    # Deduplicate every leaf by converting the lists to sets.
    for pos in forms:
        for number in forms[pos]:
            for person in forms[pos][number]:
                old = forms[pos][number][person]
                new = set(old)
                forms[pos][number][person] = new
    return forms
|
9b81aea6ef9edbe71560d2ca1d001a3971928c00
| 11,082
|
def get_text_ls(filename):
    """Returns text of file as a list of strings"""
    with open(filename, 'r') as handle:
        lines = handle.readlines()
    return lines
|
5b788f38e683c82648224b7cc1b5875a0f602dce
| 11,083
|
def get_title(soup):
    """Given a soup, pick out a title (prefer <title>, fall back to <h1>)."""
    for candidate in (soup.title, soup.h1):
        if candidate:
            return candidate.string
    return ''
|
bcec80e0c6e5163ed90a4975b81a5c5a3d418132
| 11,086
|
def add_motif(genome, motif, index):
    """Splice ``motif`` into ``genome`` ending at position ``index``.

    Replaces the ``len(motif)`` characters ending at ``index`` with the motif.

    :param genome: genome string
    :param motif: motif string to splice in
    :param index: a single end position (int) or an iterable of end positions
    :return: the modified genome string
    """
    try:
        # Works when index is an integer.
        start = index - len(motif)
        return genome[:start] + motif + genome[start + len(motif):]
    except TypeError:
        # index is an iterable of positions. Narrowed from a bare ``except``
        # so unrelated errors (e.g. KeyboardInterrupt) are no longer swallowed.
        for end in index:
            start = end - len(motif)
            genome = genome[:start] + motif + genome[start + len(motif):]
        return genome
|
b4fa3f957fc440a837a0f65fd431f1fc9119f077
| 11,087
|
def featurizeDoc(doc):
    """Return a ``"key: value"`` string for each metadata entry in doc."""
    return ["{0}: {1}".format(key, doc[key]) for key in doc]
|
7204fe1ded828112e69c120c944a5a16cc00f628
| 11,089
|
def f(f_prob):
    """Deterministic forecast matching observation o.

    Selects the first ensemble member and drops the ``member`` coordinate
    (presumably an xarray object given the ``isel(..., drop=True)``
    signature -- confirm the expected input type).
    """
    return f_prob.isel(member=0, drop=True)
|
4163e9a40916bd6c1dbf00e6ed635cf2b78bcfc7
| 11,090
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.