| content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
def sort_quick(data):
"""ๅฟซ้ๆๅบใ
้ๆฉ็ฌฌไธไธชๅ
็ด ไธบไธปๅ
`pivot`๏ผๅ
ถไฝๅ
็ด ่ฅๅฐไบ `pivot`๏ผๅๆพๅ
ฅ `left_list`๏ผๅฆๅๆพๅ
ฅ
`right_list`ใ้ๅฝๅฐๅฏนๅญๅ่กจ่ฐ็จ `sort_quick`๏ผ็ด่ณๆๅบๅฎๆใ
"""
if len(data) <= 1:
return data
pivot = data[0]
left_list = []
right_list = []
    for i in data[1:]:  # `data` has at least 2 elements here
if i < pivot:
left_list.append(i)
else:
right_list.append(i)
return sort_quick(left_list) + [pivot] + sort_quick(right_list)
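# Illustrative checks of sort_quick (a sketch, including the duplicate-element case):
assert sort_quick([3, 1, 4, 1, 5, 9, 2, 6]) == [1, 1, 2, 3, 4, 5, 6, 9]
assert sort_quick([]) == []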
|
edf1aabddc992aa04e0db6631011a498f7aa65be
| 697,082
|
import cProfile
import io
import pstats
def profile(func):
"""
Decorator to profile functions with cProfile
Args:
func: python function
Returns:
profile report
References:
https://osf.io/upav8/
"""
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
res = func(*args, **kwargs)
pr.disable()
s = io.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())
return res
return inner
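# Illustrative usage of the decorator (a sketch; `slow_sum` is a made-up example function):
@profile
def slow_sum(n):
    return sum(i * i for i in range(n))

slow_sum(100000)  # returns the sum and prints a cumulative-time profile report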
|
081c544261bded127bb59743a6ba9da38997a4eb
| 697,083
|
def model_to_vector(model, emb_layer_name='input_emb'):
"""
get the wordvec weight
:param model:
:param emb_layer_name:
:return:
"""
sd = model.state_dict()
return sd[emb_layer_name + '.weight'].cpu().numpy().tolist()
|
65f1a70022a9d1077fe959f8dbfd7504d66be6c6
| 697,084
|
import torch
def finalize(s0, s1, s2):
"""Concatenate scattering of different orders.
Parameters
----------
s0 : tensor
Tensor which contains the zeroth order scattering coefficents.
s1 : tensor
Tensor which contains the first order scattering coefficents.
s2 : tensor
Tensor which contains the second order scattering coefficents.
Returns
-------
s : tensor
Final output. Scattering transform.
"""
if len(s2)>0:
return torch.cat([torch.cat(s0, -2), torch.cat(s1, -2), torch.cat(s2, -2)], -2)
else:
return torch.cat([torch.cat(s0, -2), torch.cat(s1, -2)], -2)
|
f9c1bd7f9072b7e34c5e90ca1c245a98b9d7bf8c
| 697,085
|
def quarter_for_month(month):
"""return quarter for month"""
if month not in range(1, 13):
raise ValueError('invalid month')
return {1: 1, 2: 1, 3: 1,
4: 2, 5: 2, 6: 2,
7: 3, 8: 3, 9: 3,
10: 4, 11: 4, 12: 4}[month]
# return (month + 2) // 3
|
559bb2d194753efa3c2ee18aee3040dba05a4f85
| 697,086
|
import os
import csv
def load_samples(dataset_name, output_dir):
"""
Returns {sample_id: [feature,]} for the named dataset.
"""
samples_dir = os.path.join(output_dir, 'samples')
assert os.path.exists(samples_dir)
file_path = os.path.join(samples_dir, dataset_name +'.tsv')
assert os.path.exists(file_path)
samples = {}
with open(file_path) as f:
reader = csv.reader(f, delimiter='\t')
        next(reader)  # skip the header row
for line in reader:
samples[line[0]] = line[1:7]
assert len(samples[line[0]]) == 6
return samples
|
1bb74346897b06d2a5a7b4c180e8a627f0834541
| 697,087
|
def is_equal(x, y, tolerance=0.000001):
"""
    Checks if 2 float values are equal within a given tolerance
:param x: float, first float value to compare
:param y: float, second float value to compare
:param tolerance: float, comparison tolerance
:return: bool
"""
return abs(x - y) < tolerance
|
2773cb526d00149e735853310986b581e1eec1e8
| 697,088
|
import copy
def get_key_from_mission_info(info, key, default, inst=None, mode=None):
"""Get the name of a header key or table column from the mission database.
Many entries in the mission database have default values that can be
altered for specific instruments or observing modes. Here, if there is a
definition for a given instrument and mode, we take that, otherwise we use
    the default.
Parameters
----------
info : dict
Nested dictionary containing all the information for a given mission.
It can be nested, e.g. contain some info for a given instrument, and
for each observing mode of that instrument.
key : str
The key to read from the info dictionary
default : object
The default value. It can be of any type, depending on the expected
type for the entry.
Other parameters
----------------
inst : str
Instrument
mode : str
Observing mode
Returns
-------
retval : object
The wanted entry from the info dictionary
Examples
--------
>>> info = {'ecol': 'PI', "A": {"ecol": "BLA"}, "C": {"M1": {"ecol": "X"}}}
>>> get_key_from_mission_info(info, "ecol", "BU", inst="A", mode=None)
'BLA'
>>> get_key_from_mission_info(info, "ecol", "BU", inst="B", mode=None)
'PI'
>>> get_key_from_mission_info(info, "ecol", "BU", inst="A", mode="M1")
'BLA'
>>> get_key_from_mission_info(info, "ecol", "BU", inst="C", mode="M1")
'X'
>>> get_key_from_mission_info(info, "ghghg", "BU", inst="C", mode="M1")
'BU'
"""
filt_info = copy.deepcopy(info)
if inst is not None and inst in filt_info:
filt_info.update(info[inst])
filt_info.pop(inst)
if mode is not None and mode in filt_info:
        # use filt_info (which already merged any instrument-level dict), so a
        # mode defined at the top level also works when inst is None
        filt_info.update(filt_info[mode])
filt_info.pop(mode)
if key in filt_info:
return filt_info[key]
return default
|
02dc51526bf0843f2c24d1c51d41791cd722de88
| 697,089
|
from typing import List
def _create_cmd_header(commands: List[str]):
"""
Generate a command header.
Note:
        here we always allow generating HTML as long as it sits between CLICK-WEB comments.
This way the JS frontend can insert it in the correct place in the DOM.
"""
def generate():
yield '<!-- CLICK_WEB START HEADER -->'
yield '<div class="command-line">Executing: {}</div>'.format('/'.join(commands))
yield '<!-- CLICK_WEB END HEADER -->'
    # important: yield this block as one string so it is pushed to the client in one go,
    # so the whole block can be treated as HTML.
html_str = '\n'.join(generate())
return html_str
|
54ed275c83a711dcfaaed9cda504afc5b8defdca
| 697,090
|
def get_list():
""" """
coffee_shops = {
'header': [
{
'key': 'name',
'name': 'Name'
},
{
'key': 'star',
'name': 'Star'
},
{
'key': 'sf-location',
'name': 'SF Location'
}
],
        'rows': [
            {'name': 'Ritual Coffee Roasters', 'star': '⭐⭐⭐⭐⭐', 'sf-location': 'Hayes Valley'},
            {'name': 'Blue Bottle', 'star': '⭐⭐⭐⭐⭐', 'sf-location': 'Hayes Valley'},
            {'name': 'CoffeeShop', 'star': '⭐⭐⭐', 'sf-location': 'Bernal Heights'},
            {'name': 'Spike\'s Coffee & Teas', 'star': '⭐⭐⭐', 'sf-location': 'Castro'},
            {'name': 'La Boulange', 'star': '⭐⭐', 'sf-location': 'Cole Valley'},
            {'name': 'Dynamo Donut and Coffee', 'star': '⭐⭐⭐⭐⭐', 'sf-location': 'Cow Hollow'},
            {'name': 'The Mill', 'star': '⭐⭐⭐⭐', 'sf-location': 'Divisadero'},
            {'name': 'Piccino Coffee Bar', 'star': '⭐⭐⭐', 'sf-location': 'Dogpatch'},
            {'name': 'Philz', 'star': '⭐⭐⭐', 'sf-location': 'Downtown'},
            {'name': 'Duboce Park Cafe', 'star': '⭐⭐', 'sf-location': 'Duboce Triangle'},
            {'name': 'Blue Bottle', 'star': '⭐⭐⭐⭐⭐', 'sf-location': 'Embarcadero'},
            {'name': 'Four Barrel', 'star': '⭐⭐⭐', 'sf-location': 'Excelsior'},
            {'name': 'Coffee Bar', 'star': '⭐⭐⭐⭐⭐', 'sf-location': 'FiDi'},
            {'name': 'Biscoff Coffee Corner', 'star': '⭐⭐⭐', 'sf-location': 'Fisherman’s Wharf'},
            {'name': 'Fifty/Fifty Coffee and Tea', 'star': '⭐⭐⭐', 'sf-location': 'Inner Richmond'},
            {'name': 'Beanery', 'star': '⭐⭐⭐', 'sf-location': 'Inner Sunset'},
            {'name': 'Cafe du Soleil', 'star': '⭐⭐', 'sf-location': 'Lower Haight'},
            {'name': 'Dimmi Tutto Cafe', 'star': '⭐⭐⭐', 'sf-location': 'North Beach'},
            {'name': 'Peet\'s', 'star': '⭐', 'sf-location': 'The Marina'},
            {'name': 'Sightglass', 'star': '⭐⭐⭐⭐', 'sf-location': 'The Mission'},
            {'name': 'Contraband Coffee Bar', 'star': '⭐⭐⭐⭐', 'sf-location': 'Nob Hill'},
            {'name': 'Martha & Bros Coffee', 'star': '⭐⭐⭐', 'sf-location': 'Noe Valley'},
            {'name': 'Réveille', 'star': '⭐⭐⭐', 'sf-location': 'North Beach'},
            {'name': 'Cup Coffee Bar', 'star': '⭐⭐⭐', 'sf-location': 'Outer Mission'},
            {'name': 'Garden House Cafe', 'star': '⭐⭐⭐', 'sf-location': 'Outer Richmond'},
            {'name': 'Andytown Coffee Roasters', 'star': '⭐⭐⭐', 'sf-location': 'Outer Sunset'},
            {'name': 'Jane on Fillmore', 'star': '⭐⭐', 'sf-location': 'Pacific Heights'},
            {'name': 'Saint Frank Coffee', 'star': '⭐⭐⭐', 'sf-location': 'Polk'},
            {'name': 'Farley’s', 'star': '⭐⭐⭐', 'sf-location': 'Potrero Hill'},
            {'name': 'House of Snacks', 'star': '⭐⭐⭐', 'sf-location': 'The Presidio'},
            {'name': 'The Brew', 'star': '⭐⭐⭐', 'sf-location': 'Russian Hill'},
            {'name': 'Wicked Grounds', 'star': '⭐⭐⭐', 'sf-location': 'SOMA'},
            {'name': 'Starbucks', 'star': '⭐', 'sf-location': 'Union Square'},
            {'name': 'Flywheel Coffee Roasters', 'star': '⭐⭐⭐⭐⭐', 'sf-location': 'Upper Haight'}
        ]
}
data = dict(success=True, coffee_shops=coffee_shops)
return data
|
48be88339107e7e871da44c80c431c22a6ffe246
| 697,091
|
def use_model_attr(attr):
"""Use the validator set on a separate attribute on the class."""
def use_model_validator(instance, attribute, value):
getattr(instance, attr)(instance, attribute, value)
return use_model_validator
|
7947c13bf413a0d5dbdced110dccb4c1765ecea6
| 697,092
|
def _find_seam_what(paths, end_x):
"""
Parameters
==========
paths: 2-D numpy.array(int64)
Output of cumulative_energy_map. Each element of the matrix is the offset of the index to
the previous pixel in the seam
end_x: int
The x-coordinate of the end of the seam
    Returns
    =======
    (seam, cost) tuple
    seam is a list with length == height of the image; each element is the
    x-coordinate of the pixel to be removed at that y-coordinate, e.g.
    [4,4,3,2] means "remove pixels (0,4), (1,4), (2,3), and (3,2)".
    cost is the sum of the `paths` entries along the seam.
"""
height, width = paths.shape[:2]
seam = [end_x]
for i in range(height-1, 0, -1):
cur_x = seam[-1]
offset_of_prev_x = paths[i][cur_x]
seam.append(cur_x + offset_of_prev_x)
seam.reverse()
return seam,sum([paths[r,seam[r]] for r in range(height)])
|
dcff07b965f17ffc2fb5b5607fc88338301ca076
| 697,093
|
def read_scalar(group, dataset_name):
"""
Read a HDF5 `SCALAR` as a dict.
All attributes will be assigned as key: value pairs, and the
scalar value will be assigned the key name 'value'.
:param group:
        A h5py `Group` or `File` object from which to read the
        dataset.
:param dataset_name:
A `str` containing the pathname of the dataset location.
:return:
A `dict` containing the SCALAR value as well as any
attributes coupled with the SCALAR dataset.
"""
dataset = group[dataset_name]
data = {k: v for k, v in dataset.attrs.items()}
data["value"] = dataset[()]
return data
|
6d9908e64f6584d0128756778679f87ffc8cb46f
| 697,094
|
def ma(error, params, offset):
""" Calculate the moving average part.
:param error: list of error terms
:param params: list of coefficients
:param offset: index of last predicted value
:return: float
"""
return sum([params[i] * error[offset - i] for i in range(len(params))])
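# Worked example (a sketch): an MA(2) term with coefficients [0.5, 0.25] applied
# to the two most recent errors, error[3] and error[2]:
# ma([0.0, 0.1, -0.2, 0.4], [0.5, 0.25], 3) == 0.5 * 0.4 + 0.25 * (-0.2) == 0.15
assert abs(ma([0.0, 0.1, -0.2, 0.4], [0.5, 0.25], 3) - 0.15) < 1e-12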
|
5ac853fca6a57dd4f349e79445b75b29f62ccbd7
| 697,095
|
import os
def get_local_path(fname: str) -> str:
"""Returns relative path for the local file
Args:
fname (str): path of the file. Generally __file__ is passed which
contains path form where function is directly or indirectly invoked
Returns:
str: relative path
"""
return os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(fname)))
|
04787b8bb972971ab8aa3647d7c72d16d0e3467a
| 697,096
|
import torch
def my_sample_gumbel(shape, eps=1e-20):
"""Samples arbitrary-shaped standard gumbel variables.
Args:
shape: list of integers
eps: float, for numerical stability
Returns:
A sample of standard Gumbel random variables
"""
#Sample from Gumbel(0, 1)
U = torch.rand(shape).float()
return -torch.log(eps - torch.log(U + eps))
|
21e471bf5fca80316d93ec2a96230471ddc83a45
| 697,097
|
def _get_package_dict1(reqs: str) -> dict:
"""Helper to parse requirements str into a dict of
(package, version) k, v pairs
"""
return dict(line.split("==") for line in reqs.strip().splitlines())
|
8b851a871a2e2ef9d9c984831393bf30b29d7a02
| 697,098
|
def lowercase(lista):
"""Function to lowercase list of texts
Args:
lista ([list]): list of texts
Returns:
[list]: List of texts lowercased
"""
return [text.lower() for text in lista]
|
2be877aa3b80c5e01eb4237625b426123d5b9976
| 697,099
|
import re
def get_text_between_parenthesis(string):
"""
    Return a list with all the found substrings between parentheses.
    If the string doesn't contain parentheses, the function returns an empty list.
    :param string: a string
    :return: a list
    """
    try:
        text_list = re.findall(r'\(([\d\w\s-]+)\)', string)
    except TypeError:
        text_list = []
return text_list
|
cce6abdb5618474848d9cff0f0ace1fab01f5b84
| 697,101
|
def get_elevated_session_input(response):
"""Create input for get_elevated_session."""
return {
'aws_access_key_id': response['Credentials']['AccessKeyId'],
'aws_secret_access_key': response['Credentials']['SecretAccessKey'],
'aws_session_token': response['Credentials']['SessionToken']
}
|
cede7d9a695953b8b0f57c6b58e5c69bf84d7887
| 697,102
|
def reverse_array(arr):
"""
Reverse an array along all axes, so arr[i,j] -> arr[-(i+1),-(j+1)].
"""
    reverse_slice = tuple(slice(None, None, -1) for _ in arr.shape)
    return arr[reverse_slice]  # NumPy requires a tuple (not a list) of slices here
|
659e4aea2f62287aeef6d3d4d80b3c58f937ace2
| 697,104
|
from functools import reduce
def split_camel_case(text) -> list:
"""Splits words from CamelCase text."""
return list(reduce(
lambda a, b: (a + [b] if b.isupper() else a[:-1] + [a[-1] + b]),
text,
[]
))
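# Illustrative check (a sketch): every uppercase letter starts a new chunk.
assert split_camel_case("CamelCaseText") == ["Camel", "Case", "Text"]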
|
5a6f2fcfcdb378295554cc38a88679e054341068
| 697,105
|
def init_ground_station():
"""initialize the configurations for the ground station
author: zack
step 1: figure out all the commands we need to do (on paper)
step 2: after write_to_ground_station is complete then write out code"""
return 0
|
d8b4e776c00f8714aaeccb5cded71c3103ace719
| 697,106
|
def pageurl_template(category, pagenum, date):
"""
Function to return formatted URL given category, page number, date
"""
assert len(str(date)) == 8, 'Invalid input date format'
return f"https://news.daum.net/breakingnews/{category}?page={pagenum}®Date={date}"
|
e6a7c9c86c16cd775919588b918c6d49b8d3a5bd
| 697,107
|
def parse_transaction_weights(config_root):
"""
Parse the transaction types and weights from the XML configuration.
Parameters
----------
config_root : xml.Element
The root of the XML config file.
Returns
-------
transaction_weights : [dict]
An array of dictionaries formatted as
{"name": str(transaction_name), weight: int(value)}
that corresponds to the transaction types and weights.
Raises
-------
KeyError
If works.work.weights or transactiontype is not a key in the XML file.
RuntimeError
    If there are a different number of transaction types and weights.
"""
try:
transaction_types = \
config_root.find('transactiontypes').findall('transactiontype')
weights_values = \
config_root.find('works').find('work').findall('weights')
    except AttributeError:
        raise KeyError(
            "Couldn't parse the config file for txn types and weights.")
if len(transaction_types) != len(weights_values):
raise RuntimeError("Mismatched number of txn types and weights.")
weights = []
for txn_type, weight in zip(transaction_types, weights_values):
txn_name = txn_type.find('name').text
weights.append({'name': txn_name, 'weight': int(weight.text)})
return weights
|
085a963d95d7e14fcaf688317374f33373dc6c0d
| 697,109
|
def unmangle(cls, name):
"""Given a name mangled using cls unmangles it.
Undefined output (probably an empty string or an error) if it is not a
mangled name or wasn't mangled using cls.
"""
return name[3+len(cls.__name__):]
|
2c29eb31d56043e2174b45f07161dcf9885c7007
| 697,110
|
def cumsum(vals):
"""Return a cumalative sum of vals (as a list)."""
lst = []
tot = 0
for v in vals:
tot += v
lst.append(tot)
return lst
|
e9ea4065fa7a044ae738e33dbd855cd1948e8c45
| 697,111
|
def subtract_and_increment(a, b):
""""
Return a minus b, plus 1
"""
c = a - b + 1
return c
|
8307b0c5a58571bcaa34909aca90194781ebaddc
| 697,112
|
import os
def walk_files(path):
"""
Returns all files within a directory recursively
- If path is a file, returns a singleton list containing only path
"""
if not os.path.isdir(path):
return [path]
paths = []
for dirpath, _, files in os.walk(path):
for f in files:
paths.append(dirpath + '/' + f)
return paths
|
e76f49a05a5e60d14893a5d377e939e4e7067fdb
| 697,113
|
import requests
def send_wechat(title, content):
"""Send Message."""
payload = {
"summary": title,
"content": content,
"appToken": "AT_xx",
"contentType": 1,
"topicIds": [1411],
"url": "https://live.bilibili.com/34027"
}
url = 'http://wxpusher.zjiecode.com/api/send/message'
return requests.post(url, json=payload).json()
|
c783d4ecaea84bcf7fb9b1cfc275550d4012ccc2
| 697,114
|
def count_measurements(report_uuid: str, database) -> int:
"""Return the number of measurements."""
return database.measurements.count_documents(filter={"report_uuid": report_uuid})
|
c9f20149ec975134ad7f0abab28a2c632f5295a1
| 697,115
|
import os
import glob
import logging
def check_required_files(dir_to_validate):
"""Check if required files exists."""
REQUIRED_FILES = ["01-*.md",
"discussion.md",
"index.md",
"instructors.md",
"LICENSE.md",
"motivation.md",
"README.md",
"reference.md"]
valid = True
for required in REQUIRED_FILES:
req_fn = os.path.join(dir_to_validate, required)
if not glob.glob(req_fn):
logging.error(
"Missing file {0}.".format(required))
valid = False
return valid
|
ec1345cb749f0b7c89881a357b4a016d277cbdf1
| 697,116
|
def normalize_letters(one_letter_code) :
"""Convert RAF one-letter amino acid codes into IUPAC standard codes.
Letters are uppercased, and "." ("Unknown") is converted to "X".
"""
if one_letter_code == '.' :
return 'X'
else :
return one_letter_code.upper()
|
ad1d80a4663859194f84a31dc03001481b477503
| 697,117
|
def is_original_process_func(clsdict, bases, base_class=None):
"""Only wrap the original `process` function.
Without these (minimal) checks, the `process` function would be
wrapped at least twice (the original `process` function from the
user's DoFn, and our wrapped/decorated one), essentially causing
any call to `process` (and the decorator) to be called at least
twice.
Args:
clsdict (dict): dictionary of items for the class being
instantiated.
bases (tuple(class)): base class(es) of the class being
instantiated.
Returns:
(bool) whether or not to wrap the `process` method of the class
being instantiated.
"""
if "process" not in clsdict:
return False
# ignore classes that don't inherit from our base class
base_cls_names = [b.__name__ for b in bases]
if base_class and base_class not in base_cls_names:
return False
# if the value of clsdict["process"] is not a meth/func
if not callable(clsdict["process"]):
return False
# if the value of clsdict["process"] is already "new_process"
if getattr(clsdict["process"], "__name__") != "process":
return False
return True
|
17598fa632fc4dc9c8e52613325e6448b3064559
| 697,119
|
def int32(buffer, byte):
"""Retorna o valor inteiro de 4 bytes da posiรงรฃo byte, contido em buffer"""
# return int(buffer[(byte-1):(byte-1)+4].encode('hex'), 16)
return int(buffer[(byte - 1):(byte - 1) + 4].hex(), 16)
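# Worked example (a sketch): the 4 bytes starting at 1-based position 1 are
# read big-endian, so b'\x00\x00\x01\x00' -> 0x00000100 == 256.
assert int32(bytes([0x00, 0x00, 0x01, 0x00]), 1) == 256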
|
5d038fc68990297f691f1ed744d4e086d6742f3e
| 697,121
|
def parse_identifiers(identifiers):
""" Reads identifiers, which may be a string or list/tuple/set of objects
instances with name instances as string, returning a frozen set of names.
"""
if isinstance(identifiers, str):
return frozenset(identifiers.split(','))
if not isinstance(identifiers, (list, tuple, set)):
identifiers = identifiers,
keys = list(identifiers)
for i, key in enumerate(keys):
if not isinstance(key, str):
assert hasattr(key, 'name'), \
"Each element in hashable tuple must be a string or have name attribute"
key_name = key.name
assert isinstance(key_name, str), \
"Each non-string hashable tuple element must a string name attribute"
keys[i] = key_name
return frozenset(keys)
|
285a12352607ecc6b4a0c9ceb6f0ca7e2f56988e
| 697,122
|
def dhash_hamming_distance(dhash1, dhash2):
"""
Calculate the hamming distance between two dhash values
:param dhash1: str, the dhash of an image returned by `calculate_dhash`
:param dhash2: str, the dhash of an image returned by `calculate_dhash`
:return: int, the hamming distance between two dhash values
"""
difference = (int(dhash1, 16)) ^ (int(dhash2, 16))
return bin(difference).count("1")
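# Illustrative check (a sketch): two hex hashes differing in exactly one bit.
assert dhash_hamming_distance("0f", "0e") == 1  # 0b1111 ^ 0b1110 has one set bit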
|
14c5f44750e008edea59cef9d1950cb24d959630
| 697,123
|
def has_metadata(trait, metadata, value=None, recursive=True):
"""
Checks if a given trait has a metadata (and optionally if it is set to particular value)
"""
count = 0
if (
hasattr(trait, "_metadata")
and metadata in list(trait._metadata.keys())
and (trait._metadata[metadata] == value or value is None)
):
count += 1
if recursive:
if hasattr(trait, "inner_traits"):
for inner_trait in trait.inner_traits():
                count += has_metadata(inner_trait.trait_type, metadata, value, recursive)
if hasattr(trait, "handlers") and trait.handlers is not None:
for handler in trait.handlers:
                count += has_metadata(handler, metadata, value, recursive)
return count > 0
|
9318bd6705ceb6ed5f2c9401d77bdf3dc74103dd
| 697,124
|
def csc_cumsum_i(p, c, n):
"""
p [0..n] = cumulative sum of c [0..n-1], and then copy p [0..n-1] into c
@param p: size n+1, cumulative sum of c
@param c: size n, overwritten with p [0..n-1] on output
@param n: length of c
@return: sum (c), null on error
"""
nz = 0
nz2 = 0.0
for i in range(n):
p[i] = nz
nz += c[i]
nz2 += c[i] # also in double to avoid CS_INT overflow
c[i] = p[i] # also copy p[0..n-1] back into c[0..n-1]
p[n] = nz
return int(nz2) # return sum (c [0..n-1])
|
ab567b6b357fc7e5e1b0a961b9b66d88487a0e7f
| 697,125
|
def boolean_flag(name, configurable, set_help='', unset_help=''):
"""Helper for building basic --trait, --no-trait flags.
Parameters
----------
name : str
The name of the flag.
configurable : str
The 'Class.trait' string of the trait to be set/unset with the flag
set_help : unicode
help string for --name flag
unset_help : unicode
help string for --no-name flag
Returns
-------
cfg : dict
A dict with two keys: 'name', and 'no-name', for setting and unsetting
the trait, respectively.
"""
# default helpstrings
set_help = set_help or "set %s=True" % configurable
unset_help = unset_help or "set %s=False" % configurable
cls, trait = configurable.split('.')
setter = {cls: {trait: True}}
unsetter = {cls: {trait: False}}
return {name: (setter, set_help), 'no-' + name: (unsetter, unset_help)}
|
dabb654a75123a79b865c1570e6ed74568c5ae41
| 697,127
|
import pathlib
import argparse
def _parse_args():
"""Define and parse the command line arguments.
Args:
None
Return:
argparse.Namespace Command line arguments specified by the user.
"""
BASE_PATH = pathlib.Path(__file__).parent.absolute().parents[0]
CONFIG_PATH = pathlib.Path.joinpath(BASE_PATH, "config", "default.yaml")
parser = argparse.ArgumentParser(description="LIT - labeling iteratively")
parser.add_argument(
"-m",
"--model",
default=None,
help="The initial model to be used throughout the process. Defaulting to 'None'.",
)
parser.add_argument(
"-t",
"--train",
default=None,
help="Path to the data that should be used for training the model. Defaulting to 'None'.",
)
parser.add_argument(
"-u",
"--unlabeled",
default=None,
help="Path to the so far unlabeled data. Defaulting to 'None'.",
)
parser.add_argument(
"-c",
"--config",
default=str(CONFIG_PATH),
help="Path to the config file. Defaulting to {c}".format(c=str(CONFIG_PATH)),
)
parser.add_argument(
"-e",
"--example",
default=None,
help="If this flas is set, instead run the examples. Defaulting to 'None'.",
)
return parser.parse_args()
|
3accb68e0cb23cba18801a99ccf8f5a8afd090cf
| 697,128
|
from typing import Callable
def recursive_map(func: Callable, obj: object) -> object:
"""
Map `func` recursively, which affects all items in lists
and all values in dictionaries.
"""
if isinstance(obj, dict):
return {k: recursive_map(func, v) for (k, v) in obj.items()}
if isinstance(obj, list):
return [recursive_map(func, v) for v in obj]
return func(obj)
|
a7638f68ec7c1d4f57418fba03d4ae251cb5e9f0
| 697,129
|
def get_all_users(conn):
"""Get all users from table"""
cursor = conn.cursor()
cursor.execute('SELECT * FROM users')
data = cursor.fetchall()
return data
|
aedbca17a40912f37843b8ef48a6abbe7db9b980
| 697,130
|
def PassN(s,substring,iFrom,iN):
"""Find index after passing substring N times.
"""
while iN>0:
iIndex= s.find(substring,iFrom)
if iIndex==-1: return -1
else: iFrom=iIndex+1
iN-=1
return iFrom
|
1c98cb95ae95ffcb510dd38b1a6b7300f10448e7
| 697,131
|
def LevenshteinCost(a, b):
"""Cost function for Levenshtein distance with substitutions.
Cost function for what is now thought of as the classical
Levenshtein distance, which is the minimum number of insertions,
deletions, and substitutions required to edit one sequence into
another. Returns zero for matches, unity for indels and
substitutions.
Args:
a: input symbol, or None for insertions
b: output symbol, or None for deletions
Returns:
0 for exact match
1 for mismatch / substitution
1 for insertion or deletion
"""
if a == b:
return 0
else:
return 1
|
8d82b0003d1fa00720270c33cfd4a119dd5f00c7
| 697,132
|
def str_to_dict(headers):
"""
ๅฐ"Host: mp.weixin.qq.com"ๆ ผๅผ็ๅญ็ฌฆไธฒ่ฝฌๆขๆๅญๅ
ธ็ฑปๅ
่ฝฌๆขๆๅญๅ
ธ็ฑปๅ
:param headers: str
:return: dict
"""
headers = headers.split("\n")
d_headers = dict()
for h in headers:
h = h.strip()
if h:
k, v = h.split(":", 1)
d_headers[k] = v.strip()
return d_headers
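# Illustrative usage (a sketch) with a pasted two-line header block:
assert str_to_dict("Host: mp.weixin.qq.com\nAccept: */*") == {
    "Host": "mp.weixin.qq.com", "Accept": "*/*"}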
|
62d4b3f5a2fbeb42428bdc138a3db0b87d1873b9
| 697,134
|
def array2SpecMca(data):
"""Write a python array into a Spec array.
Return the string containing the Spec array
"""
tmpstr = "@A "
length = len(data)
for idx in range(0, length, 16):
if idx + 15 < length:
for i in range(0, 16):
tmpstr += "%.8g " % data[idx + i]
if idx + 16 != length:
tmpstr += "\\"
else:
for i in range(idx, length):
tmpstr += "%.8g " % data[i]
tmpstr += "\n"
return tmpstr
|
fa4934ab38fab9f6f07369877956214041c34f2c
| 697,135
|
import requests
def get_repo_version():
"""
Get the current version on GitHub
"""
url = 'https://raw.githubusercontent.com/aquatix/ns-notifications/master/VERSION'
try:
response = requests.get(url)
if response.status_code != 404:
return response.text.replace('\n', '')
except requests.exceptions.ConnectionError:
#return -1
return None
return None
|
523e8b79f4621406f42810a03237094fd334c412
| 697,137
|
def floor_division(dividend: int, divisor: int):
"""Describes and gives an example of how floor division and the "//" symbols are used in Python.
Examples:
>>> floor_division(15, 6)\n
Floor division returns the nearest whole number as the quotient, rounding down when there is a remainder.\n
dividend // divisor\n
15 // 6\n
2
Args:
dividend (int): Supply a dividend
divisor (int): Supply a divisor
Returns:
int: Returns the quotient of the floor division operation
"""
print(
"Floor division returns the nearest whole number as the quotient, rounding down when there is a remainder."
)
print(" dividend // divisor ")
print(f" {dividend} // {divisor}")
print("")
return dividend // divisor
|
962bb32f4f6ad28819a8178e9e70824ae91fd514
| 697,138
|
def pedersenOpen(n,g,h,m,r,c):
"""Open a pedersen commit. Arguments:
n modulus (i.e. Z*_n)
g generator 1
h generator 2
m message
r random
c commit generated by pedersenCommit() to verify"""
    # three-argument pow keeps intermediate values reduced mod n; computing
    # g**m * h**r directly is correct but far too slow for large parameters
    return c == pow(g, m, n) * pow(h, r, n) % n
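# Worked toy example (a sketch; tiny insecure parameters, and it assumes
# pedersenCommit() computes g**m * h**r mod n):
n, g, h, m, r = 23, 4, 9, 3, 5
c = pow(g, m, n) * pow(h, r, n) % n
assert pedersenOpen(n, g, h, m, r, c)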
|
4657cd68214566f4e2e8231b61067963478af9a1
| 697,139
|
def extend_indices(segments, margin):
"""
Decrease and increase the values of the first and last elements
respectively in each list of segments by a given margin.
    The first index of the first segment and the last index of the
    last segment are not modified.
Parameters
----------
segments : list
The list of lists of first and last indices of segments.
margin : int
        The extra extent to add on each side of segments.
Example
-------
>>> segments = split_range(16, 4)
>>> extend_indices(segments, margin=1)
[[0, 5], [3, 9], [7, 13], [11, 16]]
"""
if len(segments) == 1:
return segments
else:
# first process the first and last segments
segments[0][-1] += margin
segments[-1][0] -= margin
# if there are more than 2 segments
        for i in range(1, len(segments) - 1):
segments[i][0] -= margin
segments[i][-1] += margin
return segments
|
34da9377af9342a811fb40bdadcadce174fcb605
| 697,140
|
def _set_delta_rbound(d, size):
"""Truncate the given delta to the given size
:param size: size relative to our target offset, may not be 0, must be smaller or equal
to our size
:return: d"""
d.ts = size
# NOTE: data is truncated automatically when applying the delta
# MUST NOT DO THIS HERE
return d
|
d756bcbcb0cadcee02989a2ab417e35dc9d851c7
| 697,141
|
import os
def _get_filepath(in_dir, basename):
"""Find correct name of file (extend basename with timestamp)."""
all_files = [
f for f in os.listdir(in_dir)
if os.path.isfile(os.path.join(in_dir, f))
]
for filename in all_files:
if filename.endswith(basename):
return os.path.join(in_dir, filename)
raise OSError(
f"Cannot find input file ending with '{basename}' in '{in_dir}'")
|
8ee2800a2686fb08d1b99cd236b6d7829b804f81
| 697,142
|
def z_norm(result):
""" here we get mean,standard deviation and returns the same """
result_mean = result.mean() #.mean() is a predifined func which finds the mean
result_std = result.std() # .std() is a predefined func which finds the standard deviation
result -= result_mean # result = result- result mean
result /= result_std # result = result/result std
return result, result_mean
|
6117bd15f9b5f7d0634366e36c0c8e2ef90d7cdd
| 697,143
|
import multiprocessing
import hashlib
import subprocess
import os
def system_parallel(cmdL, nproc=None, verbose=True):
"""
Run a list of commands (each a string) via the shell using GNU parallel with nproc processes, return all outputs in a single str instance.
"""
if nproc is None:
nproc = multiprocessing.cpu_count()
sh_filename = '_run_parallel_' + hashlib.md5('\n'.join(cmdL).encode('utf-8')).hexdigest()
with open(sh_filename, 'wt') as f:
f.write('\n'.join(cmdL))
out = subprocess.check_output('parallel -j%d %s--keep-order < %s' % (nproc, '--verbose ' if verbose else '', sh_filename), shell=True)
out = out.decode('utf-8')
if verbose:
print('-'*80)
print('system_parallel output:')
print('-'*80)
print(out)
os.remove(sh_filename)
return out
|
d456cadf20bc6f9435a516649282770fa6ff630e
| 697,144
|
def greet(name, /, greeting="Hello"):
"""
the slash '/' is placed between 'name' and 'greeting' to signify that
'name' is a positional-only argument while 'greeting' is a regular
argument that can be passed either by position or keyword
"""
return f"{greeting}, {name}"
|
4fd1ff5620a245535f3c701f1ce452780804faaa
| 697,145
|
def get_longest_soft_clipped_bases(read):
"""Function that takes as input the cigar string and returns a dictionary containing the longest soft-clipped part of
the read, the quality values and the read mapping quality"""
read_cigar = read.cigar
#get index of the soft-clipped in the cigar
match_index = [x for x in range(len(read_cigar)) if read_cigar[x][0] == 4]
# soft-clipped in only one side
if len(match_index) == 1:
#return first n soft-clipped
if match_index == [0]:
return{'seq': read.seq[0:read_cigar[0][1]],'qual': read.query_qualities[0:read_cigar[0][1]],'mapq':read.mapq}
#return last n nucleotides
elif match_index[0] == (len(read_cigar)-1):
return {'seq':read.seq[-read_cigar[match_index[0]][1]:],
'qual':read.query_qualities[-read_cigar[match_index[0]][1]:],'mapq':read.mapq}
#soft-clipped in both sides of the read
else:
#make sure that is soft-clipped on both sides
try:
assert read_cigar[0][0] == 4 and read_cigar[-1][0] == 4
# longest soft-clipped are first n nucleotides
if read_cigar[0][1] >= read_cigar[-1][1]:
return {'seq': read.seq[0:read_cigar[0][1]],'qual': read.query_qualities[0:read_cigar[0][1]],
'mapq':read.mapq}
else:
return{'seq':read.seq[-read_cigar[-1][1]:],'qual': read.query_qualities[-read_cigar[-1][1]:],
'mapq': read.mapq}
except AssertionError as e:
print(e)
|
9aa6a10006dd8271232776f0c7de1046a62e4e3a
| 697,146
|
import os
def file_checksum(file_path, hash_callable):
"""Given path of the file and hash function, calculates file digest"""
if os.path.isfile(file_path) and callable(hash_callable):
hash_obj = hash_callable()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_obj.update(chunk)
return hash_obj.hexdigest()
return None
|
dda5abb734aebdb67ac895c60eb7317d75991e66
| 697,147
|
from datetime import datetime
def datetime_to_ms(dt: datetime) -> int:
"""Convert a datetime to milliseconds."""
epoch = datetime.fromtimestamp(0, dt.tzinfo)
return int((dt - epoch).total_seconds() * 1000.0)
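# Illustrative check (a sketch): one second past the UTC epoch is 1000 ms.
from datetime import timezone
assert datetime_to_ms(datetime(1970, 1, 1, 0, 0, 1, tzinfo=timezone.utc)) == 1000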
|
c14128887dfa7201d68b055c2af5be56f5f8ab37
| 697,148
|
def get_length(key_list):
"""
Get length in the TLV
:param key_list: Key to parse
:return: Length in the TLV
"""
# Find tag value
if 2 == int(key_list[0], 16): # pylint: disable=misplaced-comparison-constant
# check MSB is zero, if yes then this byte indicates length
# else this byte indicates number of bytes for length calculation
if int(key_list[1], 16) & 0x80 == 0x00:
length = int(key_list[1], 16)
length += 2
else:
length_bytes = int(key_list[1], 16) & 0x7f # Mask MSB
length_header = 2 + length_bytes
length = int(key_list[2], 16)
for i in range(1, length_bytes):
length = length * 0x100
length = length | int(key_list[2 + i], 16)
length += length_header
return length
return 0
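# Worked example (a sketch): tag 0x02 with short-form length 0x03 gives
# 2 header bytes + 3 value bytes = 5 bytes in total.
assert get_length(['02', '03', 'AA', 'BB', 'CC']) == 5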
|
70b2b2069b9cd55f49da5711cfcd816e371affb8
| 697,149
|
from typing import List
from typing import Tuple
from typing import Optional
from typing import Match
import re
def parse(mod_path: str) -> List[Tuple[str, str]]:
"""
Given a module name (file path, relative to ., eg src/acko/utils.rs), this
function returns:
List (arg, type)
"""
with open(mod_path, "r") as f:
args_str_: Optional[Match[str]] = re.compile(
r"(?<=pub fn layout\()[^{]*(?=\) ->)"
).search(f.read().replace("\n", " "))
args_str: str = "" if not args_str_ else args_str_[0].replace(" ", "")
return [
(r.split(":")[0], r.split(":")[1]) for r in args_str.split(",") if r != ""
]
|
0c85bdd780c6abc43cf2c9c1c1b4cb3ad4a8bccc
| 697,150
|
def get_skidl_spice_ref(skidle_element):
"""
Helper function to retrieve SKiDL element name as appears in the final netlist
Args:
skidle_element (skidl.Part.Part): SKiDl part to get the netlist name from
Returns:
returns a string with the netlist name of `skidle_element`, or throws an
error if `skidle_element` is not a SKiDl part
"""
    # The Part class moved between skidl versions (skidl.part.Part in newer
    # releases, skidl.Part.Part in older ones), so accept either spelling.
    assert repr(type(skidle_element)) in (
        "<class 'skidl.part.Part'>", "<class 'skidl.Part.Part'>"), \
        '`skidle_element` must be a SKiDl part'
if skidle_element.ref_prefix!=skidle_element.ref[0]:
return skidle_element.ref_prefix+skidle_element.ref
else:
return skidle_element.ref
|
4eacd423cbcdbc5171eb1f4b5ef81d6482f34843
| 697,151
|
def admit_util_getplain(formula):
""" Method to make a chemical formula more readable for embedding in filenames
Examples:
CH3COOHv=0 -> CH3COOH
g-CH3CH2OH -> CH3CH2OH
(CH3)2COv=0 -> (CH3)2CO
cis-CH2OHCHOv= -> CH2OHCHO
g'Ga-(CH2OH)2 -> (CH2OH)2
Parameters
----------
formula : str
The chemical formula to process
Returns
-------
String of the more readable formula
"""
pos = formula.find("-")
if pos != -1:
if not(-1 < formula.find("C") < pos or -1 < formula.find("N") < pos \
or -1 < formula.find("O") < pos or -1 < formula.find("H") < pos):
formula = formula[pos + 1:]
pos = formula.find("v")
if pos != -1:
formula = formula[:pos]
pos = formula.find("&Sigma")
if pos != -1:
return formula[:pos]
formula = formula.replace(";","")
return formula.replace("&","-")
|
1f5f704da7fda9f4266a169f01c3790203087809
| 697,152
|
import subprocess
def conn_status():
"""Checks, parses and returns status of VPN connection as True or False"""
result = subprocess.check_output(["expressvpn", "status"])
if b"Connected" in result:
print("ExpressVPN connection was checked and is live.")
if b"A new version" in result:
print("ExpressVPN reports there is a new version available.")
return True
else:
print("ExpressVPN connection was checked and is down.")
return False
|
76e7c96fa825436a643e081ae56db51d593f2fab
| 697,153
|
def weighted_values(values, weights):
"""Returns paired lists of results and their GCM weights.
Args:
values: Dictionary of GCM -> result value
weights: Dictionary of GCM -> GCM weight
"""
models = values.keys()
values_list = [values[model] for model in models if model in weights]
weights_list = [weights[model] for model in models if model in weights]
return (values_list, weights_list)
|
6bd59e99d19d68535148f5bdf34bcec2f0cb4736
| 697,154
|
from typing import Tuple
def set_add(arr: Tuple[str, ...]) -> int:
"""
>>> set_add(('UK', 'China', 'USA', 'France', 'New Zealand', 'UK', 'France'))
5
"""
return len(set(arr))
|
e47705878c34400e3763a887cba4568dd5d621e4
| 697,155
|
def rank_index(square):
"""Gets the rank index of the square where ``0`` is the first rank."""
return square >> 3
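# Illustrative check (a sketch), assuming the usual 0-63 numbering with a1 == 0:
assert rank_index(12) == 1  # square 12 (e2) is on the second rank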
|
d4d3f896b5a7cd8b7184320efd69a652c61ccbcf
| 697,156
|
def html_summary_table(data):
"""Generate HTML table of novelty detection statistics.
data: dict
A JSON like structure of the table data that has the following format:
data = {
'modelA': {
'inlier_name': 'MNIST'
'outliers': {
'Fashion MNIST': {
'fpr_at_95_tpr': 0.02123,
'detection_error': 0.02373,
'auroc': 0.96573,
'aupr_in': 0.91231,
'aupr_out': 0.9852
},
'EMNIST Letters': {
'fpr_at_95_tpr': 0.02123,
'detection_error': 0.02373,
'auroc': 0.96573,
'aupr_in': 0.91231,
'aupr_out': 0.9852,
}
}
},
'modelB': {
'inlier_name': 'MNIST'
'outliers': {...}
}
}
"""
table = """
<table>
<tr>
<th>Model</th>
<th>Out-of-distribution dataset</th>
<th>FPR (95% TPR)</th>
<th>Detection Error</th>
<th>AUROC</th>
<th>AUPR In</th>
<th>AUPR Out</th>
</tr>
"""
for i, (model, model_data) in enumerate(data.items()):
table += "<tr>"
table += "<td rowspan={}><b>{}</b> ({})</td>".format(len(model_data['outliers']), model, model_data['inlier_name'])
for j, (outlier_name, scores) in enumerate(model_data['outliers'].items()):
if j != 0:
table += "<tr>"
table += "<td>{}</td>".format(outlier_name)
table += "<td>{:.1f}</td>".format(scores['fpr_at_95_tpr'] * 100)
table += "<td>{:.1f}</td>".format(scores['detection_error'] * 100)
table += "<td>{:.1f}</td>".format(scores['auroc'] * 100)
table += "<td>{:.1f}</td>".format(scores['aupr_in'] * 100)
table += "<td>{:.1f}</td>".format(scores['aupr_out'] * 100)
table += "</tr>"
table += "</table>"
return table
|
53162dfab20859b03408ff67c3a2421b0e84e33e
| 697,157
|
def hello(name):
    """Say hello."""
    return "hello " + name
|
3fd85b4c2f719e6c2abb7cd21bd7df2fe38c0272
| 697,158
|
def getSize(l):
"""
Returns size of list
Example
>>> getSize([0,4,32,21])
4
"""
    c = 0
    for i in l:
        c = c + 1
    return c
|
ab59a55778ed4b8034debea20d33c1cb609a4243
| 697,159
|
import os
def is_executable(filename, mode=os.F_OK | os.X_OK):
"""
Check whether the given file is executable.
:param filename: A relative or absolute pathname (a string).
:returns: :data:`True` if the file is executable,
:data:`False` otherwise.
"""
return os.path.exists(filename) and os.access(filename, mode) and not os.path.isdir(filename)
|
420de83f5d5126d4538f72524ab7a60848f35790
| 697,160
|
def _miriam_identifiers(type_, namespace, identifier):
"""
Fix the MetaNetX identifiers into miriam equivalents.
MetaNetX doesn't use correct miriam identifiers. This function maps the
known namespace and entity identifiers used by MetaNetX to valid miriam
identifiers.
Parameters
----------
type_ : string
"compartment", "reaction" or "metabolite"
namespace : string
The MetaNetX namespace identifier
identifier : string
The object identifier
Returns
-------
namespace : string
The corrected namespace
identifier : string
The corrected identifier
"""
if type_ == "compartment":
ns_map = {
"bigg": "bigg.compartment",
"cco": "cco",
"go": "go",
"name": "name", # unconfirmed
"seed": "seed",
}
return (ns_map[namespace], identifier)
elif type_ == "reaction":
ns_map = {
"bigg": "bigg.reaction",
"deprecated": "metanetx.reaction",
"kegg": "kegg.reaction",
"metacyc": "metacyc.reaction",
"reactome": "reactome",
"rhea": "rhea",
"sabiork": "sabiork.reaction",
"seed": "seed.reaction",
}
return (ns_map[namespace], identifier)
elif type_ == "metabolite":
if namespace == "kegg":
kegg_map = {
"C": "kegg.compound",
"D": "kegg.drug",
"E": "kegg.environ",
"G": "kegg.glycan",
}
return (kegg_map[identifier[0]], identifier)
elif namespace == "slm":
return ("swisslipid", f"SLM:{identifier}")
elif namespace == "chebi":
return (namespace, f"CHEBI:{identifier}")
else:
ns_map = {
"bigg": "bigg.metabolite",
"deprecated": "metanetx.chemical",
"envipath": "envipath", # unconfirmed
"hmdb": "hmdb",
"lipidmaps": "lipidmaps",
"metacyc": "metacyc.compound",
"reactome": "reactome",
"sabiork": "sabiork.compound",
"seed": "seed.compound",
}
return (ns_map[namespace], identifier)
|
19218dbe7e086dabf6527b26e770986462428ecb
| 697,161
|
import re
import sys
import os
def parse_otool_output(output):
"""Search otool output for MKL dependencies.
Return (mkl_dirs, mkl_libs)."""
# like "@rpath/libmkl_intel.dylib (compatibility version 0.0.0, current version 0.0.0)"
re1 = re.compile(r"\s*@rpath/(.+) \(.+\)")
# like "/usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 111.0.0)"
re2 = re.compile(r"\s*(.+) \(.+\)")
re_fname = re.compile(r"lib(mkl.*|iomp.*)\.(so|dylib)(\.[^ ]*)?")
# we assume for now that @rpath == <sys.prefix>/lib
if hasattr(sys,'base_prefix'):
prefix_dir = sys.base_prefix
else:
prefix_dir = sys.prefix
sys_lib_dir = os.path.join(prefix_dir, "lib")
mkl_dirs = []
mkl_libs = []
for l in output.splitlines():
m = re1.match(l)
if m:
fname = m.group(1)
m_fname = re_fname.match(fname)
if m_fname:
# we assume that @rpath is equal to sys.prefix
mkl_libs.append(os.path.join(sys_lib_dir,m.group(1)))
mkl_dirs.append(sys_lib_dir)
continue
m = re2.match(l)
if m:
path = m.group(1)
fname = os.path.basename(path)
m_fname = re_fname.match(fname)
if m_fname:
mkl_libs.append(path)
mkl_dirs.append(os.path.dirname(path))
return mkl_dirs,mkl_libs
|
112a7aa1f5f222165a899e96061e013bd4cab6f7
| 697,163
|
from typing import List
def add_to_rightmost_int(stack: List, x: int) -> List:
"""
    Add x to the rightmost int in stack;
    if there is no int in stack, do nothing.
    Return the modified stack.
"""
int_locations = [isinstance(i, int) for i in stack]
if not any(int_locations):
return stack
int_locations.reverse()
last_index = len(int_locations) - 1 - int_locations.index(True)
stack[last_index] += x
return stack
|
84240d412539cc221edd24462f97bb84a1eca051
| 697,164
|
def mapToRange(val, src, dst):
"""
Map the given value from the range of src to the range of dst.
"""
return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
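# Illustrative check (a sketch): rescale 5 from the range [0, 10] into [0, 100].
assert mapToRange(5, (0, 10), (0, 100)) == 50.0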
|
769dd6f6c52b0c8cdc2b0358457820c0ed15a1f8
| 697,165
|
def is_true(boolstring: str):
""" Converts an environment variables to a Python boolean. """
if boolstring.lower() in ('true', '1'):
return True
return False
|
466c839ff5ea25e970c9ce1a86fbcbc7d5fec3a1
| 697,166
|
def get_pr_filenames(fj):
"""
Parse filenames from file json
:param fj: pull requests files JSON
:type fj: JSON
:returns: list of filenames from one pull request
:rtype: list
"""
fns = []
for i in range(len(fj)):
fns.append(fj[i]['filename'])
return fns
|
23adfe04b83d1829851cacb63e841cba18022a75
| 697,167
|
def get_sm_from_descriptor(descr):
"""
    This method returns a dict of specific managers based on
    a received descriptor
"""
sm_dict = {}
if "service_specific_managers" in descr:
sm_dict = {}
for ssm in descr["service_specific_managers"]:
for option in ssm["options"]:
if option["key"] == "type":
sm_dict[option["value"]] = {}
sm_dict[option["value"]]["id"] = ssm["id"]
sm_dict[option["value"]]["image"] = ssm["image"]
if "function_specific_managers" in descr:
sm_dict = {}
for fsm in descr["function_specific_managers"]:
for option in fsm["options"]:
if option["key"] == "type":
sm_dict[option["value"]] = {}
sm_dict[option["value"]]["id"] = fsm["id"]
sm_dict[option["value"]]["image"] = fsm["image"]
return sm_dict
|
53d06e9f399689a715e812b849337e04555f95cf
| 697,168
|
import optparse
def parse_shards_into(option, opt, value, parser):
"""Parse lists of shard or shard ranges into a set().
Examples:
0-2
0,1-3,5
1,3,5
"""
def shard_range_parser(shards):
result = set()
for part in shards.split(','):
x = part.split('-')
result.update(range(int(x[0]), int(x[-1]) + 1))
return sorted(result)
try:
setattr(parser.values, option.dest, shard_range_parser(value))
except ValueError as e:
raise optparse.OptionValueError('Failed to parse: %s' % e)
|
1262db211fe698f3e38b5b61e8dce1bddd2dcb21
| 697,169
|
def add(x, y):
"""Compute the sum of x and y."""
    return x + y
|
a107334c4b1d7da50302555f18c3a4488a2423c5
| 697,170
|
def edits2str(edits):
"""
get a printable representation for a list of edits
"""
    output_str = [str(edit) for edit in edits]
return output_str
|
599afcd8636e0939b72ebc88ebb88743fd4fe6a6
| 697,171
|
def summerB(n: int) -> int:
"""
Iterates over the multiples only. Uses a set to avoid repeats.
"""
total = 0
fizzvisits = set()
for i in range(0, n, 3):
total += i
fizzvisits.add(i)
for j in range(0, n, 5):
if j not in fizzvisits:
total += j
return total
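# Worked example (a sketch): below 10, the multiples of 3 contribute 3+6+9 = 18
# and the remaining multiple of 5 contributes 5, so the total is 23.
assert summerB(10) == 23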
|
7b968a859fa42a0a72cc1a7352506f60c7e0f97a
| 697,172
|
import re
def _get_enclosed_str(text, prefix, suffix):
"""
Remove prefix and suffix from the string if the string contains both prefix and suffix.
"""
success = False
text = text.strip()
pattern = f"^{prefix}.+{suffix}$"
if re.search(pattern, text):
pattern1 = f"^{prefix}"
pattern2 = f"{suffix}$"
text = re.sub(pattern1, "", text)
text = re.sub(pattern2, "", text)
success = True
return text, success
|
4a4305d18308491f477ae929c4009af39f345472
| 697,173
|
import os
def diff_path(top, subdir):
"""difference between two absolute paths"""
top = os.path.normpath(top).replace('\\', '/').split('/')
subdir = os.path.normpath(subdir).replace('\\', '/').split('/')
if len(top) == len(subdir): return ''
diff = subdir[len(top) - len(subdir):]
return os.path.join(*diff)
|
ed5073d6759c04a46dd41e3bfb634b03a29755b1
| 697,174
|
import re
import textwrap
def process_description(description_fce):
"""Removes special character delimiters, titles
and wraps paragraphs.
"""
def inner(description):
clear_description = \
re.sub(r'\s+', ' ', # multiple whitespaces
# general URLs
re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '',
# delimiters
re.sub('(#|=|---|~|`)*', '',
# very short lines, typically titles
re.sub('((\r?\n)|^).{0,8}((\r?\n)|$)', '',
# PyPI's version and downloads tags
re.sub(
'((\r*.. image::|:target:) https?|(:align:|:alt:))[^\n]*\n', '',
description_fce(description))))))
return ' '.join(textwrap.wrap(clear_description, 80))
return inner
|
1a8f39704a025669c1b70ae279f7d9db8de84855
| 697,175
|
def demo_app_name(name):
""" Returns a capitalized title for the app, with "Dash"
in front."""
return 'Dash ' + name.replace('app_', '').replace('_', ' ').title()
|
58e39a80c1940f784daa6e0f6ee35df2757c5ca6
| 697,178
|
import logging
def check_input(channel_name, vods_clips, start, end, tracker, download, rename, try_muted, workers, test):
"""checks if input is valid returns 0 or 1"""
logger = logging.getLogger(__name__)
if (len(channel_name) < 4 or
(not (vods_clips == "clips" or vods_clips == "vods" or vods_clips == "both")) or
(not (start == "" or (len(start) == 10 and len(start.split("-")[0]) == 4))) or
(not (end == "" or (len(end) == 10 and len(end.split("-")[0]) == 4))) or
(not (rename == "no" or rename == "yes")) or
(not (try_muted == "no" or try_muted == "yes")) or
(not (tracker == "TT" or tracker == "SC")) or
(not (download == "no" or download == "yes")) or
(not (test == "no" or test == "yes")) or
(not isinstance(workers, int))):
logger.critical("invalid input, please try again")
return 0
return 1
|
b833a8a3c67b6c1320323f083a963010c3d1b476
| 697,180
|
def parse_italic(soup):
""" Replace i tags with text:span with automatic style """
italic_tags = soup.find_all("em")
for i_tag in italic_tags:
i_tag.name = 'text:span'
i_tag.attrs['text:style-name'] = "ITALIC"
return soup
|
c2c58afa0a68328088900e5324f4908d74ddbb94
| 697,181
|
import argparse
def parse_args():
"""Parse arguments for module.
Returns:
argparse.Namespace: contains accessible arguments passed in to module
"""
parser = argparse.ArgumentParser()
parser.add_argument("--task",
help=("task for neural network to train on; "
"either prediction or conversion"))
parser.add_argument("--data-path",
help=("path to h5 files containing data "
"(must contain training.h5 and validation.h5)"))
parser.add_argument("--representation",
help=("will normalize if quaternions, will use expmap "
"to quat validation loss if expmap"),
default="quaternion")
parser.add_argument("--full-transformer",
help=("will use Transformer with both encoder and "
"decoder if true, will only use encoder "
"if false"),
default=False,
action="store_true")
parser.add_argument("--model-file-path",
help="path to model file for saving it after training")
parser.add_argument("--batch-size",
help="batch size for training", default=32)
parser.add_argument("--learning-rate",
help="initial learning rate for training",
default=0.001)
parser.add_argument("--beta-one",
help="beta1 for adam optimizer (momentum)",
default=0.9)
parser.add_argument("--beta-two",
help="beta2 for adam optimizer", default=0.999)
parser.add_argument("--seq-length",
help=("sequence length for model, will be divided "
"by downsample if downsample is provided"),
default=20)
parser.add_argument("--downsample",
help=("reduce sampling frequency of recorded data; "
"default sampling frequency is 240 Hz"),
default=1)
parser.add_argument("--in-out-ratio",
help=("ratio of input/output; "
"seq_length / downsample = input length = 10, "
"output length = input length / in_out_ratio"),
default=1)
parser.add_argument("--stride",
help=("stride used when reading data in "
"for running prediction tasks"),
default=3)
parser.add_argument("--num-epochs",
help="number of epochs for training", default=1)
parser.add_argument("--num-heads",
help="number of heads in Transformer")
parser.add_argument("--dim-feedforward",
help=("number of dimensions in feedforward layer "
"in Transformer"))
parser.add_argument("--dropout",
help="dropout percentage in Transformer")
parser.add_argument("--num-layers",
help="number of layers in Transformer")
args = parser.parse_args()
if args.data_path is None:
parser.print_help()
return args
|
9fc925aa04a6ea376ad6993419629cc3f2a5f1ac
| 697,182
|
def csvheader(parent, nodelist):
"""
Gives the header for the CSV
"""
header = ""
for subnode in nodelist:
if (subnode.nodeType == subnode.ELEMENT_NODE):
header = header + "," + parent + "." + subnode.tagName
return header[1:] + "\n"
|
9a488d29c52e421f5a7e2e2f9929274422f41ffc
| 697,183
|
def calc_time_cost_function_total(natom, nkpt, kmax, niter, nspins=1):
"""Estimates the cost of simulating a all iteration of a system"""
costs = natom**3 * kmax**3 * nkpt * nspins * niter
return costs
|
bba431933e38c035f3e5285536beecbb677c7175
| 697,184
|
def to_dict(condset: set) -> dict:
"""
Create a dictionary of conditions with a unique integer value for each
condition.
:param condset: Conditions set.
:return: Dictionary of all conditions with integer values.
"""
conds = {}
index = 0
for item in condset:
conds[str(item)] = index
index += 1
return conds
|
3fd9953139ac3785aeb2ba527bc0365e3908f376
| 697,185
|
import struct
async def probe(stream): # type: ignore
"""Probe for img dimensions."""
w, h = None, None
chunk = await stream.readexactly(26)
if chunk.startswith(b"\x89PNG\r\n\x1a\n"):
if chunk[12:16] == b"IHDR":
w, h = struct.unpack(">LL", chunk[16:24])
elif chunk[12:16] == b"CgBI":
# fried png http://www.jongware.com/pngdefry.html
chunk += await stream.readexactly(40 - len(chunk))
w, h = struct.unpack(">LL", chunk[32:40])
else:
w, h = struct.unpack(">LL", chunk[8:16])
return {"type": "png", "width": w, "height": h}
if chunk.startswith(b"GIF89a") or chunk.startswith(b"GIF87a"):
w, h = struct.unpack("<HH", chunk[6:10])
return {"type": "gif", "width": w, "height": h}
if chunk.startswith(b"\xff\xd8"):
start = 2
data = chunk
while True:
if data[start : start + 1] != b"\xff":
return
if data[start + 1] in b"\xc0\xc2":
h, w = struct.unpack(">HH", data[start + 5 : start + 9])
return {"type": "jpg", "width": w, "height": h}
(segment_size,) = struct.unpack(">H", data[start + 2 : start + 4])
data += await stream.readexactly(segment_size + 9)
start = start + segment_size + 2
if chunk.startswith(b"\x00\x00\x01\x00") or chunk.startswith(b"\x00\x00\x02\x00"):
img_type = "ico" if chunk[2:3] == b"\x01" else "cur"
num_images = struct.unpack("<H", chunk[4:6])[0]
w, h = struct.unpack("BB", chunk[6:8])
w = 256 if w == 0 else w
h = 256 if h == 0 else h
return {"type": img_type, "width": w, "height": h, "num_images": num_images}
if chunk.startswith(b"BM"):
headersize = struct.unpack("<I", chunk[14:18])[0]
if headersize == 12:
w, h = struct.unpack("<HH", chunk[18:22])
elif headersize >= 40:
w, h = struct.unpack("<ii", chunk[18:26])
else:
return
return {"type": "bmp", "width": w, "height": h}
if chunk.startswith(b"MM\x00\x2a") or chunk.startswith(b"II\x2a\x00"):
w, h, orientation = None, None, None
endian = ">" if chunk[0:2] == b"MM" else "<"
offset = struct.unpack(endian + "I", chunk[4:8])[0]
chunk += await stream.readexactly(offset - len(chunk) + 2)
tag_count = struct.unpack(endian + "H", chunk[offset : offset + 2])[0]
offset += 2
for _ in range(tag_count):
if len(chunk) - offset < 12:
                chunk += await stream.readexactly(12)
type_ = struct.unpack(endian + "H", chunk[offset : offset + 2])[0]
data = struct.unpack(endian + "H", chunk[offset + 8 : offset + 10])[0]
offset += 12
if type_ == 0x100:
w = data
elif type_ == 0x101:
h = data
elif type_ == 0x112:
orientation = data
if all([w, h, orientation]):
break
if orientation >= 5: # type: ignore
w, h = h, w
return {"type": "tiff", "width": w, "height": h, "orientation": orientation}
if chunk[:4] == b"RIFF" and chunk[8:15] == b"WEBPVP8":
w, h = None, None
type_ = chunk[15:16]
chunk += await stream.readexactly(30 - len(chunk))
if type_ == b" ":
w, h = struct.unpack("<HH", chunk[26:30])
w, h = w & 0x3FFF, h & 0x3FFF
elif type_ == b"L":
w = 1 + (((ord(chunk[22:23]) & 0x3F) << 8) | ord(chunk[21:22]))
h = 1 + (
((ord(chunk[24:25]) & 0xF) << 10)
| (ord(chunk[23:24]) << 2)
| ((ord(chunk[22:23]) & 0xC0) >> 6)
)
elif type_ == b"X":
w = 1 + struct.unpack("<I", chunk[24:27] + b"\x00")[0]
h = 1 + struct.unpack("<I", chunk[27:30] + b"\x00")[0]
return {"type": "webp", "width": w, "height": h}
if chunk.startswith(b"8BPS"):
h, w = struct.unpack(">LL", chunk[14:22])
return {"type": "psd", "width": w, "height": h}
|
f42dfbbbe766b1a655f1f6b299172d6568c66498
| 697,187
|
import os
import glob
def get_files(filepath: str) -> list:
"""
Load all *.json files found in filepath and its subdirectories.
"""
all_files = []
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root,'*.json'))
for f in files :
all_files.append(os.path.abspath(f))
return all_files
|
197aa244ec55a343b28a7d9ae2b88fb5eed47bba
| 697,188
|
def GL2PL(gl):
""" Converts Genotype likelyhoods to phred scaled (PL) genotype likelyhoods. """
return -int(gl * 10)
|
d842a4a25ee6b095cfb456ab21965df4ccc24c1d
| 697,189
|
import socket
def port_to_int(port):
"""Convert a port string to an integer."""
try:
return int(port)
except ValueError:
return socket.getservbyname(port)
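# Illustrative checks (a sketch): numeric strings pass through; service names
# are resolved via the system services database (e.g. /etc/services).
assert port_to_int("8080") == 8080
assert port_to_int("http") == 80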
|
9f4ca2d80fdb70e16aaa0f0ccfa027fed772a986
| 697,191
|
def expand_markings(granular_markings):
"""Expand granular markings list.
If there is more than one selector per granular marking. It will be
expanded using the same marking_ref.
Example:
>>> expand_markings([
... {
... "selectors": [
... "description",
... "name"
... ],
... "marking_ref": "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"
... }
... ])
[
{
"selectors": [
"description"
],
"marking_ref": "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"
},
{
"selectors": [
"name"
],
"marking_ref": "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"
}
]
Args:
granular_markings: The granular markings list property present in a
SDO or SRO.
Returns:
list: A list with all markings expanded.
"""
expanded = []
for marking in granular_markings:
selectors = marking.get('selectors')
marking_ref = marking.get('marking_ref')
lang = marking.get('lang')
if marking_ref:
expanded.extend(
[
{'marking_ref': marking_ref, 'selectors': [selector]}
for selector in selectors
],
)
if lang:
expanded.extend(
[
{'lang': lang, 'selectors': [selector]}
for selector in selectors
],
)
return expanded
|
1d97c93b7953293cd71a86d06e988373f0a9150d
| 697,192
|
def first_nonequal_idx(left: str, right: str) -> int:
"""
Find first string index where left and right strings do not match
In [1]: first_nonequal_idx("", "californian")
Out[1]: 0
In [2]: first_nonequal_idx("aba", "abc")
Out[2]: 2
Note, if the strings match, the first-non-equal index will be equal to the length of the string:
In [3]: first_nonequal_idx("aba", "aba")
Out[3]: 3
"""
idx = 0
max_search_len = min(len(left), len(right))
while idx < max_search_len and left[idx] == right[idx]:
idx += 1
return idx
|
7bc0397dd290c6005adc96b8b45f14e04385a257
| 697,194
|
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
mem = set()
while n != 1:
n = sum([int(i) ** 2 for i in str(n)])
if n in mem:
return False
else:
mem.add(n)
else:
return True
|
cacc997c4e3241abc7e288bd1eb95890dfca6b99
| 697,195
|
def parse_coref_idx(string):
"""
return: ([starting], [ending], [singleton])
"""
starting = []
ending = []
singleton = []
idxes = string.strip().split('|')
for idx in idxes:
if idx.startswith('(') and idx.endswith(')'):
singleton.append(int(idx[1:-1]))
elif idx.startswith('('):
starting.append(int(idx[1:]))
elif idx.endswith(')'):
ending.append(int(idx[:-1]))
return starting, ending, singleton
|
f651a880ad28586569c78ad0f2aa702b91a0ffe4
| 697,196
|