content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def remove_object(objs, label, none_val=0):
    """Erase the object with id ``label`` by overwriting it with ``none_val``.

    Mutates ``objs`` in place (boolean-mask assignment) and returns True.
    """
    mask = objs == label
    objs[mask] = none_val
    return True
def fmt_date(convert_date):
"""
将时间格式如20160101转换为2016-01-01日期格式, 注意没有对如 201611
这样的做fix适配,外部需要明确知道参数的格式,针对特定格式,不使用时间api,
直接进行字符串解析,执行效率高
:param convert_date: 时间格式如20160101所示,int类型或者str类型对象
:return: %Y-%m-%d日期格式str类型对象
"""
if isinstance(convert_date, float):
# float先转换int
convert_date = int(convert_date)
convert_date = str(convert_date)
if len(convert_date) > 8 and convert_date.startswith('20'):
# eg '20160310000000000'
convert_date = convert_date[:8]
if '-' not in convert_date:
if len(convert_date) == 8:
# 20160101 to 2016-01-01
convert_date = "%s-%s-%s" % (convert_date[0:4],
convert_date[4:6], convert_date[6:8])
elif len(convert_date) == 6:
# 201611 to 2016-01-01
convert_date = "%s-0%s-0%s" % (convert_date[0:4],
convert_date[4:5], convert_date[5:6])
else:
raise ValueError('fmt_date: convert_date fmt error {}'.format(convert_date))
return convert_date | d03be09bdb9b737f9be6bcc4b1d48c35bca44dc6 | 46,823 |
def distanceSquared(p1, p2):
    """Squared Euclidean distance between two 2-D points.

    The square root is deliberately skipped: callers compare against a
    pre-squared fudge factor, so sqrt would be repeated wasted work.
    """
    return (p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2
def positions_at_t(points, t):
    """Evaluate every point's position at time ``t``.

    :param points: iterable of Point objects exposing ``at_time(t)``
    :param t: the time to evaluate at
    :return: list of position pairs, one per input point, in order
    """
    positions = []
    for point in points:
        positions.append(point.at_time(t))
    return positions
from collections import namedtuple
from typing import List
def collections_namedtuple(n: int, spreadsheet: List[str],
                           rows: List[List[str]]) -> float:
    """Average of the MARKS column over ``n`` student rows.

    ``spreadsheet`` lists the column names (must include 'MARKS');
    each row supplies the field values in the same order.

    >>> collections_namedtuple(2, ['ID', 'MARKS'],
    ...                        [['1', '10'], ['2', '20']])
    15.0
    """
    Student = namedtuple('Student', spreadsheet)  # type: ignore
    total = sum(int(Student(*row).MARKS) for row in rows)  # type: ignore
    return total / n
def calculateLevelAndFallFreq(score):
    """Return (level, fallFreq) for the given score.

    The level grows by one for every 10 points; fallFreq is how many
    seconds a falling piece waits before dropping one step, shrinking
    as the level rises.
    """
    level = int(score / 10) + 1
    return level, 0.27 - level * 0.02
def lookup_table(value, df):
    """Find ``value`` anywhere in the rows of a lookup dataframe.

    :param value: value to search for in any cell of a row
    :param df: dataframe containing the lookup table (must have a
        'Project ID' column)
    :return: 'Project ID' of the first matching row, or None when absent
    """
    for _, row in df.iterrows():
        if value in row.tolist():
            return row['Project ID']
    return None
import unicodedata
import re
def reduce_display_name(name):
    """Normalise a display name for comparison with a username.

    Strips diacritics (via NFD decomposition), collapses runs of
    whitespace into single underscores and lowercases the result.
    """
    decomposed = unicodedata.normalize('NFD', name)
    stripped = ''.join(c for c in decomposed if not unicodedata.combining(c))
    return re.sub(r'\s+', '_', stripped).lower()
def is_in(val, lvals):
    """Membership test that checks identity ('is') instead of equality.

    Replaces the standard 'in' operator for elements that overload '=='.
    Args:
        val: Value to check
        lvals: List of candidate values
    Returns:
        True if ``val`` is (identically) one of ``lvals``
    """
    for candidate in lvals:
        if candidate is val:
            return True
    return False
from typing import Tuple
def layer_column_names_additional_column(
    layer_column_names: Tuple[str, ...]
) -> Tuple[str, ...]:
    """Shared column names plus a trailing "additional" column."""
    return (*layer_column_names, "additional")
def base_digits_decoder(alist: list, b: int) -> int:
    """Inverse of 'to_base': fold base-``b`` digits into a base-10 int.

    ``alist`` holds the digits most-significant first; the result is
    accumulated with Horner's rule.
    """
    total = 0
    for digit in alist:
        total = total * b + digit
    return total
def fix_rectangle_overlap(rect1, rect2):
    """Minimum translation vector for rect1 that resolves its overlap with rect2.

    Arguments:
    rect1 -- The first rectangle, as (x0, y0, x1, y1).
    rect2 -- The second rectangle, same form as rect1.
    """
    ax0, ay0, ax1, ay1 = rect1
    bx0, by0, bx1, by1 = rect2
    # Penetration depth on each side (0 when already separated there).
    push_left = max(0, ax1 - bx0)
    push_right = min(0, ax0 - bx1)
    push_down = max(0, ay1 - by0)
    push_up = min(0, ay0 - by1)
    move_x = min(push_left, push_right, key=abs)
    move_y = min(push_down, push_up, key=abs)
    # Resolve along whichever axis needs the smaller correction.
    if abs(move_x) < abs(move_y):
        return (-move_x, 0)
    return (0, -move_y)
import torch
def pytorch_preprocess(batch):
    """
    Normalise a batch of images for torchvision's pretrained models.

    Maps inputs from [-1, 1] to [0, 1], clamps, then applies the
    per-channel ImageNet mean/std described in
    https://pytorch.org/docs/stable/torchvision/models.html

    :param batch: tensor of shape (N, 3, H, W) with values in [-1, 1]
    :return: normalised tensor of the same shape
    """
    batch = (batch + 1) * 0.5  # [-1, 1] -> [0, 1]
    # BUG FIX: the original stacked an always-empty list here
    # (torch.stack([]) raises on every call); the input is already a
    # batched tensor, so simply clamp it.
    batch = torch.clamp(batch, 0, 1)
    mean = torch.tensor([.485, .456, .406], dtype=batch.dtype, device=batch.device)[None, :, None, None]
    batch = batch.sub(mean)  # subtract per-channel mean
    std = torch.tensor([.229, .224, .225], dtype=batch.dtype, device=batch.device)[None, :, None, None]
    batch = batch.div(std)
    return batch
def bed_map_region_id_to_seq_id(in_bed_file):
    """
    Read in .bed file, and store for each region ID (column 4) the sequence
    ID (column 1)
    Return dictionary with mappings region ID -> sequence ID
    >>> test_bed = "test_data/test3.bed"
    >>> bed_map_region_id_to_seq_id(test_bed)
    {'CLIP2': 'chr1', 'CLIP1': 'chr2', 'CLIP3': 'chr1'}
    """
    regid2seqid_dic = {}
    # The context manager closes the file; the original's trailing
    # `f.closed` no-op expression has been removed.
    with open(in_bed_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            # column 1 = sequence ID, column 4 = region/site ID
            regid2seqid_dic[cols[3]] = cols[0]
    return regid2seqid_dic
import os
def count_files(dir_):
    """Number of entries in directory ``dir_``; None when it is not a directory."""
    if os.path.isdir(dir_):
        return len(os.listdir(dir_))
    return None
def get_used_materials(objects):
    """Collect the yabee names of materials used by mesh polygons.

    Non-mesh objects, out-of-range material indices and empty material
    slots are all skipped.
    """
    names = set()
    for obj in objects:
        if obj.type != 'MESH':
            continue
        materials = obj.data.materials
        for poly in obj.data.polygons:
            idx = poly.material_index
            if idx < len(materials) and materials[idx]:
                names.add(materials[idx].yabee_name)
    return names
import time
def _get_time_str():
    """Format the current time as an ISO-8601-style string.

    NOTE(review): the hardcoded '.000Z' suffix implies UTC, but
    time.strftime formats *local* time here — confirm callers expect that.
    """
    return time.strftime('%Y-%m-%dT%H:%M:%S.000Z')
import os
def _get_hash_from_path(p):
"""Extract hash from path."""
basename = os.path.basename(p)
return basename[0: basename.find(".story")] | 92ac8fa3083cdb0ee3cb16463a785325b0cb46a8 | 46,849 |
def base_id(nid):
    """Encode an ID value for storage in a BASE node.

    The transform is its own inverse, so it also decodes IDs already
    stored in a BASE node.
    """
    return -nid - 1
import os
def file_exists(path: str) -> bool:
    """Return True when ``path`` names an existing regular file.

    Args:
        path: File path to check.
    Returns:
        True for an existing file, False otherwise (including directories).
    """
    return os.path.isfile(path)
import sys
def check_candidate_option_type(option, value):
    """
    Verify that a candidate-file option value has an acceptable type.

    :param option: option name from the candidates file
    :param value: value assigned to the option
    :return: True when the value's type is appropriate, False when it is
        not; exits the process for unknown options or unhandled types
    """
    options_types = {'name': 'string',
                     'enable': 'string',
                     'candidate-type': 'string',
                     'investment-type': 'string',
                     'link': 'string',
                     'annual-cost-per-mw': 'non-negative',
                     'unit-size': 'non-negative',
                     'max-units': 'non-negative',
                     'max-investment': 'non-negative',
                     'relaxed': 'string',
                     'link-profile': 'string',
                     'already-installed-capacity': 'non-negative',
                     'already-installed-link-profile': 'string',
                     'has-link-profile': 'string'}
    obsolete_options = ["has-link-profile"]
    option_type = options_types.get(option)
    if option_type is None:
        print('check_candidate_option_type: %s option not recognized in candidates file.' % option)
        sys.exit(1)
    # Obsolete options are accepted with a warning.
    if option in obsolete_options:
        print('%s option is no longer used by antares-xpansion' % option)
        return True
    if option_type == 'string':
        return True
    if option_type == 'numeric':
        return value.isnumeric()
    if option_type == 'non-negative':
        try:
            return float(value) >= 0
        except ValueError:
            return False
    print('check_candidate_option_type: Non handled data type %s for option %s'
          % (option_type, option))
    sys.exit(1)
def normalise_text_score(query: str, score: float) -> float:
    """Approximate a mongo text score to the range [0, 1].

    Args:
        query: Query which was used
        score: Text score
    Returns:
        An approximation of the normalised score, guaranteed to lie in
        the closed interval [0, 1].
    """
    word_count = len(query.split())
    expected_max = (word_count + 1) * 0.5
    return min(score / expected_max, 1)
def get_photo_info(driver):
    """Get size, name and date of the currently displayed photo.

    :param driver: Selenium WebDriver with the photo-details panel open
    :return: tuple of (date, photo_name, size) strings on success;
        implicitly None when scraping fails (the exception is only printed)
    """
    try:
        # "rCexAf" is an obfuscated class name from the details-panel
        # markup (presumably Google Photos) — brittle if the UI changes.
        metadata = driver.find_elements_by_class_name("rCexAf")
        metadata = [m.text for m in metadata if m.text != '']
        # First non-empty row is the date; second holds "name\n...\nsize".
        date = metadata[0].replace(" ", "_").replace("\n", "_")
        photo_name = metadata[1].split('\n')[0]
        size = metadata[1].split('\n')[-1].replace(' ','')
        print(f"date: {date}, name: {photo_name}, size: {size}")
        return date, photo_name, size
    except Exception as e :
        # Broad catch: any scraping failure is logged and None returned.
        print(f"Error: {e}")
def is_abundant(number):
    """True when the proper divisors of ``number`` sum to more than it."""
    divisor_sum = sum(d for d in range(1, number // 2 + 1) if number % d == 0)
    return divisor_sum > number
def summarize_repos(events):
    """Markdown links for every distinct repo in ``events``, sorted.

    Each event's ``repo`` attribute is an (owner, name) pair.
    """
    link = '[{0}/{1}](https://github.com/{0}/{1})'
    unique_repos = sorted({event.repo for event in events})
    return [link.format(*repo) for repo in unique_repos]
import os
import json
def read_config(path):
    """Load settings.json from directory ``path`` and return it as a dict.

    Fails with an AssertionError when the settings file is missing.
    """
    settings_file = os.path.join(path, 'settings.json')
    assert os.path.exists(settings_file), "settings not found: '%s'" % settings_file
    with open(settings_file, 'r') as handle:
        return json.load(handle)
def parse_noun_line(line):
    """
    Parse a Leo dictionary noun entry such as:
        (aktive) Langzeitverbindung {f}
    into a [word, gender] list, e.g. ['Langzeitverbindung', 'f'].

    Any leading parenthesised qualifier is skipped; the gender is the
    text between the braces.
    """
    brace_open = line.find('{')
    brace_close = line.find('}')
    paren_close = line.find(')')
    word_start = paren_close + 2 if paren_close > -1 else 0
    word = line[word_start:brace_open - 1]
    gender = line[brace_open + 1:brace_close]
    return [word, gender]
def getUser(line):
    """Extract the username from an IRC-style line like ':nick!user@host ...'."""
    after_colon = line.split(":")[1]
    return after_colon.split("!")[0]
def filter_seq2seq_output(string_pred, eos_id=-1):
    """Filter the output until the first eos occurs (exclusive).

    Arguments
    ---------
    string_pred : list
        Output strings/ints predicted by the seq2seq system.
    eos_id : int, string
        The id of the eos.

    Returns
    -------
    list
        The prediction truncated before the first eos token.

    Example
    -------
    >>> string_pred = ['a','b','c','d','eos','e']
    >>> filter_seq2seq_output(string_pred, eos_id='eos')
    ['a', 'b', 'c', 'd']
    """
    if not isinstance(string_pred, list):
        raise ValueError("The input must be a list.")
    cut = len(string_pred)
    for position, token in enumerate(string_pred):
        if token == eos_id:
            cut = position
            break
    return string_pred[:cut]
import pathlib
import sys
import os
def get_src_path(user_flags, notebook):
    """Get path of source notebook based on user flags or the destination file.
    Args:
      user_flags: Command-line arguments
      notebook: Destination notebook used to select source notebook.
    Returns:
      A Path of the source-of-truth notebook.
    Exits the process (status 1) when the site root is not a directory or
    an explicit --src path does not exist.
    """
    if user_flags.site_root:
        site_root = pathlib.Path(user_flags.site_root)
    else:
        # Default: the "site" directory two levels above this script.
        site_root = pathlib.Path(__file__).parent.parent.joinpath("site")
    if not site_root.is_dir():
        print(f"Error: --site_root must be a directory: {site_root}",
              file=sys.stderr)
        sys.exit(1)
    if not user_flags.src:
        # Determine path from previous notebook and source language
        fp_relpath = notebook.path.relative_to(site_root)  # relative path
        # Drop the leading path component (presumably the language dir,
        # replaced by user_flags.lang below) — verify against callers.
        fp_relpath = pathlib.Path(*fp_relpath.parts[1:])
        return site_root.joinpath(user_flags.lang, fp_relpath)
    elif os.path.isdir(user_flags.src):
        # --src directory: look for a file with the notebook's name in it.
        return pathlib.Path(user_flags.src) / notebook.path.name
    elif os.path.isfile(user_flags.src):
        return pathlib.Path(user_flags.src)
    else:
        print(f"Error: File not found: {user_flags.src}", file=sys.stderr)
        sys.exit(1)
import random
def _random_subset(seq, m):
    """Taken from NetworkX
    Given a sequence seq, return a ransom subset of size m.
    Parameters
    ----------
    seq : list
        The population
    m : int
        The sample size
    Returns
    -------
    set
        The sample from seq of size m

    NOTE(review): samples with replacement into a set, so this loops
    forever when seq has fewer than m *distinct* elements — confirm
    callers guarantee len(set(seq)) >= m.
    """
    # Copyright (C) 2004-2016 by
    # Aric Hagberg <hagberg@lanl.gov>
    # Dan Schult <dschult@colgate.edu>
    # Pieter Swart <swart@lanl.gov>
    # All rights reserved.
    # BSD license.
    targets = set()
    while len(targets) < m:
        x = random.choice(seq)
        targets.add(x)
    return targets
def mean_of_targets(dataset):
    """
    Returns the mean of the targets in a dataset.
    Parameters
    ----------
    dataset : DenseDesignMatrix
    Returns
    -------
    mn : ndarray
        1-D vector whose entry i is the mean of target i
    """
    targets = dataset.y
    return targets.mean(axis=0)
import sqlite3
def create_connection(db_file):
    """Open and return a connection to the SQLite database ``db_file``."""
    try:
        conn = sqlite3.connect(db_file)
        print("Opened database successfully", type(conn))
    except Exception as err:
        raise err
    return conn
def populate(d, allow_overwrite=True):
    """
    Create a decorator factory that registers functions in ``d`` by name.

    Arguments
    ---------
    * d: a dict-like object to populate
    * allow_overwrite: if False, raise a ValueError when ``d`` already
      has an existing such key (default=True)
    """
    def populator(name):
        def populated(fn):
            if name in d and not allow_overwrite:
                raise ValueError("'{}' is already present".format(name))
            d[name] = fn
            return fn
        return populated
    return populator
def sequence_generator(n):
    """
    Generate the iterative sequence for a positive integer ``n`` as in the
    conjecture: halve even values, map odd values to 3n+1, stopping when
    the sequence reaches 1 or revisits a value.

    :param n: positive value (coerced with int() when not already an int)
    :return: (sequence list, True when its last element is 1)
    :raises ValueError: when n <= 0 (or int() cannot convert it)
    """
    if not isinstance(n, int):
        # int() raises if n cannot be converted.
        n = int(n)
    if n <= 0:
        raise ValueError("Input value has to be at least 1.")
    sequence = [n]
    current = n
    while current != 1:
        current = current // 2 if current % 2 == 0 else 3 * current + 1
        if current in sequence:
            # Cycle detected: record the repeat and stop.
            sequence.append(current)
            break
        sequence.append(current)
    return sequence, sequence[-1] == 1
import pyflakes.api
import pyflakes.reporter
def undefined_names(sourcecode):
    """
    Parses source code for undefined names

    :param sourcecode: Python source text to check (not executed)
    :return: set of names referenced but never defined in the code

    Example:
        >>> print(ub.repr2(undefined_names('x = y'), nl=0))
        {'y'}
    """
    # Pyflakes reports problems through a Reporter; capture its messages
    # in lists instead of letting them print to the given streams.
    class CaptureReporter(pyflakes.reporter.Reporter):
        def __init__(reporter, warningStream, errorStream):
            reporter.syntax_errors = []
            reporter.messages = []
            reporter.unexpected = []
        def unexpectedError(reporter, filename, msg):
            reporter.unexpected.append(msg)
        def syntaxError(reporter, filename, msg, lineno, offset, text):
            reporter.syntax_errors.append(msg)
        def flake(reporter, message):
            reporter.messages.append(message)
    names = set()
    reporter = CaptureReporter(None, None)
    # '_.py' is a dummy filename for the report; nothing is written to disk.
    pyflakes.api.check(sourcecode, '_.py', reporter)
    for msg in reporter.messages:
        # Keep only UndefinedName messages; their single arg is the name.
        if msg.__class__.__name__.endswith('UndefinedName'):
            assert len(msg.message_args) == 1
            names.add(msg.message_args[0])
    return names
import math
def solar_radius_vector_aus(eccentricity_earth_orbit, solar_true_anomaly):
    """Solar radius vector (Earth-Sun distance) in Astronomical Units.

    Computed from the eccentricity of Earth's orbit and the solar true
    anomaly (in degrees).
    """
    e = eccentricity_earth_orbit
    anomaly_rad = math.radians(solar_true_anomaly)
    return (1.000001018 * (1 - e ** 2)) / (1 + e * math.cos(anomaly_rad))
def _CppName(desc):
"""Return the fully qualified C++ name of the entity in |desc|."""
return '::'+desc.fqname.replace('.', '::') | bd5985321918850bfb1f095c1587028194e9739b | 46,875 |
def parse_geneseekr_profile(value):
    """
    Parse a GeneSeekr_Profile cell from combinedMetadata.csv into the list
    of recognised markers it contains, e.g. "invA;stn" -> ['invA', 'stn'].

    :param value: ';'-delimited string of marker names
    :return: list of known markers found in ``value``, in input order
    """
    known_markers = {'invA', 'stn', 'IGS', 'hlyA', 'inlJ',
                     'VT1', 'VT2', 'VT2f', 'uidA', 'eae'}
    return [marker for marker in value.split(';') if marker in known_markers]
def prune(trie, threshold):
    """
    Prune the branches that are connected to the trie by a prefix value lower than threshold.

    :param trie: trie object exposing ``_root``, node ``children`` dicts,
        node ``value`` attributes and slice-key deletion
        (``del trie[prefix:]``) — presumably a custom trie type; verify.
    :param threshold: minimum node value a child must have to survive
    :return: the same trie, pruned in place
    """
    # Breadth-first walk from the root; ``path`` accumulates the
    # space-joined key of the current node.
    queue = [("", trie._root)]
    while queue:
        path, local_node = queue.pop(0)
        branch2prune = []
        for child_key, child_node in local_node.children.items():
            key = path + " " + child_key
            # Strip the leading separator added for the root.
            prefix = key[1:]
            if child_node.value < threshold:
                branch2prune.append(prefix)
            else:
                queue.append((path + " " + child_key, child_node))
        # Delete after iteration so the children dict is not mutated
        # while being traversed.
        for prefix in branch2prune:
            del trie[prefix:]
    return trie
def _follow_path(json_data, json_path):
"""Get the value in the data pointed to by the path."""
value = json_data
for field in json_path.split("."):
value = value[field]
return value | 6c453125ba06a560b77d3f89ec4816a8393cd919 | 46,880 |
def epoch_timestamp_to_ms_timestamp(ts: int) -> int:
    """Convert an epoch timestamp in seconds to milliseconds.

    :param ts: epoch timestamp in seconds
    :return: the same instant expressed in integer milliseconds
    """
    return int(1000 * ts)
def _platform_toolchain_cmd_split(template):
"""
>>> cmds = _platform_toolchain_cmd_split(test_build_template)
>>> cmds["icepack"]
(2, ['icepack', '{build_name}.txt', '{build_name}.bin'])
>>> pprint.pprint(cmds["nextpnr-ice40"])
(1,
['nextpnr-ice40', '--json', '{build_name}.json', '--pcf', '{build_name}.pcf'])
>>> cmds["yosys"]
(0, ['yosys', '-q', '-l', '{build_name}.rpt', '{build_name}.ys'])
"""
cmds = {}
for i, cmdline in enumerate(template):
cmdline_parts = cmdline.split()
cmds[cmdline_parts[0]] = (i, list(cmdline_parts))
return cmds | 37ce56d4f43009289a32b31aa901acb987390cd3 | 46,884 |
def fst(pair):
    """Return the first element of ``pair``."""
    first = pair[0]
    return first
def public_download_url(file, **kwargs):
    """No public download URL is available; always return None."""
    return None
def config_section_data():
    """Produce the default configuration section for app.config,
    when called by `resilient-circuits config [-c|-u]`
    """
    # The template below is written to app.config verbatim, so its lines
    # intentionally start at column 0 inside the triple-quoted string.
    config_data = u"""[fn_ldap_search]
# LDAP server ip or fully qualified hostname
server=ldap.forumsys.com
port=389
# The domain setting must be set to a valid Windows domain if using NTLM authentication.
#domain=WORKGROUP
user=cn=read-only-admin,dc=example,dc=com
password=password
auth=SIMPLE
use_ssl=False
connect_timeout=10
    """
    return config_data
def read_input(datafile, classfile):
    """Read the data points file and the class id of each point.

    Args:
        datafile (str): Data points file (two floats per line).
        classfile (str): Point class file (one int per line).
    Returns:
        tuple: (points, classes) where points is a list of (index, x, y)
        and classes is a list of (index, class_id).
    """
    points = []
    with open(datafile, 'r') as handle:
        for index, line in enumerate(handle.readlines()):
            x, y = (float(token) for token in line.split())
            points.append((index, x, y))
    classes = []
    with open(classfile, 'r') as handle:
        for index, line in enumerate(handle.readlines()):
            classes.append((index, int(line)))
    return points, classes
def map_to_range(x: int, from_low: int, from_high: int, to_low: int, to_high: int) -> int:
    """
    Re-map a number from one range to another (Arduino-style map()).

    from_low maps to to_low, from_high to to_high, values in-between to
    values in-between. Out-of-range inputs are deliberately NOT clamped,
    because extrapolated values are sometimes intended and useful.
    Inspired by https://www.arduino.cc/reference/en/language/functions/math/map/

    :param x: The number to map
    :param from_low: Lower bound of the value's current range
    :param from_high: Upper bound of the value's current range
    :param to_low: Lower bound of the value's target range
    :param to_high: Upper bound of the value's target range
    :return: The re-mapped value, truncated to int
    """
    # Keep the original evaluation order so float rounding (and the final
    # int() truncation) match exactly.
    scaled = (x - from_low) * (to_high - to_low) / (from_high - from_low)
    return int(scaled + to_low)
import re
def has_cyrillic(text):
    """
    True when ``text`` contains at least one Cyrillic character.

    The range U+0400-U+04FF covers the full Cyrillic script block,
    including extended letters such as ё, Є, ў.

    :param text: The text to validate
    :return: True if any Cyrillic characters are present
    """
    return re.search('[\u0400-\u04FF]', text) is not None
import numpy as np
def tensor_product(g1, g2):
    """
    Matrix-style product of two tensors over their two leading axes.

    :param g1: first tensor
    :param g2: second tensor
    :return: product between the two tensors

    NOTE(review): the output is allocated with numpy.zeros(g1.shape),
    which is float64 regardless of the inputs' dtype, and the loops
    assume the leading dimensions of g1/g2 are compatible (square in
    practice) — confirm with callers.
    """
    g1 = np.array(g1)
    g2 = np.array(g2)
    g = np.zeros(g1.shape)
    for i in range(g1.shape[0]):
        for j in range(g2.shape[0]):
            g[i][j] = 0  # redundant: zeros() already cleared this entry
            for k in range(g1.shape[0]):
                # Elementwise multiply handles any trailing grid axes.
                g[i][j] = g[i][j] + np.multiply(g1[i][k], g2[k][j])
    # g = g1 * g2 (dim of the tensor: 3*3*xg*yg*zg)
    return g
def str_aligned(results, header=None):
    """
    Format a 2-D sequence of arbitrary values as an aligned, table-like
    string. Each column is right-padded to its widest cell (or header).

    For example, [["a","b","cz"],["d","ez","f"],[1,2,3]] renders as::

        a  b cz
        d ez  f
        1  2  3

    Args:
        results: 2d sequence of arbitrary types.
        header: optional header row; when given it is printed first,
            followed by a dashed separator line.
    Returns:
        Aligned string output in a table-like format.
    """
    columns = list(zip(*results))
    widths = []
    for index, column in enumerate(columns):
        width = max(len(str(cell)) for cell in column)
        if header is not None:
            width = max(width, len(str(header[index])))
        widths.append(width)
    row_format = " ".join("%" + str(width) + "s" for width in widths)
    lines = []
    if header is not None:
        header_line = row_format % tuple(header)
        lines.append(header_line)
        lines.append("-" * len(header_line))
    lines.extend(row_format % tuple(row) for row in results)
    return "\n".join(lines)
def expected_utility_a(copula, lottery):
    """Calculate the expected utility for lottery A.

    :param copula: utility object exposing ``evaluate(x, y, t)`` — the
        arguments appear to be (payoff now, payoff later, delay t);
        verify against the copula implementation.
    :param lottery: questionnaire item number (1-45)
    :return: expected utility of lottery A for that item
    :raises AssertionError: for lottery numbers outside 1-45
    """
    # TEMPORAL DECISIONS
    if lottery in [1, 2, 3, 4, 5, 19, 20, 21, 22, 23]:
        rslt = copula.evaluate(50, 0, t=0)
    # Note: question 13 is temporal but t=0. So it is handled under risky choices.
    elif lottery in [7, 8, 9, 10, 11, 25, 26, 27, 28, 29]:
        rslt = copula.evaluate(0, 50, t=0)
    elif lottery in [6, 16, 24]:
        rslt = copula.evaluate(50, 0, t=6)
    elif lottery in [12, 30]:
        rslt = copula.evaluate(0, 50, t=6)
    elif lottery == 14:
        rslt = copula.evaluate(50, 0, t=1)
    elif lottery == 15:
        rslt = copula.evaluate(50, 0, t=3)
    elif lottery == 17:
        rslt = copula.evaluate(50, 0, t=12)
    elif lottery == 18:
        rslt = copula.evaluate(50, 0, t=24)
    # RISKY CHOICES: each branch is a 50/50 gamble between two outcomes.
    elif lottery == 13:
        rslt = copula.evaluate(50, 0, t=0)
    elif lottery == 31:
        rslt = 0.50 * copula.evaluate(15, 0, t=0) + 0.50 * copula.evaluate(20, 0, t=0)
    elif lottery == 32:
        rslt = 0.50 * copula.evaluate(30, 0, t=0) + 0.50 * copula.evaluate(40, 0, t=0)
    elif lottery == 33:
        rslt = 0.50 * copula.evaluate(60, 0, t=0) + 0.50 * copula.evaluate(80, 0, t=0)
    elif lottery == 34:
        rslt = 0.50 * copula.evaluate(0, 15, t=0) + 0.50 * copula.evaluate(0, 20, t=0)
    elif lottery == 35:
        rslt = 0.50 * copula.evaluate(0, 30, t=0) + 0.50 * copula.evaluate(0, 40, t=0)
    elif lottery == 36:
        rslt = 0.50 * copula.evaluate(0, 60, t=0) + 0.50 * copula.evaluate(0, 80, t=0)
    elif lottery == 37:
        rslt = 0.50 * copula.evaluate(15, 25, t=0) + 0.50 * copula.evaluate(25, 15, t=0)
    elif lottery == 38:
        rslt = 0.50 * copula.evaluate(30, 50, t=0) + 0.50 * copula.evaluate(50, 30, t=0)
    elif lottery == 39:
        rslt = 0.50 * copula.evaluate(60, 100, t=0) + 0.50 * copula.evaluate(100, 60, t=0)
    # Items 40-45 are compound gambles: half chance of a sure 30, half
    # chance of a nested gamble.
    elif lottery == 40:
        rslt = 0.50 * copula.evaluate(30, 0, t=0) + \
               0.50 * (0.50 * copula.evaluate(54, 0, t=0) + 0.50 * copula.evaluate(26, 0, t=0))
    elif lottery == 41:
        rslt = 0.50 * copula.evaluate(30, 0, t=0) + \
               0.50 * (0.80 * copula.evaluate(33, 0, t=0) + 0.20 * copula.evaluate(68, 0, t=0))
    elif lottery == 42:
        rslt = 0.50 * copula.evaluate(30, 0, t=0) + \
               0.50 * (0.80 * copula.evaluate(47, 0, t=0) + 0.20 * copula.evaluate(12, 0, t=0))
    elif lottery == 43:
        rslt = 0.50 * copula.evaluate(0, 30, t=0) + \
               0.50 * (0.50 * copula.evaluate(0, 54, t=0) + 0.50 * copula.evaluate(0, 26, t=0))
    elif lottery == 44:
        rslt = 0.50 * copula.evaluate(0, 30, t=0) + \
               0.50 * (0.80 * copula.evaluate(0, 33, t=0) + 0.20 * copula.evaluate(0, 68, t=0))
    elif lottery == 45:
        rslt = 0.50 * copula.evaluate(0, 30, t=0) + \
               0.50 * (0.80 * copula.evaluate(0, 47, t=0) + 0.20 * copula.evaluate(0, 12, t=0))
    else:
        raise AssertionError
    return rslt
def make_prev_next(seg_table):
    """
    Expand a two-column segmentation table into four columns that carry
    the previous and next group as context.

    :param seg_table: lines of the form "group\\tsegmentation", e.g.:
        They    They
        don't   do|n't
        know    know
    :return: lines "prev\\tnext\\tgroup\\tsegmentation", where the first
        row's prev and the last row's next are "_":
        _       don't   They    They
        They    know    don't   do|n't
        don't   _       know    know
    """
    rows = [line.split('\t') for line in seg_table]
    last = len(rows) - 1
    out_segs = []
    for index, (current_group, segmentation) in enumerate(rows):
        prev_group = rows[index - 1][0] if index > 0 else "_"
        next_group = rows[index + 1][0] if index < last else "_"
        out_segs.append("\t".join([prev_group, next_group, current_group, segmentation]))
    return out_segs
import argparse
def argument_parse():
    """Parse the command-line arguments for this tool.

    Returns a 4-tuple (invcf, c_exc, recal, bed): the snpeff-annotated
    VCF, the CAP-PT excel file, the recal BAM (for IGV screenshots) and
    the variant-calling region BED. All four flags are required, so
    argparse exits the process when any is missing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-v',
        '--input-vcf',
        dest='invcf',
        help='A snpeff annotated vcf',
        required=True
    )
    parser.add_argument(
        '-c',
        '--cap-excel',
        dest='c_exc',
        help='A CAP-PT excel file',
        required=True
    )
    parser.add_argument(
        '-r',
        '--recal-bam',
        dest='recal',
        help='A recal bam for making a screenshot of IGV',
        required=True
    )
    parser.add_argument(
        '-b',
        '--bed',
        dest='bed',
        help='A bed region of calling variant',
        required=True
    )
    args = parser.parse_args()
    invcf = args.invcf
    c_exc = args.c_exc
    recal = args.recal
    bed = args.bed
    return invcf, c_exc, recal, bed
def _coerce_to_number(value):
"""Attempt to coerce to a 'number' value.
Since json schema spec is loose here, we'll return the int value
if it's equal to the float value, otherwise give you a float.
"""
if int(value) == float(value):
return int(value)
return float(value) | ec04e8116db7571f9d7edc38df785be49d8ed6fa | 46,898 |
import re
def getTopBillLevel(dirName: str):
    """
    Check whether a path is the top level of a bill, e.g.
    ../../congress/data/116/bills/hr/hr1

    Args:
        dirName (str): path to match
    Returns:
        [bool]: True if the path is a top level (which will contain
        data.json); False otherwise
    """
    parts = dirName.split('/')
    looks_like_bill = re.match(r'[a-z]+[0-9]+', parts[-1]) is not None
    return looks_like_bill and parts[-3] == 'bills'
import os
def mux(decrypted_video_filepath, decrypted_audio_filepath, merged_filepath):
    """
    Merge (mux) a decrypted video stream and audio stream into one file
    with ffmpeg, copying codecs without re-encoding.

    @author Jayapraveen

    Returns True when ffmpeg exits with status 0, False otherwise.
    NOTE(review): the command is built by string interpolation and run
    through the shell — paths containing quotes could break or inject
    the command; consider subprocess.run with a list.
    """
    print(decrypted_video_filepath, decrypted_audio_filepath, merged_filepath)
    if os.name == "nt":
        command = f"ffmpeg -y -i \"{decrypted_video_filepath}\" -i \"{decrypted_audio_filepath}\" -c copy \"{merged_filepath}\""
    else:
        # 'nice -n 7' lowers ffmpeg's priority on POSIX systems.
        command = f"nice -n 7 ffmpeg -y -i \"{decrypted_video_filepath}\" -i \"{decrypted_audio_filepath}\" -c copy \"{merged_filepath}\""
    retCode = os.system(command)
    if retCode == 0:
        return True
    else:
        return False
def parse_feature_table(ft_file):
    """Parse all available features and qualifiers from the FT definition.

    This is ugly and parses straight out of the HTML dump, but it is much
    easier than extracting the same data from the specs. The table starts
    after a line beginning with 'QUALIFIER FEATURE KEY' and ends at the
    first blank line.

    :param ft_file: path to the FT definition text
    :return: (sorted unique feature keys, sorted qualifier keys with
        leading '/' stripped)
    """
    feature_keys = []
    qual_keys = []
    with open(ft_file) as handle:
        in_table = False
        for line in handle:
            if in_table:
                if not line.strip():
                    in_table = False
                else:
                    qual_key, feature_key = line.strip().split()
                    qual_keys.append(qual_key)
                    feature_keys.append(feature_key)
            elif line.startswith('QUALIFIER FEATURE KEY'):
                in_table = True
    # Dedup happens before the slash strip, matching the original order
    # of operations.
    qual_keys = sorted(k.replace('/', '') for k in set(qual_keys))
    feature_keys = sorted(set(feature_keys))
    return feature_keys, qual_keys
def css_compatible(name):
    """Is the name suitable for use as a CSS class name?

    This is rough and ready: only alphanumerics and '_' are accepted.
    """
    return all(c.isalnum() or c == '_' for c in name)
import socket
def record_exists(record: str) -> bool:
    """
    Determine whether a DNS record resolves via getaddrinfo.
    """
    try:
        socket.getaddrinfo(record, None)
        return True
    except socket.gaierror:
        return False
import numpy
def fountain_np(num):
    """Vectorised (ufunc-based) initialisation of fountain particle data.

    Returns (pos, col, vel): three float32 arrays of shape (num, 4).
    Particles start on a jittered ring in the z=0 plane, all green, with
    outward xy velocity, constant upward z velocity and a random w term.
    """
    pos = numpy.empty((num, 4), dtype=numpy.float32)
    col = numpy.empty((num, 4), dtype=numpy.float32)
    vel = numpy.empty((num, 4), dtype=numpy.float32)
    theta = numpy.arange(0., num) * 2.001 * numpy.pi / num
    pos[:, 0] = numpy.sin(theta)
    pos[:, 0] *= numpy.random.random_sample((num,)) / 3. + .2
    pos[:, 1] = numpy.cos(theta)
    pos[:, 1] *= numpy.random.random_sample((num,)) / 3. + .2
    pos[:, 2] = 0.
    pos[:, 3] = 1.
    col[:] = (0., 1., 0., 1.)  # RGBA green
    vel[:, 0] = pos[:, 0] * 2.
    vel[:, 1] = pos[:, 1] * 2.
    vel[:, 2] = 3.
    vel[:, 3] = numpy.random.random_sample((num,))
    return pos, col, vel
import os
import json
def loadJSONFile(filename):
    """Load a JSON file and return its contents as a Python dict.

    Returns an empty dict when the file does not exist or does not contain
    valid JSON, preserving the best-effort contract of the original.
    Uses EAFP instead of an exists-check, which avoids the race between
    checking and opening the file.
    """
    try:
        with open(filename, "rb") as content_file:
            return json.load(content_file)
    except FileNotFoundError:
        return {}
    except ValueError:
        # json.JSONDecodeError is a ValueError subclass; malformed content
        # yields an empty dict rather than propagating.
        return {}
import torch
def displacement_error(pred_traj, pred_traj_gt, mode='average'):
    """
    Input:
    - pred_traj: Tensor of shape (m, seq_len, 2). Predicted trajectory.
    - pred_traj_gt: Tensor of shape (m, seq_len, 2). Ground truth
    predictions.
    - mode: Can be sum or average
    Output:
    - loss: gives the eculidian displacement error
    Raises:
    - ValueError: if mode is neither 'sum' nor 'average' (the original
      silently returned None in that case).
    """
    m, seq_len, _ = pred_traj.size()
    diff = pred_traj_gt - pred_traj
    # Euclidean distance at each timestep, summed over the sequence -> (m,)
    loss = torch.sqrt((diff ** 2).sum(dim=2)).sum(dim=1)
    # Mean over the m people -> scalar
    loss = torch.sum(loss) / m
    if mode == 'sum':
        return loss  # a tensor
    elif mode == 'average':
        return loss / seq_len
    raise ValueError("mode must be 'sum' or 'average', got {!r}".format(mode))
def add_reference_context_args(parser):
    """Extend an ArgumentParser with reference-transcript options.

    Adds the ``--reference-context-size`` integer flag (default 30) inside a
    "Reference Transcripts" argument group and returns that group.
    """
    group = parser.add_argument_group("Reference Transcripts")
    help_text = (
        "Number of nucleotides used to match assembled sequence to "
        "reference transcript to establish reading frame.")
    group.add_argument(
        "--reference-context-size",
        type=int,
        default=30,
        help=help_text)
    return group
def convert_to_float(tensor, half=False):
    """Cast *tensor* to float16 when *half* is true, otherwise float32."""
    if half:
        return tensor.half()
    return tensor.float()
def convert_time_to_date_str(time_obj, format="%Y-%m-%d"):
    """Format a datetime as a date string (ISO ``YYYY-MM-DD`` by default).

    :param time_obj: datetime-like object supporting ``strftime``,
                     precise to the day
    :param format: strftime pattern to apply
    :return: the formatted date string
    """
    formatted = time_obj.strftime(format)
    return formatted
import numpy
def covfnc2(x):
    """oscillatory choice (more optimistic)"""
    oscillation = numpy.sinc(x / 0.5 + 0.25)
    decay = numpy.exp(-x / 20.0)
    return 0.004 * oscillation + 0.0008 * decay
def generate_words(words):
    """Create a list of words with their list index appended.

    e.g. ``["a", "b"]`` -> ``["a0", "b1"]``.  Uses ``enumerate`` instead of
    the redundant ``zip(words, range(len(words)))``.
    """
    return ["{}{}".format(word, index) for index, word in enumerate(words)]
from typing import Sequence
from typing import Tuple
def get_range(shape: Sequence[int], itemsize: int, strides: Sequence[int]) -> Tuple[int, int]:
    """
    Given an array shape, item size (in bytes), and a sequence of strides,
    returns a pair ``(min_offset, max_offset)``,
    where ``min_offset`` is the minimum byte offset of an array element,
    and ``max_offset`` is the maximum byte offset of an array element plus itemsize.
    """
    assert len(strides) == len(shape)
    # The byte address of element (i1, i2, ...) is sum(i_k * stride_k) with
    # 0 <= i_k <= length_k - 1.  Since that is linear in each index, the
    # extrema lie at the interval endpoints, so each axis contributes
    # independently: a negative stride lowers the minimum, a positive one
    # raises the maximum.
    min_offset = 0
    max_offset = 0
    for length, stride in zip(shape, strides):
        edge = (length - 1) * stride
        if edge < 0:
            min_offset += edge
        else:
            max_offset += edge
    return min_offset, max_offset + itemsize
def get_P_X_uncond(number_of_symbols):
    """
    Compute P(X), the probability of the current activity using
    the plug-in estimator.

    Generalized to any number of symbol counts (the original hard-coded the
    first two entries and recomputed the sum three times); for two-element
    input the result is identical.
    """
    total = sum(number_of_symbols)
    return [count / total for count in number_of_symbols]
def hello():
    """Example handler - serves a greeting when the user visits the Pi's address."""
    return "Hello world"
def linear_search_recursive(array, item, index=0):
    """Recursively search *array* for *item*.

    Returns the index of the first occurrence, or None when *item* is None
    or not present.  (The original raised IndexError on a missing item by
    recursing past the end of the list; its "O(1)" comment was also wrong —
    each call is O(1) but the whole search is O(n).)
    """
    if item is None:
        return None
    if index >= len(array):
        # Ran off the end without finding the item.
        return None
    if array[index] == item:
        return index
    return linear_search_recursive(array, item, index + 1)
# once implemented, change linear_search to call linear_search_recursive
# to verify that your recursive implementation passes all tests
import os
import argparse
def check_key(file):
    """ Check if file exist, empty, or over max size.

    Validates that *file* is an existing RSA private key between 1 byte and
    2000 bytes; returns the path on success, raises
    argparse.ArgumentTypeError otherwise.
    """
    if not os.path.isfile(file):
        raise argparse.ArgumentTypeError("{} does not exist".format(file))
    FIRSTLINE = "-----BEGIN RSA PRIVATE KEY-----"
    LASTLINE = "-----END RSA PRIVATE KEY-----"
    size = os.path.getsize(file)
    if size == 0 or size > 2000:
        raise argparse.ArgumentTypeError(
            "size of {} is {} the key file size must be greater than 0 and less than 2k!".format(file, size))
    with open(file, "r") as key:
        key_lines = key.readlines()
    # PEM armor must open the first line and close the last one.
    if FIRSTLINE not in key_lines[0] or LASTLINE not in key_lines[-1]:
        raise argparse.ArgumentTypeError("{} is not an RSA private key".format(file))
    return file
def escape(s):
    """Escape content of strings which will break the api using html entity type escaping"""
    # Order matters: '&' first so later entities are not double-escaped,
    # and '\r\n' before the single characters so CRLF becomes one space.
    replacements = (
        ("&", "&amp;"),
        ("\r\n", " "),
        ("\n", " "),
        ("\r", " "),
        ("(", "&#40;"),
        (")", "&#41;"),
        (",", "&#44;"),
        ("§", "&#167;"),
    )
    for old, new in replacements:
        s = s.replace(old, new)
    return s
from datetime import datetime
def load_data(spark, log, config, data_frames):
    """
    Collect data locally and write to CSV.
    Currently a template: only logs start/end timestamps.
    :return: None
    """
    log.info(f"*** load_data starts: {datetime.now()}")
    # LOGIC GOES HERE
    # EXAMPLE:
    # df_writer(
    #     data_frames["foo"],
    #     file_path="{}/bar".format(config["export_path"]),
    #     header=True,
    #     mode="overwrite",
    #     separator=",",
    # )
    log.info(f"*** load_data ends: {datetime.now()}")
    return None
import subprocess
import sys
def shell_command(cmd):
    """Execute and return the output of a shell command.

    *cmd* may be a string (run through the shell) or an argv list
    (run directly).  Returns stdout decoded to str.
    """
    proc = subprocess.Popen(
        cmd,
        shell=isinstance(cmd, str),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, _err = proc.communicate()
    if isinstance(out, str):
        return out
    return out.decode(sys.stdout.encoding or 'iso8859-1')
def grad_refactor_2(a, b):
    """ if_test """
    def scale(x):
        # closure over b: scale(x) == x * b
        return x * b
    return scale(b) * scale(a)
def factorizable(n, factors, sort_factors=True):
    """
    Check if there is a way to factorize n by the factors passed as a second argument.
    Factors could be non-prime.
    As the factors could be non-prime, we can't just divide by all the factors one by one
    until there are no factors left or we got 1. We have to check all possible orders of
    factorization, so we divide n by every applicable factor, collect the quotients, and
    repeat on them breadth-first until a quotient equals a factor or nothing is left.

    Fixes over the original:
    - integer (floor) division instead of true division, which produced
      floats and loses precision for large n;
    - factor 1 is skipped (it never reduces the value; the original looped
      forever on it);
    - quotients are deduplicated per level, avoiding exponential re-work.
    """
    if not factors:
        return False
    if sort_factors:
        factors = sorted(set(factors))
    candidates = {n}  # numbers still to be factorized; start from n itself
    while candidates:  # empty means no full factorization exists
        next_candidates = set()
        for value in candidates:
            for f in factors:
                if value == f:  # fully factorized!
                    return True
                if f == 1:
                    # Dividing by 1 never reduces the value; skipping it
                    # avoids an infinite loop.
                    continue
                if f <= value:
                    if value % f == 0:
                        # value/f must in turn be factorizable by factors
                        next_candidates.add(value // f)
                else:
                    # factors are sorted: the rest are too large to divide value
                    break
        candidates = next_candidates
    return False
def add_domain(user):
"""
Helper function that appends @linaro.org to the username. It does nothing if
it is already included.
"""
if '@' not in user:
user = user + "@linaro.org"
return user | 505561ae6506226e16373b611a4e296351278b68 | 46,935 |
import os
import re
def table_name_from_blob(blob_name: str, file_extension: str):
    """ Make a BigQuery table name from a blob name.
    :param blob_name: the blob name.
    :param file_extension: the file extension of the blob.
    :return: the table name.
    :raises ValueError: when the file name does not contain the extension.
    """
    assert '.' in file_extension, 'file_extension must contain a .'
    file_name = os.path.basename(blob_name)
    # re.escape so the extension's '.' matches literally instead of "any
    # character" (the original interpolated it into the pattern unescaped).
    match = re.match(fr'.+?(?={re.escape(file_extension)})', file_name)
    if match is None:
        raise ValueError(f'Could not find table name from blob_name={blob_name}')
    return match.group(0)
import subprocess
import sys
def check_command(command_name):
    """
    Function to check for the presence of a command
    Args:
        command_name (str): command name
    Returns:
        str: absolute path for command
    (Exits the process with status 1 when `which` finds nothing.)
    """
    proc = subprocess.Popen(
        "which {0}".format(command_name),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    stdout, _stderr = proc.communicate()
    stdout = stdout.decode("utf-8")
    if not stdout:
        sys.stderr.write("ERROR: Invalid command ({0}).\n Check PATH or installed in the system.\n".format(command_name))
        sys.exit(1)
    return stdout.rstrip("\r\n")
import re
from typing import OrderedDict
def split_tags(tags, separator=','):
    """
    Splits string tag list using comma or another separator char, maintain
    order and removes duplicate items.
    @param tags List of tags separated by attribute separator (default: ,)
    @param separator Separator char.
    @return Ordered list of tags.
    """
    if not tags:
        return []
    # Collapse whitespace around (possibly repeated) separators.
    # Raw strings fix the invalid '\s' escape warnings of the original.
    tags = re.sub(r'\s*{0}+\s*'.format(re.escape(separator)), separator, tags)
    tags = re.sub(r'[\n\t\r]', '', tags)
    parts = filter(None, tags.strip().split(separator))
    # dict insertion order dedupes while preserving order; return a real
    # list as documented (the original leaked an odict KeysView).
    return list(OrderedDict.fromkeys(parts))
import urllib.request
import ssl
def urlinfo(url):
    """
    Returns the headers for the web page at ``url``.
    The headers are returned as a dictionary.
    If there is no web page at url a ``URLError``.  If the url is malformed,
    it raises a ``ValueError`` instead.
    :param url: The web page url
    :type url: ``str``
    :return: The headers for the web page at ``url`` if it exists.
    :rtype: ``dict``
    """
    # The fact that an "hidden" function is recommended by a PEP is a crime
    context = ssl._create_unverified_context()
    response = urllib.request.urlopen(url, context=context)
    headers = response.info()
    return {name: value for name, value in headers.raw_items()}
def is_string(atype):
    """Return True when the given type object is ``str``."""
    return str == atype
from textwrap import dedent
from datetime import datetime
def generate_header(model_name: str, use_async: bool) -> str:
    """Generates the header for a Python module.
    Args:
        model_name (str): The name of the system model the header is generated for.
        use_async (bool): True if asynchronous code should be generated, false otherwise.
    Returns:
        str: The Python code for the header.
    """
    # The asyncio variant only differs in the sub-package the names come from.
    pkg = 'asyncio.' if use_async else ''
    names = [
        'UserLevel', 'Client', 'DecopBoolean', 'DecopInteger', 'DecopReal',
        'DecopString', 'DecopBinary', 'MutableDecopBoolean',
        'MutableDecopInteger', 'MutableDecopReal', 'MutableDecopString',
        'MutableDecopBinary', 'Connection', 'NetworkConnection',
        'SerialConnection', 'DecopError', 'DeviceNotFoundError',
    ]
    lines = ["# Generated from '{}' on {}".format(model_name, datetime.now()),
             "from typing import Tuple"]
    lines.extend("from toptica.lasersdk.{}client import {}".format(pkg, name)
                 for name in names)
    return "\n".join(lines) + "\n"
from typing import Sequence
def default_cleaner(quotes: Sequence[str]) -> Sequence[str]:
    """
    Default cleaner function used by bot instance.
    Drops falsy entries and strips surrounding whitespace from the rest.
    :param quotes: Sequence of quotes which are to be pre-processed.
    :return: processed quotes.
    """
    return [quote.strip() for quote in quotes if quote]
def group_days_by(days, criterion):
    """
    Group the given vector of days according to the given criterion.
    Parameters
    ----------
    days: pd.DatetimeIndex
    criterion: str
        Indicates how to group the given days. It can be either "year" or "month" or "season".
        (The meteorological seasons are considered, and not the astronomical ones)
    Returns
    ----------
    list
        List of pairs (group label, pd.DatetimeIndex of days in that group),
        ordered by first appearance after sorting the days.
    Raises
    ----------
    ValueError
        When `criterion` is neither "year" nor "month" nor "season".
    Notes
    ----------
    If `criterion` is "month" or "season", days of different years may end
    up grouped together.
    """
    days = days.sort_values()
    if criterion == "year":
        return [(str(y), days[days.year == y]) for y in days.year.drop_duplicates()]
    if criterion == "month":
        month_names = ["January", "February", "March", "April", "May", "June",
                       "July", "August", "September", "October", "November",
                       "December"]
        return [(month_names[m - 1], days[days.month == m])
                for m in days.month.drop_duplicates()]
    if criterion == "season":
        season_names = ["Winter", "Spring", "Summer", "Fall"]
        # month -> meteorological season (Dec/Jan/Feb -> Winter, etc.)
        day_seasons = days.month.map(lambda m: season_names[m % 12 // 3])
        return [(s, days[list(day_seasons == s)])
                for s in day_seasons.drop_duplicates()]
    raise ValueError("criterion must be either \"year\" or \"month\" or \"season\" ")
def arcgiscache_path(tile_coord):
    """Build the ArcGIS cache path L<level>/R<row hex>/C<col hex> for a tile.

    >>> arcgiscache_path((1234567, 87654321, 9))
    'L09/R05397fb1/C0012d687'
    """
    x, y, level = tile_coord
    return 'L%02d/R%08x/C%08x' % (level, y, x)
def get_config_item(config, item, default=None, replace_char='_'):
    """
    Look up *item* in *config*; string values are lower-cased with spaces
    replaced by *replace_char*, other values are returned unchanged.

    Args:
        config: mapping to read from
        item: key to look up
        default: value returned when the key is missing
        replace_char: replacement for spaces in string values
    Returns:
        Configuration item (normalized when it is a string)
    """
    value = config.get(item, default)
    # isinstance instead of type(...) == str: idiomatic, and also covers
    # str subclasses.
    if isinstance(value, str):
        value = value.replace(" ", replace_char).lower()
    return value
def urlpath2(url: bytes) -> bytes:
    """ Get url's path(strip params) """
    path, _, _query = url.partition(b'?')
    return path
import re
def splice_hyphenated_word(word):
    """Replace ASCII hyphen (U+002D) and em dash (U+2014) with spaces.

    :param word:
    :return:
    >>> splice_hyphenated_word('fortis-eum')
    'fortis eum'
    >>> splice_hyphenated_word('prorogabatur—P')
    'prorogabatur P'
    """
    # str.translate does the per-character substitution in one pass.
    return word.translate({45: ' ', 8212: ' '})
def interval_days(iv):
    """ Compute number of days in a roundup.date Interval. The
        difference should be computed from two dates (without time)
    >>> D = Date
    >>> I = Interval
    >>> interval_days (D ('2014-01-07') - D ('2013-01-07'))
    365
    >>> interval_days (D ('2014-01-07') - D ('2012-01-07'))
    731
    >>> interval_days (I ('23d'))
    23
    >>> interval_days (I ('-23d'))
    -23
    >>> interval_days (D ('2012-01-07') - D ('2014-01-07'))
    -731
    """
    sign, years, months, day_count = iv.get_tuple()[:4]
    # A pure day interval: sign is +/-1 and the year/month parts are zero.
    assert abs(sign) == 1
    assert years == 0
    assert months == 0
    return day_count * sign
def leading_zeros(val, n):
    """ Return str(val) left-padded with zeros to width n (unchanged if longer). """
    return str(val).rjust(n, '0')
import re
import os
def get_files(path, pattern):
    """
    Recursively find all files rooted in <path> that match the regexp <pattern>.

    As in the original, the top-level path itself is tested with re.match
    while directory entries are tested with re.search.  Fixes:
    - the recursion guard checked ``os.path.isdir(path)`` (the parent,
      always true there) instead of the entry, so every non-matching file
      triggered a pointless recursive call;
    - paths are built with os.path.join, so a trailing '/' on *path* is no
      longer required.
    """
    found = []
    # base case: path is just a file
    if os.path.isfile(path):
        if re.match(pattern, os.path.basename(path)) is not None:
            found.append(path)
        return found
    # general case
    if not os.path.isdir(path):
        return found
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isfile(full):
            if re.search(pattern, entry) is not None:
                found.append(full)
        elif os.path.isdir(full):
            found.extend(get_files(full, pattern))
    return found
def dependent_object():
    """Access an instance of an object having dependencies.
    """
    def traverse(self):
        # Single-element traversal: the object yields only itself.
        yield self

    # Build the stand-in class dynamically; attributes match the original.
    Obj = type('Obj', (object,), {
        'dep_type': 'test',
        'val': 'r',
        'run': 2,
        'traverse': traverse,
        '__str__': lambda self: 'Test_object',
    })
    return Obj()
def get_vaf(mutations):
    """
    Given the list of mutations in the form
    <TRANSCRIPT_1>_X123Y#0.56,<TRANSCRIPT_2>_X456Y#0.91,etc -> for SNVS
    <5'TRANSCRIPT_1>-<3'TRANSCRIPT_1>_FUSION_Junction:X-Spanning:Y,\
    <5'TRANSCRIPT_2>-<3'TRANSCRIPT_2>_FUSION_Junction:A-Spanning:B,etc -> for FUSIONS
    return it
    in the form 0.XX for an SNV or 0.0 for a FUSION
    :param str mutations: The mutations covered by the input IAR
    :return: The VAF
    :rtype: float
    >>> get_vaf('ENST1231.1_S123K#0.56')
    0.56
    >>> get_vaf('ENST1231.1_S123K#0.56,ENST1211.1_S143K#0.61')
    0.56
    >>> get_vaf('ENST1231.1_S123K#0.43_K124S#0.61,ENST1211.1_S143K#0.43_K144S#0.61')
    0.43
    >>> get_vaf('ENST1231.1-ENST4564.2_FUSION_Junction:5-Spanning:10')
    0.0
    """
    lowest = 1.0
    for entry in mutations.split(','):
        if 'FUSION' in entry:
            return 0.0
        # Skip the transcript id; each remaining piece carries '#<vaf>'.
        for mut in entry.split('_')[1:]:
            lowest = min(lowest, float(mut.split('#')[1]))
    return lowest
def compute_hash(*args):
    """Compute a hash value from three patient-specific fields that must be
    removed for respecting the patient's privacy."""
    return hash(tuple(args))
def prescribing_transform(row):
    """Transform a row from a formatted file into data suitable for
    storing in our bigquery schema
    A 'formatted file' is a file created by the
    import_hscic_prescribing Django management command.
    """
    # Re-encode the date column as a BigQuery TIMESTAMP...
    row[10] = "%s 00:00:00" % row[10]
    # ...then drop the two columns the BigQuery schema doesn't have.
    del row[3]
    del row[-1]
    return row
def ishexdigit(c):
    """Return True when character *c* is a (case-insensitive) hex digit.

    >>> ishexdigit('0')
    True
    >>> ishexdigit('9')
    True
    >>> ishexdigit('/')
    False
    >>> ishexdigit(':')
    False
    >>> ishexdigit('a')
    True
    >>> ishexdigit('f')
    True
    >>> ishexdigit('g')
    False
    >>> ishexdigit('A')
    True
    >>> ishexdigit('F')
    True
    >>> ishexdigit('G')
    False
    """
    if c.isdigit():
        return True
    return ord('a') <= ord(c.lower()) <= ord('f')
def getSignature(firmware_data):
    """Function that returns the signature of a broadcom combo solution
    firmware."""
    # The signature is the text after the last NUL byte (ignoring the
    # final two bytes when locating it).
    start = firmware_data[:-2].rfind('\x00') + 1
    signature = firmware_data[start:]
    if 'Version' not in signature or 'Date' not in signature:
        raise Exception("Invalid signature")
    return signature
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.