content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def _fix_data(df):
"""整理数据"""
df['股票代码'] = df['股票代码'].map(lambda x: x[1:])
# df.reset_index(inplace=True)
return df | 94fe13120da55a2e2649a0e4a2c6e3d021fb7bfd | 49,671 |
def createId(df1, dest_col, cols):
    """
    Append flight IDs to the input table (usually the FSU table).

    Parameters:
        df1 (pandas.DataFrame): input dataframe to enhance with flight identifiers
        dest_col (string): base name of the identifier column
        cols (list(string)): columns from which to form the identifier
    Return:
        A dataframe with one additional identifier column, formed by
        concatenating the values of `cols` left to right. The original
        input is untouched and the row count is preserved.
    """
    result = df1.copy()
    combined = result[cols[0]]
    for extra in cols[1:]:
        combined = combined + result[extra]
    result[dest_col] = combined
    return result
def generate_domain(year, month, day, length=32, tld=''):
    """ Generates a domain by considering the current date. """
    # DGA-style generator: each iteration scrambles the date fields with
    # shifts/xors and appends one lowercase letter derived from them.
    # NOTE(review): in C this arithmetic would wrap at 32 bits; Python ints
    # are unbounded, so left-shifted values keep growing — confirm this port
    # reproduces the reference implementation's output.
    domain = ""
    for i in range(length):
        year = ((year ^ 8 * year) >> 11) ^ ((year & 0xFFFFFFF0) << 17)
        month = ((month ^ 4 * month) >> 25) ^ 16 * (month & 0xFFFFFFF8)
        day = ((day ^ (day << 13)) >> 19) ^ ((day & 0xFFFFFFFE) << 12)
        domain += chr(((year ^ month ^ day) % 25) + 97)  # maps to 'a'..'y'
    domain += tld
    return domain
def path_from_node_to_root(root, letter):
    """Recursively search for `letter` and collect the turns taken.

    Args:
        root: root node of the (sub)tree.
        letter: letter to find.
    Returns:
        list: 0/1 turn codes (left=0, right=1) accumulated leaf-to-root,
        [] when root itself holds the letter, or None when absent.
    """
    if root is None:
        return None
    if root.get_letter() == letter:
        return []
    # Try the left subtree first, then the right — same order as a
    # depth-first search.
    for child, turn in ((root.left, 0), (root.right, 1)):
        path = path_from_node_to_root(child, letter)
        if path is not None:
            path.append(turn)
            return path
    return None
def potential_energy(fa, voltages, position, charge):
    """Potential energy of a charged particle in an electric field.

    Parameters
    ----------
    fa :: FastAdjust (must provide potential_r(position, voltages))
    voltages :: np.array([v0, v1, ... vn]) (V)
    position :: np.array([x, y, z]) (m)
    charge :: float64 (C)

    Returns
    -------
    float64
        charge times the electric potential at `position`.
    """
    potential = fa.potential_r(position, voltages)
    return potential * charge
import os
def read_code(code_path):
    """
    Read code to be encrypted as a plain text file.

    :param code_path: path to a single file, or a directory whose top-level
        files are all read
    :return: str() of a dict mapping path (or bare filename, for directory
        entries) to file contents
    """
    code_text = {}
    if os.path.isfile(code_path):
        # Use a context manager so the handle is closed (was leaked before).
        with open(code_path) as handle:
            code_text[code_path] = handle.read()
    elif os.path.isdir(code_path):
        for name in os.listdir(code_path):
            full_path = os.path.join(code_path, name)
            if os.path.isfile(full_path):
                with open(full_path) as handle:
                    code_text[name] = handle.read()
    return str(code_text)
import logging
def _setup_logger():
"""Set up and return the logger for the module."""
template = "%(asctime)s %(name)s:%(levelname)s - %(message)s"
logger = logging.getLogger(__name__)
handler = logging.StreamHandler() # Logs to stderr.
handler.setFormatter(logging.Formatter(fmt=template))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger | 0419c8a521bfe9790f5bb2bfba2a54ff53ba8d5d | 49,677 |
def my_case_fixture_legacy(case_data, request):
    """Getting data will now be executed BEFORE the test (outside of the test duration).

    :param case_data: an object exposing ``get()`` that yields the case data
    :param request: the (pytest) fixture request object; not used here
    :return: whatever ``case_data.get()`` returns
    """
    return case_data.get()
def personal_id(request):
    """Get the OPN personal profile ID for the authenticated profile.

    Falls back to the main profile id when it is individual; returns ''
    when no personal id can be determined (old OPN versions).
    """
    info = request.wallet_info
    if 'personal_profile' in info:
        return info['personal_profile']['id']
    prof = info['profile']
    # Personal profile not provided. Old version of OPN?
    return prof['id'] if prof['is_individual'] else ''
def get_mean(l):
    """
    Compute the arithmetic mean of a list of numbers.

    (The previous docstring — "Concatenates two pandas categoricals" — was a
    copy-paste error and described a different function.)

    Parameters
    ----------
    l : list
        A non-empty list of numbers.

    Returns
    -------
    float
        The arithmetic mean.

    Raises
    ------
    ZeroDivisionError
        If `l` is empty (unchanged behavior).

    Examples
    --------
    >>> get_mean([1, 2, 3])
    2.0
    """
    return sum(l) / len(l)
def construct_workflow_name(example, workflow_engine):
    """Construct suitable workflow name for given REANA example.

    :param example: REANA example (e.g. reana-demo-root6-roofit)
    :param workflow_engine: workflow engine to use (cwl, serial, yadage)
    :type example: str
    :type workflow_engine: str
    :return: "<example-without-reana-demo-prefix>.<engine>"
    """
    short_name = example.replace('reana-demo-', '')
    return f"{short_name}.{workflow_engine}"
def list_paragraph_styles(d):
    """
    Name: list_paragraph_styles
    Inputs: docx.document.Document, open word document
    Output: dict, style_id (keys) with name and counts (keys) found
    Features: Returns a dict of all the paragraph styles found in given doc
    """
    style_dict = {}
    for para in d.paragraphs:
        sid = para.style.style_id
        if sid in style_dict:
            style_dict[sid]['count'] += 1
        else:
            style_dict[sid] = {'name': para.style.name, 'count': 1}
    return style_dict
def ka_C_Binary_ratio(y, positive=1):
    """Find the positive ratio of a binary dependent variable.

    Parameters
    ----------
    y: pandas series
        binary dependent variable
    positive: 1 or 0
        identifies which value counts as positive

    Return
    ------
    float: the rate of the positive value in `y`
    """
    counts = y.value_counts()
    return counts[positive] / counts.sum()
from pathlib import Path
def write_read_instructions(read_instructions: str, scene_path: Path, index: int) -> Path:
    """*Deprecated, use `write_yaml_instructions` instead.*

    Write one block of `read` instructions (from the user's yaml config) to
    `<scene_path>/read/read_<index+1>.txt`; these files are later fed to the
    TTS engine by `record_audio()`.

    Args:
        read_instructions (str): text to write as-is (may contain `ssml`).
        scene_path (Path): scene directory the instructions belong to.
        index (int): index of the command block (file name is 1-based).

    Returns:
        Path: location of the newly written text file.
    """
    target = scene_path / "read" / f"read_{index + 1}.txt"
    with open(target, "w") as stream:
        stream.write(read_instructions)
    return target
import base64
def decode_base64(passwd_base64):
    """
    Decode a salted, base64-encoded password.

    The decoded payload is laid out as ``salt + passwd + salt + '=' + len(salt)``;
    the trailing field after the last '=' gives the salt length, which is then
    used to strip the salt from both ends.

    (Two commented-out legacy variants of this parser were removed; see VCS
    history if needed.)

    :param passwd_base64: the base64-encoded payload
    :return: the decoded password with salt removed
    """
    passwd_tmp = str(bytes.decode(base64.b64decode((passwd_base64).encode('utf-8'))))
    # Salt length is encoded after the final '=' (the password itself may
    # contain '=', hence taking the last field).
    salt_len = int(passwd_tmp.split('=')[-1])
    salt = passwd_tmp[:salt_len]
    # NOTE(review): assumes the salt string does not occur inside the
    # password itself — confirm with the encoder.
    passwd = passwd_tmp.split(salt)[1]
    return passwd
from typing import Iterable
from typing import Sequence
def get_unique_list_in_order(list_of_lists: Iterable[Sequence]):
    """Get unique items from a list of lists, in order of first appearance.

    Args:
        list_of_lists: iterable of sequences.
    Returns:
        list of unique items, ordered by first appearance.
    """
    # dict preserves insertion order, so fromkeys deduplicates while
    # keeping first-seen order.
    flattened = (item for seq in list_of_lists for item in seq)
    return list(dict.fromkeys(flattened))
import yaml
def read_config(fname="params.yaml"):
    """Read and return the Feature_Selection config params from a yaml file.

    Args:
        fname (str): yaml file name.
    Returns:
        dict with the 'Feature_Selection' section, or None when the yaml
        fails to parse (the error is printed).
    """
    with open(fname, "r") as handle:
        try:
            return yaml.safe_load(handle)['Feature_Selection']
        except yaml.YAMLError as err:
            print(err)
    return None
def stereotype_name(stereotype):
    """Return stereotype name suggested by UML specification: first
    character lowercased unless the second character is uppercase.

    :Parameters:
     stereotype
        Stereotype UML metamodel instance.
    """
    name = stereotype.name
    if not name:
        return ""
    # Names like "ABc" are assumed to be acronyms and are left untouched.
    keep_as_is = len(name) > 1 and name[1].isupper()
    return name if keep_as_is else name[0].lower() + name[1:]
def find_uninflected_stem(stem, form):
    """
    Finds all the shared characters from left to right.
    find_uninflected_stem('rAmaH', 'rAmo') => -1+aH

    :param stem: form to reach by applying the diff
    :param form: given form
    :return: a diff: '-<number of chars to delete>+<characters to add>',
             or '' when the two strings are identical
    """
    # Length of the common prefix.
    common = 0
    for a, b in zip(stem, form):
        if a != b:
            break
        common += 1
    stem_tail = stem[common:]
    form_tail = form[common:]
    if not stem_tail and not form_tail:
        return ''
    return '-{}+{}'.format(len(form_tail), stem_tail)
def calc_yd_line_int(play):
    """
    Calculates the yard line as an integer b/w 0 - 100,
    where 0 - 50 represents the opponent's side of the field,
    and 50 - 100 represents the possessing team's side.
    Returns None when the yard line field is empty.
    """
    raw = play.data['yrdln']
    if raw == '':
        return None
    if raw == '50':
        return 50
    side, yards = raw.split(' ')
    yards = int(yards)
    # On the possessing team's own side the number is used directly;
    # otherwise mirror it across midfield.
    return yards if play.data['posteam'] == side else 100 - yards
import numpy
def calcAstrometricError(mag, m5, fwhmGeom=0.7, nvisit=1, systematicFloor=10):
    """
    Calculate an expected astrometric error.

    Can be used for general catalog purposes (typical FWHM,
    nvisit = number of visits) or for a single visit (actual FWHM, nvisit=1).

    Parameters
    ----------
    mag: float
        Magnitude of the source.
    m5: float
        Point source five sigma limiting magnitude of the image.
    fwhmGeom: float, optional
        Geometric (physical) FWHM of the image, in arcseconds. Default 0.7.
    nvisit: int, optional
        Number of visits/measurements; >1 reduces the random error by
        sqrt(nvisit). Default 1.
    systematicFloor: float, optional
        Systematic noise floor for astrometry, in mas. Default 10 mas.

    Returns
    -------
    float
        Astrometric error for the given SNR, in mas.
    """
    # The result can be applied to parallax or proper motion (nvisit > 1);
    # for proper motion also divide by the number of survey years.
    # Referenced in astroph/0805.2366. D. Monet suggests sqrt(Nvisit/2) for
    # the first 3 years, sqrt(N) for longer; being conservative, no such
    # extra reduction is applied here.
    rgamma = 0.039
    snr_term = numpy.power(10, 0.4 * (mag - m5))
    # Convert FWHM (average FWHMeff ~0.7") from arcseconds to mas.
    seeing_mas = fwhmGeom * 1000.
    error_rand = seeing_mas * numpy.sqrt((0.04 - rgamma) * snr_term + rgamma * snr_term * snr_term)
    error_rand = error_rand / numpy.sqrt(nvisit)
    # Combine the systematic floor (mas) with the random error in quadrature.
    return numpy.sqrt(systematicFloor * systematicFloor + error_rand * error_rand)
def delete_note(client, note_id):
    """Delete a note by POSTing its id to /delete, following redirects."""
    payload = {"note_id": note_id}
    return client.post("/delete", data=payload, follow_redirects=True)
def larger(x, y):
    """For demo purposes only; built-in max is preferable."""
    return x if x > y else y
def titleize(s: str) -> str:
    """
    Titleizes a string, aka transforms it from underscored to English title
    format.

    Empty words (from consecutive underscores/spaces, or an empty input)
    are passed through unchanged instead of raising IndexError, which the
    previous `w[0]` indexing did.
    """
    words = s.replace('_', ' ').split(' ')
    # w[:1] is '' for an empty word, so no index error on '' entries.
    return ' '.join(w[:1].upper() + w[1:] for w in words)
def _get_tags() -> list[str]:
"""Return test tags.
:return: Tag list.
"""
return ["Test Tag 1", "Test Tag 2"] | 79f04b17da4e3df3c2a2572980bdc0ca18d4f796 | 49,704 |
def divisors(num):
    """Enumerate all divisors of `num`.

    Pairs (i, num // i) are appended as they are found while scanning
    candidates up to sqrt(num), so the result is not sorted.
    """
    found = []
    for candidate in range(1, int(num ** 0.5) + 1):
        if num % candidate:
            continue
        found.append(candidate)
        partner = num // candidate
        if partner != candidate:  # avoid duplicating a perfect-square root
            found.append(partner)
    return found
def int2fixed(i):
    """Convert an integer to 16.16 fixed point (integer part in the high bits)."""
    fractional_bits = 16
    return i << fractional_bits
def task_import():
    """
    doit task: imports changes to CSV files into the main file by chaining
    the diff/merge/dedupe/sort tasks (no action of its own).
    """
    return {
        "actions": None,
        "task_dep": ["diff", "merge", "dedupe", "sort"],
    }
import torch
def convert_tensors_recursively_to(val, *args, **kwargs):
    """Apply `.to(*args, **kwargs)` to every tensor inside the `val` tree.

    Tuples/lists are rebuilt with the same type; any other value is
    returned unchanged.
    """
    if isinstance(val, torch.Tensor):
        return val.to(*args, **kwargs)
    if isinstance(val, (tuple, list)):
        converted = (convert_tensors_recursively_to(item, *args, **kwargs)
                     for item in val)
        return type(val)(converted)
    return val
def read_country_code_file(country_code_file_path):
    """
    Read a file of country codes, one per line, stripping trailing
    whitespace/newlines from each.
    """
    with open(country_code_file_path) as handle:
        return [line.rstrip() for line in handle]
def contrib_inline_aff(contrib_tag):
    """
    Given a contrib tag, collect the aff tags directly inside it
    (truthy children whose .name equals "aff").
    """
    return [child for child in contrib_tag
            if child and child.name and child.name == "aff"]
from pathlib import Path
import bisect
def process_boarding_passes_part2(file: Path) -> int:
    """
    Find your seat id: the single id missing from the sorted list of
    boarding-pass ids (seat codes are binary with F/L=0 and B/R=1).

    :param file: boarding pass file to process (one seat code per line)
    :return: the missing seat id, or 0 if no gap is found
    """
    id_list: list = []
    # Open via a context manager so the handle is closed (was leaked before).
    with open(file) as handle:
        for seat in handle:
            code = seat.strip().replace('F', '0').replace('B', '1')
            code = code.replace('L', '0').replace('R', '1')
            bisect.insort(id_list, int(code, 2))
    # Scan adjacent ids for the one gap of exactly one seat.
    for previous, current in zip(id_list, id_list[1:]):
        if previous + 1 != current:
            return previous + 1
    return 0
from typing import OrderedDict
def motu_command(mparam):
    """
    Build the motuclient string command based on the mparam dict contents.
    List-valued entries are expanded into one repeated flag per element.
    This is an example of the commands used in motuclient used in bash (aviso):
    python -m motuclient --motu https://my.cmems-du.eu/motu-web/Motu
        --service-id SEALEVEL_GLO_PHY_L4_REP_OBSERVATIONS_008_047-TDS
        --product-id dataset-duacs-rep-global-merged-allsat-phy-l4
        --longitude-min 0.125 --longitude-max -0.125 --latitude-min -50
        --latitude-max -10 --date-min "2020-06-03 00:00:00"
        --date-max "2020-06-03 00:00:00" --variable adt --variable err
        --variable sla --variable ugos --variable ugosa --variable vgos
        --variable vgosa --out-dir <OUTPUT_DIRECTORY>
        --out-name <OUTPUT_FILENAME> --user <USERNAME> --pwd <PASSWORD>
    """
    mainstr = 'python -m motuclient '
    # NOTE(review): OrderedDict here comes from `typing` in this file; calling
    # it works, but collections.OrderedDict (or a plain dict on 3.7+) is the
    # conventional choice.
    motudict = OrderedDict(mparam)
    for k in motudict.keys():
        if type(motudict[k]) != list:
            # Scalar option: "--key value "
            mainstr = mainstr + f'--{k} {motudict[k]} '
        elif type(motudict[k]) == list:
            # Repeated option (e.g. --variable): one flag per element.
            for i in motudict[k]:
                mainstr = mainstr + f'--{k} {i} '
    return mainstr
import torch
def detach(input):
    """
    Detach tensor from calculation graph.

    Args:
        input: a ``torch.Tensor`` (or tree-tensor of tensors) to detach.
    Returns:
        A tensor sharing storage with ``input`` but with
        ``requires_grad=False`` and no autograd history.

    Examples::
        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> tt = ttorch.randn({
        ...     'a': (2, 3),
        ...     'b': {'x': (3, 4)},
        ... })
        >>> tt.requires_grad_(True)
        >>> tt
        <Tensor 0x7f5881338eb8>
        ├── a --> tensor([[ 2.5262,  0.7398,  0.7966],
        │                 [ 1.3164,  1.2248, -2.2494]], requires_grad=True)
        └── b --> <Tensor 0x7f5881338e10>
            └── x --> tensor([[ 0.3578,  0.4611, -0.6668,  0.5356],
                              [-1.4392, -1.2899, -0.0394,  0.8457],
                              [ 0.4492, -0.5188, -0.2375, -1.2649]], requires_grad=True)
        >>> ttorch.detach(tt)
        <Tensor 0x7f588133a588>
        ├── a --> tensor([[ 2.5262,  0.7398,  0.7966],
        │                 [ 1.3164,  1.2248, -2.2494]])
        └── b --> <Tensor 0x7f588133a4e0>
            └── x --> tensor([[ 0.3578,  0.4611, -0.6668,  0.5356],
                              [-1.4392, -1.2899, -0.0394,  0.8457],
                              [ 0.4492, -0.5188, -0.2375, -1.2649]])
    """
    # Thin functional wrapper over torch.detach.
    return torch.detach(input)
import torch
def mae(hat_y, y):
    """Mean absolute error.

    Args:
        hat_y: predicted values
        y: ground-truth values
    Return:
        ('mae', mae): metric name and the computed MAE tensor
    """
    value = (y - hat_y).abs().mean()
    return 'mae', value
import os
def summaries(parent_directory):
    """Return the path under `parent_directory` where tensorflow summaries are stored."""
    summaries_dir = os.path.join(parent_directory, 'summaries')
    return summaries_dir
import copy
def execute_graph_from_context(graph, context, *targets, inplace=False):
    """Execute a graph up to the given targets, starting from a context.

    Parameters
    ----------
    graph : grapes Graph
        Graph of the computation.
    context : dict
        Initial context of the computation (input).
    targets : keys in the graph
        What to compute (desired output).
    inplace : bool
        Whether to modify graph and context in place (default: False).

    Returns
    -------
    grapes Graph
        Graph whose internal context has been updated by the computation
        (a deep copy unless `inplace` is True).
    """
    if not inplace:
        graph, context = copy.deepcopy(graph), copy.deepcopy(context)
    graph.set_internal_context(context)
    graph.execute_to_targets(*targets)
    return graph
def LOS(x1, y1, x2, y2):
    """returns a list of all the tiles in the straight line from (x1,y1) to (x2, y2)

    NOTE(review): the error/errorprev bookkeeping appears to add BOTH
    adjacent tiles when the line crosses exactly through a cell corner
    (a "supercover"-style traversal) — confirm against the reference
    algorithm before relying on that property.
    """
    point_in_LOS = []
    y=y1
    x=x1
    dx = x2-x1
    dy = y2-y1
    point_in_LOS.append([x1, y1])
    # Normalize the deltas to positive values and remember the step signs.
    if(dy<0):
        ystep=-1
        dy=-dy
    else:
        ystep=1
    if dx<0:
        xstep=-1
        dx=-dx
    else:
        xstep=1
    ddy = 2*dy
    ddx = 2*dx
    if(ddx >=ddy):
        # Mostly-horizontal line: step along x, accumulate y error.
        errorprev = dx
        error = dx
        for i in range(dx):
            x+=xstep
            error +=ddy
            if error>ddx:
                y+=ystep
                error-=ddx
                # Decide which extra tile(s) the segment passed through.
                if (error+errorprev)<ddx:
                    point_in_LOS.append([x, y-ystep])
                elif (error+errorprev) > ddx:
                    point_in_LOS.append([x-xstep, y])
                else:
                    # Crossed exactly through the corner: include both.
                    point_in_LOS.append([x, y-ystep])
                    point_in_LOS.append([x-xstep, y])
            point_in_LOS.append([x,y])
            errorprev=error
    else:
        # Mostly-vertical line: step along y, accumulate x error.
        errorprev = dy
        error = dy
        for i in range(dy):
            y += ystep
            error += ddx
            if error>ddy:
                x+=xstep
                error -=ddy
                if (error+errorprev)<ddy:
                    point_in_LOS.append([x-xstep, y])
                elif (error+errorprev)>ddy:
                    point_in_LOS.append([x, y-ystep])
                else:
                    point_in_LOS.append([x, y-ystep])
                    point_in_LOS.append([x-xstep, y])
            point_in_LOS.append([x,y])
            errorprev=error
    return point_in_LOS
import os
import re
def version_string():
    """Returns the version string found in ``vagrant/__init__.py``, or ``None``
    if it could not be found.
    """
    fname = os.path.join(os.path.dirname(__file__), "vagrant", "__init__.py")
    with open(fname, "r") as f:
        match = re.search(r'^__version__\s+=\s"(.+?)"', f.read(), re.MULTILINE)
    return match.group(1) if match else None
def _has_valid_shape(table):
"""Returns true if table has a rectangular shape."""
if not table.columns:
return False
if not table.rows:
return False
num_columns = len(table.columns)
for row in table.rows:
if len(row.cells) != num_columns:
return False
return True | 8b51f96b46ae0d8b586df68ebe192410b390273d | 49,724 |
def extract_yields_stats(yields):
    """Return (coverage, mean, std) computed over the non-zero entries of `yields`."""
    covered = yields != 0.0
    nonzero = yields[covered]
    return covered.mean(), nonzero.mean(), nonzero.std()
import base64
def b64url_decode(data):
    """Decode base64-URL data, restoring the padding that the URL-safe
    form strips ('-'/'_' stand in for '+'/'/')."""
    remainder = len(data) % 4
    if remainder == 3:
        data += '='
    elif remainder == 2:
        data += '=='
    return base64.b64decode(data, altchars='-_')
def riemann_sum(func, partition):
    """
    Compute the Riemann sum for a given partition.

    Inputs:
    - func: any single variable function
    - partition: list of the form [(left, right, midpoint)]
    Outputs:
    - the sum of func(midpoint) * (right - left) over the partition
    """
    total = 0
    for left, right, point in partition:
        total += func(point) * (right - left)
    return total
def find_variables(param_dict):
    """Return the keys of `param_dict` whose values are lists — these are
    treated as sweep variables."""
    return [key for key, value in param_dict.items() if isinstance(value, list)]
import math
def dew_point(T, RH=None):
"""
Given the relative humidity and the dry bulb (actual) temperature,
calculates the dew point (one-minute average).
The constants a and b are dimensionless, c and d are in degrees
celsius.
Using the equation from:
Buck, A. L. (1981), "New equations for computing vapor pressure
and enhancement factor", J. Appl. Meteorol. 20: 1527-1532
"""
if RH is None:
return "0.0"
d = 234.5
if T > 0:
# Use the set of constants for 0 <= T <= 50 for <= 0.05% accuracy.
b = 17.368
c = 238.88
else:
# Use the set of constants for -40 <= T <= 0 for <= 0.06% accuracy.
b = 17.966
c = 247.15
gamma = math.log(RH / 100 * math.exp((b - (T / d)) * (T / (c + T))))
return "%.2f" % ((c * gamma) / (b - gamma)) | ce619d5b33c0108c14303202507dc98f86aa9561 | 49,731 |
import re
def validate_variables_name(variables):
    """
    Validate variable names for Lambda Function.
    Property: Environment.Variables

    Raises ValueError for names AWS reserves or names that do not start
    with a letter; returns the input unchanged when all names pass.
    """
    # Names AWS Lambda reserves for its own runtime environment.
    RESERVED_ENVIRONMENT_VARIABLES = [
        "AWS_ACCESS_KEY",
        "AWS_ACCESS_KEY_ID",
        "AWS_DEFAULT_REGION",
        "AWS_EXECUTION_ENV",
        "AWS_LAMBDA_FUNCTION_MEMORY_SIZE",
        "AWS_LAMBDA_FUNCTION_NAME",
        "AWS_LAMBDA_FUNCTION_VERSION",
        "AWS_LAMBDA_LOG_GROUP_NAME",
        "AWS_LAMBDA_LOG_STREAM_NAME",
        "AWS_REGION",
        "AWS_SECRET_ACCESS_KEY",
        "AWS_SECRET_KEY",
        "AWS_SECURITY_TOKEN",
        "AWS_SESSION_TOKEN",
        "LAMBDA_RUNTIME_DIR",
        "LAMBDA_TASK_ROOT",
        "TZ",
    ]
    # NOTE(review): re.match only anchors at the start, so trailing invalid
    # characters (e.g. "FOO-BAR!") still pass, and the `+` rejects
    # single-character names. Confirm whether a fullmatch of
    # [a-zA-Z][a-zA-Z0-9_]* was intended.
    ENVIRONMENT_VARIABLES_NAME_PATTERN = r"[a-zA-Z][a-zA-Z0-9_]+"
    for name in variables:
        if name in RESERVED_ENVIRONMENT_VARIABLES:
            raise ValueError(
                "Lambda Function environment variables names"
                " can't be none of:\n %s" % ", ".join(RESERVED_ENVIRONMENT_VARIABLES)
            )
        elif not re.match(ENVIRONMENT_VARIABLES_NAME_PATTERN, name):
            raise ValueError("Invalid environment variable name: %s" % name)
    return variables
def get_substr_slices(umi_length, idx_size):
    """
    Create slices to split a UMI into approximately equal size substrings.
    Returns a list of (start, end) tuples that can be passed to slice().
    The first `umi_length % idx_size` slices are one character longer.
    """
    base, remainder = divmod(umi_length, idx_size)
    sizes = [base + 1] * remainder + [base] * (idx_size - remainder)
    bounds = []
    start = 0
    for size in sizes:
        bounds.append((start, start + size))
        start += size
    return bounds
import requests
def finance_product_recommend_api(value_list):
    """
    Ibm Watson ML API

    Exchanges an IBM Cloud API key for a bearer token, then scores
    `value_list` against a deployed ML model.

    :param value_list: list of rows matching the declared input fields
    :return: the raw `requests.Response` from the scoring endpoint
    """
    # NOTE(review): placeholder credential — must be replaced (and should be
    # read from configuration/secrets, not hard-coded).
    api_key = "YOUR_API_KEY"
    token_response = requests.post('https://iam.cloud.ibm.com/identity/token',
                                   data={"apikey": api_key, "grant_type": 'urn:ibm:params:oauth:grant-type:apikey'})
    ml_token = token_response.json()["access_token"]
    payload_scoring = {"input_data": [{"fields": ['sex', 'rolename', 'location', 'carbon_credit', 'limit_0', 'period',
                                                  'organization'],
                                       "values": value_list}]}
    # You could obtain the url after model deployed on IBM Cloud platform.
    response_scoring = requests.post(
        'https://us-south.ml.cloud.ibm.com/ml/v4/deployments/XXXXX',
        json=payload_scoring, headers={'Authorization': 'Bearer ' + ml_token})
    return response_scoring
def capwords(s, sep=None):
    """capwords(s, [sep]) -> string

    Split the argument into words using split, capitalize each word,
    and join them with `sep` (or a single space). With the default
    separator, runs of whitespace collapse to one space.
    """
    joiner = sep or ' '
    return joiner.join(word.capitalize() for word in s.split(sep))
def t_func(model, p_h):
    """Total tax burden for a given housing price.

    Args:
        model: object with attributes tau_g, tau_p, epsilon, p_bar.
        p_h: housing price.
    Returns:
        Individual tax burden: flat tax on the assessed value plus a
        progressive tax on the part above the threshold p_bar.
    """
    assessed = p_h * model.epsilon
    base_tax = model.tau_g * assessed
    progressive_tax = model.tau_p * max(assessed - model.p_bar, 0)
    return base_tax + progressive_tax
def merge_sort(A):
    """
    Sort the list A in place (and return it) using recursive merge sort.

    The stray debug print inside the merge step was removed.
    """
    def merge(L, R, arr):
        """
        Merge the sorted left (L) and right (R) halves back into arr in order.
        """
        l = r = i = 0
        while l < len(L) and r < len(R):
            if L[l] < R[r]:
                arr[i] = L[l]
                l += 1
            else:
                arr[i] = R[r]
                r += 1
            i += 1
        # Copy whichever half still has elements left.
        while l < len(L):
            arr[i] = L[l]
            l += 1
            i += 1
        while r < len(R):
            arr[i] = R[r]
            r += 1
            i += 1
        return arr
    if (len(A) < 2):
        return A
    mid = len(A) // 2
    L = merge_sort(A[:mid])
    R = merge_sort(A[mid:])
    merge(L, R, A)
    return A
def df_level(value, bounds="80:95"):
    """Convert a numeric value to "success", "warning" or "danger".
    The two required bounds are given by a string "<warning>:<error>".

    The comparisons are written to work whether the scale is ascending
    (warning <= error, low values are "success") or descending
    (error <= warning, high values are "success").
    """
    # noinspection PyTypeChecker
    warning, error = [float(x) for x in bounds.split(":")]
    # Ascending: value below warning; descending: value above warning.
    if value < warning <= error or error <= warning < value:
        return "success"
    # Between the two thresholds, in either orientation.
    elif warning <= value < error or error < value <= warning:
        return "warning"
    return "danger"
def get_offset(im, point, x_fov=1, y_fov=1):
    """
    Calculate the angular (x, y) offset of `point` from the image centre,
    given the camera's angular field of view in each axis.
    """
    height, width = im.shape
    px, py = point
    pixel_dx = px - width / 2
    pixel_dy = py - height / 2
    return pixel_dx * (x_fov / width), pixel_dy * (y_fov / height)
from typing import Sequence
def wrap_text(text, line_length) -> Sequence[str]:
    """Greedily wrap `text` into lines of at most `line_length` characters,
    breaking only at whitespace. Each individual word must already fit."""
    words = text.split()
    assert(all(len(word) <= line_length for word in words))  # not possible otherwise
    lines = [[]]
    for word in words:
        candidate = " ".join(lines[-1] + [word])
        if len(candidate) <= line_length:
            lines[-1].append(word)
        else:
            lines.append([word])
    return [" ".join(line) for line in lines]
def getVelIntSpec(mcTable, mcTable_binned, variable):
    """
    Calculate the integrated reflectivity for each velocity bin.

    Parameters
    ----------
    mcTable: McSnow output returned from calcParticleZe()
    mcTable_binned: McSnow table output binned for a given velocity bin
    variable: column to integrate (summed) within each velocity bin

    Returns
    -------
    Table with the integrated value of `variable` per velocity bin.
    """
    grouped = mcTable.groupby(mcTable_binned)[variable]
    return grouped.agg(['sum'])
from typing import List
def token_airdrop(token_agent, amount: int, origin: str, addresses: List[str]):
    """Airdrops tokens from creator address to all other addresses!

    Sends `amount` tokens from `origin` to each address in `addresses`,
    waiting for each transaction's receipt before issuing the next one.
    Returns the receipts in address order.
    """
    def txs():
        # Lazily issue one transfer transaction per recipient.
        for address in addresses:
            txhash = token_agent.contract.functions.transfer(address, amount).transact({'from': origin})
            yield txhash
    receipts = list()
    for tx in txs():  # One at a time
        receipt = token_agent.blockchain.wait_for_receipt(tx)
        receipts.append(receipt)
    return receipts
def saveDataset(dataset = {}, filepath = "", append=0):
"""
Reads a XAYA format dataset into a csv file where the first row contains
the file headings and all other rows contain the data. Inverse of getDataset.
Algorithm:
1. 'DATA' component of dataset translated into a list of strings -- transList
2. Write transList to a file object
'append' keyword: 0 includes headings, not 0 is append mode and headings are not written
"""
# 'DATA' component of dataset translated into a list of strings -- transList (see xayacore transGraphToList)
transList = []
# If append mode -- then skip line 0 -- the headings
if append != 0:
for key in dataset['DATA']:
if key != 0:
valueString = " ".join([str(x) + ',' for x in dataset['DATA'][key]]).rstrip(',')
newline = '\n'
finalString = valueString + newline
transList.append(finalString)
#If not in append mode, include all lines
else:
for key in dataset['DATA']:
valueString = " ".join([str(x) + ',' for x in dataset['DATA'][key]]).rstrip(',')
newline = '\n'
finalString = valueString + newline
transList.append(finalString)
# Write transList to a file object (see xayacore writeGraph)
fileObject = open(filepath, 'a+')
fileObject.writelines(transList)
fileObject.flush()
return fileObject | 54314514c232654500b82da4ac7405699387be08 | 49,746 |
def iterable(obj):
    """Return True when `obj` supports iteration, False otherwise."""
    try:
        iter(obj)
        return True
    except Exception:
        return False
def notas(*lista, sit=False):
    """Summarize a set of student grades.

    Args:
        *lista: the grades (at least one required).
        sit (bool): when True, also include the class situation.

    Returns:
        dict with keys 'total', 'maior' (highest), 'menor' (lowest),
        'media' (mean) and, when `sit` is set, 'Situação'.

    Raises:
        ValueError: if no grades are given (previously this surfaced as an
        unhelpful ZeroDivisionError).
    """
    if not lista:
        raise ValueError("notas() requires at least one grade")
    media = sum(lista) / len(lista)
    diario = {
        'total': len(lista),
        'maior': max(lista),
        'menor': min(lista),
        'media': media,
    }
    if sit:
        if media >= 7.0:
            diario['Situação'] = 'Boa'
        elif media > 4.0:  # media < 7.0 is guaranteed here
            diario['Situação'] = 'Razoavel'
        else:
            diario['Situação'] = 'Ruin'
    return diario
import pickle
def pickle_loader(path):
    """A loader for pickle files that contain a sample.

    Args:
        path: Path to an audio track
    Returns:
        sample: the unpickled sample
    """
    with open(path, 'rb') as pkl:
        return pickle.load(pkl)
import sys
def merge_two_dicts(x, y):
    """Given two dicts, merge them into a new dict as a shallow copy.

    `y` wins on key collisions; neither input is modified. The old
    sys.version_info branching was dead code — both branches performed the
    same copy/update, which works identically on Python 2.7+ and 3.x.
    https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
    """
    z = x.copy()
    z.update(y)
    return z
import random
import math
def pick_action(s, values, epsilon):
    """
    Choose an action for state `s` using an epsilon-greedy strategy.

    :param s: the state being evaluated
    :param values: the Q-values for all s-a pairs, nested dict
                   {state: {action: q_value}}
    :param epsilon: probability of a random (exploration) choice
    :return: the chosen action
    """
    if random.random() < epsilon:
        # dict views are not indexable in Python 3; random.choice needs a
        # sequence, so materialize the keys (fixes a TypeError).
        return random.choice(list(values[s]))
    max_q_val = -math.inf
    max_action = None
    for action, value in values[s].items():
        if value > max_q_val:
            max_q_val = value
            max_action = action
    return max_action
import torch
def synthetic_data(w, b, num_examples):
    """
    Generate labelled synthetic data: y = Xw + b + noise, with
    noise ~ N(0, 0.01).

    Args:
        w: true weight vector (1-D tensor).
        b: true scalar bias.
        num_examples: number of rows to generate.
    Returns:
        (X, y) where X has shape (num_examples, len(w)) and y has
        shape (num_examples, 1).
    """
    features = torch.randn(num_examples, len(w))
    labels = torch.matmul(features, w) + b
    labels = labels + torch.normal(0, 0.01, labels.shape)
    return features, labels.reshape(-1, 1)
import time
def timestamp():
    """Return the current Unix timestamp (whole seconds since 1970)."""
    return int(time.time())
import os
from textwrap import dedent
def _get_file(name):
"""
Retrieves one template.
"""
this = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(this, "_onnx_export_templates_%s.tmpl" % name)
if not os.path.exists(filename):
raise FileNotFoundError( # pragma: no cover
"Unable to find template %r in folder %r." % (name, this))
with open(filename, "r", encoding="utf-8") as f:
return dedent(f.read()) | db874ed0dc1091fa8a60cb4981d92dc796a0afa8 | 49,760 |
def parse_job(children):
    """
    Parse status information out of the Job element.

    Walks each Job.batch/v1 object in *children* and builds a summary
    dict with keys name/state/status/succeeded. Later jobs and later
    conditions overwrite earlier values (last one wins).
    """
    job_status = {'name': False,
                  'state': "",
                  'status': "",
                  'succeeded': ""}
    for mpijob_name, mpijob in children['Job.batch/v1'].items():
        job_status['name'] = mpijob_name
        status_obj = mpijob.get('status', {})
        if status_obj.get('active', 0) == 1:
            job_status['state'] = 'Running'
            job_status['succeeded'] = 'Unknown'
            job_status['status'] = 'Unknown'
        for condition in status_obj.get('conditions', []):
            terminal = (condition['type'] in ('Complete', 'Failed')
                        and condition['status'] == 'True')
            if terminal:
                job_status['state'] = 'Finished'
                job_status['succeeded'] = (
                    "succeeded"
                    if status_obj.get('succeeded', 0) == 1
                    else "Failed")
            else:
                job_status['state'] = 'Running'
                job_status['succeeded'] = 'Unknown'
                job_status['status'] = condition['type']
    return job_status
def quick_one(lis):
    """
    Partition a copy of *lis* around its first element (the pivot):
    items <= pivot end up left of it, larger items right of it.
    Returns the partitioned list and the pivot's final index.
    The input list is not modified.
    """
    arr = lis.copy()
    pivot = lis[0]
    pivot_idx = 0
    lo = 0
    hi = len(lis) - 1
    while lo != hi:
        # Scan down from the right for an item that belongs on the left.
        while hi > pivot_idx:
            if arr[hi] <= pivot:
                arr[hi], arr[pivot_idx] = arr[pivot_idx], arr[hi]
                pivot_idx = hi
                break
            hi -= 1
        # Scan up from the left for an item that belongs on the right.
        while lo < pivot_idx:
            if arr[lo] > pivot:
                arr[lo], arr[pivot_idx] = arr[pivot_idx], arr[lo]
                pivot_idx = lo
                break
            lo += 1
    return arr, pivot_idx
def DESS(inString):
    """Decode a single string into two strings (inverse of ESS).

    DESS (DEcode from Single String) expects "<len1> <payload>": the
    decimal length of the first string, a space, then both strings
    concatenated.

    Args:
        inString (str): The string to be decoded

    Returns:
        (str, str): The two strings that were decoded from the input.

    Example:
        >>> DESS('3 abcdefg')
        ('abc', 'defg')
    """
    length_field, payload = inString.split(" ", 1)
    cut = int(length_field)
    return (payload[:cut], payload[cut:])
import random
def create_code():
    """create_code() -> str, returns a secret code of 4 distinct colors.

    Colors are drawn without replacement from 'r', 'y', 'b', 'g', 'o',
    'p' (red, yellow, blue, green, orange, purple), so no color repeats.
    """
    available = ['r', 'y', 'b', 'g', 'o', 'p']
    picks = []
    for _ in range(4):
        # pop the chosen color so it cannot be selected again
        picks.append(available.pop(random.randrange(len(available))))
    return ''.join(picks)
def intersection(rectangle1, rectangle2):
    """
    Return True if rectangle1 and rectangle2 intersect (touching edges
    and corners count as intersecting), else False.
    Note: rectangles may be single points as well, with xupper=xlower etc.

    arguments:
    rectangleX: list [xlower, ylower, xupper, yupper]
    """
    xl1, yl1, xu1, yu1 = rectangle1
    xl2, yl2, xu2, yu2 = rectangle2
    # Disjoint iff one rectangle lies strictly beyond the other on some
    # axis. Use boolean `or`/`not` instead of bitwise `|` on comparison
    # results, and avoid shadowing the function name with a local.
    return not (xl1 > xu2 or xl2 > xu1 or yl1 > yu2 or yl2 > yu1)
def _get_input_output_states(model):
"""Get input/output states of model with external states."""
input_states = []
output_states = []
for i in range(len(model.layers)):
config = model.layers[i].get_config()
# input output states exist only in layers with property 'mode'
if 'mode' in config:
input_state = model.layers[i].get_input_state()
if input_state not in ([], [None]):
input_states.append(model.layers[i].get_input_state())
output_state = model.layers[i].get_output_state()
if output_state not in ([], [None]):
output_states.append(output_state)
return input_states, output_states | e22363fa2f20ead8e3cf973a88c05fc1651a84c7 | 49,770 |
from typing import List
def load_lpc_deu_news_2015_100K_sents() -> List[str]:
    """Load LPC sentences corpus, German news, year 2015, 100k examples.

    Each line of the file is "<id>\\t<sentence>"; only the stripped
    sentence part is returned.

    Sources:
    --------
    - https://wortschatz.uni-leipzig.de/en/download/german
    """
    # `with` guarantees the handle is closed even if parsing fails; the
    # previous `except Exception: raise Exception(err)` leaked the file
    # handle on error and discarded the original exception type and
    # traceback (any raised error is still an Exception subclass, so
    # existing `except Exception` callers keep working).
    with open("data/lpc/deu_news_2015_100K-sentences.txt", "r") as fp:
        return [line.split("\t")[1].strip() for line in fp]
def count_occurences(grid, number):
    """Count occurrences of number on grid (a list of rows)."""
    total = 0
    for row in grid:
        total += row.count(number)
    return total
def flag8(byte, lsb=False) -> list:
    """Split a byte (as a hex str) into a list of 8 bits, MSB first by default."""
    value = bytes.fromhex(byte)[0]
    bits = [(value >> position) & 1 for position in range(8)]  # LSB first
    return bits if lsb is True else bits[::-1]
def _apply_link(input, link, ln_title="Link"):
"""input[0]: mol_img_tag
input[1]: link_col value"""
link_str = link.format(input[1])
result = '<a target="_blank" href="{}" title="{}">{}</a>'.format(
link_str, ln_title, input[0]
)
return result | 92522b01c0d1054a7829909a76f82dfaf5348d05 | 49,775 |
def history_json():
    """Define a successful response to POST api/v1/event/app/get_all_history_record."""
    # Static test fixture: two motion-event history records.
    # Serial numbers, DIDs, licenses and connection strings are redacted
    # with x's; thumb/storage paths are placeholders.
    return {
        "code": 0,
        "msg": "Succeed.",
        "data": [
            {
                "monitor_id": 128428371,
                "station_sn": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "device_sn": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "storage_type": 1,
                "storage_path": "/media/mmcblk0p1/Camera00/20191028123014.dat",
                "hevc_storage_path": "",
                "cloud_path": "",
                "frame_num": 119,
                "thumb_path": "https://path/to/image.jpg",
                "thumb_data": "",
                "start_time": 1572287416088,
                "end_time": 1572287426100,
                "cipher_id": 158,
                "cipher_user_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "has_human": 0,
                "volume": "Anker_HmQx0Cp_g",
                "vision": 0,
                "device_name": "Driveway",
                "device_type": 1,
                "video_type": 0,
                "extra": "",
                "viewed": False,
                "create_time": 1572287430,
                "update_time": 1572287430,
                "status": 1,
                "station_name": "",
                "p2p_did": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "push_did": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "p2p_license": "WPMWQK",
                "push_license": "BDYNMB",
                "ndt_did": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "ndt_license": "DKUIYX",
                "p2p_conn": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "app_conn": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "wipn_enc_dec_key": "ZXSecurity17Cam@",
                "wipn_ndt_aes128key": "ZXSecurity17Cam@",
                "query_server_did": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "prefix": "",
                "ai_faces": None,
                "is_favorite": False,
            },
            # Second record: same shape, but has_human == 1.
            {
                "monitor_id": 128340343,
                "station_sn": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "device_sn": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "storage_type": 1,
                "storage_path": "/media/mmcblk0p1/Camera00/20191028100453.dat",
                "hevc_storage_path": "",
                "cloud_path": "",
                "frame_num": 113,
                "thumb_path": "https://path/to/image.jpg",
                "thumb_data": "",
                "start_time": 1572278695752,
                "end_time": 1572278702140,
                "cipher_id": 158,
                "cipher_user_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "has_human": 1,
                "volume": "Anker_HmQx0Cp_g",
                "vision": 0,
                "device_name": "Driveway",
                "device_type": 1,
                "video_type": 0,
                "extra": "",
                "viewed": False,
                "create_time": 1572278710,
                "update_time": 1572278710,
                "status": 1,
                "station_name": "",
                "p2p_did": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "push_did": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "p2p_license": "WPMWQK",
                "push_license": "BDYNMB",
                "ndt_did": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "ndt_license": "DKUIYX",
                "p2p_conn": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "app_conn": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "wipn_enc_dec_key": "ZXSecurity17Cam@",
                "wipn_ndt_aes128key": "ZXSecurity17Cam@",
                "query_server_did": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "prefix": "",
                "ai_faces": None,
                "is_favorite": False,
            },
        ],
    }
import re
def extract_address(data):
    """ Finds all <...>, chooses the last one and removes the unnecessary brackets
    :param data: Data sent by the client.
    :return: Just the extracted address from triangle brackets.
    """
    # [^>]* keeps each match inside one <...> pair. The previous greedy
    # "\<.*\>" matched from the first '<' to the LAST '>', so with
    # several bracketed tokens on one line it returned a single blob
    # like 'x> cc <y' instead of the last address.
    return re.findall(r"<([^>]*)>", data)[-1]
def decorated_func(specific):
    """Decoration-target identity function: returns its argument unchanged."""
    return specific
def decode_gcs_url(url):
    """Decode GCS URL.

    Args:
        url (str): GCS URL, e.g. "gs://bucket/path/to/file".

    Returns:
        tuple: (bucket_name, file_path)
    """
    parts = url.split('/')
    # parts[0] is the scheme ("gs:"/"https:"), parts[1] is empty,
    # parts[2] is the bucket, the rest is the object path.
    return (parts[2], '/'.join(parts[3:]))
def fcn_VR_FMM(r_div_R):
    """ Transversal velocity factor of FMM in Eq. (31) in [2]
    """
    ratio = r_div_R
    # Same evaluation order as (1/2.)*(3.*r - r**3) for bit-identical floats.
    return 0.5 * (3. * ratio - ratio ** 3.0)
import subprocess
import shlex
def run(cmd: str) -> subprocess.CompletedProcess:
    """ Runs the given command in the terminal and returns the completed
    process instance that represents the process.

    The command is tokenized with shlex.split so quoted arguments that
    contain spaces (e.g. grep "two words" file) stay a single argument;
    the previous str.split() broke them apart.
    """
    return subprocess.run(shlex.split(cmd))
def generate_sas_url(
    account_name: str,
    account_domain: str,
    container_name: str,
    blob_name: str,
    sas_token: str
) -> str:
    """
    Build the full SAS URL for a blob:
    https://<account>.<domain>/<container>/<blob>?<token>
    """
    base = f"https://{account_name}.{account_domain}"
    return f"{base}/{container_name}/{blob_name}?{sas_token}"
def box_enum(typ, val, c):
    """
    Fetch an enum member given its native value.
    """
    boxed_value = c.box(typ.dtype, val)
    # Recover the enum class object, then perform the Python-level
    # `EnumClass(value)` lookup to get the member.
    enum_cls = c.pyapi.unserialize(c.pyapi.serialize_object(typ.instance_class))
    return c.pyapi.call_function_objargs(enum_cls, (boxed_value,))
def equal_to(trial_datum, value) -> bool:
    """Return True when *trial_datum* equals the given value."""
    result = trial_datum == value
    return result
def filter_merged(merged, x, y, orientation, dist1, dist2):
    """
    Filter the merged-fragments table for fragments whose endpoints are
    close to both breakpoints (x, y); assumes fragments extend from each
    breakpoint only in a single direction. Per axis, '+' measures
    breakpoint-minus-fragment-end and '-' measures
    fragment-start-minus-breakpoint, each required to fall in
    [dist1, dist2].
    """
    if orientation == "++":
        keep = ((x - merged["end_pos_x"]).between(dist1, dist2)
                & (y - merged["end_pos_y"]).between(dist1, dist2))
    elif orientation == "+-":
        keep = ((x - merged["end_pos_x"]).between(dist1, dist2)
                & (merged["start_pos_y"] - y).between(dist1, dist2))
    elif orientation == "-+":
        keep = ((merged["start_pos_x"] - x).between(dist1, dist2)
                & (y - merged["end_pos_y"]).between(dist1, dist2))
    elif orientation == "--":
        keep = ((merged["start_pos_x"] - x).between(dist1, dist2)
                & (merged["start_pos_y"] - y).between(dist1, dist2))
    else:
        raise Exception("unknown orientation: '{}'".format(orientation))
    return merged.loc[keep].copy()
def convert_words_to_numbers(tokens):
    """
    Converts numbers in word format into number format, in place,
    and returns the same list.

    >>> convert_words_to_numbers(['five', "o'clock"])
    ['5', "o'clock"]
    >>> convert_words_to_numbers(['seven', "o'clock"])
    ['7', "o'clock"]
    """
    number_words = ["zero", "one", "two", "three", "four", "five", "six",
                    "seven", "eight", "nine", "ten", "eleven", "twelve"]
    word_to_digit = {word: str(i) for i, word in enumerate(number_words)}
    for position, token in enumerate(tokens):
        replacement = word_to_digit.get(token.lower())
        if replacement is not None:
            tokens[position] = replacement
    return tokens
def Rt_a_m(Rt):
    """
    Convert a distance given in Earth radii to meters.
    """
    # mean Earth radius: 6,371,000 m
    return 6371000 * Rt
def parseValueCoord(vcstr):
    """
    Parse a value*coord ZMAT string.

    E.g., "R", "1.0", "2*A", "-2.3*R", "-A". All whitespace is removed
    before parsing. Without a '*', the string must be a float literal or
    a coordinate label (starting with a letter, optionally preceded by
    '+' or '-'). With a '*', the part before it must be a float literal
    and the part after it a coordinate label.

    Parameters
    ----------
    vcstr : str
        Value-coordinate string

    Returns
    -------
    v : float
        The literal value or coefficient.
    c : str
        The coordinate label, or None if there is no coordinate.
    """
    compact = "".join(vcstr.split())  # strip all whitespace
    if "*" in compact:
        # explicit coefficient * coordinate
        vstr, cstr = compact.split('*')
        if len(vstr) == 0 or len(cstr) == 0:
            raise ValueError("Malformed value-coordinate string")
        if not cstr[0].isalpha():
            raise ValueError("Invalid coordinate label")
        return (float(vstr), cstr)
    # bare literal, or bare coordinate with optional sign
    if compact[0].isalpha():
        return (1.0, compact)
    if compact[0] == '-' and compact[1].isalpha():
        return (-1.0, compact[1:])
    if compact[0] == '+' and compact[1].isalpha():
        return (+1.0, compact[1:])
    return (float(compact), None)
def L_B():
    """
    Return the defining constant L_B = 1 - d(TDB)/d(TCB) from the IERS
    numerical standards (2010): 1.550519768e-8 (dimensionless).

    The previous body returned ``1.550519768 - 8`` — a subtraction
    evaluating to about -6.45 — because the exponent marker 'e' was
    missing from the literal.
    """
    return 1.550519768e-8
import platform
def get_win_drives():
    """ Finds the drive letters in Windows OS.

    Returns a list like ['C:\\\\', 'D:\\\\'] on Windows and [] on every
    other OS.
    """
    # The old check `platform == 'win'` compared the *module* object to
    # a string, which is always False, so the function always returned
    # []. platform.system() returns 'Windows' on Windows.
    if platform.system() != 'Windows':
        return []
    # Deferred import: win32api only exists on Windows; importing it at
    # module level crashed the whole module on other OSes.
    import win32api
    # noinspection PyUnresolvedReferences
    drives = win32api.GetLogicalDriveStrings()
    return drives.split('\000')[:-1]
import argparse
def parse_arguments():
    """Parse command-line arguments (paths, observed/predicted horizon lengths)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--test_features", default="", type=str,
                        help="path to the file which has test features.")
    parser.add_argument("--obs_len", default=20, type=int,
                        help="Observed length of the trajectory")
    parser.add_argument("--pred_len", default=30, type=int,
                        help="Prediction Horizon")
    parser.add_argument("--traj_save_path", required=True, type=str,
                        help="path to the pickle file where forecasted trajectories will be saved.")
    return parser.parse_args()
def shuntconnection(connections, height, maxnodes, nodes):
    """ Given a particular configuration of connections, move to
    the next connection in the enumeration

    Mutates ``connections`` in place, odometer-style: the digit at index
    ``height - 1`` is bumped, and when it saturates the recursion
    "carries" into the next-lower height. Returns the net change in the
    node count contributed at/below this level; -1 appears to signal a
    wrap-around at the top (``height == maxnodes``).
    NOTE(review): semantics above inferred from the code alone — confirm
    against the caller's enumeration contract.
    """
    changed = 0
    if (height >= 1):
        current = connections[height - 1]
        nodesbelow = nodes - current
        if (height <= 1):
            prev = 0
        else:
            prev = connections[height - 2]
        # Saturation: this digit may not exceed 2**prev, and the total
        # must stay under maxnodes; carry into lower heights first.
        if (current >= 2**prev) or (nodesbelow + current >= maxnodes):
            changed = shuntconnection(connections, height - 1, maxnodes, nodesbelow)
            if nodesbelow + changed < maxnodes:
                # Room left after the carry: reset this digit to 1.
                connections[height - 1] = 1
                changed = changed - current + 1
                if height == maxnodes:
                    changed = -1
            else:
                # No room: zero this digit out entirely.
                connections[height - 1] = 0
                changed = changed - current
        else:
            # Simple case: just increment this digit.
            connections[height - 1] = current + 1
            changed = 1
    return changed
def lambda_handler(event, context):
    """
    Expects event in the form:
        [ [I, [O1, O2, ... On-1], On ]
    Returns:
        [ I, [O1, O2, ... On ]
    """
    initial, accumulated = event[0][0], event[0][1]
    accumulated.append(event[1])
    return [initial, accumulated]
def calc_suma_recursiva(n: int = 10) -> int:
    """
    Recursively compute the sum of the first n natural numbers.
    Default: 10.

    :param n: how many numbers to sum
    :type n: int
    :return: sum of the first n numbers
    :rtype: int
    """
    return 0 if n == 0 else n + calc_suma_recursiva(n - 1)
def evaluate_against_tags(entities, tags):
    """Perform evaluation against tags.

    Keeps only entities whose article_id appears in ``tags['id']``,
    groups their words per article, then checks which of each article's
    tags were recovered among those words.

    Returns (all_tags, found_tags): every tag seen, and the subset that
    was found as an entity word.
    """
    wanted_ids = tags["id"].values
    subset = entities[entities["article_id"].apply(lambda a: a in wanted_ids)]
    grouped = (subset.groupby("article_id")["word"]
                     .apply(list)
                     .reset_index(name="entities"))
    all_tags = []
    found_tags = []
    for ind in tags.index:
        article_id = tags["id"][ind]
        matches = grouped[grouped["article_id"] == article_id]["entities"].tolist()
        # groupby keys are unique, so matches has at most one element
        words = matches[0] if matches else matches
        for tag in tags["tags"][ind]:
            all_tags.append(tag)
            if tag in words:
                found_tags.append(tag)
    return all_tags, found_tags
import base64
def encode(encoding_input):
    """Base64-url-encode a string or bytes and strip the '=' padding."""
    if isinstance(encoding_input, str):
        payload = encoding_input.encode()
    else:
        payload = encoding_input
    encoded = base64.urlsafe_b64encode(payload).decode('utf-8')
    # urlsafe base64 only ever places '=' at the end, so this removes
    # exactly the padding
    return encoded.replace('=', '')
def handler():
    """ Get information about the status of the service

    Returns:
        :obj:`dict` in ``Health`` schema
    """
    health = {"status": "ok"}
    return health
def yesno_as_boolean(yesno_string):
    """converts text containing yes or no (case-insensitive) to a bool"""
    lookup = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    return lookup[yesno_string.lower()]
def fixture_repo_owner() -> str:
    """Return a repository owner."""
    owner = "hackebrot"
    return owner
def sanitise_p_value(n_iter, p_val):
    """
    Format a p-value computed from an empirical distribution.

    A raw value of exactly 0 means not even one sample fell on the far
    side of the measured value, so the best statement is the upper
    bound 1/n_iter; otherwise the value is rendered with 4 decimals.

    :param int n_iter: number of iterations used to build the distribution
    :param float p_val: raw p-value
    :return: formatted p-value string
    """
    if p_val == 0:
        return "< {}".format(1 / n_iter)
    return "{:.4f}".format(p_val)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.