# Aggregated utility functions (dataset extraction artifacts removed).
import math
def isLeft(p1, p2, p3):
    """Signed distance from p3 to the directed line through p1 and p2.

    Points are objects exposing ``.x`` and ``.y`` attributes. The result is
    positive when p3 lies to the left of the directed line p1->p2, negative
    to the right, and 0.0 when p3 is on the line.

    Raises:
        ValueError: if p1 and p2 coincide (the line is undefined; the
            original code raised ZeroDivisionError here).
    """
    dx = p2.x - p1.x
    dy = p2.y - p1.y
    length = math.hypot(dx, dy)
    if length == 0.0:
        raise ValueError("p1 and p2 coincide; directed line is undefined")
    # 2D cross product of (p2-p1) with (p3-p1), normalised by |p2-p1|.
    return (dx * (p3.y - p1.y) - dy * (p3.x - p1.x)) / length
def os_path_norm(diroot):
    """Normalise a directory path to forward slashes with a trailing slash.

    Args:
        diroot (str): directory path, possibly using backslash separators.

    Returns:
        str: *diroot* with backslashes replaced by "/" and a trailing "/"
        appended when missing. An empty string becomes "/".
    """
    diroot = diroot.replace("\\", "/")
    # endswith() replaces the original diroot[-1] check, which raised
    # IndexError on an empty string.
    return diroot if diroot.endswith("/") else diroot + "/"
def matched_requisition(ref, requisitions):
    """Return the first requisition whose "reference" equals *ref*.

    Falls back to an empty dict when no requisition matches.
    """
    return next(
        (req for req in requisitions if req["reference"] == ref),
        {},
    )
import tempfile
import os
def UmaskNamedTemporaryFile(*args, **kargs):
    """Create NamedTemporaryFile which follows standard *NIX umask conventions.

    tempfile.NamedTemporaryFile always creates files with mode 0600; this
    wrapper widens the mode to what a regular open() would have produced
    under the current umask (0666 & ~umask).

    Thanks to Pierre at https://stackoverflow.com/a/44130605/507544

    All positional and keyword arguments are forwarded unchanged to
    tempfile.NamedTemporaryFile; its return value is returned as-is.
    """
    fdesc = tempfile.NamedTemporaryFile(*args, **kargs)
    # There is no portable "read the umask" call: set it to 0, capture the
    # previous value, and restore it immediately.
    # NOTE(review): the umask is process-wide, so this pair of calls is not
    # thread-safe.
    umask = os.umask(0)
    os.umask(umask)
    os.chmod(fdesc.name, 0o666 & ~umask)
    return fdesc
import time
def get_day_num(dayname) -> int:
    """Convert a full weekday name to its 0-indexed weekday number.

    Monday maps to 0 and Sunday to 6, e.g. "Sunday" -> 6.
    """
    parsed = time.strptime(dayname, "%A")
    return parsed.tm_wday
import json
import os
def create_json(filename, data):
    """Serialize *data* to *filename* as pretty-printed (indent=4) JSON.

    Returns True when the file exists afterwards, False otherwise.
    """
    serialized = json.dumps(data, indent=4)
    with open(filename, 'w') as handle:
        handle.write(serialized)
    return os.path.isfile(filename)
def add_start(start, notes):
    """Add the "start" delay to all the events inside of a song called
    from another song.

    Args:
        start: offset added to each event's start time.
        notes: mixed list, mutated in place. List entries are timed events
            whose second element is the start time; dict entries with
            type == "song" are embedded sub-songs.

    Returns:
        The same *notes* list, for chaining.
    """
    for note in notes:
        # isinstance replaces the fragile type(x) == T comparisons.
        if isinstance(note, list):
            note[1] += start
        # .get avoids a KeyError on dicts that carry no "type" key.
        elif isinstance(note, dict) and note.get("type") == "song":
            note["start"] += start
    return notes
def clean(s):
    """Clean up a string: drop newlines, collapse double spaces once, strip.

    Returns None unchanged when given None.
    """
    if s is None:
        return None
    return s.replace("\n", " ").replace("  ", " ").strip()
from typing import Dict
import os
def get_syn2cls_mapping(root: str) -> Dict[str, int]:
    """Maps ImageNet classes (i.e., Wordnet synsets) to numeric labels.

    Reads LOC_synset_mapping.txt from *root*; the first whitespace-separated
    token of line i is the synset for numeric class i.
    """
    mapping_path = os.path.join(root, 'LOC_synset_mapping.txt')
    try:
        with open(mapping_path, 'r') as f:
            synsets = [line.split()[0] for line in f.readlines()]
    except FileNotFoundError:
        raise Exception(
            '\nCould not find LOC_synset_mapping.txt in root directory. Move file to root directory.\n')
    return {syn: cls for cls, syn in enumerate(synsets)}
import os
def command_exists_on_localhost(command: str) -> bool:
    """Check if a command exists on localhost.

    Uses the shell builtin ``hash``: on success the probe prints "True",
    on failure it prints an empty ``echo``.

    Bug fix: the failure branch (``echo ""``) still emits a newline, which
    is truthy, so the original version returned True for *every* command.
    Stripping the output restores the intended behaviour.

    Note:
        *command* is interpolated into a shell line unescaped; pass only
        trusted input.

    Returns:
        True when the shell can resolve *command*, False otherwise.
    """
    probe = "hash " + command + ' 2>/dev/null && echo "True" || echo ""'
    return bool(os.popen(probe).read().strip())
import re
def get_reconstruction_info(line):
    """
    Scraps data for a shock-capturing reconstruction kind of data output.

    :param line: line of a file (str)
    :return: tuple (match_obj, name, recon) where match_obj is a regex match
        object or False (both usable as a boolean "did it match" flag),
        name is a list of scheme labels (or '' when unmatched), and recon
        is the list of three percentages as floats (or [0, 0, 0]).
    """
    # Defaults returned when the line is not a reconstruction report.
    name = ''
    recon = [0, 0, 0]
    # Captures the three percentages from a line shaped like
    # "... WENO : 90.0% ENO : 8.0% FIRST_ORDER : 2.0% ..."
    match_obj = re.match(r'.*WENO\s:\s(.*)\%\sENO\s:\s(.*)\%\sFIRST_ORDER\s:\s(.*)\%.*', line)
    if match_obj:
        if 'shock-capturing' in line:
            # name = [str(match_obj.group(3)),str(match_obj.group(5)),str(match_obj.group(7))]
            name = ["WENO", "ENO", "FIRST_ORDER"]
            # recon = [float(match_obj.group(4)),float(match_obj.group(6)),float(match_obj.group(8))]
            recon = [float(match_obj.group(1)), float(match_obj.group(2)), float(match_obj.group(3))]
        else:
            # Percentages matched but the line is not a shock-capturing
            # report: signal "no match" to the caller.
            match_obj = False
    return match_obj, name, recon
def leapdays(y1, y2):
    """
    Return number of leap years in range [y1, y2).

    Assume y1 <= y2 and no funny (non-leap century) years.
    (The range is half-open: y2 itself is not counted, matching the
    formula and the stdlib calendar.leapdays contract.)
    """
    # Floor division: the original used "/", which yields a float under
    # Python 3 and broke the documented integer count.
    return (y2 + 3) // 4 - (y1 + 3) // 4
import os
def if_main_process():
    """Checks if the current process is the main process and authorized to run
    I/O commands. In DDP mode, the main process is the one with RANK == 0.
    In standard mode, the process will not have the `RANK` Unix var and will
    be authorized to run the I/O commands.
    """
    rank = os.environ.get("RANK")
    if rank is None:
        # No DDP launcher set RANK: single-process mode, always authorized.
        return True
    if rank == "":
        return False
    return int(rank) == 0
import tempfile
import shutil
import atexit
def temporary_folder():
    """Get path to a new temporary folder that is deleted on program exit.

    The folder is created with mkdtemp and its removal is registered with
    the atexit machinery.

    Returns:
        path (string): an absolute, unique and temporary folder path.
    """
    path = tempfile.mkdtemp()

    def _cleanup(folder):
        """Remove *folder*, swallowing filesystem errors; runs via atexit."""
        shutil.rmtree(folder, ignore_errors=True)

    atexit.register(_cleanup, path)
    return path
import os
def local_file_path(filename: str) -> str:
    """Returns the full path of *filename* relative to this module's directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, filename)
def truncate_column(data_column, truncation_point):
    """
    Abstraction of numpy slicing operation for 1D array truncation.

    :param data_column: 1D np.array
    :param truncation_point: int of truncation index to test
    :return: np.array holding the last *truncation_point* entries
    """
    assert (len(data_column.shape) == 1)  # only 1D numpy arrays supported
    return data_column[-truncation_point:]
from typing import Any
def _place_holder(x: Any):
    """Identity placeholder: whatever comes in goes out unchanged.

    Args:
        x: any value.

    Returns:
        The exact object that was passed in.
    """
    return x
from typing import Optional
import json
def get_receive_count_for_rust_sqs_executor_line(line: str) -> Optional[int]:
    """
    Corresponds to logs output from
    info!(message_batch_len = message_batch_len, "Received messages");
    in sqs-executor/lib.rs

    Returns the batch length, or None for any line that is not such a log.
    """
    try:
        parsed = json.loads(line)
    except json.decoder.JSONDecodeError:
        # Not JSON at all: certainly not a rust sqs_executor log line.
        return None
    try:
        fields = parsed["fields"]
        if fields["message"] != "Received messages":
            return None
        count: int = fields["message_batch_len"]
        return count
    except KeyError:
        return None
import math
def asinh(x):
    """Get asinh(x): thin wrapper delegating to math.asinh."""
    return math.asinh(x)
def multiply_with_density(cube, density=1000):
    """Convert precipitation from m to kg/m2.

    Args:
        cube: data cube (iris-style interface assumed: has .data,
            .core_data() and .units -- TODO confirm against caller).
        density: water density in kg m**-3 used for the conversion.

    Returns:
        The same cube, mutated in place: data multiplied by *density* and
        units multiplied by 'kg m**-3'.
    """
    cube.data = cube.core_data() * density
    cube.units *= 'kg m**-3'
    return cube
from typing import Tuple
def get_ronik_time(time_splits : Tuple[int], seconds : int) -> Tuple[int]:
    """
    Decompose *seconds* into the units defined by *time_splits*.

    All counts start at 0; the final element of the returned tuple is the
    leftover quotient ("orbits").
    """
    parts = []
    remaining = seconds
    for split in time_splits:
        remaining, unit = divmod(remaining, split)
        parts.append(unit)
    parts.append(remaining)
    return tuple(parts)
import io
def export_python(palette):
    """
    Return a string of a Python tuple of every named colors.

    Arguments:
        palette (dict): Dictionnary of named colors (as dumped in JSON from
            ``colors`` command)

    Returns:
        string: Python tuple.
    """
    lines = [u'colors = (\n']
    # Entries are sorted by their (name, from_color) value tuple.
    for original_code, values in sorted(palette.items(), key=lambda item: item[1]):
        name = values[0]
        lines.append(" ('{}', '{}'),\n".format(name, original_code))
    lines.append(u')\n\n')
    return ''.join(lines)
import json
def getStatus(response):
    """
    Get the status of the request from the API response.

    :param response: Response object whose .text holds a JSON document
    :return: String status, or None (with a console message) when the
        payload has no "status" key
    """
    payload = json.loads(response.text)
    try:
        return payload["status"]
    except KeyError:
        print('Retrieval unsuccessful.')
        return None
def smog(polysyllables_count):
    """Given the number of words with at least 3 syllables in a text, compute
    the SMOG grade.

    Note that this was originally intended for the english language, and for
    a section of text containing at least 30 sentences.

    Keyword arguments:
    polysyllables_count -- number of words with at least 3 syllables in the sentence
    """
    scaled_count = 30.0 * polysyllables_count
    return 1.043 * scaled_count ** 0.5 + 3.1291
def _create_custom_gendered_seq_names():
    """The names have detail that is adequately represented by the image.

    Maps emoji codepoint sequences (tuples of ints) to a short generic
    display name, collapsing the gender/composition detail into 'Kiss',
    'Couple with Heart' or 'Family'.
    """
    # Relevant emoji codepoints.
    BOY = 0x1f466
    GIRL = 0x1f467
    MAN = 0x1f468
    WOMAN = 0x1f469
    HEART = 0x2764  # Heavy Black Heart
    KISS_MARK = 0x1f48b
    return {
        (MAN, HEART, KISS_MARK, MAN): 'Kiss',
        (WOMAN, HEART, KISS_MARK, WOMAN): 'Kiss',
        (WOMAN, HEART, KISS_MARK, MAN): 'Kiss',
        (WOMAN, HEART, MAN): 'Couple with Heart',
        (MAN, HEART, MAN): 'Couple with Heart',
        (WOMAN, HEART, WOMAN): 'Couple with Heart',
        (MAN, GIRL): 'Family',
        (MAN, GIRL, GIRL): 'Family',
        (MAN, GIRL, BOY): 'Family',
        (MAN, BOY): 'Family',
        (MAN, BOY, BOY): 'Family',
        (MAN, WOMAN, GIRL): 'Family',
        (MAN, WOMAN, GIRL, GIRL): 'Family',
        (MAN, WOMAN, GIRL, BOY): 'Family',
        (MAN, WOMAN, BOY): 'Family',
        (MAN, WOMAN, BOY, BOY): 'Family',
        (MAN, MAN, GIRL): 'Family',
        (MAN, MAN, GIRL, GIRL): 'Family',
        (MAN, MAN, GIRL, BOY): 'Family',
        (MAN, MAN, BOY): 'Family',
        (MAN, MAN, BOY, BOY): 'Family',
        (WOMAN, GIRL): 'Family',
        (WOMAN, GIRL, GIRL): 'Family',
        (WOMAN, GIRL, BOY): 'Family',
        (WOMAN, BOY): 'Family',
        (WOMAN, BOY, BOY): 'Family',
        (WOMAN, WOMAN, GIRL): 'Family',
        (WOMAN, WOMAN, GIRL, GIRL): 'Family',
        (WOMAN, WOMAN, GIRL, BOY): 'Family',
        (WOMAN, WOMAN, BOY): 'Family',
        (WOMAN, WOMAN, BOY, BOY): 'Family' }
def exec_tested_method(tx_name, method, tested_method, inputs, server_db):
    """Execute tested_method within context and arguments.

    Dispatches on the module name (tx_name) and method name to decide
    whether the call needs the server_db handle and how *inputs* is
    unpacked.

    Args:
        tx_name (str): name of the module under test (e.g. 'util', 'script').
        method (str): name of the method under test.
        tested_method: the callable itself.
        inputs: positional args (sequence) or kwargs (dict) for the call.
        server_db: database handle prepended for stateful methods.

    Returns:
        Whatever tested_method returns.
    """
    if tx_name == 'transaction' and method == 'construct':
        # construct takes the db, one positional argument and kwargs.
        return tested_method(server_db, inputs[0], **inputs[1])
    elif (tx_name == 'util' and (method in ['api','date_passed','price','generate_asset_id','generate_asset_name','dhash_string','enabled','get_url','hexlify','parse_subasset_from_asset_name','compact_subasset_longname','expand_subasset_longname',])) \
        or tx_name == 'script' \
        or (tx_name == 'blocks' and (method[:len('get_tx_info')] == 'get_tx_info')) \
        or tx_name == 'transaction' \
        or tx_name == 'transaction_helper.serializer' \
        or method == 'sortkeypicker' \
        or tx_name == 'backend' \
        or tx_name == 'message_type' \
        or tx_name == 'address':
        # Stateless helpers: called without the db handle.
        return tested_method(*inputs)
    else:
        # Stateful methods: prepend server_db, unpacking *inputs* as kwargs
        # when it is a dict, positionally otherwise.
        if isinstance(inputs, dict):
            return tested_method(server_db, **inputs)
        else:
            return tested_method(server_db, *inputs)
import itertools
def flatten_metas(meta_iterables):
    """
    Take a collection of metas, and compose/flatten/project into a single list.
    For example:
    A: pkg1, pkg2a
    B: pkg2b, pkg3
    Flattened([A, B]) => [pkg1, pkg2a, pkg3]
    Flattened([B, A]) => [pkg1, pkg2b, pkg3]
    The resulting list of metas will not be ordered in any particular way.
    """
    # name -> metas accepted for that name; earlier iterables win.
    visited = {}
    for metas in meta_iterables:
        # Metas first seen in this iterable ("depth"), kept separate so that
        # several same-named metas within one iterable are all retained.
        visited_this_depth = {}
        for meta in metas:
            if meta.name() not in visited:
                visited_this_depth.setdefault(meta.name(), []).append(meta)
        for name, metas in visited_this_depth.items():
            visited.setdefault(name, []).extend(metas)
    return itertools.chain.from_iterable(visited.values())
def plural(singular, plural, seq):
    """Selects a singular or plural word based on the length of a sequence.

    Parameters
    ----------
    singular : str
        The string to use when ``len(seq) == 1``.
    plural : str
        The string to use when ``len(seq) != 1``.
    seq : sequence
        The sequence to check the length of.

    Returns
    -------
    maybe_plural : str
        Either ``singular`` or ``plural``.
    """
    return singular if len(seq) == 1 else plural
def get_nations():
    """Return the list of known nations, each as a {'name': ...} dict."""
    names = ('Italy', 'USA', 'France', 'UK', 'Japan', 'Indonesia',
             'Canada', 'Mexico', 'Germany', 'Australia', 'New Zealand',
             'Ireland', 'Mozambique')
    return [{'name': name} for name in names]
def static_feature_array(df_all, total_timesteps, seq_cols, grain1_name, grain2_name):
    """Generate an array which encodes all the static features.

    Args:
        df_all (pd.DataFrame): Time series data of all the grains for multi-granular data
        total_timesteps (int): Total number of training samples for modeling
        seq_cols (list[str]): A list of names of the static feature columns, e.g. store ID
        grain1_name (str): Name of the 1st column indicating the time series granularity
        grain2_name (str): Name of the 2nd column indicating the time series granularity

    Return:
        fea_array (np.array): An array of static features of all the grains, e.g. all the
            combinations of stores and brands in retail sale forecasting
    """
    # Keep only the first total_timesteps rows of every (grain1, grain2)
    # group, then flatten the groups back into one frame.
    fea_df = (
        df_all.groupby([grain1_name, grain2_name]).apply(lambda x: x.iloc[:total_timesteps, :]).reset_index(drop=True)
    )
    fea_array = fea_df[seq_cols].values
    return fea_array
def _get_org():
    """Gets the org for this python session.

    Returns:
        str: the nil (all-zeros) org UUID used as the session default.
    """
    return '00000000-0000-0000-0000-000000000000'
import typing
def get_cluster_sources(clusters: typing.Set[str],
                        source_map: typing.Dict[str,
                                                typing.List[str]], side: str):
    """Returns the cluster source directories for the given clusters.

    Args:
        clusters: cluster names to resolve.
        source_map: maps a cluster name to its source directories.
        side: label used only in the error message (e.g. client/server).

    Raises:
        ValueError: when a cluster has no entry in source_map.

    Returns:
        The set of source directories to build.
    """
    cluster_sources: typing.Set[str] = set()
    for cluster in clusters:
        # `not in` replaces the non-idiomatic `not cluster in` form.
        if cluster not in source_map:
            raise ValueError("Unhandled %s cluster: %s"
                             " (hint: add to src/app/zap_cluster_list.py)" % (side, cluster))
        cluster_sources.update(source_map[cluster])
    return cluster_sources
def parse_vimeo_tag(text):
    """Replaces [vimeo: id] tags.

    *text* is a regex match object with a named group "video"; returns the
    embeddable HTML player markup for that video id.
    """
    video_id = text.group("video")
    iframe = f'<iframe src="https://player.vimeo.com/video/{video_id}" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>'
    return f'<div class="video-container">{iframe}</div>'
def make_descriptions(painting):
    """Given a painting object construct descriptions in en/nl/sv.

    Returns None if there is a problem (missing creator data, or a creator
    prefix such as "Manner of" that should not get a description).

    @param painting: information object for the painting
    @type painting: dict
    @return: descriptions formatted for Wikidata input
    @rtype: dict
    """
    db_creator_name = None
    try:
        # Europeana-style nesting: the creator name is the Swedish label of
        # the first proxy's dcCreator.
        db_creator = painting['object']['proxies'][0]['dcCreator']
        db_creator_name = db_creator['sv'][0].strip()
    except KeyError:
        # Skip any weird creator settings
        return
    descriptions = {}
    if db_creator_name:
        # "Okänd" is Swedish for "unknown".
        if db_creator_name == u'Okänd':
            descriptions['en'] = {
                'language': u'en',
                'value': u'painting by unknown painter'}
            descriptions['nl'] = {
                'language': u'nl',
                'value': u'schilderij van onbekende schilder'}
            descriptions['sv'] = {
                'language': u'sv',
                'value': u'målning av okänd konstnär'}
        elif db_creator_name.startswith(u'Attributed to'):
            # Strip the prefix to recover the attributed artist's name.
            attrib_name = db_creator_name[len(u'Attributed to'):].strip()
            descriptions['en'] = {
                'language': u'en',
                'value': u'painting attributed to %s' % attrib_name}
            descriptions['nl'] = {
                'language': u'nl',
                'value': u'schilderij toegeschreven aan %s' % attrib_name}
            descriptions['sv'] = {
                'language': u'sv',
                'value': u'målning tillskriven %s' % attrib_name}
        elif db_creator_name.startswith(u'Manner of') or \
                db_creator_name.startswith(u'Copy after') or \
                db_creator_name.startswith(u'Workshop of') or \
                db_creator_name.startswith(u'Circle of'):
            # Indirect attributions get no description at all.
            return None
        else:
            descriptions['en'] = {
                'language': u'en',
                'value': u'painting by %s' % db_creator_name}
            descriptions['nl'] = {
                'language': u'nl',
                'value': u'schilderij van %s' % db_creator_name}
            descriptions['sv'] = {
                'language': u'sv',
                'value': u'målning av %s' % db_creator_name}
    else:
        # Empty creator name: fall back to the bare noun.
        descriptions['en'] = {'language': u'en', 'value': u'painting'}
        descriptions['nl'] = {'language': u'nl', 'value': u'schilderij'}
        descriptions['sv'] = {'language': u'sv', 'value': u'målning'}
    return descriptions
def no_fuse(x):
    """No fuse: returns the input incremented by two."""
    shifted = x + 2
    return shifted
import re
def check_for_title(line):
    """
    Check the current line for whether it reveals the title of a new entry.

    :param str line: the line to check
    :return: tuple (the entry title, the entry type) or (None, None)
    """
    title_pattern = re.compile(
        r'^(?P<title>.+) \((?P<type>EPHEMERA OBJECT|SPELL|INCANTATION|OBJECT OF POWER|CONJURATION|INVOCATION|ENCHANTMENT|RITUAL|CHARACTER SECRETS|HOUSE SECRETS|FORTE ABILITY)\)$')
    match = title_pattern.match(line)
    if match is None:
        return None, None
    return match.group('title'), match.group('type')
def rotate_right(arr):
    """
    Rotate a copy of the given 2D list clockwise by 90 degrees and return
    a new list.

    :param arr: A 2D-list of arbitrary dimensions.
    :return: A list that is "arr" rotated 90 degrees clockwise by its center.
    """
    # Reversing the row order and then transposing is exactly a 90-degree
    # clockwise rotation.
    return [list(column) for column in zip(*reversed(arr))]
def get_dl_dir(cd, t, chan):
    """Get the cached download destination directory for file.

    Builds (without creating or checking) the cache directory for a
    GOES 16 ABI L1B download: <cd>/abi/YYYY/MM/DD/HH/C<chan>.

    Args:
        cd (pathlib.Path): Root path for cache directory
        t (pandas.Timestamp): Timestamp (down to the minute) to which the
            file corresponds
        chan (int): ABI channel number

    Returns:
        pathlib.Path object pointing to directory that should contain files
    """
    hour_dir = t.strftime("%Y/%m/%d/%H")
    channel_dir = "C{:>01d}".format(chan)
    return cd / "abi" / hour_dir / channel_dir
import os
def get_item(name):
    """
    Retrieve a test resource by filename.

    :param name: String filename of the requested resource
    :returns: Absolute path to the requested resource inside the local
        "data" directory
    """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(module_dir, "data", name)
def can_receive_blood_from(blood_group):
    """Return allowed donor blood groups for the given recipient group."""
    compatibility = {
        'A+': ['A+', 'A-', 'O+', 'O-'],
        'O+': ['O+', 'O-'],
        'B+': ['B+', 'B-', 'O+', 'O-'],
        'AB+': ['A+', 'O+', 'B+', 'AB+', 'A-', 'O-', 'B-', 'AB-'],
        'A-': ['O-', 'A-'],
        'O-': ['O-'],
        'B-': ['B-', 'O-'],
        'AB-': ['AB-', 'A-', 'B-', 'O-'],
    }
    return compatibility[blood_group]
def replace_ref_nan(row):
    """Replace NaN info values of references without a PubMed id.

    When row["combined"] is a float (i.e. NaN from a missing value), the
    row's reference_id is substituted; otherwise the combined value is
    returned unchanged.
    """
    combined = row["combined"]
    if isinstance(combined, float):
        return row["reference_id"]
    return combined
def list4ToBitList32(lst):
    """Convert a 4-byte list into a 32-bit list (zero-padded on the left)."""
    bits = []
    for byte in lst:
        # Most-significant bit first within each byte.
        bits.extend((byte >> n) & 1 for n in reversed(range(8)))
    return [0] * (32 - len(bits)) + bits
def board():
    """
    Sudoku board for testing.

    Returns:
        list[list[int]]: a 9x9 grid; 0 marks an empty cell.
    """
    board = [
        [7, 8, 0, 4, 0, 0, 1, 2, 0],
        [6, 0, 0, 0, 7, 5, 0, 0, 9],
        [0, 0, 0, 6, 0, 1, 0, 7, 8],
        [0, 0, 7, 0, 4, 0, 2, 6, 0],
        [0, 0, 1, 0, 5, 0, 9, 3, 0],
        [9, 0, 4, 0, 6, 0, 0, 0, 5],
        [0, 7, 0, 3, 0, 0, 0, 1, 2],
        [1, 2, 0, 0, 0, 7, 4, 0, 0],
        [0, 4, 9, 2, 0, 6, 0, 0, 7]
    ]
    return board
def IpBinaryToDecimal(bin_ip):
    """
    :param bin_ip: IPv4 in binary notation, e.g. 00001010000000000000000000000001
    :return: IPv4 in decimal notation, e.g. 167772161
    """
    decimal_value = int(bin_ip, base=2)
    return decimal_value
import numpy
def sph2Ludwig3(azl, EsTh, EsPh):
    """Convert spherical field components to Ludwig-3 u/v components.

    Input: azimuth angle(s) plus arrays of theta and phi components.
    Output: arrays of Ludwig u and Ludwig v components. Ref Ludwig1973a.
    """
    sin_az = numpy.sin(azl)
    cos_az = numpy.cos(azl)
    EsU = EsTh * sin_az + EsPh * cos_az
    EsV = EsTh * cos_az - EsPh * sin_az
    return EsU, EsV
import logging
import inspect
import sys
def check_config_missing(config: dict) -> bool:
    """ Check that the minimal config items exist.

    Requires a "GENERAL" section containing "interface", "channel" and
    "ssid". The first missing item is logged and False is returned; no
    exception escapes.
    """
    # Logger named after this function (inspect.stack()[0][3] is the name
    # of the currently executing frame's function).
    log = logging.getLogger(inspect.stack()[0][3])
    try:
        if "GENERAL" not in config:
            raise KeyError("missing general section from configuration")
        options = config["GENERAL"].keys()
        if "interface" not in options:
            raise KeyError("missing interface from config")
        if "channel" not in options:
            raise KeyError("missing channel from config")
        if "ssid" not in options:
            raise KeyError("missing ssid from config")
    except KeyError:
        # sys.exc_info()[1] is the KeyError instance; log its message.
        log.error("%s", sys.exc_info()[1])
        return False
    return True
def _rescale_0_1(batch):
    """
    Rescale all images from batch, per channel, between 0 and 1.

    Operates in place (sub_/div_) on a tensor-like batch indexed as
    [image, channel, ...]; presumably a torch.Tensor -- TODO confirm.

    NOTE(review): a constant-valued channel yields pix_range == 0 and this
    divides by zero.
    """
    for image_id in range(batch.size(0)):
        for channel_id in range(batch[image_id].size(0)):
            pix_min = batch[image_id][channel_id].min()
            pix_range = batch[image_id][channel_id].max() - pix_min
            # In place: shift the channel to zero, then divide by its range.
            batch[image_id][channel_id].sub_(pix_min).div_(pix_range)
    return batch
import numpy
def linear_percent(cumulative_histogram, percent, minv, binsize):
    """
    Image contrast enhancement.
    Given a cumulative histogram, upper and lower DN values are
    computed and returned.
    :param cumulative_histogram:
        A 1D numpy array. Must be the cumulative sum of a histogram.
    :param percent:
        A value in the range of 0-100.
    :param minv:
        The minimum value to be used in determining the stretch.
    :param binsize:
        The binsize used in constructing the histogram of which the
        cumulative histogram was then derived.
    :return:
        Two scalars, maxdn and mindn, corresponding to the maximum
        and minimum values of the original array to be used in the
        contrast stretch.
    :author:
        Josh Sixsmith; joshua.sixsmith@ga.gov.au; josh.sixsmith@gmail.com
    :history:
        * 2013/10/24: Created
    """
    ch = cumulative_histogram
    if len(ch.shape) != 1:
        raise ValueError('Only 1D arrays are supported.')
    # Calculate upper and lower cut fractions.
    low = (percent / 100.)
    high = (1 - (percent / 100.))
    # Number of elements (the last cumulative count).
    n = ch[-1]
    x1 = numpy.searchsorted(ch, n * low)
    # Walk forward over any plateau (empty bins) to the last equal bin.
    # NOTE(review): ch[x1 + 1] can raise IndexError if the plateau reaches
    # the final bin.
    while ch[x1] == ch[x1 + 1]:
        x1 = x1 + 1
    x2 = numpy.searchsorted(ch, n * high)
    # Walk backward over any plateau at the upper end.
    while ch[x2] == ch[x2 - 1]:
        x2 = x2 - 1
    # Convert bin indices back to data values.
    mindn = x1 * binsize + minv
    maxdn = x2 * binsize + minv
    return maxdn, mindn
import re
def extract_id(i):
    """Extracts the identifier from the URL in redo's ID column.

    Returns {"trial_id": ..., "trial_url": ...}; when *i* is not an HTML
    anchor, the raw string is used as the id and the url is None.
    """
    anchor_re = re.compile(r'<a href="([^"]*)">([^<]*)</a>')
    match = anchor_re.match(i)
    if match is None:
        return {"trial_id": i, "trial_url": None}
    return {"trial_id": match.group(2), "trial_url": match.group(1)}
import re
def __safe_file_name(name: str) -> str:
    """
    Remove characters that are forbidden in file names on common OSes.

    :param name: String to be converted
    :return: Safe string with < > / : " \\ | ? * stripped out
    """
    forbidden = r'<|>|/|:|\"|\\|\||\?|\*'
    return re.sub(forbidden, '', name)
import argparse
def get_options():
    """Returns user-specific options.

    Parses the command line (sys.argv) for the capture/convert tools:
    save type, RealSense options, recording time, input/output paths and
    image/video formats.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = argparse.ArgumentParser(
        description='Set options for all devices.')
    parser.add_argument(
        '--save_type', dest='save_type',
        type=str, default='RGB', choices=['RGB', 'D', 'IR', 'RGBD', 'RGBDIR'],
        help='set save type, RGB or Depth or IR or RGBD or RGBDIR')
    parser.add_argument(
        '--is_rsopt', dest='is_rsopt', action='store_true',
        help='use custom realsense options?')
    parser.add_argument(
        '--rectime', dest='record_time', type=int, default=10,
        help="set recording time [sec]")
    parser.add_argument(
        '--bag_path', dest='bag_path', type=str,
        help='set path to bag file')
    parser.add_argument(
        '--pcd_path', dest='pcd_path', type=str,
        help='set path to pcd file')
    parser.add_argument(
        '--indir', dest='indir', type=str,
        help='set path to input directory')
    parser.add_argument(
        '--outdir', dest='outdir', type=str,
        help='set path to output directory')
    parser.add_argument(
        '--cfg_path', dest='cfg_path',
        type=str, default='data/cfg/rsd435.pkl',
        help='set path to realsense config file')
    parser.add_argument(
        '--save_mode', dest='save_mode',
        type=str, default='snapshot', choices=['snapshot', 'one-scene', 'all'],
        help='set save mode for bag2img')
    parser.add_argument(
        '--save_fps', dest='save_fps',
        type=float, default=1.0,
        help='set save fps for bag2img')
    parser.add_argument(
        '--img_ext', dest='imgext',
        type=str, default='png', choices=['png', 'jpg', 'bmp', 'tif'],
        help='set save image extention for bag2img')
    parser.add_argument(
        '--video_ext', dest='videoext',
        type=str, default='mp4', choices=['mp4', 'wmv', 'mov', 'avi'],
        help='set save image extention for bag2video')
    return parser.parse_args()
from pathlib import Path
def get_modality_from_name(sensor_path: Path):
    """Gets the modality of a sensor from its name.

    Args:
        sensor_path (Path): the Path of the sensor. Ex: CAM_FRONT_RIGHT,
            LIDAR_TOP, etc.

    Returns:
        str: "camera", "radar", "lidar", or "unknown".
    """
    sensor_name = str(sensor_path)
    for token, modality in (("CAM", "camera"),
                            ("RADAR", "radar"),
                            ("LIDAR", "lidar")):
        if token in sensor_name:
            return modality
    return "unknown"
def constant(value=0):
    """A flat initial condition. Rather boring, you probably want a source
    or some interesting boundary conditions for this to be fun!

    Args:
        value (float): The value the returned function takes everywhere.
            Defaults to 0.
    """
    def _flat(x):
        return value
    return _flat
import numpy as np
import math
def tile_slicer(im, shape):
    """Takes image and divides it into multiple tiles with equal specified shape.

    Shape should be a tuple with the desired tile size. Returns a dictionary
    with tile locations as keys of data type tuple. Returns tiled image,
    preserved shapes for padded tiles, and locations for each original tile
    for stitching.

    NOTE(review): the slicing below indexes im[v, h], so only 2D images are
    actually tiled even though `div` is computed for every axis -- confirm
    intended dimensionality with callers. Returns None (after printing)
    when shape is not a tuple or a tile exceeds the image.
    """
    #import xarray as xr
    if type(shape) != tuple:
        print('The specified shape is not in tuple form.')
        return
    im_shape = np.array(np.shape(im)) # the full shape of the image, tuple
    des_shape = np.array(shape)
    # Number of tiles needed along each axis, rounded up.
    div = im_shape / des_shape
    for i in range(len(div)):
        div[i] = math.ceil(div[i])
    #print(div)
    for i in div: # Ends the function if the tile size is greater than the image
        if i < 1:
            print('The specified tile size is larger than the image.')
            return
    tile_num = int(np.prod(div))  # total tile count (currently unused)
    #print(tile_num, 'Tiles')
    shape_iter = list(shape)
    tile_dict = {}
    saved_locs = {}
    for i in range(1,int(div[1])+1): # iterates with different tiles locations until the whole image is captured
        for j in range(1,int(div[0])+1):
            location = tuple([i,j])
            # subset slicing
            # vertical coordinates
            vh = j * shape_iter[0]
            vl = vh - shape_iter[0]
            hh = i * shape_iter[1]
            hl = hh - shape_iter[1]
            # print(f'{vl}:{vh}')
            # print(f'{hl}:{hh}')
            # ensures indexing is within the bounds
            if vh > list(im_shape)[0]:
                vh = list(im_shape)[0]
            if hh > list(im_shape)[1]:
                hh = list(im_shape)[1]
            subset = im[vl:vh, hl:hh]
            saved_locs[location] = [vl, vh, hl, hh]
            tile_dict[location] = subset
    return tile_dict, saved_locs, im_shape
import os
def get_last_modification(file):
    """Get the time of last modification of the passed file.

    Args:
        file (str): absolute path of the file, or None.

    Returns:
        float | None: the file's st_mtime (seconds since the epoch), or
        None when *file* is None or does not exist.
    """
    # Short-circuit keeps os.path.exists from ever seeing None; the
    # flattened guard replaces two redundant nested `return None` branches.
    if file is not None and os.path.exists(file):
        return os.stat(file).st_mtime
    return None
def _csstr_to_list(csstr: str) -> list[str]:
    """
    Convert a comma-separated string to a list of stripped items.
    """
    raw_parts = csstr.split(',')
    return [part.strip() for part in raw_parts]
def update_cur_center(cur_center, matches, center):
    """Update current centers with newly computed ones.

    Args:
        cur_center: list of current centers; mutated in place.
        matches: iterable of (new_idx, cur_idx) pairs linking an entry of
            *center* to an entry of *cur_center*.
        center: list of newly computed centers.

    Matched centers become the element-wise mean of old and new (assumes
    numeric or array-like centers -- TODO confirm); unmatched new centers
    are appended.

    Returns:
        The updated cur_center list.
    """
    matched = []
    for match in matches:
        matched.append(match[0])
        # Average the matched new center into the existing one.
        cur_center[match[1]] = (center[match[0]] + cur_center[match[1]]) / 2
    for i in range(len(center)):
        if i not in matched:
            cur_center.append(center[i])
    return cur_center
import time
def render_vextab(vextab: str, show_source: bool = False) -> str:
    """Create Javascript code for rendering VExflow music notation.

    :param vextab: The Vexflow source code to render into music notation
    :param show_source: ``True`` to include the original Vexflow music notation
        source code in the cell output
    :return: The Javascript code as a single string
    """
    # Escape newlines so the notation survives embedding in a JS string.
    vextab_js = vextab.replace('\n', r'\n')
    # Millisecond-timestamp id; only used by the commented-out canvas path.
    cid='vextab-{}'.format(int(round(time.time() * 1000)))
    output = [
        'require(["vextabjs"], function(VEXTABJS) {',
        #This works if we reload the notebook page
        'element.prepend("<div class=\'vex-tabdiv\'>{}</div>");'.format(vextab_js),
        #This doesn't seem to work?
        #'element.prepend("<div id=\'{}\', class=\'vex-tabdiv\'></div>");'.format(cid),
        #'VexTab = VEXTABJS.VexTab;',
        #'Artist = VEXTABJS.Artist;',
        #'Renderer = VEXTABJS.Vex.Flow.Renderer;',
        # '// Create VexFlow Renderer from canvas element with id #boo and a random component.',
        #'renderer = new Renderer($(\'#{}\')[0], Renderer.Backends.CANVAS);'.format(cid),
        # '// For SVG, you can use Renderer.Backends.SVG',
        #'// Initialize VexTab artist and parser.',
        #'artist = new Artist(10, 10, 600, {scale: 0.8});',
        #'vextab = new VexTab(artist);',
        #'// Parse VexTab music notation passed in as a string.',
        #'vextab.parse("{}")'.format(vextab_js),
        #'vextab.parse("tabstave notation=true\n notes :q 4/4\n");'.replace('\n', r'\n'),
        #'// Render notation onto canvas.',
        #'artist.render(renderer);',
        '});']
    if show_source:
        # NOTE(review): `output` holds only three active items, so inserting
        # at index 3 lands after the closing '});'.
        output.insert(3,
                      'element.prepend("<pre>{}</pre>");'
                      .format(vextab).replace('\n', '<br />'))
    return ''.join(output)
import requests
def get_header_contents() -> str:
    """
    Get js_ReaScriptAPI header from GitHub as raw string.

    Fetches js_ReaScriptAPI_def.h from the juliansader/ReaExtensions
    repository over HTTPS (network access required).

    Returns
    -------
    str
    """
    root_raw = 'https://raw.githubusercontent.com/'
    header = (
        'juliansader/ReaExtensions/master/js_ReaScriptAPI/' +
        'Source%20code/js_ReaScriptAPI_def.h'
    )
    r = requests.get("{}{}".format(root_raw, header))
    # NOTE(review): no HTTP status check, and .decode() assumes UTF-8.
    return r.content.decode()
def serially():
    """
    Force a testcase method to run serially (vs :func:`concurrently`).

    Remember methods that are run serially run first and by default are
    those that
    - take more than one target as arguments
    - are evaluation methods

    Returns:
        A decorator that tags the wrapped function with
        ``execution_mode = 'serial'`` and returns it unchanged.
    """
    def decorate_fn(fn):
        fn.execution_mode = 'serial'
        return fn
    return decorate_fn
def actions(board):
    """
    Returns set of all possible actions (i, j) available on the board.

    An action is the coordinate of an empty cell, where "empty" means the
    cell holds None.
    """
    moves = set()
    for i, row in enumerate(board):
        for j, cell in enumerate(row):
            # `is None` replaces the non-idiomatic `== None` comparison;
            # enumerating each row also tolerates ragged boards.
            if cell is None:
                moves.add((i, j))
    return moves
def _check_composite(n, s, d, a):
""" check compositeness of n with witness a. (n,s,d) should satisfy d*2^s = n-1 and d is odd """
a %= n
if a == 0: return False
x = pow(a, d, n)
if x == 1 or x == n - 1: return False
for y in range(1, s):
x = x * x % n
if x == 1: return True
if x == n - 1: return False
return True | 8ba5b281e848c919f09621d82e9b57d4fb529ecb | 47,047 |
def calc_water_to_residues_map(water_hbonds, solvent_resn):
    """
    Map each water molecule to the protein residues it hydrogen-bonds with.

    Returns
    -------
    frame_idx: int
        Frame index (of the last processed hbond) with respect to the
        smaller trajectory fragment.
    water_to_residues: dict mapping string to set of strings
        Map each water molecule to the residues it forms contacts with
        (ie {"W:TIP3:8719:OH2:29279" : {"A:PHE:159:N:52441", ...}})
    solvent_bridges: list
        Sorted, deduplicated hbond pairs between two water molecules
        [("W:TIP3:757:OH2:2312", "W:TIP3:8719:OH2:29279"), ...]
    """
    last_frame_idx = 0
    water_contacts = {}
    raw_bridges = []
    for last_frame_idx, label_a, label_b, _itype in water_hbonds:
        a_is_water = solvent_resn in label_a
        b_is_water = solvent_resn in label_b
        if a_is_water and b_is_water:
            raw_bridges.append((label_a, label_b))
            continue
        if a_is_water:
            water, residue = label_a, label_b
        elif b_is_water:
            water, residue = label_b, label_a
        else:
            raise ValueError("Solvent residue name can't be resolved")
        water_contacts.setdefault(water, set()).add(residue)
    # Deduplicate bridges: (w1, w2) and (w2, w1) describe the same contact.
    deduped = set()
    for w1, w2 in raw_bridges:
        if (w1, w2) not in deduped and (w2, w1) not in deduped:
            deduped.add((w1, w2))
    return last_frame_idx, water_contacts, sorted(deduped)
import shutil
def executable_path(*tries):
    """Return the filesystem path of the first executable found among *tries*.

    Raises:
        Exception: if none of the candidate names resolves to an executable.
    """
    for candidate in tries:
        found = shutil.which(candidate)
        if found:
            return found
    raise Exception(f"Unable to find path to {tries[0]}")
def mod11(list, max_weight=7):
    """Modulus 11 check digit algorithm (variation of HP's Modulus 11,
    see http://docs.hp.com/en/32209-90024/apds02.html).

    The sequence is processed right-to-left, each digit multiplied by a
    weight that cycles 2, 3, ..., max_weight, 2, ... Per HP's algorithm a
    result of 10 is invalid and 11 equals 0, so both are returned as 0.

    :param list: list of integers to checksum (name kept for API compat)
    :param max_weight: largest weight before the cycle restarts at 2
    """
    total = 0
    factor = 2
    for digit in reversed(list):
        total += digit * factor
        # Restart the weight cycle once max_weight has been used.
        factor = 2 if factor >= max_weight else factor + 1
    remainder = 11 - total % 11
    return 0 if remainder > 9 else remainder
def self_in_function(self=1) -> None:
    """A function containing a `self` argument should not be ignored.

    Fixed doctest: the function returns None, which doctest prints as
    nothing, so the old expected output ``None`` could never match.

    >>> self_in_function() is None
    True
    """
    return None
import csv
def get_tracked_sheets(cogs_dir, include_no_id=True):
    """Read sheet.tsv and return the project's tracked sheets as a dict of
    sheet title -> row (path & ID). They may or may not have corresponding
    cached/local sheets.

    Rows with an empty Title are skipped; rows with an empty ID are skipped
    when include_no_id is False. The "Ignore" column is converted to a bool.
    """
    tracked = {}
    with open(f"{cogs_dir}/sheet.tsv", "r") as tsv:
        for record in csv.DictReader(tsv, delimiter="\t"):
            name = record["Title"]
            if not name:
                continue
            if not include_no_id and record["ID"] == "":
                continue
            del record["Title"]
            # Anything other than the literal string "True" means False.
            record["Ignore"] = record.get("Ignore", "False") == "True"
            tracked[name] = record
    return tracked
def update_over_braking_factor(driver, obf):
    """
    Updates the over braking factor of the driver.

    :param driver: driver profile to update
    :param obf: new over braking factor
    :type driver: DriverProfile
    :return: updated driver profile
    """
    # NOTE(review): delegates to `update_over_breaking_factor` — the
    # "breaking" spelling is the existing DriverProfile method name.
    updated_profile = driver.update_over_breaking_factor(obf)
    return updated_profile
def artifact_fetch_error(reason):
    """Build the artifact-fetch-error message for the given failure reason."""
    return f"Failed to retrieve artifact: {reason}"
import ast
def getconstant(tree):
    """Return the raw value held by an AST node `tree` representing a constant.

    This encapsulates the AST differences between Python 3.8+
    (`ast.Constant`) and the older per-type nodes. There are no
    `setconstant`/`makeconstant` counterparts, because an `ast.Constant`
    can be created directly on Python 3.6+; the parser doesn't emit them
    until 3.8, but 3.6+ compile `ast.Constant` just fine.

    Raises:
        TypeError: if `tree` is not a constant node.
    """
    # Exact type checks on purpose: on 3.8+ the legacy classes are
    # Constant aliases whose isinstance() checks would also match.
    node_type = type(tree)
    if node_type is ast.Constant:  # Python 3.8+
        return tree.value
    if node_type is ast.NameConstant:  # up to Python 3.7 # pragma: no cover
        return tree.value
    if node_type is ast.Num:  # pragma: no cover
        return tree.n
    if node_type in (ast.Str, ast.Bytes):  # pragma: no cover
        return tree.s
    if node_type is ast.Ellipsis:  # the AST node type, not builtins.Ellipsis # pragma: no cover
        return ...
    raise TypeError(f"Not an AST node representing a constant: {type(tree)} with value {repr(tree)}")
def collate_codes_offsts(rec_df, age_start, age_stop, age_in_months=False):
    """Return a single patient's EmbeddingBag lookup codes and offsets for
    the given age span and age units.

    Ages with no records contribute the placeholder code 'xxnone'. Offsets
    mark where each age's codes begin; no offset is appended after the
    final age, so len(offsets) == age_stop - age_start.
    """
    span = age_stop - age_start
    if rec_df.empty:
        lookup_codes = ['xxnone'] * span
        offsets = list(range(span))
    else:
        lookup_codes = []
        offsets = [0]
        for age in range(age_start, age_stop):
            if age_in_months:
                matches = rec_df.code[rec_df.age_months == age].values
            else:
                matches = rec_df.code[rec_df.age == age].values
            if len(matches) > 0:
                lookup_codes.extend(matches)
                step = len(matches)
            else:
                lookup_codes.append('xxnone')
                step = 1
            if age < age_stop - 1:
                offsets.append(offsets[-1] + step)
    assert len(offsets) == span
    return lookup_codes, offsets
def sdp_term_p(f):
    """Return True if `f` is zero (empty) or consists of a single term."""
    return len(f) in (0, 1)
def coding_problem_46(str):
    """Return the longest palindromic contiguous substring of `str`.

    If there are more than one with the maximum length, any one is
    returned. Examples:

    >>> coding_problem_46("aabcdcb")
    'bcdcb'
    >>> coding_problem_46("bananas")
    'anana'
    """
    # Try window sizes from longest to shortest; first palindrome wins.
    for size in range(len(str), 0, -1):
        for start in range(len(str) - size + 1):
            candidate = str[start:start + size]
            if candidate == candidate[::-1]:
                return candidate
def tostring(s):
    """
    Wrap the string form of `s` in single quotes.
    """
    return f"'{s!s}'"
from typing import List
def sort_array(arr: List[int]) -> List[int]:
    """
    Sort `arr` in place with merge sort (no built-in sort) and return it.

    >>> sort_array([4,3,2,1])
    [1, 2, 3, 4]
    """
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    lo, hi = arr[:mid], arr[mid:]
    sort_array(lo)
    sort_array(hi)
    i = j = 0
    for k in range(len(arr)):
        # Take from `lo` while its head is smaller (or `hi` is drained).
        if j >= len(hi) or (i < len(lo) and lo[i] < hi[j]):
            arr[k] = lo[i]
            i += 1
        else:
            arr[k] = hi[j]
            j += 1
    return arr
def wifi_code(ssid, hidden, authentication_type, password=None):
    """Generate a wifi code (QR payload string) for the given parameters.

    :ssid str: SSID
    :hidden bool: Specify if the network is hidden
    :authentication_type str: Authentication type: WPA, WEP, or nopass
    :password Optional[str]: Password; required for WPA/WEP, must be None
        for nopass
    :return: The wifi code for the given parameters
    :rtype: str
    """
    hidden_flag = 'true' if hidden else 'false'
    if authentication_type == 'nopass':
        if password is not None:
            raise TypeError('For nopass, password should be None.')
        return f'WIFI:T:nopass;S:{ssid};H:{hidden_flag};;'
    if authentication_type in ('WPA', 'WEP'):
        if password is None:
            raise TypeError('For WPA and WEP, password should not be None.')
        return f'WIFI:T:{authentication_type};S:{ssid};P:{password};H:{hidden_flag};;'
    raise ValueError(f'Unknown authentication_type: {authentication_type!r}')
def calculate_polygon_area(corners):
    """Calculate 2D polygon area with given corners via the shoelace formula.

    Corners are assumed to be ordered (clockwise or counter-clockwise).

    Args:
        corners (Nx2 array): xy-coordinates of N corner points

    Returns:
        float: polygon area
    """
    n_corners = corners.shape[0]
    forward = 0   # sum of x_i * y_{i+1}
    backward = 0  # sum of x_{i+1} * y_i
    for idx in range(n_corners):
        nxt = (idx + 1) % n_corners
        forward += corners[idx, 0] * corners[nxt, 1]
        backward += corners[nxt, 0] * corners[idx, 1]
    return abs((forward - backward) / 2)
def strip_newlines(string_in):
    """Tiny helper: remove literal ``\\n`` escape sequences (backslash + n)
    from JSON strings in CRD."""
    cleaned = string_in.replace("\\n", "")
    return cleaned
def get_global_stats(df):
    """Compute the metadata statistics of a given DataFrame as a 2D list of
    [label, value] pairs: row/column counts, duplicate-row count, count of
    rows containing at least one NaN, header, and column dtypes.
    """
    return [
        ["Number of rows", df.shape[0]],
        ["Number of columns", df.shape[1]],
        # duplicated() marks every repeat of an earlier row, matching
        # len(df) - len(df.drop_duplicates()).
        ["Number of duplicates", int(df.duplicated().sum())],
        # Bug fix: the old code summed NaN *cells* (isnull().values.ravel()),
        # but the label promises a count of rows.
        ["Number of rows with NaN values", int(df.isnull().any(axis=1).sum())],
        ["Header", list(df.columns)],
        ["Data Types", [dtyp.name for dtyp in df.dtypes.values]],
    ]
def factorial_zeroes(n):
    """Count the trailing zeros of n!.

    Trailing zeros come from factors of 10 = 2 * 5; since multiples of 2
    always outnumber multiples of 5, counting the factors of 5 suffices.
    25 contributes two fives, 125 three, etc. — hence the powers-of-5 loop.

    Raises:
        ValueError: if n is negative (factorial undefined).
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative numbers")
    count = 0
    power = 5
    # Bug fix: use floor division. The old int(n / power) goes through a
    # float and loses precision for n larger than 2**53.
    while n // power > 0:
        count += n // power
        power *= 5
    return count
import os
def use_gold_linker():
    """@return True if the gold linker should be used; False otherwise."""
    gold_path = "/usr/bin/ld.gold"
    return os.path.isfile(gold_path)
def count_up_directory(path):
    """Return a count of '../' occurrences, for determining what the best
    path is to import."""
    parent_marker = '../'
    return path.count(parent_marker)
def compare(M, A):
    """
    Find which person's DNA profile matches the given values.

    :param M: Matrix with Names and DNA sequences; first row is a header,
        each later row is [name, value1, value2, ...]
    :param A: Array with DNA values to match against each row's values
    :return: String representing a person's name, or 'No match'
    """
    for row in M[1:]:
        # all() short-circuits on the first mismatch — the old loop kept
        # comparing every column even after the row had already failed.
        if all(row[j] == A[j - 1] for j in range(1, len(row))):
            return row[0]
    return 'No match'
def rootsearch(f, a, b, dx):
    """x1, x2 = rootsearch(f, a, b, dx).

    Searches the interval (a, b) in increments dx for the bounds (x1, x2)
    of the smallest root of f(x). Returns x1 = x2 = None if no roots were
    detected.
    """
    x1, x2 = a, a + dx
    f1, f2 = f(x1), f(x2)
    # Slide the [x1, x2] window right until f changes sign (or hits zero).
    while f1 * f2 > 0.0:
        if x1 >= b:
            return None, None
        x1, f1 = x2, f2
        x2 += dx
        f2 = f(x2)
    return x1, x2
def eq_55_activation_of_heat_detector_device(
        u: float,
        RTI: float,
        Delta_T_g: float,
        Delta_T_e: float,
        C: float
) -> float:
    """Equation 55 in Section 8.9 of PD 7974-1:2019: the heat detector
    sensing element's temperature rise rate.

    PARAMETERS:
    :param u: m/s, velocity of gases in proximity to the heat sensing element.
    :param RTI: (m s)^0.5, response time index of the heat sensing element.
    :param Delta_T_g: deg.C, change in gas temperature above ambient
        (Delta_T_g = T_g - T_0).
    :param Delta_T_e: deg.C, change in temperature of the heat sensing element.
    :param C: (m/s)^0.5, conduction factor (delay factor).
    :return dTe_dt: rate of change of the element temperature.

    INDICATIVE NOTES
    Tsui and Spearpoint [37] quote C factors in the range 0.33-0.65
    (m/s)^0.5 depending on the response type. RTI values are given in the
    literature, e.g. [38]; rated temperatures (for Delta_T_e) come from
    manufacturer specifications.

    REFERENCES
    [37] TSUI A. and SPEARPOINT M. J. Variability of sprinkler response
    time index and conduction factor using the plunge test. Building
    Services Engineering Research and Technology, 31 (2) pp. 163-176,
    2010. DOI:10.1177/0143624410363064.
    [36] HESKESTAD G. and BILL R. Quantification of thermal responsiveness
    of automatic sprinklers including conduction effects. Fire Safety
    Journal, 14 (1-2) pp. 113-125, 1988.
    """
    convective_gain = u ** 0.5 / RTI
    driving_term = Delta_T_g - Delta_T_e * (1 + C / u ** 0.5)
    return convective_gain * driving_term
import typing
def _convertbits(
data: typing.Iterable[int], frombits: int, tobits: int, pad: bool = True
) -> typing.List[int]:
"""General power-of-2 base conversion."""
# accumulator, will be filled later
acc: int = 0
# number of bits pushed to acc that have not yet been accounted for in ret
bits: int = 0
# return value
ret: typing.List[int] = []
# maximum value in the destination representation, e.g. tobits=5 => maxv=11111
maxv = (1 << tobits) - 1
# maximum value that the accumulator can have during the process.
# e.g. from=5, to=8 => 12 (worst case: 7 in the acc, 5 are pushed => 12)
max_acc = (1 << (frombits + tobits - 1)) - 1
for value in data:
# check whether the value is in bounds
if value < 0 or (value >> frombits):
raise ValueError("value out of range")
# shift acc by the number of bits that will now be added,
# then set the lowest bits using logical OR (|)
# mask with max_acc, so we never have to reset acc
acc = ((acc << frombits) | value) & max_acc
bits += frombits
while bits >= tobits:
# take bunches of tobits bits and append them to ret
bits -= tobits
ret.append((acc >> bits) & maxv)
if pad:
if bits:
# there are still "uncommited" bits in acc, shift them to the upper end (tobits - bits)
# = fill with zeros from the right, and commit them
ret.append((acc << (tobits - bits)) & maxv)
elif bits >= frombits:
# there are still bits in acc, apparently more than one bunch of frombits, but less than one
# bunch of tobits
raise ValueError("illegal zero padding")
elif (acc << (tobits - bits)) & maxv:
# there are still bits in acc (less than one bunch of tobits), but they're not all zero
raise ValueError("non-zero padding")
return ret | b8b8ffcfd14e053f69f1d5b06adf1193e6c0d7a6 | 47,088 |
def accuracy(y_test, y_pred):
    """Return classification accuracy: the fraction of matching label pairs."""
    hits = sum(1 for expected, predicted in zip(y_test, y_pred) if expected == predicted)
    return hits / len(y_test)
def unique(hasDupes):
    """
    Removes duplicate elements from a list.

    @param hasDupes : a list with duplicate items
    @return: sorted list with duplicates removed
    """
    return sorted(set(hasDupes))
def get_kernel_name(optype):
    """
    Map an op type onto its kernel predictor name.

    Many similar kernels share one predictor since their latency
    difference is negligible.
    """
    # Any conv-family op collapses onto one of two fused kernels.
    if "dwconv" in optype:
        return "dwconv-bn-relu"
    if "conv" in optype:
        return "conv-bn-relu"
    # Exact-name aliases; unknown op types pass through unchanged.
    aliases = {
        "fc-relu": "fc",
        "max-pool": "maxpool",
        "avg-pool": "avgpool",
        "global-pool": "global-avgpool",
        "gap": "global-avgpool",
        "channel_shuffle": "channelshuffle",
        "bn-relu": "bnrelu",
        "add-relu": "addrelu",
        "SE": "se",
        "SE-relu": "se",
        "se-relu": "se",
    }
    return aliases.get(optype, optype)
def get_matrix_stride(mat):
    """Get the stride (in elements) between lines of a C matrix."""
    item_bytes = mat.itemsize
    row_stride = mat.strides[0] // item_bytes
    # Sanity check: layout must be exactly (row_stride, 1) in elements.
    assert mat.strides == (row_stride * item_bytes, item_bytes)
    return row_stride
def _map_nested_lists(f, x, *arg, **kw):
"""Recursively map lists, with non-lists at the bottom.
Useful for applying `dd.bdd.copy_bdd` to several lists.
"""
if isinstance(x, list):
return [_map_nested_lists(f, y, *arg, **kw) for y in x]
else:
return f(x, *arg, **kw) | a67e904bc6566c6a9ebf3c13d80a7bea9f9b3af4 | 47,097 |
async def find_channel(guild):
    """
    Find a suitable guild channel for posting the welcome message.

    Returns the first text channel the bot can send messages to, or None.
    You shouldn't need to call this yourself; on_guild_join calls this
    automatically. Thanks FrostLuma for code!
    """
    for channel in guild.text_channels:
        if channel.permissions_for(guild.me).send_messages:
            return channel
def get_dtype(matrix_type: int, precision: str='default'):
    """Resolve the dtype name for a matrix type / precision pair.

    Matrix types 1-2 are real, everything else is complex. With the
    'default' precision, types 1/3 are single precision and all others
    double; 'single' / 'double' force the width while keeping the kind.
    """
    is_real = matrix_type in (1, 2)
    if precision == 'single':
        return 'float32' if is_real else 'complex64'
    if precision == 'double':
        return 'float64' if is_real else 'complex128'
    # 'default' (or anything else): matrix type decides kind and width.
    if matrix_type == 1:
        return 'float32'
    if matrix_type == 2:
        return 'float64'
    if matrix_type == 3:
        return 'complex64'
    return 'complex128'
from pathlib import Path
def pathwalk(dir: Path) -> list[Path]:
    """Obtain all file paths in a directory and all its subdirectories.

    Function is recursive.

    Args:
        `dir` (`Path`): The starting, top-level directory to walk.

    Returns:
        (list): Path objects of all filepaths in `dir`.
    """
    files = []
    folders = []
    for entry in dir.iterdir():
        if entry.is_dir():
            folders.append(entry)
        elif entry.is_file():
            files.append(entry)
    # Recurse into each subdirectory and append its files.
    for folder in folders:
        files.extend(pathwalk(folder))
    return files
import os
def get_dataset_name():
    """Read the dataset name from the environment variable
    `AICROWD_DATASET_NAME`, defaulting to 'cars3d'."""
    return os.environ.get("AICROWD_DATASET_NAME", "cars3d")
def kataSlugToKataClass(kataSlug):
    """Transform a kebab-case kata slug into a PascalCase Java class name.

    A "Kata" prefix is added when the name would start with a digit, since
    Java class names cannot begin with one.
    """
    pascal = kataSlug.title().replace('-', '')
    return f"Kata{pascal}" if pascal[0].isdigit() else pascal
def least_significant_set_bit(n):
    """
    Return the value of the least-significant set bit of integer `n`
    (0 when no bit is set).
    """
    # n & (n - 1) clears the lowest set bit; XOR-ing back isolates it.
    without_lowest = n & (n - 1)
    return without_lowest ^ n
import os
import pickle
def get_T_V_T_dataset(file_path):
    """Load the pickled train / validation / test datasets from `file_path`.

    Each dataset is a list of entries shaped
    [label (int), [(x0, y0), (x1, y1), ..., (xn-1, yn-1)]]
    where the range of x or y is 0 ~ 1.
    """
    # Filename date suffix per split (insertion order = return order).
    snapshot_dates = {
        'train': '2020_04_12',
        'vali': '2020_03_07',
        'test': '2020_03_07',
    }
    loaded = []
    for split, date in snapshot_dates.items():
        pickle_path = os.path.join(
            file_path, 'ml_normed_' + split + '_data_list_pickle_' + date + '.file'
        )
        with open(pickle_path, 'rb') as handle:
            loaded.append(pickle.load(handle))
    return tuple(loaded)
import re
def apply_postprocessing(x_pred, postprocessing):
    """
    Transforms x_pred depending on postprocessing parameters.

    Supported rule types per feature: 'prefix', 'suffix', 'transcoding'
    (value remapping), 'regex' (re.sub of rule['in'] -> rule['out']) and
    'case' ('lower' / 'upper'). Columns with an unknown type are left
    unchanged.

    NOTE(review): the 'transcoding' branch mutates the caller's rule dict
    in place (it adds identity mappings for values not already covered).

    Parameters
    ----------
    x_pred: pandas.Dataframe
        Dataframe that needs to be modified
    postprocessing: dict
        Modifications to apply in x_pred dataframe, keyed by feature name;
        each value is a dict with at least 'type' and 'rule'.

    Returns
    -------
    pandas.Dataframe
        Modified DataFrame (a copy; x_pred's data is not altered).
    """
    new_preds = x_pred.copy()
    for feature_name in postprocessing.keys():
        dict_postprocessing = postprocessing[feature_name]
        data_modif = new_preds[feature_name]
        new_datai = list()
        if dict_postprocessing['type'] == 'prefix':
            # Prepend the rule string to every value (values coerced to str).
            for value in data_modif.values:
                new_datai.append(dict_postprocessing['rule'] + str(value))
            new_preds[feature_name] = new_datai
        elif dict_postprocessing['type'] == 'suffix':
            # Append the rule string to every value (values coerced to str).
            for value in data_modif.values:
                new_datai.append(str(value) + dict_postprocessing['rule'])
            new_preds[feature_name] = new_datai
        elif dict_postprocessing['type'] == 'transcoding':
            # Extend the rule with identity entries for unseen values so
            # .map() doesn't turn them into NaN, then remap the column.
            unique_values = x_pred[feature_name].unique().tolist()
            unique_values = [value for value in unique_values if value not in dict_postprocessing['rule'].keys()]
            for value in unique_values:
                dict_postprocessing['rule'][value] = value
            new_preds[feature_name] = new_preds[feature_name].map(dict_postprocessing['rule'])
        elif dict_postprocessing['type'] == 'regex':
            # Substitute the rule['in'] pattern with rule['out'] per value.
            new_preds[feature_name] = new_preds[feature_name].apply(
                lambda x: re.sub(dict_postprocessing["rule"]['in'], dict_postprocessing["rule"]['out'], x))
        elif dict_postprocessing['type'] == 'case':
            # Normalize letter case; assumes the column holds strings.
            if dict_postprocessing['rule'] == 'lower':
                new_preds[feature_name] = new_preds[feature_name].apply(lambda x: x.lower())
            elif dict_postprocessing['rule'] == 'upper':
                new_preds[feature_name] = new_preds[feature_name].apply(lambda x: x.upper())
    return new_preds
def _is_id(value):
"""
Check if the value is valid InfluxDB ID.
:param value: to check
:return: True if provided parameter is valid InfluxDB ID.
"""
if value and len(value) == 16:
try:
int(value, 16)
return True
except ValueError:
return False
return False | 0606cb9da07f8a6a48f60ef7358746c1426f7d90 | 47,111 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.