content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def check_smile(smi, node_features):
    """Return a warning string if predictions for this SMILES may be unreliable.

    Flags salts (a '.' in the SMILES) and molecules containing "Other"-typed
    atoms (column 11 of the node-feature matrix).

    Args:
        smi (str): A SMILES string.
        node_features: node-feature matrix of the molecule's graph representation.

    Returns:
        str: accumulated warning text ('' when nothing was detected).
    """
    flags = []
    # A '.' in the first whitespace-separated token marks a multi-fragment (salt) SMILES.
    if '.' in smi.split()[0]:
        flags.append('Salt ')
    # Column 11 of the node features is the "Other" atom-type indicator.
    if node_features[:, 11].sum():
        flags.append('Other-typed Atom(s) ')
    warn = ''.join(flags)
    if warn:
        warn += 'Detected Prediction less reliable'
    return warn
import subprocess
def capture(command):
    """Execute *command* and capture its stdout, stderr and exit status.

    Returns:
        tuple: (stdout_bytes, stderr_bytes, returncode)
    """
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    return stdout, stderr, process.returncode
def lr_poly(base_lr, epoch, max_epoch, power):
    """Polynomial ("poly") learning-rate schedule.

    Scales base_lr by (1 - epoch/max_epoch) ** power.
    """
    fraction_remaining = 1.0 - float(epoch) / max_epoch
    return base_lr * fraction_remaining ** power
def obtainLigandIndexes(trajectory, ligand):
    """
    Extract the indexes for the ligand in the trajectory

    :param trajectory: mdtraj trajectory
    :param ligand: name of the ligand
    :return: list of the atom indexes of the heavy atoms of the ligand
    """
    # Heavy atoms are identified by the absence of an "H" in the atom name
    # (heuristic used by the original implementation).
    return [atom.index
            for residue in trajectory.topology.residues
            if residue.name == ligand
            for atom in residue.atoms
            if "H" not in atom.name]
import uuid
def j2_uuid4():
    """Jinja2 custom filter that generates a random UUID4 string.

    Returns:
        str: the canonical 36-character representation of a UUID4.
    """
    # str() so templates receive plain text as the docstring promises,
    # rather than a uuid.UUID object.
    return str(uuid.uuid4())
def quote_normalization(question):
    """ Normalize all usage of quotation marks into a separate \" """
    # `question` is a list of tokens; every quote variant (ASCII, backtick,
    # curly, doubled) is rewritten to a plain " token in a new list.
    new_question, quotation_marks = [], ["'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"]
    for idx, tok in enumerate(question):
        if len(tok) > 2 and tok[0] in quotation_marks and tok[-1] in quotation_marks:
            # Token fully wrapped in quotes: split into ", inner text, ".
            new_question += ["\"", tok[1:-1], "\""]
        elif len(tok) > 2 and tok[0] in quotation_marks:
            # Leading quote only.
            new_question += ["\"", tok[1:]]
        elif len(tok) > 2 and tok[-1] in quotation_marks:
            # Trailing quote only.
            new_question += [tok[:-1], "\"" ]
        elif tok in quotation_marks:
            # Stand-alone quote token (including doubled forms like `` or '').
            new_question.append("\"")
        elif len(tok) == 2 and tok[0] in quotation_marks:
            # special case: the length of entity value is 1
            # Only split when the closing quote is the next token.
            if idx + 1 < len(question) and question[idx + 1] in quotation_marks:
                new_question += ["\"", tok[1]]
            else:
                new_question.append(tok)
        else:
            new_question.append(tok)
    return new_question
def atof(text):
    """Convert *text* to float when possible, otherwise return it unchanged.

    :param text: text to be converted
    :return: float number, or the unchanged input on conversion failure
    """
    try:
        return float(text)
    except ValueError:
        return text
def swir_exp_time(int_delay: int, int_hold: int) -> float:
    """Exact (SWIR) pixel exposure time.

    Parameters
    ----------
    int_delay : int
        parameter int_delay from the instrument_settings
    int_hold : int
        parameter int_hold from the instrument_settings

    Returns
    -------
    float
        exact (SWIR) pixel exposure time
    """
    # 1.25 microsecond clock period times the effective counter span.
    clock_period = 1.25e-6
    counter_span = 65540 - int_delay + int_hold
    return clock_period * counter_span
import requests
from bs4 import BeautifulSoup
def download_country_data():
    """Fetch worldometer's population-by-country page and return it parsed as a BeautifulSoup object."""
    url = 'https://www.worldometers.info/world-population/population-by-country/'
    response = requests.get(url)
    # Fail loudly on HTTP errors instead of parsing an error page.
    response.raise_for_status()
    return BeautifulSoup(response.text, 'html.parser')
def get_dot_product(first_vector, second_vector):
    """ (dict, dict) -> int/float
    Return the dot product of two sparse vectors represented as dicts.

    >>> v1 = {'a' : 3, 'b': 2}
    >>> v2 = {'a': 2, 'c': 1, 'b': 2}
    >>> get_dot_product(v1, v2)
    10
    >>> get_dot_product({'a' : 5, 'b': 3, 'c' : 3}, {'d': 1})
    0
    >>> get_dot_product({}, {})
    0
    >>> get_dot_product({'a' : 2, 'b' : -2, 'c' : -4}, {'a' : 1, 'b' : 3, 'c' : 2})
    -12
    """
    # Only keys present in both vectors contribute to the sum.
    return sum(weight * second_vector[key]
               for key, weight in first_vector.items()
               if key in second_vector)
def hanoiRecursivo(n):
    """Solve the Towers of Hanoi for 3 towers and ``n`` disks recursively.

    Prints a header, the numbered sequence of moves (which tower's top disk
    moves where) required to solve the puzzle, and a footer.  Output text is
    in Portuguese, as in the original program.

    Inputs: an integer >= 1
    Returns: None

    References: based on an algorithm from MITx 6.00.1x; the counter-in-a-
    recursive-function idiom follows
    https://stackoverflow.com/questions/15052704
    """
    passos = 2 ** n - 1
    # Fixed typo in the user-facing message: "mecessários" -> "necessários".
    print('Para ' + str(n) + ' discos, são necessários ' + str(passos) + ' passos:')

    def recursivo(n, origem, destino, auxiliar, contador):
        # Move the top n disks from `origem` to `destino`, numbering each move.
        if n == 1:
            contador += 1
            print(str(contador) + ' -- da ' + origem + ' para a ' + destino)
        else:
            contador = recursivo(n - 1, origem, auxiliar, destino, contador)
            contador = recursivo(1, origem, destino, auxiliar, contador)
            contador = recursivo(n - 1, auxiliar, destino, origem, contador)
        return contador

    recursivo(n, 'TORRE-1', 'TORRE-3', 'TORRE-2', contador=0)
    print('Fim! Os ' + str(passos) + ' passos foram executados. Parabéns!')
from typing import Iterable
from typing import Any
def format_like_tuple(
    values: Iterable[Any],
) -> str:
    """
    Render *values* as a human-readable, tuple-like string of reprs.

    :param values: values to be formatted
    """
    formatted = [repr(value) for value in values]
    return ", ".join(formatted)
def parse_pct(value):
    """Parse a percentage value into its fractional (0-1) equivalent."""
    percentage = float(value)
    return percentage / 100
def files_data_generation(candidates, namesmeasures):
    """Select all possible files that are related with the given measures."""
    # For each measure name, collect every candidate whose name contains it
    # (substring match), preserving candidate order.
    return [[candidate for candidate in candidates if name in candidate]
            for name in namesmeasures]
def insert_init(shots, predist=100, dxmax=200, xcol='pos_x', initpoints=1):
    """
    Insert initialization frames into scan list, to mitigate hysteresis and beam tilt streaking when scanning along x.
    Works by inserting a single frame each time the x coordinate decreases (beam moves left) or increases by more
    than dxmax (beam moves too quickly). The initialization frame is taken to the left of the position after the jump by
    predist pixels. Its crystal_id and frame columns are set to -1.
    :param shots: initial scan list. Note: if you want to have multiple frames, you should always first run set_frames
    :param predist: distance of the initialization shot from the actual image along x
    :param dxmax: maximum allowed jump size (in pixels) to the right.
    :param xcol: name of x position column
    :param initpoints: number of initialization points added
    :return: scan list with inserted additional points
    """
    import pandas as pd  # local import: pandas only needed here

    def add_init(sh1):
        # Copy the first `initpoints` rows of the segment and mark them as
        # initialization shots (crystal_id/frame == -1).
        initline = sh1.iloc[:initpoints, :].copy()
        initline['crystal_id'] = -1
        initline['frame'] = -1
        if predist is not None:
            initline[xcol] = initline[xcol] - predist
        else:
            initline[xcol] = 0
        # BUG FIX: DataFrame.append was removed in pandas 2.0; use concat.
        return pd.concat([initline, sh1])

    dx = shots[xcol].diff()
    # Start a new segment whenever the beam jumps left (dx < 0) or jumps
    # right by more than dxmax; the cumulative sum labels the segments.
    grps = shots.groupby(by=((dx < 0) | (dx > dxmax)).astype(int).cumsum())
    return grps.apply(add_init).reset_index(drop=True)
def clean_code(qasm):
    """
    Clean given QASM by rewriting each line to a more readable format.
    For example, "{ X q[0] | H q[3,4,5] | X q[1] | X q[2] }"
    Would become "{ X q[0:2] | H q[3:5]"
    Args:
        qasm: Valid QASM code to clean
    Returns: The same QASM code with improved formatting.
    """
    qasm_lines = qasm.split("\n")
    for idx in range(len(qasm_lines)):
        line = qasm_lines[idx]
        gate_dict = {}
        new_line = ""
        # Only rewrite parallel/multi-target single-qubit lines; lines with
        # multi-qubit gates (Toffoli/CR/CNOT) are left untouched.
        if "Toffoli" not in line and "CR" not in line and "CNOT" not in line and ("{" in line or "," in line):
            line = line.strip("{}")
            elements = line.split("|")
            # Group all target qubit indices under their gate name.
            for e in elements:
                gate, target = e.split()
                indices = list(map(int, target.strip("q[]").split(",")))
                if gate not in gate_dict:
                    gate_dict[gate] = indices
                else:
                    gate_dict[gate] += indices
            # Re-wrap in braces only when several distinct gates run in parallel.
            parallel = len(gate_dict.keys()) > 1
            if parallel:
                new_line += "{ "
            for gate, indices in gate_dict.items():
                # A contiguous run of indices collapses to a q[lo:hi] range.
                if max(indices) - min(indices) + 1 == len(indices) > 1:
                    new_line += "{} q[{}:{}]".format(gate, min(indices), max(indices))
                else:
                    new_line += "{} q[{}]".format(gate, ",".join(map(str, indices)))
                new_line += " | "
            new_line = new_line[:-3]  # drop the trailing " | "
            if parallel:
                new_line += " }"
        else:
            new_line = line
        qasm_lines[idx] = new_line
    return "\n".join(qasm_lines)
import random
def mutateChroms(c, prob):
    """Randomly bit-flip genes in a list of chromosomes.

    INPUTS:
    c: list of chromosomes, which are tuples of 1's and 0's. Ex:
        (1, 0, 0, 1, 0)
    prob: decimal in set [0.0, 1.0] to determine chance of
        mutating (bit-flipping) an individual gene

    Returns a new list of (possibly mutated) chromosome tuples.
    """
    out = []
    for chrom in c:
        newC = list(chrom)
        # BUG FIX: iterate over the genes of THIS chromosome; the original
        # used range(len(c)) (the number of chromosomes), which mutated only
        # the first len(c) genes or raised IndexError.
        for ind in range(len(newC)):
            if random.random() < prob:
                # Flip the bit!
                newC[ind] = 1 - newC[ind]
        # Convert back to tuple, put in output list
        out.append(tuple(newC))
    return out
def is_str(var):
    """Return True if *var* is a string instance, else False."""
    return isinstance(var, str)
import re
def DarkenHexColorCode(color, shade=1):
    """Given a color in hex format (for HTML), darken it X shades.

    Args:
        color: six-hex-digit color string, e.g. 'ffcc00' (no leading '#').
        shade: number of 32-point steps to subtract from each RGB channel.

    Returns:
        The darkened six-hex-digit color string; channels clamp at '00'.
    """
    # Raw string for the regex; pairs of hex digits are the RGB channels.
    rgb_values = [int(x, 16) for x in re.findall(r'\w\w', color)]
    new_color = []
    for value in rgb_values:
        value -= shade * 32
        # Clamp at zero, then zero-pad to two hex digits (the Google Chart
        # API requires 0-padded channel values).  BUG FIX: the original
        # branch `value <= 16` prepended '0' to the two-digit '10' when the
        # value was exactly 16, yielding a three-digit channel '010'.
        new_color.append(format(max(value, 0), '02x'))
    return ''.join(new_color)
import queue
def breadth_first_search(root, target):
    """Return True iff *target* occurs as a node value reachable from *root*."""
    if not root:
        return False
    # Classic BFS: FIFO queue of pending nodes plus a visited set so nodes
    # already processed are skipped (useful for graphs / defensive vs cycles).
    pending = queue.Queue()
    pending.put(root)
    visited = {root}
    while not pending.empty():
        node = pending.get()
        if node._value == target:
            return True
        # For a graph, swap this pair for the node's neighbor list.
        for child in (node.left, node.right):
            if child is node:
                raise Exception("Loop detected")
            if child and child not in visited:
                pending.put(child)
        visited.add(node)
    return False
import sys
def check_rinex_field(name, value, size):
    """
    Validate the length of a RINEX header field.

    Returns "UNKN" for a missing value; warns on stderr and returns a
    cropped copy when the value exceeds *size* characters; otherwise
    returns the value unchanged.
    """
    if value is None:
        return "UNKN"
    if len(value) <= size:
        return value
    sys.stderr.write("The '{0}' field [ {1} ] is too long [ {2} ]. Cropping to {3} characters\n".format(name, value, len(value), size))
    return value[0:size]
def UsersInvolvedInAmendments(amendments):
    """Return a set of all user IDs mentioned in the given Amendments."""
    involved = set()
    for amendment in amendments:
        # Both added and removed users count as "involved".
        involved |= set(amendment.added_user_ids)
        involved |= set(amendment.removed_user_ids)
    return involved
def apifirst(viewfunc):
    """
    Used to mark a method on a ViewSet to prioritize api formats.
    So if format is not one of the accepted formats use the parent method to process request
    """
    def decorator(self, request, *args, **kwargs):
        # Accepted formats are handled by the decorated method itself.
        if request.accepted_renderer.format in self.accepted_formats:
            return viewfunc(self, request, *args, **kwargs)
        # Otherwise delegate to the parent class implementation by name.
        # NOTE(review): super(self.__class__, self) resolves against the
        # runtime class, which recurses infinitely if this method is
        # inherited by a subclass -- confirm no such subclassing occurs.
        parent_viewfunc = getattr(super(self.__class__, self), viewfunc.__name__)
        return parent_viewfunc(request, *args, **kwargs)
    return decorator
from typing import Dict
from typing import List
from typing import Any
def convert_request_to_dict(arguments: Dict[str, List[bytes]]) -> Dict[str, Any]:
    """
    Converts the arguments obtained from a request to a dict.

    Keeps only the first value of each argument name, decoded from bytes
    to str.

    Args:
        arguments: mapping of argument name to list of raw byte values
    Returns:
        A decoded dict with keys/values extracted from the request's arguments
    """
    return {name: values[0].decode() for name, values in arguments.items()}
def create_list_of(class_, objects):
    """Build model objects of class `class_` from the metadata entries in `objects`."""
    result = []
    for metadata in objects:
        result.append(class_.from_metadata(metadata))
    return result
import inspect
def _valwrap_(validator):
    """
    Wrap a validation function to allow extraneous named arguments in specfile,
    this is usefull when getting specification with
    validator._parse_with_caching(configspec[section][option])
    """
    # Already wrapped: the wrapper tags its __name__ (see bottom), so a
    # second pass returns the function untouched.
    if validator.__name__.startswith("validator_wrapper-") or validator.__name__.startswith(
        "list_validator_wrapper-"
    ):
        return validator
    # Wrapper
    def validator_wrapper(value, *args, **kwargs):
        # Remove extraneous arguments the validator can't handle
        argspec = inspect.getfullargspec(validator)
        kwargs = kwargs.copy()
        for k in list(kwargs.keys()):
            if k not in argspec.args:
                kwargs.pop(k)
        return validator(value, *args, **kwargs)
    # Tag the wrapper's name so double-wrapping is detected above.
    validator_wrapper.__name__ += "-" + validator.__name__
    return validator_wrapper
def try_or_giveup(func, n_tries, print_exc, *args, **kwargs):
    """Call func(*args, **kwargs), retrying up to n_tries times.

    Returns (result, failed_attempts) on success; re-raises the last
    exception after n_tries failures.  Failures are printed when
    print_exc is truthy.
    """
    failures = 0
    while True:
        try:
            return func(*args, **kwargs), failures
        except Exception as exc:
            failures += 1
            if failures == n_tries:
                raise
            if print_exc:
                print(exc)
def has_min_length(entities, length):
    """Element-wise check that each term meets the minimum required length."""
    # Keeps the original `> length - 1` comparison (== `>= length` for ints).
    return entities.apply(lambda term: len(term) > length - 1)
def edge_metadata(G):
    """Maximum number of times that a student rated another student."""
    # Largest 'count' attribute across all edges of the graph.
    return max(data["count"] for _, _, data in G.edges(data=True))
def get_reg_targets(idx, d, tags, coeff, regress, mode="k"):
    """Separate regression targets and regressor variables.

    Returns the descriptor column ``idx`` (X) and its tag, the tags of the
    regressor columns, the regressor matrix d1, the non-regressor matrix d2
    and the coeff mask filtered to the regressor columns.  In mode "t" the
    coefficient columns are additionally dropped from d1 and the tags.
    """
    target_tag = tags[idx]
    X = d[:, idx].reshape(-1)
    d1, d2 = d[:, regress], d[:, ~regress]
    reg_tags = tags[regress]
    reg_coeff = coeff[regress]
    if mode == "t":
        d1 = d1[:, ~reg_coeff]
        reg_tags = reg_tags[~reg_coeff]
    return X, target_tag, reg_tags, d1, d2, reg_coeff
import pwd
def username_from_uid(uid: int) -> str:
    """Convert a UID to a username; '.' becomes '__'; unknown UIDs get a placeholder."""
    try:
        name = pwd.getpwuid(uid).pw_name
    except KeyError:
        return f"__unknown_uid_{uid}__"
    return name.replace(".", "__")
import subprocess
def get_output(command):
    """
    Return STDOUT of the command.

    :param command: a command to be executed to get an entity value.
    :raises ValueError: if the command produced no output.
    """
    output = subprocess.getoutput(command)
    if output:
        return output
    raise ValueError('Cannot get anything executing {}'.format(command))
def return_array_at_node(grid, value):
    """Return an array stored at node or of shape `(n_nodes,)`.

    Exists to take advantage of the use_field_name_array_or_value decorator,
    which permits providing the surface as a field name or array; by the
    time this body runs, *value* has already been resolved, so it is simply
    passed through.

    Parameters
    ----------
    grid : ModelGrid
    value : field name, ndarray of shape `(n_nodes, )`, or single value.

    Returns
    -------
    array : ndarray of shape `(n_nodes, )`
    """
    return value
import pkg_resources
def maybe_get_pkg_resources():
    """
    Return the pkg_resources module or None, depending on whether we think that whatever
    the system has available is going to do an ok job at parsing requirements patterns.
    We don't want to strictly require any particular version of setuptools to not force
    the user to mess with their system.
    """
    try:
        try:
            # Probe a modern requirement pattern (extras + environment
            # marker); old setuptools raises ValueError/AttributeError here.
            pkg_resources.Requirement.parse('foo[bar]==2.0;python_version>"2.7"')
        except (ValueError, AttributeError):
            return None
        else:
            return pkg_resources
    except ImportError:
        # NOTE(review): pkg_resources is imported at module level, so an
        # ImportError would normally surface at import time, not here --
        # confirm the module-level import is guarded elsewhere.
        return None
from pathlib import Path
def get_root():
    """Returns the absolute path of the main directory

    Returns:
    --------
    root (pathlib.Path)
    """
    cwd = str(Path.cwd())
    # Truncate the cwd at the first occurrence of 'rubrix' and re-append it,
    # yielding the repository root regardless of the current subdirectory.
    prefix = cwd[:cwd.find('rubrix')]
    return Path(prefix) / 'rubrix'
from typing import List
import re
import click
def check_descriptors(ctx, param, value: List[str]) -> List[str]:
    """
    Check format of each MeSH descriptor passed as command line argument. Raise
    exception if any has incorrect format.

    :param ctx: required for click callback feature
    :param param: required for click callback feature
    :param value: tuple of MeSH descriptors as written on command line
    :return: value if all descriptors are correctly formatted
    """
    # Descriptors are 'D' followed by exactly 6 or 9 digits.
    descriptor_re = re.compile(r'D(\d{6}|\d{9})')
    for descriptor in value:
        if descriptor_re.fullmatch(descriptor) is None:
            raise click.BadParameter(
                'Descriptor %s incorrect, should be D followed by 6 or 9 digits'
                % descriptor, param_hint='MeSH DESCRIPTORS')
    return value
def short(s):
    """Return the first part of the string s up to (not including) the first digit."""
    end = 0
    while end < len(s) and not s[end].isdigit():
        end += 1
    return s[:end]
def _AddFirmwareIdTag(image, id_name='RO_FRID'):
"""Returns firmware ID in '#NAME' format if available."""
if not image.has_section(id_name):
return ''
id_stripped = image.get_section(id_name).decode('utf-8').strip(chr(0))
if id_stripped:
return '#%s' % id_stripped
return '' | e070a445b9250208366a7eb43612f8d8650534d1 | 45,902 |
def compile_drop_materialized_view(element, compiler, **kw):
    """
    Formats and returns the drop statement for materialized views.
    """
    clauses = ["DROP MATERIALIZED VIEW"]
    if element.if_exists:
        clauses.append("IF EXISTS")
    clauses.append(element.name)
    statement = " ".join(clauses)
    if element.cascade:
        statement += " CASCADE"
    return statement
import re
def get_header_line(headr, proprty):
    """
    :param headr: the header of the RINEX-file
    :param proprty: string-like property to search for (e.g. 'delta-utc')
    :return: the first line of ``headr`` containing ``proprty`` (case-insensitive),
             or None when absent
    """
    pattern = re.compile(proprty, re.IGNORECASE)
    matches = (line for line in headr if pattern.search(line))
    return next(matches, None)
def get_file_type(file_name):
    """
    Return the extension of *file_name* as a string (e.g. "pdf" for a pdf file).

    Error strings (not exceptions) are returned when a path separator is
    present or when the name has no extension, mirroring the original
    contract.
    """
    if "/" in file_name or "\\" in file_name:  # checking if given path instead of file
        return "ERROR IN FUNCTION 3: FILE PATH GIVEN INSTEAD OF FILE NAME"
    dot = file_name.rfind(".")
    if dot != -1:
        return file_name[dot + 1:]
    return "ERROR: NO ENDING GIVEN TO FILE IN FUNCTION 3"
def get_image_names():
    """
    Returns
    -------
    List of all available test image names
    """
    names = []
    # Reference and strained variants exist for 1- and 2-layer images.
    for layers in ("1 layer", "2 layers"):
        names.append("reference - " + layers)
        names.append("10 percent strain - " + layers)
    return names
import sys
import os
def check_exist(path, mode, flag_exit=True):
    """
    Function to check for file existence

    Args:
        path(str): target file path
        mode(int): 1(existence) / 2(existence for file) / 3(existence for dir)
        flag_exit(bool): Exit if not present (Default: True)

    Returns:
        (bool) or exit(None)
    """
    # Table-driven rewrite of the original duplicated if/elif ladder:
    # predicate to run and the noun used in the error message per mode.
    checks = {
        1: (os.path.exists, "path"),
        2: (os.path.isfile, "file"),
        3: (os.path.isdir, "directory"),
    }
    if path is None:
        sys.stderr.write("ERROR: Path is not specified (None value).\n")
        if flag_exit:
            sys.exit(1)
        return False
    if mode not in checks:
        # Unknown mode always exits, as in the original.
        sys.stderr.write("ERROR: Subroutine error: Not specified mode\n")
        sys.exit(1)
    predicate, noun = checks[mode]
    if not predicate(path):
        sys.stderr.write("ERROR: No such %s (%s)\n" % (noun, path))
        if flag_exit:
            sys.exit(1)
        return False
    return True
import os
def replace_ext(fn, suffix):
    """
    Convenience function to replace the extension of a given file name with a given suffix.

    e.g.1: ('input.html', '.pdf') -> 'input.pdf'
    e.g.2: ('input.html', '_tmp.pdf') -> 'input_tmp.pdf'
    """
    assert '.' in suffix
    stem, _old_ext = os.path.splitext(fn)
    return stem + suffix
def to_json_default_handler(obj):
    """json.dumps ``default=`` hook: serialize date/datetime-like objects.

    Calls ``obj.isoformat()`` when available; returns None (implicitly)
    otherwise, matching the original behavior.
    """
    isoformat = getattr(obj, 'isoformat', None)
    if isoformat is not None:
        return isoformat()
def get_player_moves(game: tuple, player: int):
    """
    Returns the sequence of moves of the given player
    """
    # Moves alternate between players; consecutive pairs starting at the
    # player's offset belong to that player.
    return [(game[i], game[i + 1]) for i in range(player, len(game) - 1, 2)]
from typing import List
def get_html(children: List) -> str:
    """ Return the concatenated HTML string of a list of PyQuery elements """
    parts = []
    for child in children:
        # Plain strings pass through; elements are rendered via outer_html().
        parts.append(child if isinstance(child, str) else child.outer_html())
    return "".join(parts)
def part_one():
    """Spinlock puzzle (part one): insert 2017 values with step 386, return the value after the last insert."""
    step, rounds = 386, 2017
    buffer = [0]
    pos = 0
    for value in range(1, rounds + 1):
        pos = (pos + step) % len(buffer) + 1
        buffer.insert(pos, value)
    return buffer[pos + 1]
def index():
    """Render the app's main page."""
    return "<h>This is O2+HR Trend Detection Service Engine</h>"
def decimal_to_int(decimal):
    """
    Converts a json decimal to an integer.
    Mostly used to convert chat_id
    """
    # Round-trip through str mirrors the original implementation (and thus
    # rejects fractional values the same way int(str(...)) does).
    return int(str(decimal))
def hexStr2Byte(str):
    """Convert a hexadecimal string to bytes."""
    return bytes.fromhex(str)
def floyd_warshall(graph):
    """
    All Pairs Shortest Path problem.
    The problem is to find shortest distances between every
    pair of vertices in a given edge weighted directed Graph
    """
    n = len(graph[0])
    # Start from a copy of the adjacency matrix, then relax every pair
    # (i, j) through each intermediate vertex k.
    dist = [row[:] for row in graph]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                through_k = dist[i][k] + dist[k][j]
                if through_k < dist[i][j]:
                    dist[i][j] = through_k
    return dist
import os
def get_folder(folder):
    """Ensure *folder* exists on disk (creating it if needed) and return its path."""
    if not os.path.exists(folder):
        os.makedirs(folder)
    return folder
import math
def discretize_state(state):
    """Map BipedalWalker's continuous 14-value observation to a discrete tuple.

    Each physical value is linearly rescaled from its bound interval onto
    0..18 (19 bins, as in the original) and truncated to an int.

    Args:
        state: current continuous state space (14 floats)

    Returns:
        tuple of 14 ints (discretized state)
    """
    # (low, high) bounds per observation component,
    # per https://github.com/openai/gym/wiki/BipedalWalker-v2
    obs_state_bounds = [
        (0, math.pi),   # hull_angle
        (-2, 2),        # hull_angular_velocity
        (-1, 1),        # vel_x
        (-1, 1),        # vel_y
        (0, math.pi),   # hip_joint_1_angle
        (-2, 2),        # hip_joint_1_speed
        (0, math.pi),   # knee_joint_1_angle
        (-2, 2),        # knee_joint_1_speed
        (0, 1),         # leg_1_ground_contact_flag
        (0, math.pi),   # hip_joint_2_angle
        (-2, 2),        # hip_joint_2_speed
        (0, math.pi),   # knee_joint_2_angle
        (-2, 2),        # knee_joint_2_speed
        (0, 1),         # leg_2_ground_contact_flag
    ]
    return tuple(
        int((value - low) / (high - low) * 19)  # 19 is an arbitrary bin count
        for value, (low, high) in zip(state, obs_state_bounds)
    )
import unittest
from os.path import dirname
def test_suite():
    """Returns unittest.TestSuite for this package"""
    basedir = dirname(dirname(__file__))
    top = dirname(basedir)
    # Discover all tests beneath the package directory.
    return unittest.defaultTestLoader.discover(basedir, top_level_dir=top)
def dict_max(dic):
    """
    Return (max_value, key) for the dictionary, or 0 when it is empty.

    Note: the dict is first inverted to value -> key (as in the original),
    so when several keys share the maximum value only one key is kept.
    """
    # Invert to value -> key, mirroring the original implementation.
    aux = {value: key for key, value in dic.items()}
    # BUG FIX: the original compared `aux.keys() == []`, which is never True
    # in Python 3, so an empty dict crashed in max(); test emptiness directly.
    if not aux:
        return 0
    max_value = max(aux)
    return max_value, aux[max_value]
def get_denominator_mp2(eps, nocc, nvirt):
    """
    Build the MP2 energy denominator tensor 1 / (e_i + e_j - e_a - e_b)
    from the orbital energy levels.

    Shape of the result: (nocc, nocc, nvirt, nvirt).
    """
    occ = eps[:nocc]
    virt = eps[nocc:]
    # Reshape each factor so broadcasting produces the 4-index tensor.
    e_i = occ.reshape(nocc, 1, 1, 1)
    e_j = occ.reshape(nocc, 1, 1)
    e_a = virt.reshape(nvirt, 1)
    e_b = virt
    return 1. / (e_i + e_j - e_a - e_b)
def isotherm_parameters():
    """Create a dictionary with all parameters for an isotherm."""
    parameters = dict(
        material='TEST',
        temperature=100.0,
        adsorbate='TA',
        date='26/06/92',
        t_act=100.0,
        lab='TL',
        comment='test comment',
        user='TU',
        project='TP',
        machine='TM',
        is_real=True,
        iso_type='calorimetry',
    )
    # Units/bases
    parameters.update(
        material_basis='mass',
        material_unit='g',
        loading_basis='molar',
        loading_unit='mmol',
        pressure_mode='absolute',
        pressure_unit='bar',
    )
    # Other properties
    parameters.update(
        DOI='dx.doi/10.0000',
        origin='test',
        test_parameter='parameter',
    )
    return parameters
import argparse
import os
def is_valid_file(parser: argparse.ArgumentParser, arg: str) -> str:
    """Check that *arg* names an existing file with an accepted image extension.

    :param parser: Argparse parser object (used to report errors).
    :param arg: Argument containing path to file.
    :return: Path to file if it exists and looks like an image.
    """
    _stem, extention = os.path.splitext(arg)
    if not os.path.exists(arg):
        parser.error(f"The file {arg} does not exist!")
    elif extention.lower() not in ['.png', '.jpg', '.jpeg']:
        parser.error(f"Wrong file extension '{extention}'! Try '.png', '.jpg', or '.jpeg' file!")
    else:
        return arg
def format_proxies(proxy_options):
    """
    Build a requests-style ``proxies`` dict from a proxy_options object.

    Returns an empty dict when *proxy_options* is falsy.
    """
    if not proxy_options:
        return {}
    # Basic address/port endpoint.
    endpoint = "{address}:{port}".format(
        address=proxy_options.proxy_address, port=proxy_options.proxy_port
    )
    # Embed credentials when both username and password are present.
    if proxy_options.proxy_username and proxy_options.proxy_password:
        credentials = "{username}:{password}".format(
            username=proxy_options.proxy_username, password=proxy_options.proxy_password
        )
        endpoint = credentials + "@" + endpoint
    # Scheme prefix per proxy type (HTTP proxies also proxy HTTPS traffic).
    schemes = {"HTTP": "http://", "SOCKS4": "socks4://", "SOCKS5": "socks5://"}
    prefix = schemes.get(proxy_options.proxy_type)
    if prefix is None:
        # This should be unreachable due to validation on the ProxyOptions object
        raise ValueError("Invalid proxy type: {}".format(proxy_options.proxy_type))
    return {"http": prefix + endpoint, "https": prefix + endpoint}
def distance(x1, y1, x2, y2):
    """
    Euclidean distance between the points (x1, y1) and (x2, y2).

    BUG FIX: the original computed ((x1-y1)^2 + (x2-y2)^2)^0.5, pairing
    x-coordinates with y-coordinates; this pairs x-with-x and y-with-y.
    """
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def solve_parts(parts, key, substitution):
    """
    Solve the place holders from the parts.

    :param parts: the parts
    :type parts: list[str]
    :param key: the name of the place holder
    :type key: str
    :param substitution: the value of the place holder
    :type substitution: str
    :return: the solved parts
    :rtype: list[str]
    """
    solved = []
    pending = ""
    for part in parts:
        if not part.startswith("{"):
            # Plain text accumulates into the current run.
            pending += part
        elif key == part[1:-1]:
            # Matching placeholder: splice the substitution into the run.
            pending += substitution
        else:
            # Foreign placeholder: flush the run, keep the placeholder as-is.
            if pending != "":
                solved.append(str(pending))
                pending = ""
            solved.append(part)
    if pending != "":
        solved.append(str(pending))
    return solved
import typing
import os
import pathlib
def promote_pathlike_directory(directory: typing.Union[os.PathLike, str, None], *,
                               default: typing.Union[os.PathLike, str, None] = None,
                               ) -> pathlib.Path:
    """Return path-like object ``directory`` promoted into a path object (default to ``os.curdir``).

    See also:
        https://docs.python.org/3/glossary.html#term-path-like-object
    """
    if directory is not None:
        chosen = directory
    else:
        # Fall back to the supplied default, then to the current directory.
        chosen = default or os.curdir
    return pathlib.Path(chosen)
def file(path):
    """
    Read data from file.

    Parameters
    ----------
    path : str
        Filepath to the file that is being read.

    Returns
    -------
    data : bytes
        File content.

    Notes
    -----
    This function is included to simplify the file upload within the interactive
    module where graphical dashboards are hosted on a webserver.
    """
    # Binary mode: callers receive raw bytes regardless of content type.
    with open(path, 'rb') as handle:
        return handle.read()
def prep_prony_ANSYS(df_prony, prony, E_0=None):
    """
    Prepare ANSYS Prony series parameters for further processing.

    The ANSYS curve fitting routine only stores the Prony pairs
    ('tau_i', 'alpha_i') in the material card file, so the instantaneous
    modulus and frequency range from the Python fit are attached here to
    allow master-curve evaluation.

    Parameters
    ----------
    df_prony : pandas.DataFrame
        Contains the ANSYS Prony series parameter.
    prony : dict
        Contains the Python Prony series parameter.
    E_0 : float, default = None
        Instantaneous storage modulus (tensile E_0 or shear G_0).  When
        omitted, the modulus identified by the Python curve fit is reused.

    Returns
    -------
    dict
        ANSYS Prony series parameters in the same format as the Python
        implementation provides (see the `prony` parameter above).
    """
    if E_0 is None:
        E_0 = prony['E_0']  # same estimate as the Python curve fitting
    return {
        'E_0': E_0,
        'df_terms': df_prony,
        'f_min': prony['f_min'],  # same frequency range as the Python fit
        'f_max': prony['f_max'],
        'label': 'ANSYS',
        'modul': prony['modul'],
    }
def grouper(n, L):
    """Group the flat sequence ``L`` into a list of consecutive ``n``-tuples.

    Trailing elements that do not fill a complete group are dropped.
    """
    # zip over n references to a single iterator yields consecutive
    # n-sized chunks; zip stops at the first exhausted (shared) iterator.
    shared = iter(L)
    return list(zip(*[shared] * n))
import time
def GetTimeZone():
    """Return the local time zone name as a string.

    Chooses between the standard and DST names based on whether
    daylight saving time is currently in effect.
    """
    # tm_isdst is field index 8 of the struct_time tuple.
    is_dst = time.localtime().tm_isdst
    return time.tzname[is_dst]
import argparse
def get_args():
    """Parse this script's command-line arguments with argparse.

    Returns the populated ``argparse.Namespace`` with attributes
    ``out``, ``ignore_header``, ``chain`` and ``counts``.
    """
    ap = argparse.ArgumentParser(description=
        "Liftover a TSV whose first two columns are CHROM and POS, respectively."
    )
    ap.add_argument(
        '-o', '--out', default=None,
        help='output file name (in tsv format) (default: stdout)'
    )
    ap.add_argument(
        '-i', '--ignore-header', action='store_true',
        help="pass the first line (ie the header) through verbatim without liftover"
    )
    ap.add_argument(
        'chain', help="A chain file for performing the lift over"
    )
    ap.add_argument(
        'counts', default=None, nargs='?', help=
        "A TSV whose first two columns are the contig and position (default: stdin)"
    )
    return ap.parse_args()
import json
def first_time_setup(CONFIG):
    """
    Walk the user through first-time setup.

    Prompts for the Discord bot token, stores it in ``CONFIG["token"]``,
    writes the updated config to ``secrets/secrets.json`` and returns the
    token string.
    """
    token = input("Please input your discord bot token here: ")
    CONFIG["token"] = token
    serialized = json.dumps(CONFIG, sort_keys=True,
                            indent=4, separators=(',', ': '))
    with open("secrets/secrets.json", 'w') as config_file:
        config_file.write(serialized)
    return token
def _cargo_home_path(repository_ctx):
    """Define a path within the repository to use in place of `CARGO_HOME`

    Args:
        repository_ctx (repository_ctx): The rules context object

    Returns:
        path: The path to a directory to use as `CARGO_HOME`
    """
    # Keep cargo state inside the repository rather than the user's home.
    cargo_home = repository_ctx.path(".cargo_home")
    return cargo_home
def service_tracker(volume, service, service_tracker):
    """
    Register that ``service`` runs on ``volume`` in the tracker mapping.

    Args:
        volume: Volume name to record.
        service: Search service name used as the mapping key.
        service_tracker (dict): Maps service name -> list of volume names.

    Returns:
        dict: The updated tracker (also mutated in place).
    """
    volumes = service_tracker.setdefault(service, [])
    # Membership check keeps entries unique while preserving insertion
    # order; the previous list(set(...)) round-trip made the stored order
    # nondeterministic.
    if volume not in volumes:
        volumes.append(volume)
    return service_tracker
def get_subset(options, dataset, class_a, class_b):
    """
    Return the subset of ``dataset`` that only contains the classes
    ``class_a`` and ``class_b``, relabelled to -1 and +1 respectively.

    Args:
        options: Unused; kept for interface compatibility.
        dataset: Tuple ``(x_train, y_train)`` of numpy arrays.
        class_a: Original label mapped to -1.
        class_b: Original label mapped to +1.

    Returns:
        Tuple ``(subset_train_x, subset_train_y)``.
    """
    import numpy as np

    x_train, y_train = dataset
    train_idx = (y_train == class_a) ^ (y_train == class_b)
    subset_train_x = x_train[train_idx]
    # Relabel in one vectorized step.  The previous sequential in-place
    # relabelling (class_a -> -1, then class_b -> 1) corrupted the result
    # whenever class_b == -1, because the second assignment also matched
    # the freshly written -1 values.
    subset_train_y = np.where(y_train[train_idx] == class_a, -1, 1)
    return subset_train_x, subset_train_y
import requests
from bs4 import BeautifulSoup
def page_soup(url):
    """
    Fetch ``url`` and return the page parsed as BeautifulSoup.

    Args:
        url:``str``
            url of page

    Returns:
        BeautifulSoup of page:``soup``

    Raises:
        Exception: when the HTTP response status is not 200.
    """
    response = requests.get(url)
    # Guard clause: anything other than a 200 response is an error.
    if response.status_code != 200:
        raise Exception("Please check website. Error code {}.".format(response.status_code))
    return BeautifulSoup(response.content, "lxml")
import os
def initialize_experiment_folder(base_path: str, source_domain: str, target_domain: str, val_fold: int) -> str:
    """
    Create the folder structure for one experiment, defined by the source
    domain, target domain and validation fold; a ``parameters/`` subfolder
    is created inside it as well.

    Parameters
    ----------
    base_path: Path, in which the new path is created.
    source_domain: Name of the source domain.
    target_domain: Name of the target domain.
    val_fold: Validation fold for the current experiment.

    Returns
    -------
    str: The experiment folder path (with trailing slash).
    """
    # compute the path, which should be created
    folder_name = base_path + source_domain + '/' + target_domain + '/' + str(val_fold) + '/'
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists()/os.makedirs() pattern and is idempotent.
    os.makedirs(folder_name, exist_ok=True)
    os.makedirs(folder_name + 'parameters/', exist_ok=True)
    return folder_name
def _minpoly_rootof(ex, x):
    """Returns the minimal polynomial of a ``RootOf`` object."""
    poly = ex.poly
    if poly.domain.is_IntegerRing:
        return poly(x)
    # Over non-integer domains, use the square-free norm instead.
    return poly.sqf_norm()[-1](x)
def ncs2_to_format(df):
    """Format ncs2 results as to annette format.

    Keeps only rows whose ``ExecStatus`` is ``EXECUTED``, normalizes the
    layer names ('/' and '-' become '_') and renames the columns to
    ``name`` / ``type`` / ``measured``.

    Args:
        df (pandas.DataFrame): Raw NCS2 result table with at least the
            columns ``LayerName``, ``LayerType``, ``RunTime(ms)`` and
            ``ExecStatus``.

    Returns:
        pandas.DataFrame: Columns ``name``, ``type``, ``measured``.
    """
    # filter only executed Layers; .copy() so the normalisation below does
    # not write into a view of the caller's frame (SettingWithCopyWarning,
    # and potential silent mutation of the input).
    executed = df[df['ExecStatus'] == 'EXECUTED'].copy()
    executed['LayerName'] = executed['LayerName'].str.replace('/', '_', regex=True)
    executed['LayerName'] = executed['LayerName'].str.replace('-', '_', regex=True)
    ncs2 = executed[['LayerName', 'LayerType', 'RunTime(ms)']]
    ncs2 = ncs2.rename(
        columns={'RunTime(ms)': 'measured', 'LayerName': 'name', 'LayerType': 'type'})
    return ncs2
def largest_odd_times(L):
    """ Assumes L is a non-empty list of ints
    Returns the largest element of L that occurs an odd number
    of times in L. If no such element exists, returns None """
    from collections import Counter

    # Counting every element once is O(n), versus the O(n^2) of the old
    # sort-then-list.count() loop.
    odd_elements = [value for value, times in Counter(L).items() if times % 2]
    return max(odd_elements) if odd_elements else None
import os
def get_terminal_width():
    """Return the current width (column count) of the terminal window."""
    # os.get_terminal_size() yields (columns, lines); only columns is needed.
    columns, _lines = os.get_terminal_size()
    return columns
import os
def load_bed(fname):
    """Return regions from BED file. If not a file, try to unload region(s) from a string.

    The file may contain tab-separated ``ref start end`` columns or
    ``ref:start-end`` tokens (one per line, '#' lines skipped); a string
    argument is split on whitespace into ``ref:start-end`` tokens.
    Thousands separators (',') are stripped in both cases.

    Returns:
        list of ``(ref, start, end)`` tuples with integer coordinates.
    """
    def _parse_colon(token):
        # "chr1:100-200" -> ("chr1", 100, 200)
        ref, se = token.split(':')
        start, end = se.split('-')
        return ref, int(start), int(end)

    regions = []
    if os.path.isfile(fname) or os.path.islink(fname):
        # 'with' closes the handle; the old bare open() leaked it.
        with open(fname) as handle:
            for line in handle:
                # rstrip('\n') instead of line[:-1]: slicing silently chopped
                # the final character of a file without a trailing newline.
                line = line.rstrip('\n')
                if line.startswith('#') or not line:
                    continue
                fields = line.replace(',', '').split('\t')
                if len(fields) >= 3:
                    ref, start, end = fields[:3]
                    regions.append((ref, int(start), int(end)))
                else:
                    regions.append(_parse_colon(fields[0]))
    else:
        for token in fname.split():
            if token:
                regions.append(_parse_colon(token.replace(',', '')))
    return regions
import os
def getDataFilepaths(data_dir):
    """
    Get filepaths for issues, commits, and pull requests in the given data_dir.
    GIVEN:
      data_dir (str) -- absolute path to data
    RETURN:
      list[str] -- [issues CSV, commits CSV, pull requests CSV], in that order.
    """
    relative_paths = (
        "issues/issues.csv",
        "commits/commits.csv",
        "pull_requests/pull_requests.csv",
    )
    return [os.path.join(data_dir, rel) for rel in relative_paths]
def calc_batch_MC_observables(d, l1_p4, l2_p4, k_p4):
    """ MC ONLY batch observables.

    Currently a stub: no observables are computed, so an empty mapping is
    returned regardless of the inputs.

    Args:
        d: event record (currently unused).
        l1_p4: first lepton four-momentum (currently unused).
        l2_p4: second lepton four-momentum (currently unused).
        k_p4: kaon four-momentum (currently unused).

    Returns:
        dict: empty mapping of observable name -> value.
    """
    return {}
def FindOrphans(directory):
    """menu.FindOrphans
    INPUTS:
    - directory -- list containing the MemberHub directory families
    OUTPUTS:
    - list of the families in the directory whose IsOrphan() check is True.
    ASSUMPTIONS:
    None.
    """
    # Iteration never mutates the input, so the defensive copy the previous
    # implementation made was unnecessary; a comprehension expresses the
    # filter directly.
    return [family for family in directory if family.IsOrphan()]
import json
def read_json(filename):
    """Read a json file and return its parsed contents as a dict."""
    with open(filename) as handle:
        contents = json.load(handle)
    return contents
def AbortAcquisition():
    """Abort the current acquisition if one is active.

    Stub implementation; performs no action and always returns None.
    """
    return None
import subprocess
def ifconfig_get_up(iface):
    """
    Check the iface status.
    :param iface: Network interface, e.g. eth0
    :type iface: string
    :return: If the iface is up or not
    :rtype: bool
    """
    # Non-zero exit means ifconfig could not query the interface at all.
    status = subprocess.call(['ifconfig', iface])
    if status != 0:
        return False
    output = subprocess.check_output(['ifconfig', iface])
    return b'UP' in output
from typing import Callable
def get_string_renderer(v: str) -> Callable[[str, bytes], str]:
    """
    Build a renderer that always returns the fixed string ``v``.

    :param v: string returned regardless of the renderer's arguments
    :return: renderer callable accepting (oid, value)
    """
    def constant_renderer(oid, value):
        # oid and value are accepted for interface compatibility only.
        return v
    return constant_renderer
def matchingByName (theDictionary, firstLetter):
    """Identifies students a name starting with firstLetter.
    Assumes student names are capitalized.
    :param dict[str, str] theDictionary:
        key: locker number / value: student name or "open"
    :param str firstLetter:
        The target letter by which to identify students. Currently does
        not check for only a single letter.
    :return:
        The students with name starting with firstLetter
    :rtype: list[str]
    """
    firstLetter = firstLetter.upper()
    # str.startswith replaces the old name[0] == firstLetter check, which
    # raised IndexError on an empty-string value; only the values are
    # needed, so iterate them directly.
    return [name for name in theDictionary.values()
            if name.startswith(firstLetter)]
from typing import List
from typing import Dict
from typing import Any
def build_params_dict(tags: List[str], attribute_type: List[str]) -> Dict[str, Any]:
    """
    Creates a dictionary in the format required by MISP to be used as a query.
    Args:
        tags: List of tags to filter by
        attribute_type: List of types to filter by
    Returns: Dictionary used as a search query for MISP
    """
    query: Dict[str, Any] = {'returnFormat': 'json'}
    # Falsy (None/empty) filter lists collapse to an empty OR clause.
    query['type'] = {'OR': attribute_type if attribute_type else []}
    query['tags'] = {'OR': tags if tags else []}
    return query
def compute_area(boxes):
    """
    Computes the area of all the boxes.

    Args:
        boxes: array of shape (N, 4); columns 0/1 hold the min corner and
            columns 2/3 the max corner of each box.

    Returns:
        np.array: a vector with areas of each box.
    """
    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    return widths * heights
def symbols_db(db):
    """ return dictionary like version of database"""
    # NOTE: deliberately an exact-type check (not isinstance), matching the
    # original behaviour for dict subclasses.
    if type(db) is dict:
        return db
    return {entry.name: entry for entry in db}
def matrix_scalar(m, sc):
    """ Matrix multiplication by a scalar value.
    Runs in :math:`O(n^{2})`, visiting every entry once.
    :param m: input matrix
    :type m: list, tuple
    :param sc: scalar value
    :type sc: int, float
    :return: resultant matrix
    :rtype: list
    """
    # Nested comprehension replaces the preallocate-then-assign loops and
    # also handles an empty matrix (the old len(m[0]) raised IndexError).
    return [[float(value * sc) for value in row] for row in m]
from pathlib import Path
def get_probes_path(params, split):
    """Return path to the probe-sentence file for the given split.

    Resolves ``propositions_<split>.json`` relative to
    ``params.path_to_probes``.
    """
    base_dir = Path(params.path_to_probes)
    return base_dir / f'propositions_{split}.json'
import torch
def load_checkpoint(model, optimizer, PATH):
    """Utility function for restoring checkpointed model + optimizer dictionaries.

    Loads the checkpoint on CPU, copies the saved state dicts into
    ``model.network.module`` and ``optimizer``, then reports where training
    should resume.

    Returns:
        tuple: (epoch to resume from, last global step recorded).
    """
    state = torch.load(PATH, map_location=torch.device("cpu"))
    model.network.module.load_state_dict(state['model_state_dict'])
    optimizer.load_state_dict(state['optimizer_state_dict'])
    saved_epoch = state['epoch']
    last_global_step = state['last_global_step']
    # Drop the (potentially large) checkpoint dict before returning.
    del state
    return (saved_epoch + 1, last_global_step)
def enum( *sequential, **named ):
    """
    Sequential Enumeration
    @param *sequential: names auto-numbered 0..N-1 in order
    @param **named: explicit name=value members (override positional names)
    @return Enumerator type object
    """
    members = {name: index for index, name in enumerate(sequential)}
    members.update(named)
    return type('Enum', (), members)
import os
def _find_all_g3_files(target):
    """Build list of .g3 files.
    Parameters
    ----------
    target : str
        File or directory to scan.
    Returns
    -------
    list
        List of full paths to .g3 files.
    """
    file_list = []
    if os.path.isfile(target):
        # A single file argument is returned as-is (no extension check),
        # matching the original behaviour.
        file_list.append(target)
    elif os.path.isdir(target):
        for root, _, filenames in os.walk(target):
            for name in filenames:
                # endswith('.g3') instead of name[-2:] == 'g3', which also
                # matched unrelated filenames merely ending in 'g3'.
                if name.endswith(".g3"):
                    file_list.append(os.path.join(root, name))
    return file_list
import json
def get_valid_person(json_path):
    """Get the valid person objects from a label json file.

    An object is kept when all of the following hold:
    *: its label is 'pedestrian';
    *: its bbox height exceeds 60 pixels;
    *: the occluded fraction, computed from bbox vs bboxVis areas, is
       below 0.35.

    Args:
        json_path: the path of the json file that will be decoded.
    Returns:
        valid_person: dict with keys
            'valid'      -- True when at least one pedestrian was kept;
            'bbox_list'  -- list of [y_min, x_min, y_max, x_max] boxes;
            'label_list' -- numeric label per kept box (1 == pedestrian).
    """
    person_label = {'pedestrian': 1, 'rider': 2}
    bbox_list = []
    label_list = []
    with open(json_path, 'r') as label_json:
        annot = json.load(label_json)
    for obj in annot['objects']:
        # Guard clauses: skip anything that is not a tall-enough,
        # sufficiently visible pedestrian.
        if obj['label'] != 'pedestrian':
            continue
        x, y, w, h = obj['bbox']
        if h <= 60:
            continue
        x_vis, y_vis, w_vis, h_vis = obj['bboxVis']
        occluded_ratio = 1 - (w_vis * h_vis) / (w * h)
        if occluded_ratio >= 0.35:
            continue
        # Convert 1-based (x, y, w, h) into 0-based [y_min, x_min, y_max, x_max].
        bbox_list.append([y - 1, x - 1, y - 1 + h, x - 1 + w])
        label_list.append(person_label[obj['label']])
    valid_person = {
        'valid': len(bbox_list) > 0,
        'bbox_list': bbox_list,
        'label_list': label_list,
    }
    return valid_person
def parse_line_number(line_str):
    """
    In a line of the format "<line_num>: <text>" or "<line_num> <text>"
    this grabs line_num.
    >>> parse_line_number('5: def parse_line_number(line_str):')
    '5'
    >>> parse_line_number('43     line = view.line(s)')
    '43'
    >>> parse_line_number('136: line_num = parse_line_number(line_str)')
    '136'
    """
    # split() already strips whitespace from tokens; only the colon remains.
    first_token = line_str.split()[0]
    return first_token.replace(':', '')
def get_stft_shape(sample_rate, snippet_length, time_steps):
    """ Gets the shape for the Short-Time Fourier Transform matrix corresponding to a sample_rate, snippet_length, and time_steps
    :param sample_rate: the sample rate of the time signal in Hz
    :param snippet_length: the length of the signal in seconds
    :param time_steps: the number of time steps that the signal should be split into
    :returns: the shape of the matrix with dim time steps times number of frequencies
    :rtype: tuple(int, int)
    """
    total_samples = snippet_length * sample_rate
    n_fft = (time_steps - 1) * 2
    # Hop/window length is a quarter of the FFT size.
    hop = int(n_fft / 4)
    return (time_steps, int(total_samples / hop + 1))
def manner_equiv(y, y_hat):
    """
    Checks if two cvs have equivalent manner.

    Returns True when both (mod 19) fall in the same manner group,
    False when both are covered but in different groups, and None when
    either value is outside the covered set.
    """
    stops = {0, 1, 3}          # b, d, g
    fricatives = {2, 11}       # f, s
    approximants = {10, 6, 17} # r, l, y
    covered = stops | fricatives | approximants

    a = y % 19
    b = y_hat % 19
    if a not in covered or b not in covered:
        return None
    for group in (stops, fricatives, approximants):
        if a in group and b in group:
            return True
    return False
def format_restrict_dist_string(sym1, sym2, name):
    """ Build the Fortran snippet that compares the interatomic distance.

    When r<sym1><sym2> is below rAB, <name>_corr is forced to 100.0 and
    the routine returns early.
    """
    return (
        f" if (r{sym1}{sym2}.lt.rAB) then\n"
        f" {name}_corr = 100.0\n"
        f" return\n"
        " endif"
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.