| content (string, 35-416k chars) | sha1 (string, 40 chars) | id (int64, 0-710k) |
|---|---|---|
def get_namespace(node):
"""Return the namespace of the given node
If the node has not namespace (only root), ":" is returned.
Else the namespace is returned
:param node: the node to query
:type node: str
    :returns: The node's namespace.
:rtype: str
:raises: None
"""
ns = node.rpartition('|')[2].rpartition(':')[0]
return ns or ':'
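# Illustrative usage sketch (node paths assumed to be Maya-style, "|"-separated):
assert get_namespace("grp|ns1:ns2:node") == "ns1:ns2"
assert get_namespace("node") == ":"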
|
a608866c712f3d190cece4f3fd5ebd5cfda040b3
| 23,188
|
import os
import base64
import requests
def get_crawlera_session():
"""
    Create a Crawlera (Zyte Smart Proxy) session and return the response body.
"""
proxy_auth = f"{os.environ.get('ZYTE_KEY')}:"
proxy_auth = base64.b64encode(proxy_auth.encode("utf-8"))
auth_header = "Basic " + proxy_auth.decode("utf-8")
res = requests.post(
"http://proxy.zyte.com:8011/sessions",
headers={"Authorization": auth_header},
data={},
)
return res.text
|
e364e7e4772989ae237c833c3eea23f272019698
| 23,191
|
import math
def get_damage_output(discrete_TP_distribution, tp, wse, koloss=0, tpm=0, hit_probability=1):
"""
discrete_TP_distribution: List of (k,p) tuples where k is a natural number and p the probability to get said k. Zero values are not listed.
tp: weapon damage + extra damage by strength (KK)
    tpm: bonus tp given by maneuvers
"""
return hit_probability*sum([math.floor((TP+tp+tpm)*probability)/(wse*2**koloss) for TP,probability in discrete_TP_distribution])
|
7dda0d53eab72c3a5f86253a52187176ab1a6071
| 23,193
|
def find_uncontested_claim(claims, coordinates_claimed):
"""Find the single claim that is uncontested"""
    # first, collect every coordinate claimed by exactly one claim
    unshared_coordinates = set(
        key for key, val in coordinates_claimed.items() if len(val) == 1)
    # then we can use simple set theory:
    # if a claim's coordinates are a direct subset of the set of unshared
    # coordinates, its territory is solely its own
    isolated = [
        claim_number
        for claim_number, coordinate_set in claims.items()
        if coordinate_set.issubset(unshared_coordinates)
    ]
assert len(isolated) == 1
return isolated[0]
|
38b7fbdb795e1bdc6983893ab913db97675a2b80
| 23,194
|
def diag_line(ax, **linekwds):
""" Draw a diagonal line x=y"""
linekwds.setdefault('ls', ':') #Dotted ...
linekwds.setdefault('color', 'k') #black
linekwds.setdefault('linewidth', 1)
# Bisecting line
ax.plot(ax.get_xlim(),
ax.get_ylim(),
**linekwds)
return ax
|
0f333533f788d96a1dd845de7b1af8ee632584fb
| 23,195
|
def split_sentence(sentence: str) -> list:
"""
Takes a sentence in IPA and parses it to individual words by breaking according to
the " # " IPA string pattern.
:sentence: sentence to parse
:returns: list of individual words
:rtype: list
"""
words = sentence.split(" # ")
return words
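# Illustrative usage sketch with an assumed IPA transcription;
# words are separated by the " # " delimiter:
assert split_sentence("həˈloʊ # wɝld") == ["həˈloʊ", "wɝld"]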
|
71df9a977c16fab57c373b5772e62d3760f51d15
| 23,196
|
import torch
def sample_for_each_category(vqg, image, args):
"""Sample a question per category.
Args:
vqg: Question generation model.
        image: The image for which to generate questions.
args: Instance of ArgumentParser.
Returns:
A list of questions per category.
"""
if args.no_category_space:
return None
categories = torch.LongTensor(range(args.num_categories))
if torch.cuda.is_available():
categories = categories.cuda()
images = image.unsqueeze(0).expand((
args.num_categories, image.size(0), image.size(1), image.size(2)))
outputs = vqg.predict_from_category(images, categories)
return outputs
|
de39985a7eddfe00b6345d9f5c5515803e432fb3
| 23,197
|
def merge_arg(cmd_arg, ini_arg):
"""
    Merge a command line argument and a configuration file argument.
    cmd_arg has higher priority than ini_arg.
    Only non-empty arguments are considered.
"""
if isinstance(cmd_arg, (list, tuple)):
cmd = cmd_arg[0]
return cmd if cmd else ini_arg
else:
return cmd_arg if cmd_arg else ini_arg
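# Illustrative usage sketch: the command-line value wins when non-empty.
assert merge_arg("cli-value", "ini-value") == "cli-value"
assert merge_arg("", "ini-value") == "ini-value"
assert merge_arg(("",), "ini-value") == "ini-value"  # list/tuple uses its first element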
|
f896463fad7a00096e9a1e3b730ad96614a6e966
| 23,199
|
def maxSize(image, maxSize, method=3):  # 3 == PIL.Image.BICUBIC
""" im = maxSize(im, (maxSizeX, maxSizeY), method = Image.BICUBIC)
Resizes a PIL image to a maximum size specified while maintaining
the aspect ratio of the image. Similar to Image.thumbnail(), but allows
usage of different resizing methods and does NOT modify the image in place."""
imAspect = float(image.size[0])/float(image.size[1])
outAspect = float(maxSize[0])/float(maxSize[1])
if imAspect >= outAspect:
#set to maxWidth x maxWidth/imAspect
return image.resize((maxSize[0], int((float(maxSize[0])/imAspect) + 0.5)), method)
else:
#set to maxHeight*imAspect x maxHeight
return image.resize((int((float(maxSize[1])*imAspect) + 0.5), maxSize[1]), method)
|
5902bf94747e5438942eadbb0c733b981410bf7b
| 23,202
|
import os
def schema_dir():
"""Get the path to the directory where schemas are stored
Returns
-------
str
Path to the schema directory
"""
schema_dir = os.path.normpath(os.path.join(
os.path.dirname(__file__), "../schema"
))
return schema_dir
|
841fbdde9577b854e3af28f22a66230a03d89a73
| 23,203
|
def print_same_line(s: str, fill_num_chars: int, done: bool = False) -> int:
"""A helper to repeatedly print to the same line.
Args:
s: The text to be printed.
fill_num_chars: This should be `0` on the first call to
print_same_line() for a series of prints to the same output line. Then
it should be the return value of the previous call to
print_same_line() repeatedly until `done` is True, at which time the
cursor will be moved to the next output line.
done: On the final call to print_same_line() for a given line of output,
pass `True` to have the cursor move to the next line.
Returns:
The number of characters that were written, which should be passed as
`fill_num_chars` on the next call. At the end of printing over the same
line, finish by calling with `done` set to true, which will move to the
next line."""
s += " " * (fill_num_chars - len(s))
if not done:
print("\r" + s, end="")
else:
print("\r" + s)
return len(s)
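# Illustrative usage sketch: repeatedly overwrite one status line, then finish.
import time
fill = 0
for i in range(3):
    fill = print_same_line("step {}/3".format(i + 1), fill)
    time.sleep(0.1)
print_same_line("done", fill, done=True)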
|
e9926b538473dbfba11fab0d121b58dd845e5d4c
| 23,204
|
def load_config(config, parser):
"""Load configuration settings from the configuration file"""
for name, value in parser.items('config'):
config[name] = value
return config
|
06ac67d0045417cc1c27b6678fd63cd581454f07
| 23,206
|
import os
def isFileExists(fileName):
"""Return true if the file is exist"""
if (not fileName.endswith(".jar")):
return False
if (os.access(fileName, os.R_OK)):
return True
else:
return False;
|
36a0caf9856b8a30229aec70696070d91bde173d
| 23,208
|
def strip_control(in_bytes: bytes) -> str:
"""Strip control characters from byte string"""
return in_bytes.strip(b"\x00").decode()
|
4cefa25b58e8ba68a20aca3c10ecc8aebb2697a0
| 23,209
|
def coerce(string):
"""Changing strict JSON string to a format that satisfies eslint"""
# Double quotes to single quotes
coerced = string.replace("'", r"\'").replace("\"", "'")
# Spaces between brace and content
coerced = coerced.replace('{', '{ ').replace('}', ' }')
return coerced
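# Illustrative usage sketch with an assumed strict-JSON input:
assert coerce('{"key": "value"}') == "{ 'key': 'value' }"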
|
fcfea9b2aa1852f189d783e912b828f946311a95
| 23,210
|
def _get_line_element_value(element, line, current_exception):
"""
Given an element to search for in a line of text,
return the element's value if found.
Otherwise, raise the appropriate exception.
"""
if element in line:
return line[line.rfind('>')+1:]
else:
raise current_exception('Couldn\'t find ' + element + ' in '
+ line)
|
6843472532300410f8d35011600e2ca1c1522f73
| 23,212
|
import subprocess
import time
import sys
def execute_command(command, shell=True, max_seconds=None, robust=False, output=None):
""" Uses `subprocess` to execute `command`. Has a few added bells and whistles.
if command returns non-zero exit status:
if robust:
returns as normal
else:
raise CalledProcessError
    Parameters
    ----------
    command: str
        The command to execute.
    shell: bool
        Passed through to `subprocess.Popen`.
    max_seconds: float
        Accepted for API compatibility but currently unused.
    robust: bool
        If True, return normally on a non-zero exit status instead of raising.
    output: str
        One of None, "quiet" or "loud"; controls how much gets printed.
    Returns
    -------
    returncode, stdout, stderr
"""
p = None
try:
assert isinstance(command, str)
if output == "loud":
print("\nExecuting command: " + (">" * 40) + "\n")
print(command)
if not shell:
command = command.split()
stdout = None if output == "loud" else subprocess.PIPE
stderr = None if output == "loud" else subprocess.PIPE
start = time.time()
sys.stdout.flush()
sys.stderr.flush()
p = subprocess.Popen(command, shell=shell, universal_newlines=True,
stdout=stdout, stderr=stderr)
interval_length = 1
while True:
try:
p.wait(interval_length)
except subprocess.TimeoutExpired:
pass
if p.returncode is not None:
break
if output == "loud":
print("\nCommand took {} seconds.\n".format(time.time() - start))
_stdout = "" if p.stdout is None else p.stdout.read()
_stderr = "" if p.stderr is None else p.stderr.read()
if p.returncode != 0:
if isinstance(command, list):
command = ' '.join(command)
print("The following command returned with non-zero exit code "
"{}:\n {}".format(p.returncode, command))
if output is None or (output == "quiet" and not robust):
print("\n" + "-" * 20 + " stdout " + "-" * 20 + "\n")
print(_stdout)
print("\n" + "-" * 20 + " stderr " + "-" * 20 + "\n")
print(_stderr)
if robust:
return p.returncode, _stdout, _stderr
else:
raise subprocess.CalledProcessError(p.returncode, command, _stdout, _stderr)
return p.returncode, _stdout, _stderr
except BaseException as e:
if p is not None:
p.terminate()
p.kill()
raise e
|
4012763da331f092656cd61492cd306d7611abe1
| 23,213
|
import re
def search(regex, fullpath):
"""
Return True if and only if the given regex matches any line of the given file.
"""
    p = re.compile(regex)
    with open(fullpath) as f:
        for line in f:
            if p.search(line):
                return True
    return False
|
cde96d6cb976d25aca953467abe4420030fecc65
| 23,214
|
from typing import TextIO
from typing import Tuple
import string
def run(inp: TextIO) -> Tuple[int, int]:
"""Returns nice count """
naughty_words = ["ab", "cd", "pq", "xy"]
count_nice_1 = 0
count_nice_2 = 0
for line in inp:
line = line.strip()
if line == "":
continue
contains_no_naughty_words = all(word not in line for word in naughty_words)
contains_3_or_more_vowels = len([letter for letter in line if letter in "aeiou"]) >= 3
contains_doubles = any(letter*2 in line for letter in string.ascii_lowercase)
nice_1 = all([contains_no_naughty_words, contains_3_or_more_vowels, contains_doubles])
if nice_1:
count_nice_1 += 1
twice_pair = False
for x,y in zip(line[:-1],line[1:]):
twice_pair = line.count(f"{x}{y}") >= 2
if twice_pair:
break
divided_repeat = False
for x,y,z in zip(line[:-2], line[1:-1], line[2:]):
if x == z != y:
divided_repeat = True
nice_2 = divided_repeat and twice_pair
if nice_2:
count_nice_2 += 1
return (count_nice_1, count_nice_2)
|
ba58defda3f2fe992ea1945e3b6a3f51f287debf
| 23,215
|
def _proj_freq_bands():
"""Get frequency band names and ranges."""
bands = {
'BOLD bandpass': (.01, .1), # Our infraslow range
'Delta': (1.5, 4),
'Theta': (4, 8),
'Alpha': (8, 12),
'Beta': (12, 30),
'Gamma': (30, 55)}
return bands
|
7b5c6b3290fd7837ff3ea42642568ce2bea91bb5
| 23,217
|
def get_line_count(filename):
"""
count number of lines in file.
taken from
https://stackoverflow.com/a/27518377
:param filename: file name
:return: number of lines in file
"""
def _make_gen(reader):
b = reader(1024 * 1024)
while b:
yield b
b = reader(1024 * 1024)
    with open(filename, 'rb') as f:
        f_gen = _make_gen(f.raw.read)
        return sum(buf.count(b'\n') for buf in f_gen)
|
c9222f24e22bc61f0fefcfcc3eb367bb96ed5d96
| 23,219
|
import csv
def cfgvaltolistlist(val, extend=False):
"""a,b,c \\n d,e,f -> [ [a, b, c] , [d,e,f] ] (extend: [ a,b,c,d,e,f ] )"""
ll = []
for cc in val.split("\n"): # by end of lines
cc = cc.strip()
        llcsv = csv.reader([cc], skipinitialspace=True)
        llcsv = [x for x in next(llcsv) if x.strip()]  # drop empty fields
if not extend:
ll.append(llcsv)
else:
ll.extend(llcsv)
return ll
|
f582bddf4e86f86c39334f868973185aa14fba9b
| 23,221
|
def read_lines_from_text_file(file_path):
"""Read lines from a text file."""
with open(file_path) as f:
lines = [line.strip() for line in f.readlines()]
return lines
|
95a1592a20d4e83a62def2f8aa8f20633e1024a6
| 23,223
|
def compute_loss(predictions, targets, criterion, perplexity=False):
"""Compute our custom loss"""
#print("Compute loss: ")
#print("inputs, preds, targets", predictions.shape, targets.shape)
predictions = predictions[:, :-1, :].contiguous()
targets = targets[:, 1:]
#print("preds, targets", predictions.shape, targets.shape)
rearranged_output = predictions.view(predictions.shape[0]*predictions.shape[1], -1)
rearranged_target = targets.contiguous().view(-1)
#print(rearranged_output.shape, rearranged_target.shape)
#print(rearranged_target)
loss = criterion(rearranged_output, rearranged_target)
if(not perplexity):
#means that criterion passed in mean reduction, and currently training is going on.
return loss
else:
#eval mode is going on...criterion has sum reduction currently.
return loss, (rearranged_target != 0).sum()
|
bf9470bf4e8c870053c95ac11c06686c36486ad1
| 23,224
|
def match(tokens, rule):
"""Checks if a token stream matches a rule.
Expects the rule part of the rule tuple (rule[1]).
Returns 0 if it doesn't match, 1 if it matches the begin, and 2 if it
matches entirely.
"""
for r in rule:
if len(tokens) > len(r):
continue
for i in range(len(tokens)):
if not tokens[i] or tokens[i].bnf() != r[i]:
break
else: # executed if the loop ends without break
return 2 if len(tokens) == len(r) else 1
return 0
|
9eb9dccddaf31017388fc1e93400349b5f0d2fa0
| 23,225
|
import numpy
def build_mix_covariance_matrix(covariance, points_sampled, points_to_sample):
"""Compute the "mix" covariance matrix, ``Ks``, of ``Xs`` and ``X`` (``points_to_sample`` and ``points_sampled``, respectively).
.. NOTE:: These comments are copied from BuildMixCovarianceMatrix() in gpp_math.cpp.
Matrix is computed as:
``A_{i,j} = covariance(X_i, Xs_j).``
Result is not guaranteed to be SPD and need not even be square.
Generally, this is called from other functions with "points_sampled" and "points_to_sample" as the
input lists and not any arbitrary list of points; hence the very specific input name. But this
is not a requirement.
Point lists cannot contain duplicates with each other or within themselves.
:param covariance: the covariance function encoding assumptions about the GP's behavior on our data
:type covariance: interfaces.covariance_interface.CovarianceInterface subclass
:param points_sampled: points, ``X_i``
:type points_sampled: array of float64 with shape (points_sampled.shape[0], dim)
:param points_to_sample: points, ``Xs_i``
:type points_to_sample: array of float64 with shape (points_to_sample.shape[0], dim)
:return: "mix" covariance matrix
:rtype: array of float64 with shape (points_sampled.shape[0], points_to_sample.shape[0]), order='F'
.. Note:: Fortran ordering is important here; scipy.linalg factor/solve methods
(e.g., cholesky, solve_triangular) implicitly require order='F' to enable
overwriting. This output is commonly overwritten.
"""
cov_mat = numpy.empty((points_sampled.shape[0], points_to_sample.shape[0]), order='F')
for j, point_two in enumerate(points_to_sample):
for i, point_one in enumerate(points_sampled):
cov_mat[i, j] = covariance.covariance(point_one, point_two)
return cov_mat
|
423a75074e53a18e1fa8f53ddf4b96331503075b
| 23,226
|
def greedy_find_block(list_str, expected_digit=None):
"""
    Scan the string and return the type and characters of the next block.
    A block is defined as a contiguous run of numeric or alpha characters. The point at which the string
    converts from one to the other is the edge of the block.
    Will pop elements of the list to consume them during processing.
    :param list_str: a string in list form: ['a', 'c', 'd', '1', '0', '.']
    :param expected_digit: if given, only consume a block of this type (True for digits)
    :return: (bool, list) tuple, where bool is isdigit() for the first char of the block
"""
# True for digits, false for alpha
chr_type = list_str[0].isdigit()
if expected_digit is not None and expected_digit != chr_type:
# An explicit type request and the head of this string doesn't match, so return the other type and an empty list
return expected_digit, []
result = []
while list_str and chr_type == list_str[0].isdigit():
result += list_str.pop(0)
return chr_type, result
|
6357861ff619ccba974ed083e208c282c67781b6
| 23,227
|
def pyramidal_nums(n):
"""
    Returns a list of all pyramidal numbers less than n
    A pyramidal number is defined as: f(num) = (num**3 - num) / 6
    """
    res = [1]
    for i in range(3, n):
        p = (i**3 - i) // 6
        if p < n:
            res.append(p)
        else:
            return res
    return res
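# Illustrative usage sketch: f(3)=4, f(4)=10, f(5)=20, f(6)=35, f(7)=56.
assert pyramidal_nums(60) == [1, 4, 10, 20, 35, 56]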
|
2a4c2625014ea0f19d99cef9fc97ccafaf91eff7
| 23,229
|
import re
def is_domain(text):
"""
Determine if a text string is a valid domain name.
Args:
text - the string to test
"""
if text.find('.') == -1 :
return False
    if re.match(r'^[A-Za-z0-9][.\-A-Za-z0-9]*[A-Za-z0-9]$', text) is None:
return False
return True
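# Illustrative usage sketch: a dot is required and edge characters must be alphanumeric.
assert is_domain("example.com") is True
assert is_domain("localhost") is False      # no dot
assert is_domain("-example.com") is False   # leading hyphen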
|
7b5a79990bb61ecc46c53ea40fd4067180771ee0
| 23,230
|
def get_directions(filename):
"""gets the directions from the file
Args:
filename: name of the file
Returns:
a list containing lists of the directions eg. return[0] is the list of
directions from the first line of the file. return[1] is the list of
directions from the second line of the file.
"""
text_file = open(filename, "r")
input_strs = text_file.readlines()
text_file.close()
directions = []
for input_str in input_strs:
directions.append(input_str.split(','))
return directions
|
8e904e405e47b645ffdc444a2a2bd970a7a84e3e
| 23,231
|
def program_info():
"""
Get the program version number, etc.
Returns:
:returns: str program_output: a string for the print of the program information
"""
program_output = "\nSMILESClickChem Version 1.0.1\n"
program_output = program_output + " ================== \n"
program_output = (
program_output
+ "If you use SMILESClickChem in your research, please cite the following references:\n"
)
program_output = program_output + "Spiegel, J.O., Durrant, J.D. \n"
program_output = program_output + "SMILESClickChem: an open-source program for "
program_output = program_output + "automated de novo ligand design using in silico reactions"
program_output = program_output + ". (2020) \n"
program_output = program_output + "[doi: 10.5281/zenodo.4087691]\n\n"
program_output = program_output + "Spiegel, J.O., Durrant, J.D. \n"
program_output = program_output + "GlauconiteFilter: an open-source program "
program_output = program_output + "for automated ADME-PK filtering. (2020) \n"
program_output = program_output + "[doi: 10.5281/zenodo.4087647]\n\n"
program_output = program_output + "Spiegel, J.O., Durrant, J.D. \n"
program_output = program_output + "AutoGrow4: an open-source genetic algorithm "
program_output = program_output + "for de novo drug design and lead optimization. \n"
program_output = program_output + "J Cheminform 12, 25 (2020). \n"
program_output = program_output + "[doi: 10.1186/s13321-020-00429-4]\n"
program_output = program_output + " ================== \n\n"
return program_output
|
74bd83eb10e766de3cf2724a19ec4a5ae5545e3c
| 23,232
|
def _offset_to_tzname(offset):
"""
Converts an offset in minutes to an RFC 3339 "time-offset" string.
>>> _offset_to_tzname(0)
'+00:00'
>>> _offset_to_tzname(-1)
'-00:01'
>>> _offset_to_tzname(-60)
'-01:00'
>>> _offset_to_tzname(-779)
'-12:59'
>>> _offset_to_tzname(1)
'+00:01'
>>> _offset_to_tzname(60)
'+01:00'
>>> _offset_to_tzname(779)
'+12:59'
"""
offset = int(offset)
if offset < 0:
tzsign = '-'
else:
tzsign = '+'
offset = abs(offset)
    tzhour = offset // 60
tzmin = offset % 60
return '%s%02d:%02d' % (tzsign, tzhour, tzmin)
|
9e94b2f7ab70a001db2ebcf569c0a8305230a322
| 23,234
|
def calculate_wire_drag_const(wd_over_d, h_over_d):
"""Calculate the wire-drag constant for the Cheng-Todreas (or
Upgraded Cheng-Todreas) friction factor constant calculation"""
wd = {}
if wd_over_d == 0.0:
wd['turbulent'] = 0.0
wd['laminar'] = 0.0
else:
wd['turbulent'] = ((29.5 - 140.0 * wd_over_d
+ 401.0 * wd_over_d**2) / h_over_d**0.85)
wd['laminar'] = 1.4 * wd['turbulent']
return wd
|
d7fa44e9423a14e25d9af4bbeb60db92236298f0
| 23,236
|
from typing import Type
def singleton(cls) -> Type:
"""
Decorator for defining singleton classes.
The resulting singleton class can then be instantiated at most once. The first instance is reused for subsequent
instantiations and the arguments provided in subsequent instantiations are simply discarded.
"""
new_function = cls.__new__
def get_instance(_cls, *args, **kwargs):
if cls._Singleton__instance is None:
cls.__new__ = new_function
cls._Singleton__instance = cls(*args, **kwargs)
cls.__new__ = get_instance
def get_none(*_args, **_kwargs) -> None:
pass
cls.__init__ = get_none
cls.__call__ = get_instance
return cls._Singleton__instance
def exists_instance() -> bool:
"""Get whether an instance of this singleton class exists."""
return cls._Singleton__instance is not None
cls.__new__ = get_instance
cls.exists_instance = exists_instance
cls._Singleton__instance = None
    return cls
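# Illustrative usage sketch: later constructor arguments are discarded.
@singleton
class Config:
    def __init__(self, path):
        self.path = path

first = Config("/etc/app.conf")
second = Config("/other/path")
assert first is second and second.path == "/etc/app.conf"
assert Config.exists_instance()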
|
6685aed06fdec665016b7d8c7b076fa94ace348f
| 23,237
|
def create_section(title, content=None, version=None, date=None):
    """Each section has a title and a list of content objects.
    Each content object is either a text or a list object.
    """
    result = dict(title=title, content=content if content is not None else [])
if version:
result['version'] = version
result['date'] = date
return result
|
610cfa71190ae813aa7e5fdffa9e91f42fa0511d
| 23,238
|
def make_search_summary(self, keyword, results, joiner="\n "):
"""Create string for str dunder of search classes."""
if self._n != 1:
appendix = "s"
verb = "have"
else:
appendix = ""
verb = "has"
s = f"Search '{self.query}' yielded {self._n:,} {keyword}{appendix}"
if results:
s += ":" + joiner + joiner.join(results)
elif self._n:
s += f", which {verb} not been downloaded"
return s
|
bc9534822c45ffb3eb8359ca3363c37b373919a6
| 23,240
|
def is_attendee_or_speaker(user, presentation):
"""
    Return True if the user is either a speaker or attendee.
:param user: User instance
:param presentation: Presentation instance
"""
speakers = [x.user for x in presentation.speakers()]
registrants = presentation.proposal.registrants.all()
return user in speakers or user in registrants
|
57a40b65608983fc61b6735b77b4d2f75b8a9d20
| 23,242
|
def get_relative_path(path, locale):
"""Get relative path to repository file."""
locale_directory = locale.code
if 'templates' in path:
locale_directory = 'templates'
# Also check for locale variants with underscore, e.g. de_AT
underscore = locale.code.replace('-', '_')
if '/' + underscore + '/' in path:
locale_directory = underscore
return path.split('/' + locale_directory + '/')[-1]
|
8aaff5052274dd877c99f2b5bbf103e863c3d1ff
| 23,243
|
import re
def display_to_origin(display):
"""
    From the display value (a Stencila article version URL),
    trim it to the SWH origin value,
    e.g. for the display value
    https://elife.stencila.io/article-30274/v99/
    return https://elife.stencila.io/article-30274/
"""
if not display:
return None
match_pattern = re.compile(r"^(https://elife.stencila.io/.*?/).*$")
return match_pattern.sub(r"\1", display)
|
1ff22328590dd2926a83406f82a2fd920b7e2f90
| 23,246
|
def _non_empty_lines(output):
"""Helper to turn a string into a list of not
empty lines and returns it.
"""
return [line for line in
output.splitlines() if line.strip()]
|
3034775bb1d629321f13417b2f843986dcdb6408
| 23,247
|
def _prefix_expand(prefix):
"""Expand the prefix into values for checksum computation."""
retval = bytearray(ord(x) & 0x1f for x in prefix)
# Append null separator
retval.append(0)
return retval
|
463930264c3ada545ce03e35e8e2502caf9348c9
| 23,248
|
def get_branch(g, node, visited=None):
"""Return the full list of nodes that branch *exclusively*
from the given node. The starting node is included in
the list.
"""
if visited is None:
visited = set()
visited.add(node)
branch = [node]
for succ in g.successors(node):
for p in g.predecessors(succ):
if p not in visited:
break
else:
branch.extend(get_branch(g, succ, visited))
return branch
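# Illustrative usage sketch, assuming a networkx-style directed graph;
# node 4 is excluded because it is also reachable from node 5.
import networkx as nx
g = nx.DiGraph([(1, 2), (2, 3), (1, 4), (5, 4)])
assert get_branch(g, 1) == [1, 2, 3]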
|
21fde89ed3cc5eb9d4883e68e759a38386e32fce
| 23,249
|
import re
def smart_truncate(text, length=100, suffix='...'):
"""Truncates `text`, on a word boundary, as close to
the target length it can come.
"""
slen = len(suffix)
pattern = r'^(.{0,%d}\S)\s+\S+' % (length - slen - 1)
if len(text) > length:
match = re.match(pattern, text)
if match:
length0 = match.end(0)
length1 = match.end(1)
if abs(length0 + slen - length) < abs(length1 + slen - length):
return match.group(0) + suffix
else:
return match.group(1) + suffix
return text
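# Illustrative usage sketch: truncation lands on a word boundary.
assert smart_truncate("The quick brown fox jumps over the lazy dog", 20) == "The quick brown..."
assert smart_truncate("short", 20) == "short"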
|
3cce932b3a4e32c3aa83ddddc61f4dd660b333e1
| 23,250
|
import time
def timeit(func):
"""
Simple wrapper to time a function. Prints the execution time after the
method finishes.
"""
def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
print('Execution Time ({0}): {1:.5f} seconds'.format(
func.__name__, end_time - start_time))
return result
return wrapper
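# Illustrative usage sketch: the elapsed time is printed after the call.
@timeit
def slow_add(a, b):
    return a + b

assert slow_add(1, 2) == 3  # also prints "Execution Time (slow_add): ... seconds"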
|
37a657ac013739329a84b619153fdfa781181bd8
| 23,251
|
import os
def results_not_combined(path_run_ABC, files_sim_results):
"""
Return false if the combined results file hasn't already been created or it doesn't have all of the simulations.
:param path_run_ABC: full or relative path to directory to run ABC in.
:param files_sim_results: list of full paths of simulation results files.
:return: True or False
"""
    file_sim_combined_name = '{}/results_combined.txt'.format(path_run_ABC)
    if os.path.isfile(file_sim_combined_name):
        sim_num = len(files_sim_results)
        with open(file_sim_combined_name) as f:
            line_num = sum(1 for line in f)
        return line_num < sim_num
    return True
|
9a455e2c5b8446374a34b462b7e84ba116aee9c6
| 23,252
|
def check_repeat_x(V):
"""
Check if exists repeated x value
Parameters
----------
V : dict
dictionary which contains X and Y values
Returns
-------
bool
Returns True if there are repeated x's values or
False if there are no repeated x's values
"""
xlabel, _ = V.keys()
    return len(V[xlabel]) != len(set(V[xlabel]))
|
e840a596353fc01523f94ba9fc03d7940206b01a
| 23,253
|
from typing import Dict
import jinja2
def render(path: str, template_name: str, parameters: Dict[str, str]) -> str:
"""Returns a rendered Dockerfile.
path indicates where in the filesystem the Dockerfiles are.
template_name references a Dockerfile.<template_name> to render.
"""
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(path), undefined=jinja2.StrictUndefined
)
template = "Dockerfile"
if template_name is not None:
template = "Dockerfile.{}".format(template_name)
return env.get_template(template).render(parameters)
|
11850e6a093eb72d970462745184946e9500c440
| 23,255
|
def goal(state):
"""Returns whether all node are in a district."""
return state[1] == []
|
adf2076fd4ac40b1e21bf8866b7c7374118c035e
| 23,256
|
from typing import Dict
import random
def randomValue(interval: Dict[str, int]) -> int:
"""Generate a random integer value from a given interval."""
if not isinstance(interval, dict):
raise ValueError('value has to be dict')
    return random.randrange(interval['min'], interval['max'])  # max is exclusive
|
d2445ab2127065fa5586080270757e9049246a6d
| 23,262
|
def _categorizeVector(v):
"""
Takes X,Y vector v and returns one of r, h, v, or 0 depending on which
of X and/or Y are zero, plus tuple of nonzero ones. If both are zero,
it returns a single zero still.
>>> _categorizeVector((0,0))
('0', (0,))
>>> _categorizeVector((1,0))
('h', (1,))
>>> _categorizeVector((0,2))
('v', (2,))
>>> _categorizeVector((1,2))
('r', (1, 2))
"""
if not v[0]:
if not v[1]:
return '0', v[:1]
else:
return 'v', v[1:]
else:
if not v[1]:
return 'h', v[:1]
else:
return 'r', v
|
3532e4920eb7d58aca2dbf360cea6939d94ab730
| 23,263
|
def get_comp_mandatory_depends(comp_info, comps):
""" Get comp mandatory depends from comp index """
depends = []
for comp in comps:
if comp in comp_info:
depends += comp_info[comp]["dependencies"]
# print("add mandatory depend:", comp_info[comp]["dependencies"], "for", comp)
if depends:
depends += get_comp_mandatory_depends(comp_info, depends)
return list(set(depends))
|
6a24a9c8a753329667993d58b0ccbf1ec108e5aa
| 23,264
|
def _NormalizeString(string):
"""Normalizes a string to account for things like case."""
return string.strip().upper() if string else None
|
24b8c525df9b080716119ee013cf45eb8c7b892a
| 23,267
|
def calculate_position_and_depth(input: list[list[str]]) -> tuple[int, int]:
"""Function to horizontal and position from direction instructions."""
horizontal_location = 0
depth = 0
aim = 0
for x in input:
instruction = x[0]
magnitude = int(x[1])
match instruction:
case "forward":
horizontal_location += magnitude
depth += aim * magnitude
case "down":
aim += magnitude
case "up":
aim -= magnitude
case _:
raise ValueError(f"unexpected instruction; {instruction}")
return horizontal_location, depth
|
ba304427d3471eb7237c99c78b6cf9b42b496fa2
| 23,268
|
def mock_config_external():
"""Mock the Config class such that it returns self.param.
In addition, querying for encoder results in a dict that contains
an empty list in the key self.param.
"""
class MockConfig:
def __init__(self, param):
self.param = param
self.encoder = {self.param: []}
self.post = [{self.param: []}]
def get(self, name):
# Maybe we should write a fake config file but there are
# Huge issues with mocking the config module...
if name == 'encoder':
return self.encoder
if name in ('post_rip', 'post_encode', 'post_finished'):
return self.post
return self.param
return MockConfig
|
0d135ba471ceeaa60aed2bc1b13c5b2eacc775db
| 23,270
|
import time
def get_my_posts(api):
"""Retrieve all posts from own profile """
my_posts = []
has_more_posts = True
max_id = ''
while has_more_posts:
api.getSelfUserFeed(maxid=max_id)
if api.LastJson['more_available'] is not True:
has_more_posts = False # stop condition
max_id = api.LastJson.get('next_max_id', '')
my_posts.extend(api.LastJson['items']) # merge lists
time.sleep(2) # slows down to avoid flooding
if has_more_posts:
print(str(len(my_posts)) + ' posts retrieved so far...')
print('Total posts retrieved: ' + str(len(my_posts)))
return my_posts
|
343f3ed55c751311a3a1de2188055f4b342624f5
| 23,271
|
def median_iqr(series):
"""
The interquartile range (Q3-Q1) and median are computed on a pandas series
    :param series: pandas Series of numeric values
    :return: (median, iqr) tuple
"""
iqr_median = [.25, .5, .75]
series = series.quantile(iqr_median)
iqr = series.iloc[2] - series.iloc[0]
median = series.iloc[1]
return median, iqr
|
095f4d33fd4069cf888eedbfb3570099fc592772
| 23,272
|
def zerocross(eigenvec):
"""
Compute the amount of zero-crossing of an eigenvector matrix (for each eigenvector).
Parameters
----------
eigenvec : numpy.ndarray
The eigenvectors from a decomposition.
Returns
-------
numpy.ndarray
A 1D array with the amount of zero-crossing for each eigenvector.
"""
return (eigenvec[:-1, ...] * eigenvec[1:, ...] < 0).sum(axis=0)
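# Illustrative usage sketch: sign changes are counted per column.
import numpy as np
eigenvec = np.array([[1.0, 1.0], [-1.0, 2.0], [1.0, 3.0]])
assert list(zerocross(eigenvec)) == [2, 0]  # the first column flips sign twice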
|
79fd04940261167336c088027a3550abad45a464
| 23,273
|
def compare_two_values(obj1, obj2):
"""
Args:
obj1:object1 It can be a data type in Python,
and can be converted by using the str() method
obj2:object2 same as obj1
Returns: True or False
"""
    # Fall back on Python's str() so arbitrary objects can be compared
    # by their sorted string representations.
    return obj1 == obj2 or (isinstance(obj1, type(obj2)) and
                            "".join(sorted(str(obj1))) == "".join(sorted(str(obj2))))
|
52df52776a8c86a06c38f54c171828ef420dad8e
| 23,274
|
import os
def tests_pins(request):
"""
Need to improve this logic for supplying test pins
using @pytest.mark.parametrize
"""
_, file_name = os.path.split(request.module.__file__)
if file_name == "test_dcpower.py": # overriding the pin_select as it is inside dcpower module
# for SMU driver i.e. nidcpower Testing
smu_system_pins = ["SMU_VI_VCC"]
input_dut_pins = ["SMU_VI_V_In"]
output_dut_pins = ["SMU_VI_V_Out"]
all_smu_pins = ["SMU_PG_Logic"] # pin group name
pins_selected = [smu_system_pins, input_dut_pins, output_dut_pins, all_smu_pins]
elif file_name == "test_digital.py":
# for Digital pattern instrument driver i.e. nidigital Testing
input_dut_pins = ["DPI_DO_SCL", "DPI_DO_SDA"]
output_dut_pins = ["DPI_DI_SCL", "DPI_DI_SDA"]
all_dut_pins = input_dut_pins + output_dut_pins
dpi_system_pins = ["DPI_PM_VDD", "DPI_PM_VDDIO"]
pins_selected = [input_dut_pins, output_dut_pins, all_dut_pins]
elif file_name == "test_scope.py":
# for scope driver i.e. niscope testing
input_dut_pins = ["OSC_xA_P_In"]
output_dut_pins = ["OSC_xA_P_Out"]
all_dut_pins = input_dut_pins + output_dut_pins
pins_selected = [input_dut_pins, output_dut_pins, all_dut_pins]
elif file_name == "test_daqmx.py":
# for daqmx driver i.e. nidaqmx testing
pins_selected = [["DAQ_Pins1"], ["DAQ_Pins2"]]
elif file_name == "test_abstract.py":
        # for abstract instrument driver i.e. niabstract testing
pins_selected = [["BUCK_TLOAD_CTRL"], ["eN_Digital"]]
elif file_name == "test_fgen.py":
# for function generator driver i.e. nifgen testing
input_dut_pins = ["FGN_SI_SGL_In"]
pins_selected = [input_dut_pins]
elif file_name == "test_switch.py":
        # for switch driver i.e. niswitch testing
input_dut_pins = ["Pin1", "Pin2", "Pin3", "Pin4", "Pin5", "Pin6", "Pin7", "Pin8", "Pin9"]
pins_selected = input_dut_pins
elif file_name == "test_fpga.py":
        # for FPGA driver i.e. nifpga testing
input_dut_pins = ["RIO_Pins"]
pins_selected = [input_dut_pins]
elif file_name == "test_dmm.py":
        # for DMM driver i.e. nidmm testing
input_dut_pins = ["CH0"]
pins_selected = [input_dut_pins]
else:
pins_selected = ["dummy", "pins", "to_fail"]
return pins_selected
|
425d32d1a5cd4994bb6c5b12e79886a070bf19b5
| 23,275
|
import io
import re
def outside(*exceptions):
"""
A decorator which allows to apply the transformation only to areas where
a set of given regular expressions does not match. Here, this is mostly
used to apply deobfuscations only to code outside of strings.
"""
exclusion = '|'.join(F'(?:{e})' for e in exceptions)
def excluded(method):
def wrapper(self, data):
with io.StringIO() as out:
cursor = 0
for m in re.finditer(exclusion, data, re.DOTALL):
out.write(method(self, data[cursor:m.start()]))
out.write(m[0])
cursor = m.end()
out.write(method(self, data[cursor:]))
return out.getvalue()
return wrapper
return excluded
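# Illustrative usage sketch: lower-case everything outside single-quoted strings.
class Deobfuscator:
    @outside(r"'[^']*'")
    def lower(self, data):
        return data.lower()

assert Deobfuscator().lower("FOO 'Bar' BAZ") == "foo 'Bar' baz"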
|
fc7a0bf3fb004b87e70202af340634704a5380a2
| 23,276
|
def calculate_tdew_from_rh(rh, T, temperature_metric="celsius", verbose=False):
"""Calculate dew point temperature from relative humidity and temperature.
Args:
rh (pd series): air relative humidity in %
T (pd series): air temperature in °C
temperature_metric (str, optional): Input temperature unit. Defaults to "celsius".
Returns:
pandas series: dew point temperature timeseries (in °C or K)
"""
if verbose:
print(
"Calculating dew point temperature (dewp_temp) from relative humidity and temp."
)
if temperature_metric != "celsius":
if verbose:
print("Assuming input temperature unit is Kelvin for rh calculation.")
T = T - 273 # K to °C
# inspired from humidity.to.dewpoint in:
# https://github.com/geanders/weathermetrics/blob/master/R/moisture_conversions.R
Tdew = (rh / 100) ** (1 / 8) * (112 + (0.9 * T)) - 112 + (0.1 * T) # in °C
if temperature_metric != "celsius":
Tdew = Tdew + 273 # °C to K
return Tdew
|
079e4a871e8343378e8d0e95dc3ddeed9366e874
| 23,277
|
import os
def get_file_name(file):
"""Return the file name without its extension"""
base = os.path.basename(file)
return os.path.splitext(base)[0]
|
1b872b160e5b344b20f0184930016180b6d57e74
| 23,278
|
def to_pc(val):
"""Convert float value in [0, 1] to percentage"""
return r'%.2f\%%' % (val * 100)
|
ca6b59b437537beef85018089c6448572ece2b32
| 23,279
|
def simpson_index(species_num_array):
"""Calculate the Simpson's Diversity Index: 1 - ∑pi**2
The Simpson index is a dominance index because it gives more weight to
common or dominant species. In this case, a few rare species with only
a few representatives will not affect the diversity. p is the proportion
(n/N) of individuals of one particular species found (n) divided by the
total number of individuals found (N). The value of this index ranges
between 0 and 1, the greater the value, the greater the sample
diversity.
Args:
species_num_array: An array that store the number of different kind
of species.
Returns:
Simpson's diversity index of this population.
"""
ratio_ = species_num_array / species_num_array.sum()
simpson_index_diversity = 1 - sum(ratio_**2)
return float('%0.4f' % simpson_index_diversity)
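# Illustrative usage sketch: four equally abundant species give 1 - 4 * 0.25**2.
import numpy as np
assert simpson_index(np.array([10, 10, 10, 10])) == 0.75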
|
22c0c2074a2d389ef6e245ce0b3aa27aea0590b5
| 23,280
|
def can_embed(bin_msg: str, width: int, height: int) -> bool:
"""Determines whether the image can hold the message.
Parameters:
-----------
bin_msg:
string: A string of 1's and 0's representing the characters in the msg.
width:
int: The width of the image.
height:
int: The height of the image.
Returns:
--------
embed
boolean: States whether the message can fit in the specified image.
"""
embed = len(bin_msg) + 8 < 3 * width * height # + 8 for terminating 00000000
return embed
|
8489994fe239a920ece07dd69d97e85b9bc7ce60
| 23,281
|
def addition(*args):
"""Addition an inifite number of integer arguments
Args:
number (int): Numbers to addition
Returns:
int: The result of the addition
"""
result = 0
for arg in args:
result += int(arg)
return result
|
0c0f9bc333cbcb1ce55e692cd2f6c1bb07461396
| 23,282
|
def web_template_to_frontend_top_message(template: dict) -> dict:
"""Transforms a frontend top level message to the web template."""
top_level_message = {}
top_level_message['header_content'] = str(template['header']).strip()
menu_items = list(template['menuItems'])
top_level_message['top_level_options'] = []
for idx, menu_item in enumerate(menu_items):
title = str(menu_item['title']).strip()
content = str(menu_item['content'])
top_level_option = {
'position': idx,
'title': title,
'content': content,
}
secondary_options = []
        for sec_idx, secondary_option in enumerate(menu_item['footerItems']):
            content = str(secondary_option)
            secondary_options.append({
                'position': sec_idx,
                'content': content,
            })
top_level_option['secondary_options'] = secondary_options
top_level_message['top_level_options'].append(top_level_option)
return top_level_message
|
fa96d2708a8b503064f8eb84d1ea385780d092e5
| 23,285
|
def precision_single_class(correctly_assigned, total_assigned):
"""
Computes the precision for a single class
:rtype : float
:param correctly_assigned: Samples correctly assigned to the class
:param total_assigned: Total samples assigned to the class
:return: The precision value
"""
# simply returning the precision value
return float(correctly_assigned) / float(total_assigned)
|
49ab9693c4b0a59384a55b89e9ecd45dbe1da028
| 23,286
|
def catH(mat1, mat2):
"""Concatenate 2 matrices (horizontal concatenation)"""
return mat1.catH(mat2)
|
fc2ce20e71ff4f594deea2c59a59c56d41daed38
| 23,287
|
def get_bucket_ix(seq_length, bucket_range):
"""
    Returns the index of a bucket for a sequence with a given length when the bucket range is bucket_range
    Args:
        seq_length: length of the sequence
bucket_range: range of bucket
Returns: index of a bucket
"""
return seq_length // bucket_range + (1 if seq_length % bucket_range != 0 else 0)
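# Illustrative usage sketch: lengths 1-4 go to bucket 1, 5-8 to bucket 2, ...
assert get_bucket_ix(4, 4) == 1
assert get_bucket_ix(5, 4) == 2
assert get_bucket_ix(10, 4) == 3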
|
705dfbfeeb87adb7b5f39d6b1db46b380d58b276
| 23,288
|
def parse_response(response):
"""
Utility function to parse a response into a list.
"""
elements = []
for element in response['responses'][0]['labelAnnotations']:
elements.append(element['description'].capitalize())
return elements
|
f8b40a43ad00af68d5d13d3b78f00f33cec85270
| 23,289
|
def _predict_estimator(clf, X):
"""Helper tor predict"""
return clf.predict(X)
|
d194879a1f2664d068444e4215c61db2f5efae30
| 23,290
|
import os
def check_cache(cache_path):
"""Check if this file is in the cache.
:param cache_path: `str` The file to check in the cache
:returns: `bool` True if in the cache, False otherwise.
"""
return False if cache_path is None else os.path.exists(cache_path)
|
ed2a745932dc8bb50692fea0860c81caa701120f
| 23,291
|
def billing_mode_from_summary(billing_mode_summary):
"""Extract BillingMode field from BillingModeSummary object."""
return billing_mode_summary["BillingMode"]
|
9ada170f42e1f0f1eec3378a5f25f0fc884b033e
| 23,292
|
def process_image(OcrService, blob_path):
"""
Function to run OCR against an input blob, and return the text as a string.
"""
ocr_analysis = OcrService.get_ocr_results(blob_path)
output_text = OcrService.format_ocr_text(ocr_analysis)
return output_text
|
f95fc422c216c98356b222cf82e3aec3a748c4d7
| 23,293
|
def cartesian(lst1, lst2):
"""Return cartesian of 2 lists"""
lst = []
for item1 in lst1:
for item2 in lst2:
lst.append([item1, item2])
return lst
|
57bf192770d65143b0cf6c669b18c3baee6cd360
| 23,294
|
def sample_category(address_book, CategoryFactory):
"""A sample event category to be used in tests."""
return CategoryFactory(address_book, u'calendar-export-test')
|
79cb5236d1c8bcbe70d1c25b7da67abf60c1bebc
| 23,295
|
import re
def check_column_name(column_name):
""" 检查一列是否是温度相关列
:param column_name:
:return: BOOL
"""
# 2019年之前的数据,如P1_T,代表不覆膜温度
pat1 = re.compile(r'P\d+_T', re.I)
# 2019年之前的数据,如P11m_T,代表覆膜温度
pat2 = re.compile(r'P\d+m_T', re.I)
# 2020年的数据,如P1_8,代表不覆膜温度
pat3 = re.compile(r'P\d+-\d+', re.I)
# 2020年的数据,如P1_8m,代表覆膜温度
pat4 = re.compile(r'P\d+-\d+m', re.I)
if (pat1.match(column_name) is not None or
pat2.match(column_name) is not None or
pat3.match(column_name) is not None or
pat4.match(column_name) is not None):
return True
else:
return False
|
63d501c028beb12be983892383873ffb166e387f
| 23,297
|
def is_null_str(value):
"""
Indicate if a string is None or 'None' or 'N/A'
:param value: A string value
:return: True if a string is None or 'None' or 'N/A'
"""
return not value or value == str(None) or value == 'N/A'
|
e8edb22c77ddf712a039f92529d453b7a4947173
| 23,298
|
import math
def solveQuad(a, b, c):
"""
Solve a quadratic equation. Returns a list of solutions from length 0 to 2
    :param a: quadratic coefficient
    :param b: linear coefficient
    :param c: constant term
    :return: list of real solutions
"""
discriminant = (b ** 2) - (4 * a * c)
divisor = 2 * a
if discriminant < 0.0:
return []
elif divisor == 0.0:
if b == 0.0:
return []
else:
return [-c / b]
elif discriminant > 0.0:
sdiscriminant = math.sqrt(discriminant)
return [(-b - sdiscriminant) / divisor,
(-b + sdiscriminant) / divisor]
else:
return [-b / divisor]
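# Illustrative usage sketch covering the three branch outcomes:
assert solveQuad(1, -3, 2) == [1.0, 2.0]   # two real roots
assert solveQuad(1, 2, 1) == [-1.0]        # repeated root
assert solveQuad(1, 0, 1) == []            # no real roots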
|
199041cb7a6e6da8787511e29bdfbcee7d73640b
| 23,301
|
def is_point_inside_object(obj, obj_BVHtree, point):
""" Checks whether the given point is inside the given object.
This only works if the given object is watertight and has correct normals
:param obj: The object
:param obj_BVHtree: A bvh tree of the object
:param point: The point to check
:return: True, if the point is inside the object
"""
# Look for closest point on object
nearest, normal, _, _ = obj_BVHtree.find_nearest(point)
# Compute direction
p2 = nearest - point
# Compute dot product between direction and normal vector
a = p2.normalized().dot((obj.rotation_euler.to_matrix() @ normal).normalized())
return a >= 0.0
|
d838aece6338858fc4d7dd945418bd6db3a48c42
| 23,305
|
def preparation_time_in_minutes(layers):
"""Calculate prep time
    :param layers: Number of layers in the cake
:return: int Total number of minutes to prepare the cake
"""
return 2 * layers
|
b088c09e5ea43d6fc924d9aecb6b90d91c46fcf2
| 23,306
|
def get_soup_tables_on_single_table(soup, table_tag_value, table_tag="class", columns_to_keep=[]):
""" Search for all the objects in the current soup object with this specific HTML dom and tag value"""
""" ie: get_soup_tables_on_single_table(soup, "tabla-123", [0,1]) """
new_table_array = []
tables_array = soup.find_all("table", attrs={table_tag: table_tag_value})
for current_table in tables_array:
table_body = current_table.find('tbody')
table_rows = table_body.find_all('tr')
table_rows.pop(0) # Remove the column names
        table_rows.pop(-1)  # Remove the footer row
for row in table_rows:
if not columns_to_keep:
new_table_array.append(row)
else:
new_row = []
table_fields = row.find_all('td')
for index in columns_to_keep:
if table_fields[index]:
new_row.append(table_fields[index])
new_table_array.append(new_row)
return new_table_array
|
07757cee220297d742fe23804c9a14647f3d0ab1
| 23,307
|
def const(input, **params):
"""
Return constant value
:param input:
:param params:
:return:
"""
PARAM_CONSTANT_VALUE = 'value'
return params.get(PARAM_CONSTANT_VALUE)
|
514f4475592e89d9e2ec80f8f695f763456b5f9e
| 23,310
|
import torch
def pred_label(output, maxk = 3):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
batch_size = output.size(0)
_, pred = output.topk(maxk, 1, True, True)
return pred
|
3d36b671096b4a2402db9dd7ba798a8630c6d7d9
| 23,311
|
import argparse
def create_arg_parser():
""""Creates and returns the ArgumentParser object."""
parser = argparse.ArgumentParser(description='Description of Huffman encoding/deconding.')
parser.add_argument('-mode','-m','-M', choices=['e','d'], help='Mode (Encoding or Decoding).', required=True)
parser.add_argument('-output','-o','-O', help='Path to store result files.', default='./')
parser.add_argument('-file','-f','-F', help='Path to the image file or encoded file.')
parser.add_argument('-color','-c','-C', choices=['rgb','gray'], help='Color (rgb or gray).', default='gray')
return parser
|
e72390318f1e5b5831be26d5647bea3e0d341cf4
| 23,312
|
def get_product_from_time_scale(time_scale):
"""
    get the USGS nwis product that is appropriate for the time scale
:param time_scale: str - Pandas like time string for the time scale at which
the data will be aggregated (e.g., 'H' for hour or 'D' for daily)
:return:
"""
iv_scales = ['15T', 'T', 'H']
dv_scale = ['D']
if time_scale in iv_scales:
return 'iv'
elif time_scale in dv_scale:
return 'dv'
else:
raise ValueError("time scale must be '15T', 'T', 'H', or 'D'")
|
425b0cbec0b20b79493dd53805cf3fd6f31fae95
| 23,313
|
def identity(arg):
"""
This function simply returns its argument. It serves as a
replacement for ConfigParser.optionxform, which by default
changes arguments to lower case. The identity function is a
better choice than str() or unicode(), because it is
encoding-agnostic.
"""
return arg
|
a5a5adfbc87ec25619eb4540dda995e49a03ba7a
| 23,314
|
def node2entity_labels(node):
"""
Ignores width 1.
"""
entity_labels = []
def func(node, pos=0):
if isinstance(node.parse, str):
# if not ignore_unary:
# entity_labels.append((pos, 1, node.label))
return 1
sofar = 0
for x in node.parse:
xsize = func(x, pos + sofar)
sofar += xsize
size = sofar
entity_labels.append((pos, size, node.label))
return size
func(node)
return entity_labels
|
bef2149ecffab63004eb4b99c810a43f2189980f
| 23,315
|
from pathlib import Path
import argparse
def exists(input_file: str) -> Path:
"""Check if the input file exists."""
path = Path(input_file)
if not path.exists():
raise argparse.ArgumentTypeError(f"{input_file} doesn't exist!")
return path
|
33fa9cddcc7a33d9b943f657658a1ada5fb7d832
| 23,316
|
def _do_request(client, method, path, data=None, query=None):
"""Make a request to the endpoint with `data` in the body.
`client` is a flask test client
`method` is the request method
`path` is the path of the request
`data` a dictionary containing body arguments for non-GET methods that will
be converted to JSON -- the server will expect valid json, but we want to
write test cases with invalid input as well.
`query` a dictionary containing query arguments (ie - those after the ?)
for the GET method.
"""
# The arguments for this method are documented at:
# http://werkzeug.pocoo.org/docs/0.11/test/#werkzeug.test.EnvironBuilder
return client.open(method=method, path=path, query_string=query, data=data)
|
b9409ad20674f5fc21c2e21e0580d5c837272767
| 23,317
|
def max_in_array(arr):
"""
    The function finds the largest value in the array passed as a parameter.
    Parameters:
    ----------
    arr - Array of numbers to search
    Returns:
    ------
    (maximum value, its index) tuple, or -1 for an empty array
"""
if len(arr) == 0:
return -1
maxNum = arr[0]
index = 0
for i, element in enumerate(arr):
if maxNum < int(element):
maxNum = int(element)
index = i
return maxNum, index
|
18efae951c568cc9ffbb5e3bc9a4f12454686372
| 23,318
|
def convert(nano: int) -> str:
"""Convert nano seconds to a formatted string."""
kilo, mega, giga = 1e3, 1e6, 1e9
if nano < kilo:
return f"{nano} ns"
if nano < mega:
return f"{nano / kilo:.2f} µs"
if nano < giga:
return f"{nano / mega:.2f} ms"
return f"{nano / giga:.2f} s"
|
f54cc89243766bb54bf0c8e8d54089f549a9dc3d
| 23,319
|
import sys
import os
def buildPropertyTypePath(propertyTypeNameString:str,targetProjectPath:str = ''):
"""
    buildPropertyTypePath(propertyTypeNameString:str, targetProjectPath:str = '')
is a function to create new propertyType class file paths. The created path will be in the path:
RiskQuantLib.Property. If path already exists, it won't be overwritten.
Parameters
----------
propertyTypeNameString : str
The propertyType name that you want to create attribute type class by.
targetProjectPath : str
The location of RiskQuantLib project where you want to create propertyType class.
Returns
-------
filePath : str
"""
    # create a directory path to hold the new python script.
c_propertyTypeNameString = propertyTypeNameString[0].capitalize()+propertyTypeNameString[1:]
if targetProjectPath == '':
targetProjectPath = sys.path[0]+os.sep+'RiskQuantLib'
else:
pass
# find type class path
filePath = targetProjectPath+os.sep+'Property'+os.sep+c_propertyTypeNameString+os.sep+propertyTypeNameString+'.py'
    # create the type class base directory
filePathWD = "".join([i+os.sep for i in filePath.split(os.sep)[:-1]]).strip(os.sep)
if os.path.exists(filePathWD):
with open(filePathWD + os.sep + '__init__.py', 'w+') as f:
f.truncate() # clear all contents
else:
os.mkdir(filePathWD)
with open(filePathWD+os.sep+'__init__.py', 'w+') as f:
f.truncate() # clear all contents
# add type path to pathObj
pathObjPath = targetProjectPath+os.sep+'Build'+os.sep+'pathObj.py'
# write file path
with open(pathObjPath, 'r') as f:
content = f.read()
if content.find('#-<attributeTypeDictBegin>') == -1 or content.find('#-<attributeTypeDictEnd>') == -1:
print("Source file must have a #-<Begin> and #-<End> tag to be built")
exit(-1)
former = content.split('#-<attributeTypeDictBegin>')[0]
middle = content.split('#-<attributeTypeDictBegin>')[-1].split('#-<attributeTypeDictEnd>')[0]
ender = content.split('#-<attributeTypeDictEnd>')[-1]
add_code = r''' attributeTypeDict["'''+c_propertyTypeNameString+'''"] = "'''+filePath.split('RiskQuantLib')[-1].strip(os.sep).replace(os.sep,'" + os.sep + "')+'''"'''
newContent = former + '#-<attributeTypeDictBegin>\n' + middle.strip('\t').strip(' ') + add_code + '\n #-<attributeTypeDictEnd>' + ender
with open(pathObjPath, 'w') as f:
f.truncate() # clear all contents
f.write(newContent.strip(' ').strip('\t\n'))
return filePath
|
3ea1160223d65deecc33f96c041d17f48ee920d4
| 23,320
|
def int_to_binary(int_num):
""" Converts int to binary """
return bin(int_num)
|
9d72474454b0cdd8dee6cfecc3fba02410cdf959
| 23,322
|
def csv_to_json(csv_data):
"""Converts a Matrix to an Array of Json's"""
json_records = []
try:
json_keys=csv_data[0]
for csv_row in csv_data[1:]:
csv_dict = dict()
index = 0
for key in json_keys:
if not csv_row[index]:
field = ""
else:
field = csv_row[index]
csv_dict.update({key: field})
index += 1
json_records.append(csv_dict)
except Exception as e:
print(e)
return json_records
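# Illustrative usage sketch: the first row supplies the keys.
rows = [["name", "age"], ["ada", "36"], ["alan", ""]]
assert csv_to_json(rows) == [{"name": "ada", "age": "36"}, {"name": "alan", "age": ""}]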
|
06f9c606944c8626e6ddb18149ea3e43d1d19252
| 23,324
|
from typing import Sequence
from typing import List
def read_present_files(paths: Sequence[str]) -> str:
"""Read the content of those files that are present."""
contents: List[str] = []
for path in paths:
try:
with open(path, "r") as f:
contents += ["\n".join(map(str.strip, f.readlines()))]
except FileNotFoundError:
continue
return "\n\n".join(contents)
|
7dce316db22405e8482b8bbd2890b2213a931cfd
| 23,325
|
import os
import yaml
def load_era5_tables():
"""Return tables of variable names.
Returns:
tables (dict): Dictionary containing the table information.
tables['header_row'] contains the table header row as a list of strings,
tables['rows'] contains a list of lists which make up the main table
content, and
tables['caption'] contains the table caption.
common_table_header (iterable of strings): The unordered header columns common
to all tables.
"""
tables_dir = os.path.join(os.path.dirname(__file__), "tables")
with open(os.path.join(tables_dir, "common_table_header.yaml"), "r") as f:
common_table_header = yaml.safe_load(f)
with open(os.path.join(tables_dir, "tables.yaml"), "r") as f:
tables = yaml.safe_load(f)
return tables, common_table_header
|
e2b0de7b1f89eadd96dca2b2febe6a8b6a2a0e15
| 23,328
|
def mocked_check_call(args, cwd):
""""
mock subprocess.check_call
"""
return True
|
c777a2f1e22a0d15320e1debb768a0b874328f61
| 23,329
|
def remapping_id(file_, start_index, node_type, separator="\t"):
"""Mapp the ID and name of nodes to index.
"""
node_types = []
id2index = {}
name2index = {}
index = start_index
with open(file_, encoding="ISO-8859-1") as reader:
for line in reader:
tokens = line.strip().split(separator)
id2index[tokens[0]] = str(index)
if len(tokens) == 2:
name2index[tokens[1]] = str(index)
node_types.append((str(index), node_type))
index += 1
return id2index, name2index, node_types
|
6e84a2a52a4a0f1ee01b6b02388c0a7cec618393
| 23,330
|