| content (string, 35 to 416k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
|---|---|---|
def to_ordinal(number):
"""Return the "ordinal" representation of a number"""
assert isinstance(number, int)
sr = str(number) # string representation
ld = sr[-1] # last digit
try:
# Second to last digit
stld = sr[-2]
except IndexError:
stld = None
if stld != '1':
if ld == '1':
return sr + 'st'
if ld == '2':
return sr + 'nd'
if ld == '3':
return sr + 'rd'
return sr + 'th'
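# Quick sanity checks derived from the logic above: the tens-digit guard
# keeps the "teens" on 'th'.
assert to_ordinal(2) == '2nd'
assert to_ordinal(12) == '12th'
assert to_ordinal(123) == '123rd'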
|
5974aa3abf05c9e200ec1d6fc05bdecc231d2b22
| 25,390
|
def mutate(codon, alt, index):
"""
Replace (mutate) a base in a codon with an
alternate base.
Parameters
----------
codon : str
three letter DNA sequence
alt : str
alternative base
index : int
index of the alt base in codon (0|1|2).
Returns
-------
str
codon with alternative base
Raises
------
AssertionError
error if index is not valid (0|1|2)
AssertionError
error if base is not valid (A|T|C|G)
"""
assert index in [0,1,2], "Not a valid index."
assert alt in ["A", "T", "C", "G"], "Not a valid base."
return "".join([alt if i == index else b for i,b in enumerate(codon)])
|
6db054a599846e104aa7bc8a4c565d37dad3b56e
| 25,393
|
import functools
def vec2int (vec):
"""Converts `vec`tor to an integer.
Each element in `vec` represents a bit in the integer. The bit is
`1` if the element is greater than zero, otherwise the bit is `0`.
"""
return functools.reduce(lambda n, elem: (n << 1) | (elem > 0), vec, 0)
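# Sanity-check sketch: [1, 0, 1] encodes binary 101 == 5; any element > 0
# counts as a set bit.
assert vec2int([1, 0, 1]) == 5
assert vec2int([0.5, -2.0]) == 2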
|
e1f077d1250a698432fb2a330ce1bfa614b4772e
| 25,394
|
def sumOfLeftLeaves(self, root):  # uses depth-first search
"""
:type root: TreeNode
:rtype: int
"""
self.sum = 0
    def depthTral(node):  # depth-first traversal
if node:
if node.left is not None and node.left.right is None and node.left.left is None:
self.sum += node.left.val
depthTral(node.left)
depthTral(node.right)
depthTral(root)
return self.sum
|
0260273bf1d72071b523c4204b8094affcd2e240
| 25,395
|
def rel_error(deriv, orig):
"""
Relative Error.
Calculating the relative error after approximating the
derivative.
Parameters:
deriv : approximation
orig : actual value
Returns: Relative Error
"""
return abs(orig - deriv) / abs(orig)
|
f0322c2d4ea5f66c28492e81424f053177d9133b
| 25,396
|
import calendar
def get_date_label(time):
"""Returns a nice label for timestamped months-years"""
split = [int(x) for x in time.split("-")]
return f"{calendar.month_abbr[split[1]]} {str(split[0])}"
|
6495818e104e88f66119ab2cfdfcf6ac1756bc0d
| 25,397
|
def _modify_payloads(checks, values, clear):
"""
Set or add payloads to checks.
:param checks: list of check instance
:param values: list of payloads with keys
:param clear: boolean flag if clearing the predefined payloads
:return: list of check instance
"""
for key, payloads in values.items():
for check in checks:
if check.key == key:
if clear:
check.set_payloads(payloads)
else:
check.add_payloads(payloads)
return checks
|
799c93803131f32cfa6a7524d1ed20873c129f27
| 25,398
|
import os
def __listdir(directory: str, extensions: list) -> list:
    """Return the files in `directory` whose extension matches `extensions`
    (a filter for tif/png/jpeg etc.).
    Args:
        directory (str): folder to list
        extensions (list): lower-case extensions (without the dot) to keep,
            or a list containing 'all' to keep every file
    Returns:
        list: file names that match the requested extensions
    """
    all_files = os.listdir(directory)  # list all files
    files = []
    for file in all_files:
        # split off the extension and check membership in the requested set;
        # rpartition avoids an IndexError for names without a dot
        ext = file.lower().rpartition('.')[2]
        if (ext in extensions) or ('all' in extensions):
            files.append(file)
    return files  # return file names that match requested extensions
|
db68d385a0d1bc6f753f6c9bdb9e50db078bc2eb
| 25,399
|
import ntpath
import os
import subprocess
import sys
import shutil
def compress_corpus_gz(methods, indir, tmpdir):
"""
    Compress all files in folder 'indir' using each method.
    Parameters
    ----------
    methods : list of str
        names of compression executables
    indir : str
        folder with files to compress/decompress
    tmpdir : str
        temporary folder for storing the compressed files
    Returns
    -------
    float
        total size of the processed input files, in megabytes
"""
size = 0
for method in methods:
meth = ntpath.basename(method)
for lvl in range(1, 10):
for f in os.listdir(indir):
if not os.path.isfile(os.path.join(indir, f)):
continue
if f.startswith('.'):
continue
if not f.endswith('.zst') and not f.endswith('.gz') \
and not f.endswith('.bz2'):
fnm = os.path.join(indir, f)
size = size + os.stat(fnm).st_size
cmd = method + ' -f -k -' + str(lvl) + ' "' + fnm \
+ '"'
subprocess.call(cmd, shell=True)
# outnm=os.path.splitext(ntpath.basename(fnm))[0]
outnm = ntpath.basename(fnm)
fnm = fnm + '.gz'
if not os.path.isfile(fnm):
sys.exit('Unable to find ' + fnm)
outnm = os.path.join(tmpdir, meth + str(lvl) + '_'
+ outnm + '.gz')
shutil.move(fnm, outnm)
bytes_per_mb = 1000000
return size / bytes_per_mb
|
7c68322d3ad923779952c58e4968fbaf386c4365
| 25,401
|
def split(arr, splits=2):
"""Split given array into `splits` smaller, similar sized arrays"""
if len(arr) < splits:
raise ValueError("Can't find more splits than array has elements")
new_size = int(len(arr) / splits)
return ([arr[n * new_size:(n + 1) * new_size] for n in range(splits - 1)]
+ [arr[(splits - 1) * new_size:]])
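# Behavior sketch: the final chunk absorbs any remainder.
assert split([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4, 5]]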
|
13d75bd5a15013e4d91fab9cca2d21e8bcc5e5f8
| 25,402
|
def float_to_digits_list(number):
"""Convert a float into a list of digits, without conserving exponant"""
# Get rid of scientific-format exponant
str_number = str(number)
str_number = str_number.split("e")[0]
res = [int(ele) for ele in str_number if ele.isdigit()]
# Remove trailing 0s in front
while len(res) > 1 and res[0] == 0:
res.pop(0)
# Remove training 0s at end
while len(res) > 1 and res[-1] == 0:
res.pop(-1)
return res
|
34a0f2e54899d7410bc6a3035c0fb3d599b9b3eb
| 25,403
|
def wavelength_RGB(wlen):
""" wlen: wavelength in nm
needs single value, np.array fails
returns: (R,G,B) triplet of integers (0-255)
Credits: Dan Bruton http://www.physics.sfasu.edu/astro/color.html"""
    # first pass at an RGB mix
    if 380 <= wlen < 440:
        red = (440 - wlen) / (440 - 380)
        green = 0.0
        blue = 1.0
    elif 440 <= wlen < 490:
        red = 0.0
        green = (wlen - 440) / (490 - 440)
        blue = 1.0
    elif 490 <= wlen < 510:
        red = 0.0
        green = 1.0
        blue = (510 - wlen) / (510 - 490)
    elif 510 <= wlen < 580:
        red = (wlen - 510) / (580 - 510)
        green = 1.0
        blue = 0.0
    elif 580 <= wlen < 645:
        red = 1.0
        green = (645 - wlen) / (645 - 580)
        blue = 0.0
    elif 645 <= wlen < 780:
        red = 1.0
        green = 0.0
        blue = 0.0
    else:
        red = 0.0
        green = 0.0
        blue = 0.0
    # reduce brightness towards the extremes where our eyes are less sensitive
    if 380 <= wlen < 420:
        factor = 0.3 + 0.7 * (wlen - 380) / (420 - 380)
    elif 420 <= wlen < 700:
        factor = 1.0
    elif 700 <= wlen < 780:
        factor = 0.3 + 0.7 * (780 - wlen) / (780 - 700)
    else:
        factor = 0.0
gamma = 0.8
intensity_max = 255
R = int(intensity_max * (red * factor)**gamma)
G = int(intensity_max * (green * factor)**gamma)
B = int(intensity_max * (blue * factor)**gamma)
return (R,G,B)
|
53a8bd102b4d68ee5e58bdc3ae71be3ee5523a7b
| 25,404
|
def getListOfFileListsWithIdenticalHashes(files, justPropagateExistingHashes):
"""Get list of file lists where each inner list is a list of files with identical hashes.
"""
# If we have no files at all we return an empty list.
if len(files) == 0:
return []
# If we have just one file (for this size) we do not need to calculate the hash.
if len(files) == 1:
return [files]
# Create map from inode to entry with the most recent mtime.
# Also clear all outdated or questionable hashes.
    # (Missing hashes will be recalculated (or copied) in the next step.)
inodeToEntry = {}
for entry in files:
if entry.inode not in inodeToEntry:
inodeToEntry[entry.inode] = entry
else:
if entry.mtime > inodeToEntry[entry.inode].mtime:
# Entries with newer mtime always have priority. Potential hashes of old entries are ignored (and cleared) since they are most likely outdated.
inodeToEntry[entry.inode].clearHash()
inodeToEntry[entry.inode] = entry
elif entry.mtime == inodeToEntry[entry.inode].mtime:
# Entries with identical size and mtime:
if entry.hasHash():
if inodeToEntry[entry.inode].hasHash():
if entry.hash != inodeToEntry[entry.inode].hash:
                            # Inconsistent hashes for the same inode, same size and the same mtime: This indicates trouble and is worth a warning.
# To be conservative we remove the hashes from both entries since we do not know which one to trust.
print("Warning: Inconsistent hashes for two files with the same inode, same size and same mtime: Will ignore and re-calculate hashes:")
entry.dump()
inodeToEntry[entry.inode].dump()
entry.clearHash()
inodeToEntry[entry.inode].clearHash()
else:
# Identical hashes for identical inodes and identical mtimes:
# We arbitrarily use the entry which is already in the map. It does not matter.
pass
else:
# Prefer entries which have a hash over those which do not have a hash.
inodeToEntry[entry.inode] = entry
else:
# Entry does not have a hash yet. It does not matter whether the entry in the map already has a hash or not.
# We arbitrarily keep the entry which is already in the map.
pass
else:
# entry.mtime < inodeToEntry[entry.inode].mtime:
# Ignore outdated entry and clear hash.
entry.clearHash()
# For --update do not calculate new hashes (yet). Just re-use existing hashes.
# Copy hashes from entries having the same inode, size and mtime.
if justPropagateExistingHashes:
for entry in files:
if not entry.hasHash():
if inodeToEntry[entry.inode].hasHash():
entry.setHashAndMtime(inodeToEntry[entry.inode].hash, inodeToEntry[entry.inode].mtime)
else:
if entry.hash != inodeToEntry[entry.inode].hash:
raise RuntimeError("Internal error: Inconsistent hashes!")
# Return None to make sure the result is not used (as a list), because the following code will generate invalid file lists (for example a list of all files which do not yet have a hash.).
return None
if len(inodeToEntry) > 1:
# Calculate missing hashes for all inodes which do not yet have a hash.
for (inode, entry) in inodeToEntry.items():
if not entry.hasHash():
entry.calcHash()
# Update the hashes of all files according to the map.
# Copy hashes from entries having the same inode, size and mtime.
for entry in files:
if not entry.hasHash():
entry.setHashAndMtime(inodeToEntry[entry.inode].hash, inodeToEntry[entry.inode].mtime)
else:
if entry.hash != inodeToEntry[entry.inode].hash:
raise RuntimeError("Internal error: Inconsistent hashes for different files pointing to the same inode!")
# Sort by hash, mtime and then inode
files = sorted(files, key = lambda x: (x.hash, x.mtime, x.inode))
# Split list into lists with the same hashes.
currentList = []
r = []
for entry in files:
if (len(currentList) > 0) and (entry.hash != currentList[0].hash):
# Emit currentList.
r.append(currentList)
# Create new list.
currentList = [entry]
else:
currentList.append(entry)
# Emit last currentList.
if len(currentList) > 0:
r.append(currentList)
return r
|
83276021ca5aece93f2d6f5c60c630f7f3e72460
| 25,405
|
def make_cmd_invocation(invocation, args, kwargs):
"""
>>> make_cmd_invocation('path/program', ['arg1', 'arg2'], {'darg': 4})
['./giotto-cmd', '/path/program/arg1/arg2/', '--darg=4']
"""
if not invocation.endswith('/'):
invocation += '/'
if not invocation.startswith('/'):
invocation = '/' + invocation
cmd = invocation
for arg in args:
cmd += str(arg) + "/"
rendered_kwargs = []
for k, v in kwargs.items():
rendered_kwargs.append("--%s=%s" % (k,v))
return ['./giotto-cmd', cmd] + rendered_kwargs
|
19b969dc5a6536f56ba1f004b5b1bdc97ca0812f
| 25,407
|
def Sparkline(values):
"""Returns an unicode sparkline representing values."""
unicode_values = '▁▂▃▄▅▆▇█'
if not values:
return ''
elif len(values) == 1:
# Special case a single value to always return the middle value instead of
# the smallest one, which would always be the case otherwise
return unicode_values[len(unicode_values) // 2]
min_value = min(values)
# Really small offset used to ensure top bin includes max(values).
value_range = max(values) - min_value + 1e-10
bucket_size = value_range / len(unicode_values)
bucketized_values = [int((v - min_value) / bucket_size) for v in values]
return ''.join(unicode_values[v] for v in bucketized_values)
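# Behavior sketch (values follow from the bucketing above):
assert Sparkline([5]) == '▅'      # a single value maps to the middle glyph
assert Sparkline([0, 7]) == '▁█'  # extremes map to the lowest/highest bars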
|
99583b5b0f5308957d4d9b4b78473bdb4dd46c04
| 25,408
|
def f_call_1_1_1_kwds(a, /, b, *, c, **kwds):
"""
>>> f_call_1_1_1_kwds(1,2,c=3)
(1, 2, 3, {})
>>> f_call_1_1_1_kwds(1,2,c=3,d=4,e=5) == (1, 2, 3, {'d': 4, 'e': 5})
True
"""
return (a,b,c,kwds)
|
8ce4616af2ca6985590c705d291f3408efcb8d34
| 25,409
|
def get_required_availability_type_modules(scenario_id, c):
"""
:param scenario_id: user-specified scenario ID
:param c: database cursor
    :return: List of the required availability type submodules
Get the required availability type submodules based on the database inputs
for the specified scenario_id. Required modules are the unique set of
generator availability types in the scenario's portfolio. Get the list
based on the project_availability_scenario_id of the scenario_id.
This list will be used to know for which availability type submodules we
    should validate inputs, get inputs from the database, or save results to
    the database.
Note: once we have determined the dynamic components, this information
will also be stored in the DynamicComponents class object.
"""
project_portfolio_scenario_id = c.execute(
"""SELECT project_portfolio_scenario_id
FROM scenarios
WHERE scenario_id = {}""".format(
scenario_id
)
).fetchone()[0]
project_availability_scenario_id = c.execute(
"""SELECT project_availability_scenario_id
FROM scenarios
WHERE scenario_id = {}""".format(
scenario_id
)
).fetchone()[0]
required_availability_type_modules = [
p[0]
for p in c.execute(
"""SELECT DISTINCT availability_type
FROM
(SELECT project FROM inputs_project_portfolios
WHERE project_portfolio_scenario_id = {}) as prj_tbl
INNER JOIN
(SELECT project, availability_type
FROM inputs_project_availability
WHERE project_availability_scenario_id = {}) as av_type_tbl
USING (project)""".format(
project_portfolio_scenario_id, project_availability_scenario_id
)
).fetchall()
]
return required_availability_type_modules
|
fbcdb1954c0364dd967a82d3d5eb968597c1db0a
| 25,410
|
import torch
def ipca_transform(dataloader, components):
"""
Transform data using incremental PCA.
RH 2020
Args:
dataloader (torch.utils.data.DataLoader):
Data to be decomposed.
components (torch.Tensor or np.ndarray):
The components of the decomposition.
2-D array.
Each column is a component vector. Each row is a
feature weight.
"""
out = []
for iter_batch, batch in enumerate(dataloader):
out.append(batch[0] @ components.T)
return torch.cat(out, dim=0)
|
ed0cca90c0cfe2bd5cb2f4b0fed19aa320744410
| 25,411
|
def _size_in_blocks(size, block_size):
"""Return the size in terms of data blocks"""
    # integer ceiling division: exact even for very large sizes, unlike int(/)
    return (size + block_size - 1) // block_size
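# Ceiling-division sketch: a partial block still counts as a whole block.
assert _size_in_blocks(1024, 512) == 2
assert _size_in_blocks(1025, 512) == 3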
|
8c596403a289dcedc19429685aeace204580fa68
| 25,412
|
def _position_is_valid(position):
"""
Checks if given position is a valid. To consider a position as valid, it
must be a two-elements tuple, containing values from 0 to 2.
Examples of valid positions: (0,0), (1,0)
Examples of invalid positions: (0,0,1), (9,8), False
:param position: Two-elements tuple representing a
position in the board. Example: (2, 2)
    Returns True if given position is valid, False otherwise.
"""
    return (isinstance(position, tuple)
            # check length of tuple
            and len(position) == 2
            # check height
            and position[0] in range(3)
            # check width
            and position[1] in range(3))
|
8ac576f8bc856fdb587ade30d43ee86e7c7be1c1
| 25,413
|
import math
def number2degree(number):
"""
    convert radians into degrees
    Keyword arguments:
    number -- angle in radians
    return degrees
"""
return number * 180 / math.pi
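# Quick check: pi radians is 180 degrees (up to float rounding).
assert abs(number2degree(math.pi) - 180.0) < 1e-9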
|
22a02199e45fa5f0b6314762d6f9c10ca530135b
| 25,416
|
def generate_numbers(partitions):
"""Return a list of numbers ranging from [1, partitions]."""
return list(range(1, partitions + 1))
|
8c1ad09496c4cbddb53c8b93d162d78e3e41b60e
| 25,417
|
def locate(transaction, sql, cursor):
    """Locates an intersection using the nearest location lookup function in
    the postgresql geospatial database containing the intersection data. Takes
    a preset location specifier and a postgresql cursor linking to the database"""
    result = {}
    cursor.execute(sql, transaction)
    row = cursor.fetchone()
    for item in zip(cursor.description, row):
        name = item[0][0]
        value = item[1]
        result[name] = value
    return result
|
d87b2ea46bfc697fe7a47dad4ccaca082e819b49
| 25,418
|
import os
def check_for_ct(orthanc_id: str) -> bool:
"""
Checks to see if a study from orthanc is a CT scan or not
by seeing if there are multiple slices in the dicomdir
Args:
orthanc_id (str): the study id of the study from orthanc
Returns:
:bool: a boolean of whether or not it is a CT scan
"""
# Check to see if the dicomdir has multiple images
path = f'/tmp/{orthanc_id}'
return len(os.listdir(path)) > 1
|
4579480dfa0cb69223e7022a39f2b94950b1cd1f
| 25,419
|
def _optimal_SVHT_coef_sigma_unknown(beta):
"""Implement Equation (5)."""
return 0.56 * beta**3 - 0.95 * beta**2 + 1.82 * beta + 1.43
|
e5b14098495e767d1c68e749d5f4beb2eb238699
| 25,422
|
def yes_or_no(question):
"""Creates y/n question with handling invalid inputs within console
:param question: required question string
:type question: String
:return: True or False for the input
:rtype: bool
"""
while "the answer is invalid":
reply = str(input(question + " (y/n): ")).lower().strip()
if reply[0] == "y":
return True
if reply[0] == "n":
return False
|
e2ff75c0fdd40ef015ae703fc54d0e6d215fb0e5
| 25,424
|
def evaluate_ser_errors(entities, outputs):
"""
    For queries that the ser gets incorrect, group them into different types
:param entities: In the form [(5, 6, 'number')], list of entities for each query
:param outputs: PARSED mallard/duckling responses
:return:
"""
missed_entity_indices = []
incorrect_span_indices = []
correct_indices = []
for i, (entity_info, output) in enumerate(zip(entities, outputs)):
missed_entity = False
incorrect_span = False
for entity in entity_info:
span = (entity[0], entity[1])
entity_type = entity[2]
if entity_type not in output:
# Completely not predicted
if i not in missed_entity_indices:
missed_entity_indices.append(i)
missed_entity = True
else:
if span not in output[entity_type]:
if i not in incorrect_span_indices:
incorrect_span_indices.append(i)
incorrect_span = True
if not missed_entity and not incorrect_span:
correct_indices.append(i)
return missed_entity_indices, incorrect_span_indices, correct_indices
|
8ada4a3d91dc9381a7d91360e069fcb9aaceb12c
| 25,425
|
from datetime import datetime
import pytz
def date_block_key_fn(block):
"""
If the block's date is None, return the maximum datetime in order
to force it to the end of the list of displayed blocks.
"""
return block.date or datetime.max.replace(tzinfo=pytz.UTC)
|
33c4553704200e5355cd7d6807cd596192a2264b
| 25,428
|
import os
def is_valid_db_location(database):
"""Validate permissions for a database"""
return os.access(os.path.dirname(os.path.abspath(database)), os.W_OK)
|
cd06a77b43d2f8222cf8754a922178e6e6d6d530
| 25,429
|
def refine_get_context_template(original):
"""
Refines ``ape.helpers.get_context_template`` and append postgres-specific context keys.
    :param original: the original ``get_context_template`` callable to wrap
    :return: a ``get_context`` function whose result includes the extra keys
"""
def get_context():
context = original()
context.update({
'DB_HOST': '',
'DB_PASSWORD': '',
'DB_NAME': '',
'DB_USER': ''
})
return context
return get_context
|
85ba983cebba38240d8bf56c42bd62db72faad7e
| 25,432
|
def read(f, size=1):
"""
    Read from the file; raise EOFError at end of file,
    which is detected by comparing the result with b''.
"""
data = f.read(size)
if data == b'':
raise EOFError
else:
return data
|
503f6353591728f44a1b95d570c1cae5d3a34fc9
| 25,433
|
def extract_entities(input_data_tokens, entity_dict):
"""Extracts valid entities present in the input query.
Parses the tokenized input list to find valid entity values, based
on the given entity dataset.
Args:
input_data_tokens: A list of string tokens, without any punctuation,
based on the input string.
        entity_dict: A dictionary of dictionaries, of entity values for a
particular entity type.
Returns:
A list of valid entity values and their start, stop token index
locations in the tokenized input query.
[(['comedy', 'action'], 5, 7), (['suspense'], 9, 10)]
Always returns a list. If no valid entities are detected, returns
an empty list.
"""
detected_entities = []
length = len(input_data_tokens)
for i, word in enumerate(input_data_tokens):
if word in entity_dict:
start = i
stop = -1
            loc = i  # keeps track of the current cursor position
current_dict = entity_dict
# keeps track of the current dictionary data
while(loc <= length and current_dict):
if 1 in current_dict:
# tag index of a potential entity value if a
# longer entity is not present
stop = loc
if len(current_dict) == 1:
detected_entities.append(
(input_data_tokens[start:stop], start, stop)
)
stop = -1 # reset
# if end of query reached or mismatch in entity values,
# discard and move on to the next word
if loc == length or input_data_tokens[loc] not in current_dict:
# save a shorter entity, if it exists in the already \
# parsed query
if stop != -1:
detected_entities.append(
(input_data_tokens[start:stop], start, stop)
)
break
else:
# entity matches up until current word, continue
current_dict = current_dict[input_data_tokens[loc]]
loc += 1
return detected_entities
|
516d0d9ae0df4a318808125b7e44bc327ecb8cff
| 25,434
|
def truncateToPROMOTER(strand, exons, ups_bp, down_bp):
""" Truncates the gene to only target promoter +-bp TSS """
if strand == "+":
first_exon = exons[0]
first_exon[2] = first_exon[1] + down_bp
first_exon[1] = first_exon[1] - ups_bp
return [first_exon]
else:
first_exon = exons[-1]
first_exon[1] = first_exon[2] - down_bp
first_exon[2] = first_exon[2] + ups_bp
return [first_exon]
|
9f1cd8af6364590ade2dd603b68568de6b3302a1
| 25,435
|
from functools import reduce
def import_object(import_name):
"""Import the named object and return it.
The name should be formatted as package.module:obj.
"""
module_name, expr = import_name.split(':', 1)
mod = __import__(module_name)
mod = reduce(getattr, module_name.split('.')[1:], mod)
globals = __builtins__
if not isinstance(globals, dict):
globals = globals.__dict__
return eval(expr, globals, mod.__dict__)
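# Usage sketch: the part before ':' is imported as a module, the part after
# is evaluated against it.
import os.path
assert import_object('os.path:join') is os.path.join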
|
c0dcfb87d992b3e127a2d2a7a4e88d3d53708981
| 25,436
|
from typing import Tuple
from typing import Any
def createResolutionCallbackFromEnv(lookup_base):
"""
Creates a resolution callback that will look up qualified names in an
environment, starting with `lookup_base` for the base of any qualified
names, then proceeding down the lookup chain with the resolved object.
You should not use this directly, it should only be used from the other
createResolutionCallbackFrom* functions.
"""
def lookupInModule(qualified_name, module):
if '.' in qualified_name:
parts = qualified_name.split('.')
base = parts[0]
remaining_pieces = '.'.join(parts[1:])
module_value = getattr(module, base)
return lookupInModule(remaining_pieces, module_value)
else:
return getattr(module, qualified_name)
def parseNestedExpr(expr, module) -> Tuple[Any, int]:
i = 0
while i < len(expr) and expr[i] not in (',', '[', ']'):
i += 1
base = lookupInModule(expr[:i].strip(), module)
assert base is not None, f"Unresolvable type {expr[:i]}"
if i == len(expr) or expr[i] != '[':
return base, i
assert expr[i] == '['
parts = []
while expr[i] != ']':
part_len = 0
i += 1
part, part_len = parseNestedExpr(expr[i:], module)
parts.append(part)
i += part_len
if len(parts) > 1:
return base[tuple(parts)], i + 1
else:
return base[parts[0]], i + 1
def parseExpr(expr, module):
try:
value, len_parsed = parseNestedExpr(expr, module)
assert len_parsed == len(expr), "whole expression was not parsed, falling back to c++ parser"
return value
except Exception:
"""
The python resolver fails in several cases in known unit tests, and is intended
to fall back gracefully to the c++ resolver in general. For example, python 2 style
annotations which are frequent in our unit tests often fail with types e.g. int not
resolvable from the calling frame.
"""
return None
return lambda expr: parseExpr(expr, lookup_base)
|
17ba8861e445eef4753e4ba992afcdcc767007f0
| 25,437
|
import math
def spiral(radius, step, resolution=.1, angle=0.0, start=0.0, direction=-1):
"""
Generate points on a spiral.
Original source:
https://gist.github.com/eliatlarge/d3d4cb8ba8f868bf640c3f6b1c6f30fd
Parameters
----------
radius : float
maximum radius of the spiral from the center.
Defines the distance of the tail end from the center.
step : float
amount the current radius increases between each point.
Larger = spiral expands faster
resolution : float
distance between 2 points on the curve.
Defines amount radius rotates between each point.
Larger = smoother curves, more points, longer time to calculate.
angle : float
starting angle the pointer starts at on the interior
start : float
starting distance the radius is from the center.
direction : {-1, 1}
direction of the rotation of the spiral
Returns
-------
coordinates : List[Tuple[float, float]]
"""
dist = start + 0.0
coords = []
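    # math.hypot(cos(angle), sin(angle)) is always 1.0, so the condition
    # below is effectively "dist < radius"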
while dist * math.hypot(math.cos(angle), math.sin(angle)) < radius:
cord = []
cord.append(dist * math.cos(angle) * direction)
cord.append(dist * math.sin(angle))
coords.append(cord)
dist += step
angle += resolution
return coords
|
cf7f6e22ef1f776bba9ed827b7f7243a45dde21b
| 25,438
|
def us_census_formatter(data):
"""Formatter for US Census.
Arguments:
data(pandas.DataFrame): Data as returned by us_census_connector.
Description:
- Drop unnecessary columns and set index to county
- Make column values more readable
Returns:
pandas.DataFrame
"""
data.columns = data.iloc[0]
data.drop(0, inplace=True)
data.drop("id", axis=1, inplace=True)
data = data.set_index('Geographic Area Name')
cols = [c for c in data.columns if '2018' in c]
data = data[cols]
data.columns = [x.split("!!")[-1] for x in data.columns]
data = data.replace("N", 0.0)
data.columns = [x.lower() for x in data.columns]
data.drop(data.columns[-1], axis=1, inplace=True)
data.drop(data.columns[-1], axis=1, inplace=True)
return data
|
40553ed62355d6f43c0d0c22ffeba122eeb5b2ea
| 25,439
|
import argparse
def getarg():
""" read time period and station name from argument(std input)
    specHr.py -b 20171231 -e 20180101 -s PM2.5 -a plot -t wanli,xianxi,puzi,xiaogang """
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--STNAM", required=True, type=str, help="station name,sep by ,")
ap.add_argument("-s", "--SPNAM", required=True, type=str, help="spec name")
ap.add_argument("-b", "--BEGD", required=True, type=int, help="yyyymmdd")
ap.add_argument("-e", "--ENDD", required=True, type=int, help="yyyymmdd")
ap.add_argument("-a", "--ACTION", required=True, type=str, help="save or plot")
args = vars(ap.parse_args())
return [args['STNAM'], args['SPNAM'], args['BEGD'], args['ENDD'], args['ACTION']]
|
0ba2c43ae991edfb868d38f0af7b829291054bcd
| 25,442
|
def hash_combine_zmw(zmw):
"""
Generate a unique hash for a ZMW, for use in downsampling filter.
"""
mask = 0xFFFF
upper = (zmw >> 16) & mask
lower = zmw & mask
result = 0
result ^= upper + 0x9e3779b9 + (result << 6) + (result >> 2)
result ^= lower + 0x9e3779b9 + (result << 6) + (result >> 2)
return result
|
d26ddb5c11a555eb3072fc2db23a3876d1751db4
| 25,443
|
def RecMult(num_1, num_2):
"""
Takes in two nonnegative numbers and return the multiplication result of the two numbers without using the multiplication operator *
Examples:
>>> RecMult(0,500)
0
>>> RecMult(500,0)
0
>>> RecMult(1,500)
500
>>> RecMult(500,1)
500
>>> RecMult(78,16)
1248
"""
if num_1 == 0 or num_2 == 0:
return 0
else:
return RecMult(num_1, num_2 - 1) + num_1
|
5012a5ca27a263d7f26da26842e62ba9d0e5c7ab
| 25,445
|
def csv_list(csv_str):
"""
    Parser function to turn a string of comma-separated values into a list of ints.
"""
return [int(i) for i in csv_str.split(",")]
|
674c75f980bc8d7b47c5ab28e9afd7a586d1c917
| 25,446
|
def read_config_file(fname):
"""
Reads the config file in and outputs a dictionary for the
program to run through.
"""
d = {}
    with open(fname, 'r') as ptr:
        for line in ptr:
            splitline = line.split()
            if not splitline:
                # skip blank lines, which would otherwise raise an IndexError
                continue
            key = splitline[0]
            value = ' '.join(splitline[1:])
            d[key] = value
    return d
|
942175fae143b87ff57df08041b94465ac5c8eb1
| 25,447
|
def connectable(cls: type) -> type:
""" Decorator for connectable types """
cls.__connectable__ = True
return cls
|
cbeb9094c83923f5c5f231eb38a175bbacf1ec84
| 25,448
|
def cp_max_calc(Ma):
"""
Calculates the maximum pressure coefficient for modified Newtonian flow
Inputs:
Ma: Free stream mach number
Outputs:
CpMax: Maximum pressure coefficient
"""
k = 1.4
    # Rayleigh pitot-tube formula; the stagnation-pressure exponent is k/(k-1)
    PO2_pinf = (((k+1)**2 * Ma**2)/(4*k*Ma**2 - 2*(k-1)))**(k/(k-1)) * \
               ((1-k+2*k*Ma**2)/(k+1))
CpMax = (2/(k*Ma**2))*(PO2_pinf-1)
return CpMax
|
61d91f52234347baaa3f04bd2b82f97ffa6a9fb2
| 25,451
|
def safe_decode(txt):
"""Return decoded text if it's not already bytes."""
try:
return txt.decode()
except AttributeError:
return txt
|
2952daf31e29f45a25b6bb70aab89db08280e848
| 25,452
|
def dev(dic,data):
"""
Development Function (Null Function)
"""
return dic,data
|
5ca4d28f20135e76fff296033d12d0f27a5f4eb9
| 25,453
|
def isPermutation(n1, n2):
""" Returs if two input numbers are permutation of each other"""
if len(str(n1)) != len(str(n2)):
return False
l1 = list(str(n1))
for i in list(str(n2)):
if i not in l1:
return False
l1.pop(l1.index(i))
    return len(l1) == 0
|
49f2cad14b847188b223ce916186219e44c90d46
| 25,454
|
def get_access_dependant_object(model_name, obj):
"""If the given object depends on another object for access, return the object that it depends on."""
if model_name in ["PostgresDatastore", "AzureDatastore", "OneDriveDatastore", "GoogleDriveDatastore",
"DropboxDatastore", "GoogleCloudStorageDatastore", "AzureBlobStorageDatastore",
"AzureDataLakeDatastore", "AmazonS3Datastore"]:
obj = obj.get_connection
elif model_name == "Note" or model_name == "DatasetRun":
obj = obj.dataset
elif model_name == "JobRun":
obj = obj.job
return obj
|
977f6021992bad59efc4bef537bc36b2391a04f9
| 25,456
|
import os
def clean_path(path):
""" Return a clean, absolute path. """
return os.path.abspath(os.path.normpath(path))
|
798b04f18d526751460cc91b694557393020c1db
| 25,457
|
import yaml
import jinja2
def parse_meta_yaml(filename):
"""Parse a conda build meta.yaml file, including jinja2 variables
"""
with open(filename, "r") as fobj:
raw = fobj.read()
return yaml.load(jinja2.Template(raw).render(), Loader=yaml.BaseLoader)
|
cb3e0e95a2580b3cb341abf4a1dd72a9e964b183
| 25,458
|
def fromHex( h ):
"""Convert a hex string into a int"""
return int(h,16)
|
ffae24cdade04d3ab4098f13643098dff2c69ef2
| 25,459
|
import json
import requests
def groupRemove(apikey,groupid):
"""
Removes the group and moves all containers to group 0 (ungrouped)
apikey: Your ApiKey from FileCrypt
groupid: the group ID(!) you want to delete
"""
data={"api_key":apikey,"fn":"group","sub":"remove","id":str(groupid)}
return json.loads(requests.post("https://filecrypt.cc/api.php",data=data).text)
|
ce5e444624d4b071261212f901b364c06169dcfb
| 25,460
|
def get_lane_list(lane_object, scale_x, scale_y):
"""Scale lane by x ratio and y ratio.
:param lane_object: source lanes
:type lane_object: dict
    :param scale_x: scale at x coordinate
    :type scale_x: float
    :param scale_y: scale at y coordinate
    :type scale_y: float
:return: list of points list
:rtype: list
"""
gt_lane_list = []
for line in lane_object["Lines"]:
one_line = []
set_y = []
        for point_index in range(len(line)):
            if line[point_index]["x"] == "nan" or line[point_index]["y"] == "nan":
                continue
            # keep only the first point seen for each distinct y value
            if not line[point_index]["y"] in set_y:
                set_y.append(line[point_index]["y"])
                one_line.append((float(line[point_index]["x"]) * scale_x,
                                 float(line[point_index]["y"]) * scale_y))
if len(one_line) >= 2:
if one_line[0][1] < one_line[1][1]:
one_line = one_line[::-1]
gt_lane_list.append(one_line)
return gt_lane_list
|
23c9cc8f3562305fa3eb4129d6499b83393e09f1
| 25,462
|
from typing import List
def inner(v: List[float], w: List[float]) -> float:
"""
Computes the inner product of two vectors.
Args:
v: The first vector.
w: The second vector.
Returns:
The inner product.
"""
output: float = sum([i * j for i, j in zip(v, w)])
return output
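# Dot-product sketch: 1*3 + 2*4 == 11.
assert inner([1.0, 2.0], [3.0, 4.0]) == 11.0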
|
ed15537cee3c4f3daacdd395e7dd5c74b7b800bf
| 25,463
|
def insert_doc(repset, args_array):
"""Function: insert_doc
Description: Stub holder for mongo_db_data.insert_doc function.
Arguments:
(input) repset -> Replication set instance.
(input) args_array -> Array of command line options and values.
"""
status = True
if repset and args_array:
status = True
return status
|
862073a34051eea123bd7f457dd71e26eb5728f9
| 25,464
|
import urllib.request
import shutil
import os
import sys
def get_state_dict(version="human_v1", verbose=True):
"""
Download a pre-trained model if not already exists on local device.
:param version: Version of trained model to download [default: human_1]
:type version: str
:param verbose: Print model download status on stdout [default: True]
:type verbose: bool
:return: Path to state dictionary for pre-trained language model
:rtype: str
"""
state_dict_basename = f"dscript_{version}.pt"
state_dict_basedir = os.path.dirname(os.path.realpath(__file__))
state_dict_fullname = f"{state_dict_basedir}/{state_dict_basename}"
state_dict_url = f"http://cb.csail.mit.edu/cb/dscript/data/models/{state_dict_basename}"
if not os.path.exists(state_dict_fullname):
try:
if verbose: print(f"Downloading model {version} from {state_dict_url}...")
with urllib.request.urlopen(state_dict_url) as response, open(state_dict_fullname, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
except Exception as e:
print("Unable to download model - {}".format(e))
sys.exit(1)
return state_dict_fullname
|
bed5a2943f7420ddd7f990e88e00729724f70501
| 25,465
|
def display_plan(plan):
"""Print out the payment plan name and details from stripe API plan object"""
return (f"{plan.metadata.display_name} - ${plan.amount / 100:.2f} "
f"USD per {plan.interval}")
|
34831cc91f141a5254d76af793f0527a2cdee403
| 25,466
|
def r_sum_calc(im, x_f, y_f):
"""
    Sum the R (red) channel values over an x × y pixel region of the image.
"""
r_sum_f = 0
for i in range(x_f):
for j in range(y_f):
r, g, b = im.getpixel((i, j))
r_sum_f = r_sum_f + r
return r_sum_f
|
aec24d10db9b80365380e52d795481ea861c6eda
| 25,467
|
import requests
def tell_joke():
"""
Tell a joke to ease the PR tension!
"""
response = requests.get(
"https://official-joke-api.appspot.com/jokes/programming/random"
)
if response.status_code == 200:
joke = response.json()[0]
return "> %s\n *%s*\n😄️" % (joke["setup"], joke["punchline"])
return "I'm a little tired now for a joke, but hey, you're funny looking! 😄️"
|
95b65d6a7faeb48030b0b82b2109bdd0718248ee
| 25,468
|
import itertools
def _str_eval_append(eval, act, ctxt, *xs) :
"""Treats the args as lists and appends them."""
return itertools.chain.from_iterable(xs)
|
6da7bd03e3d788dbefaa320f19aacbbf862e6329
| 25,469
|
def world_coords_to_image_cad60(coords_3d):
"""
Formula given by Cornell 60 Dataset which seems to be wrong for a 640*480 image.
:param coords_3d:
:return:
"""
u = 0.0976862095248 * coords_3d[0] - 0.0006444357104 * coords_3d[1] + 0.0015715946682 * coords_3d[2] \
+ 156.8584456124928
v = 0.0002153447766 * coords_3d[0] - 0.1184874093530 * coords_3d[1] - 0.0022134485957 * coords_3d[2] \
+ 125.5357201011431
return int(round(u)), int(round(v))
|
8b75dfae6073270e466eefc9c36d91d837c244af
| 25,470
|
def input_float(prompt=''):
"""Ask for a human float input.
Args:
prompt (string): Text to prompt as input.
"""
while True:
try:
float_input = float(input(prompt))
except ValueError:
print('Please enter a float.\n')
continue
else:
break
return float_input
|
af7088a5bef456e7f2e3cd994b28665f15e2c9d2
| 25,472
|
def gen_random_3(fineness=10):
"""
For three variables, a Cartesian product enumeration with one
condition is equivalent to geometric construction by an equilateral
triangle to uniformly sample the composition space (create a uniform
grid).
"""
sqrt3 = 3**(1/2)
det = 2*sqrt3
xs = []
linspace = [x / fineness for x in range(fineness + 1)]
for d1 in linspace:
for d2 in linspace:
t1 = -sqrt3*(1-d1-2*d2) + sqrt3*(1-d1)
t1 /= det
t2 = sqrt3*(1-d1-2*d2) + sqrt3*(1-d1)
t2 /= det
assert round(t1, 3) == round(d2, 3)
assert round(t2, 3) == round(1 - d1 - d2, 3)
if 0 <= round(t1,3) <= 1 and 0 <= round(t2,3) <= 1:
ycoord = (1 - d1 - d2)*sqrt3
t3 = (ycoord * 2 / sqrt3) / 2
assert round(t3, 3) == round(1 - d1 - d2, 3)
xs.append((d1, d2, round(1 - d1 - d2, 3)))
return xs
|
7f9550d3e5df13b954b62eb2ec8c2597c4914feb
| 25,474
|
from typing import Any
from pathlib import Path
def sanitize_path(v: Any) -> Any:
"""Sanitize path.
Parameters:
v : Maybe a Path. If ``v`` is a ``Path`` object, it is converted to a string.
Returns:
The sanitized object.
"""
if isinstance(v, Path):
return str(v)
else:
return v
|
581c235cdf3c9099103bf5820b578cf25b9392ca
| 25,476
|
def remove_key(vglist,key):
"""
Accepts a list of dictionaries (vglist) and a list of keys.
    Returns a list of dictionaries with each of the specified keys removed from every element of the original list.
"""
new_list = []
for row in vglist:
for item in key:
row.pop(item,None)
new_list.append(row)
return new_list
|
4bb1410b21829478851b68fce36158a9080f016f
| 25,477
|
def keys_to_camel_case(value):
"""
Transform keys from snake to camel case (does nothing if no snakes are found)
:param value: value to transform
:return: transformed value
"""
def str_to_camel_case(snake_str):
components = snake_str.split("_")
return components[0] + "".join(x.title() for x in components[1:])
if value is None:
return None
if isinstance(value, (list, set)):
return list(map(keys_to_camel_case, value))
if isinstance(value, dict):
return dict(
(
keys_to_camel_case(k),
keys_to_camel_case(v) if isinstance(v, (list, dict)) else v,
)
for (k, v) in list(value.items())
)
return str_to_camel_case(value)
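# Behavior sketch: keys are camel-cased recursively, scalar dict values are
# left untouched, and list elements are transformed.
assert keys_to_camel_case({"first_name": "Ada"}) == {"firstName": "Ada"}
assert keys_to_camel_case(["snake_case", "x"]) == ["snakeCase", "x"]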
|
cefb6f4cb75e3d39ae8742b44239ee5a3f2b7b87
| 25,478
|
def home():
""" Route to the homepage. """
return 'Hello to the homepage!'
|
debfc5f5a663412ec499e44f582e45ef9829c17d
| 25,480
|
def prune_completions(prefix, all_test_names):
"""Filter returning only items that will complete the current prefix."""
completions = set()
for test_name in all_test_names:
if test_name.startswith(prefix):
next_break = test_name.find('.', len(prefix) + 1)
if next_break >= 0:
# Add only enough to complete this level; don't drown
# the user with all the possible completions from
# here.
completions.add(test_name[:next_break])
else:
# If there are no more levels, then add the full name
# of the leaf.
completions.add(test_name)
return completions
|
ad75ecc065dadddfb277329320ae888cf1e3535a
| 25,481
|
import click
def validate_jaccard(ctx, param, value):
"""Ensure Jaccard threshold is between 0 and 1"""
if value is None:
return value
try:
jaccard = float(value)
assert jaccard <= 1
assert jaccard >= 0
return jaccard
except (ValueError, AssertionError):
raise click.BadParameter(
"--jaccard-threshold needs to be a number"
" between 0 and 1, but {} was provided".format(value)
)
|
78dd5ca99f4fc2b5cdc50b3eaea6adb33a789c0a
| 25,482
|
import re
import json
def _DeviceSpecsToTFCRunTarget(device_specs):
"""Convert device specs to TFC run target format."""
groups = []
for spec in device_specs:
attrs = []
for match in re.finditer(r'([^\s:]+):(\S+)', spec):
key = match.group(1)
value = match.group(2)
attrs.append({'name': key, 'value': value})
groups.append({'run_targets': [{'name': '*', 'device_attributes': attrs}]})
obj = {'host': {'groups': groups}}
return json.dumps(obj)
|
7d983f591237e015b8c4cc5ebd8eb17d5e934425
| 25,483
|
def save_v1_timecodes_to_file(filepath, timecodes, videos_fps, default_fps=10 ** 10):
"""
:param filepath: path of the file for saving
:param timecodes: timecodes in format
[[start0, end0, fps0], [start1, end1, fps1], ... [start_i, end_i, fps_i]]
:param videos_fps: float fps of video
:param default_fps: fps of uncovered pieces
    :return: the (closed) file object to which the timecodes were saved
"""
with open(filepath, "w") as file:
file.write("# timecode format v1\n")
file.write(f"assume {default_fps}\n")
for elem in timecodes:
elem = [int(elem[0] * videos_fps), int(elem[1] * videos_fps), elem[2]]
elem = [str(n) for n in elem]
file.write(",".join(elem) + "\n")
return file
|
c873f9d0d4f2e4cdbfa83e2c5c2d5b48d3ca2713
| 25,488
|
import logging
import sys
def _check_handler(h, lev=None, loc=None):
"""
Check properties of a logging handler.
:param logging.StreamHandler | logging.FileHandler h: handler to inspect
:param str | int lev: expected handler level
:param str | file loc: log output destination
:return list[str]: any failure messages
"""
fails = []
if lev is not None:
if isinstance(lev, str):
lev = getattr(logging, lev)
elif not isinstance(lev, int):
raise TypeError("Expected logging level is neither string nor int: "
"{} ({})".format(lev, type(lev)))
if h.level != lev:
fails.append(
"Wrong level (expected {} but got {})".format(lev, h.level))
if loc is not None:
if loc in [sys.stderr, sys.stdout]:
exp_type = logging.StreamHandler
obs_loc = h.stream
exp_name = loc.name
obs_name = h.stream.name
if not isinstance(h, logging.StreamHandler):
fails.append("Expected a stream handler but found {}".
format(type(h)))
elif h.stream != loc:
fails.append("Unexpected handler location; expected {} but "
"found {}".format(loc.name, h.stream.name))
elif isinstance(loc, str):
exp_type = logging.FileHandler
obs_loc = h.stream.name
exp_name = loc
obs_name = h.stream.name
else:
raise TypeError(
"Handler location to check is neither standard stream nor "
"filepath: {} ({})".format(loc, type(loc)))
        if not isinstance(h, exp_type):
            fails.append("Expected a handler of type {} but found {}".format(
                exp_type.__name__, type(h)))
if loc != obs_loc:
fails.append("Unexpected handler location; expected {} but found {}".
format(exp_name, obs_name))
return fails
|
d9caaea0a12404ae8c89a0023f251b2411660a53
| 25,489
|
def _recursive_walk(obj, func):
"""
    Calls a given method on each of the given objects and collects the results
    Parameters
    ----------
    obj : iterable
        Objects on which the named method is called
    func : string
        Name of the method / function to be called
Notes
-----
- This function is for internal consumption.
"""
result = []
for i in list(obj):
m_call = getattr(i, func)
result.append(m_call())
return result
|
43feeeec969494791f57926ba55093bfbd01de81
| 25,491
|
def get_circular_polarization_rate(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
    II = (beam.Jss + beam.Jpp)
    # record where the intensity is non-positive *before* patching II,
    # otherwise the mask used to zero cpr below would always be empty
    is_zero = II <= 0
    II[is_zero] = 1.
    cpr = 2. * beam.Jsp.imag / II
    cpr[is_zero] = 0.
return cpr
|
a3852be251a69312603d111ce7564237715aa6d3
| 25,492
|
def get_cigar(gappy_q, gappy_r):
"""
:param gappy_q: gapped query sequence
:param gappy_r: gapped reference sequence
:return: returns a tuple, including a list of [operation, length] CIGAR, and the [start,end) 0-based coordinates of
the alignment.
"""
assert len(gappy_q) == len(gappy_r)
cigar = []
for q, r in zip(gappy_q, gappy_r):
if q == "-": # deletion
if cigar and cigar[-1][0] == 2:
cigar[-1][1] += 1
else:
cigar.append([2, 1])
elif r == "-": # insertion
if cigar and cigar[-1][0] == 1:
cigar[-1][1] += 1
else:
cigar.append([1, 1])
else:
if cigar and cigar[-1][0] == 0:
cigar[-1][1] += 1
else:
cigar.append([0, 1])
    start, end = 0, 0
    if cigar[0][0] == 2:
        # a leading deletion shifts the alignment start on the reference
        start = cigar[0][1]
        cigar.pop(0)
if cigar[-1][0] == 2:
cigar.pop()
end = start + \
sum(length for op, length in cigar if op == 0) + \
sum(length for op, length in cigar if op == 2)
return cigar, start, end
|
fb5ad4324bfbfbce8da2bc0110365e7e60ecaa2e
| 25,494
|
def blocks(text):
"""Split the text into blocks deliminated by a blank line."""
return text.split("\n\n")
|
bda99561d35b729203fb7fe945c23147c62ebc24
| 25,495
|
import argparse
def parse_cmd():
"""
Parses command line flags that control how pinterest will be scraped.
Start the script with the '-h' option to read about all the arguments.
:returns a namespace populated with the arguments supplied (or default arguments, if given).
"""
parser = argparse.ArgumentParser(description="""Download a pinterest board or tag page. When downloading a tag page,
and no maximal number of downloads is provided, stop the script with CTRL+C.""")
# Required arguments
parser.add_argument(dest="link", help="Link to the pinterest page you want to download.")
parser.add_argument(dest="dest_folder",
help="""Folder into which the board will be downloaded.
Folder with board name is automatically created or found inside this folder, if it already exists.
If this folder is named like the page to be downloaded, everything will be directly in this folder.""")
# Optional arguments
parser.add_argument("-n", "--name", default=None, required=False, dest="board_name",
help="""The name for the downloaded page. If not given, will try to extract board name from pinterest.
This will also be the name for the folder in which the images are stored.""")
parser.add_argument("-c", "--count", default=None, type=int, required=False, dest="num_pins",
help="""Download only the first 'num_pins' pins found on the page.
If bigger than the number of pins on the board, all pins in the board will be downloaded.
The default is to download all pins. If you do not specify this option on a tag page, where there are more or less infinite pins,
just stop the script with CTRL+C.""")
parser.add_argument("-j", "--threads", default=4, type=int, required=False, dest="nr_threads",
help="Number of threads that download images in parallel. Defaults to 4.")
parser.add_argument("-r", "--resolution", default="0x0", required=False, dest="min_resolution",
help="""Minimal resolution for a download image. Input as 'WIDTHxHEIGHT'.""")
parser.add_argument("-m", "--mode", default="individual", required=False, choices=["individual", "area"],
dest="mode",
help="""Pick how the resolution limit is treated:
'individual': Both image dimensions must be bigger than the given resolution, i.e x >= WIDTH and y >= HEIGHT.
'area': The area of the image must be bigger than the provided resolution, i.e. x*y >= WIDTH * HEIGHT.""")
parser.add_argument("-s", "--skip-limit", default=float("inf"), type=int, required=False, dest="skip_limit",
help="""Abort the download after so many pins have been skipped. A pin is skipped if it was already present in the download folder.
This way you can download new pins that have been added after your last download. Defaults to infinite.
You should not set this to 1, but rather something like 10,
because the page is not scraped exactly in the same order as the pins are added.""")
parser.add_argument("-t", "--timeout", default=15, type=int, required=False, dest="timeout",
help="Set the timeout in seconds after which loading a pinterest board will be aborted, if unsuccessfull. Defaults to 15 seconds.")
parser.add_argument("-v", "--verbose", default=False, action="store_true", dest="verbose", required=False,
help="Display more detailed output and progress reports.")
args = parser.parse_args()
return args
|
7d5947282ef7b6ab1418a4112b93dec16a5094b8
| 25,497
|
def get_lang_start_ind(doc_wordlens, doc_names):
"""Takes in a Pandas DataFrame containing the word length histograms
for ALL documents, doc_wordlens, and a dictionary that maps each column
name (keys) to a string that describes each document (values). Returns
a list of each unique language in the document description, doc_langs,
and the corresponding starting row index for each language, doc_lrsinds.
"""
doc_langs = []
doc_lrsinds = []
for ci, cn in enumerate(doc_wordlens.columns):
lang = doc_names[cn].split("_")[0]
if lang not in doc_langs:
doc_lrsinds.append(ci)
doc_langs.append(lang)
return (doc_langs, doc_lrsinds)
|
24cdee2c7527f25f50dc4feb75f3958723d6684b
| 25,499
|
import configparser
def get_signatures(config_parser):
"""Lists the configured signatures
:return: list of signatures
"""
if not isinstance(config_parser, configparser.ConfigParser):
raise TypeError("Wrong arguments given")
options = config_parser.sections()
return options
|
00e6e1d1126f975f5d9850d2d88eaa2a7dad3bda
| 25,500
|
def convert_string(x):
"""
    Convert the string to lower case and strip all characters outside [a-z0-9-_ ]
:param str x: the string to convert
:return: the converted string
:rtype: str
"""
# we define the things to keep this way, just for clarity and in case we want to add other things.
wanted = set()
# lower case letters
wanted.update(set(range(97, 123)))
# numbers
wanted.update(set(range(48, 58)))
# - and _
wanted.update({45, 95})
# space
wanted.add(32)
s = ''
for c in x:
if ord(c) in wanted:
s += c
elif 65 <= ord(c) <= 90:
s += chr(ord(c) + 32)
return s
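# Sketch: upper-case letters fold to lower case; anything outside
# [a-z0-9-_ ] is dropped.
assert convert_string('Hello, World!') == 'hello world'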
|
e43a5da3815aac5a59bbb91a97727e257e831f14
| 25,504
|
def string_to_list(s):
"""
Convert argument string (of potentially a list of values) to a list of strings
:param s: str
:return: list[str]
"""
if s is not None:
s = [c.strip() for c in s.split(',')]
return s
|
a5fd5b7349f3450805157e2de88a1a1b15974390
| 25,506
|
async def fetch_keywords(soup):
"""Get the keywords"""
if soup.find_all("keywords"):
keywords = [term.string.rstrip() for keywords in soup.find_all("keywords") for term in keywords if
(term.string.rstrip())]
return keywords
else:
return []
|
915ab2dc4c0232582b5f3a204050e2badea7c134
| 25,508
|
def restructure_aggregate_data(data_entry):
"""
Restructures data from aggregate JSON feed
Returns:\n
restructured data entry
"""
restructured_data = data_entry['data'][0]
data_entry['data'] = restructured_data
data_entry['uuid'] = data_entry['data']['*meta-instance-id*']
return data_entry
|
f9820daeab5fe7c5093baad8078f70058530cf3f
| 25,511
|
import site
import sysconfig
def create_pathlist():
"""
Create list of Python library paths to be skipped from module
reloading and Pdb steps.
"""
# Get standard installation paths
try:
paths = sysconfig.get_paths()
standard_paths = [paths['stdlib'],
paths['purelib'],
paths['scripts'],
paths['data']]
except Exception:
standard_paths = []
# Get user installation path
# See spyder-ide/spyder#8776
try:
if getattr(site, 'getusersitepackages', False):
# Virtualenvs don't have this function but
# conda envs do
user_path = [site.getusersitepackages()]
elif getattr(site, 'USER_SITE', False):
# However, it seems virtualenvs have this
# constant
user_path = [site.USER_SITE]
else:
user_path = []
except Exception:
user_path = []
return standard_paths + user_path
|
bb75f5f19f946d8ac3760f2f2076785082a9a114
| 25,512
|
def image_name() -> str:
""" This function asks the user the image name """
img_name = input("Image name: ")
if img_name == "":
img_name = "new_image"
return img_name
|
4b2be7c5a1d2498e3d66a9f32a6a7acd0c926708
| 25,514
|
def change_text_properties(text):
"""
:param text: DrawingText
:return:
"""
text_properties = text.text_properties
text_properties.font_name = 'SSS1'
text_properties.font_size = 3.5
return text
|
2676f81d2673314162e89b1e10007fd7ed525c00
| 25,515
|
def _freeze_layers(model):
"""Freeze layers in the model
Args:
model: Model
"""
for layer in model.layers:
layer.trainable = False
return model
|
799a135ba47b11e32e76d9f7a124176f35eb87cd
| 25,516
|
def normalize_repr(v):
"""
Return dictionary repr sorted by keys, leave others unchanged
>>> normalize_repr({1:2,3:4,5:6,7:8})
'{1: 2, 3: 4, 5: 6, 7: 8}'
>>> normalize_repr('foo')
"'foo'"
"""
if isinstance(v, dict):
items = [(repr(k), repr(v)) for k, v in list(v.items())]
items.sort()
return "{" + ", ".join([
"%s: %s" % itm for itm in items]) + "}"
return repr(v)
|
aa6f5576ab5478013a850bc69e808a9a5f6958e1
| 25,517
|
def polylineAppendCheck(currentVL, nextVL):
"""Given two polyline vertex lists, append them if needed.
Polylines to be appended will have the last coordinate of
the first vertex list be the same as the first coordinate of the
second vertex list. When appending, we need to eliminate
one of these coordinates.
Args:
currentVL (list): First of two lists of vertexes to check.
nextVL (list): Second of two lists of vertexes to check.
Returns:
tuple: Tuple:
1. ``True`` if the vertexes were appended, otherwise ``False``.
2. New vertex list if appended (otherwise ignored).
"""
wasAppended = False
appendedList = []
# In a polyline, the last coord of the first list will be the
# first coord of the new list
last = currentVL[-1]
first = nextVL[0]
if (last[0] == first[0]) and (last[1] == first[1]) and (last[2] == first[2]):
# It is to be appended
wasAppended = True
appendedList = currentVL[:-1] + nextVL
return (wasAppended, appendedList)
|
b668114d825f393e1043d402ff456ab98981966e
| 25,519
|
def get_teacher_feature(model, batch, _type, device):
"""
model: teacher model
batch: input_ids, input_segment_ids, input_mask if unsup else add label_ids
"""
if _type == 'unsup':
input_ids, segment_ids, input_mask = batch
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
logits, transformer_feature, attention = model(input_ids, segment_ids, input_mask)
return logits, transformer_feature, attention
else:
input_ids, segment_ids, input_mask, _ = batch
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
# label_ids = label_ids.to(device)
logits, transformer_feature, attention = model(input_ids, segment_ids, input_mask)
return logits, transformer_feature, attention
|
dc01231695ac4bf2832a98f7de63c488e0512685
| 25,521
|
from typing import Optional
import torch
def calc_trace_bwd(k_mn: Optional[torch.Tensor],
k_mn_zy: Optional[torch.Tensor],
solve2: torch.Tensor,
kmm: torch.Tensor,
X: Optional[torch.Tensor],
t: Optional[int],
trace_type: str):
"""Nystrom kernel trace backward pass"""
if trace_type == "ste":
assert k_mn_zy is not None and t is not None, "Incorrect arguments to trace_bwd"
return -(
2 * (k_mn_zy[:, :t].mul(solve2)).sum(0).mean() -
(solve2 * (kmm @ solve2)).sum(0).mean()
)
elif trace_type == "direct":
assert k_mn is not None, "Incorrect arguments to trace_bwd"
return -(
2 * (k_mn.mul(solve2)).sum() -
(solve2 * (kmm @ solve2)).sum()
)
elif trace_type == "fast":
assert k_mn_zy is not None and t is not None and X is not None, "Incorrect arguments to trace_bwd"
k_subs = k_mn_zy
norm = X.shape[0] / t
return -norm * (
2 * k_subs.mul(solve2).sum() -
(solve2 * (kmm @ solve2)).sum()
)
|
fe0c89794549cb01329d9b05d9e3261b104d9162
| 25,522
|
def binarySearch(arr, val):
"""
Searches through an array for search value and returns index
Input <--- array & search value
Output <--- index of search value if found. If not returns -1
"""
    if len(arr) < 1:
        return -1
    first, last = 0, len(arr) - 1
    # classic binary search: the window always shrinks, and a match is
    # reported even when the window has converged to a single element
    while first <= last:
        middle = (first + last) // 2
        if arr[middle] == val:
            return middle
        if arr[middle] > val:
            last = middle - 1
        else:
            first = middle + 1
    return -1
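# Quick checks of the search behavior:
assert binarySearch([1, 3, 5, 7], 5) == 2
assert binarySearch([1, 3], 1) == 0
assert binarySearch([], 9) == -1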
|
e04819f4127bd63d316f4795e8772fcd8f025fae
| 25,523
|
def _HasReservation(topic):
"""Returns whether the topic has a reservation set."""
if topic.reservationConfig is None:
return False
return bool(topic.reservationConfig.throughputReservation)
|
ba10b5a0b2899a66a708d4fe746300b77e31235b
| 25,524
|
def density_from_pressure(temperature, pressure, RH):
"""returns atmospheric density, (kg/m3)
for a single point given:
Pressure (Pascals, multiply mb by 100 to get Pascals)
temperature ( deg K)
RH (from 0 to 1 as fraction) """
# R = specific gas constant , J/(kg*degK) = 287.05 for dry air
Rd = 287.05
# http://www.baranidesign.com/air-density/air-density.htm
# http://wahiduddin.net/calc/density_altitude.htm
# Evaporation into the Atmosphere, Wilfried Brutsaert, p37
# saturation vapor pressure is a polynomial developed by Herman Wobus
e_so = 6.1078
c0 = 0.99999683
c1 = -0.90826951e-2
c2 = 0.78736169e-4
c3 = -0.61117958e-6
c4 = 0.43884187e-8
c5 = -0.29883885e-10
c6 = 0.21874425e-12
c7 = -0.17892321e-14
c8 = 0.11112018e-16
c9 = -0.30994571e-19
    # the Wobus polynomial expects temperature in degrees Celsius
    t_c = temperature - 273.15
    p = (c0 + t_c*(
        c1 + t_c*(
        c2 + t_c*(
        c3 + t_c*(
        c4 + t_c*(
        c5 + t_c*(
        c6 + t_c*(
        c7 + t_c*(
        c8 + t_c*(
        c9))))))))))
    sat_vp = e_so / p**8  # saturation vapor pressure, mb (hPa)
    # convert mb -> Pa so that Pv and pressure share units below
    Pv = sat_vp * RH * 100.0
density = (pressure / (Rd * temperature)) * (1 - (0.378 * Pv / pressure))
return density
|
d5df5d06b1c8d8b6423d12b1325f11c7fd665af0
| 25,525
|
def has_glob_metachar(s):
"""We also check for backreferences."""
return '*' in s or '?' in s or '[' in s or '(?P=' in s
|
495ac3d82e72ab932ec8f3c607f29618425d3fd5
| 25,526
|
def build_srcdict(gta, prop):
"""Build a dictionary that maps from source name to the value of a source property
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
prop : str
The name of the property we are mapping
Returns
-------
odict : dict
Dictionary that maps from source name to the value of the specified property
"""
o = {}
for s in gta.roi.sources:
o[s.name] = s[prop]
return o
|
daa069bde5642f947f212d50173db5b99be2eed4
| 25,527
|
import re
def cqlstr(string):
"""Makes a string safe to use in Cassandra CQL commands
Args:
string: The string to use in CQL
Returns:
str: A safe string replacement
"""
return re.sub('[-:.]', '_', string)
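# Example: dashes, colons and dots all become underscores.
assert cqlstr('my-keyspace.table:col') == 'my_keyspace_table_col'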
|
e505d855e374109edee0a1d5e76ff0cdeab64581
| 25,529
|
import os
def make_rel_path(path, basedir):
"""
If a basedir is specified, ignore everything outside of it and drop the
basedir prefix from a file path.
"""
if basedir is not None:
if basedir == os.path.commonpath([basedir, path]):
return path[len(basedir) + 1:]
else:
return None
else:
return path
|
875eec047ab5a54f4a7334f74b88424ab1d26a55
| 25,530
|
def _qualname(obj):
"""Get the fully-qualified name of an object (including module)."""
return obj.__module__ + '.' + obj.__qualname__
|
34c251612104afff79b2b6cd3580a4a939cd01d2
| 25,531
|
import subprocess
def get_snap_version(snap, fatal=True):
"""Determine version for an installed snap.
    :param snap: str Snap name to lookup (ie. in snap list)
:param fatal: bool Raise exception if snap not installed
:returns: str version of snap installed
"""
cmd = ['snap', 'list', snap]
try:
out = subprocess.check_output(cmd).decode('UTF-8')
except subprocess.CalledProcessError:
if not fatal:
return None
# the snap is unknown to snapd
e = ('Could not determine version of snap: {} as it\'s'
' not installed'.format(snap))
raise Exception(e)
lines = out.splitlines()
for line in lines:
if snap in line:
# Second item in list is version or a codename
return line.split()[1]
return None
|
9e9e943ce43c0c6d7ca5b9f8958ecb7c4624d4e1
| 25,532
|